From 504f73f7b3724c885317b6b236620e9048f50c0a Mon Sep 17 00:00:00 2001 From: Alex Bennée Date: Fri, 28 Jun 2019 20:54:11 +0100 Subject: trace: add mmu_index to mem_info We are going to re-use mem_info later for plugins and will need to track the mmu_idx for softmmu code. Signed-off-by: Alex Bennée --- include/exec/cpu_ldst_template.h | 15 ++++++--------- include/exec/cpu_ldst_useronly_template.h | 6 +++--- 2 files changed, 9 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h index af7e0b49f2..5750a26b9e 100644 --- a/include/exec/cpu_ldst_template.h +++ b/include/exec/cpu_ldst_template.h @@ -84,17 +84,16 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, CPUTLBEntry *entry; RES_TYPE res; target_ulong addr; - int mmu_idx; + int mmu_idx = CPU_MMU_INDEX; TCGMemOpIdx oi; #if !defined(SOFTMMU_CODE_ACCESS) trace_guest_mem_before_exec( env_cpu(env), ptr, - trace_mem_build_info(SHIFT, false, MO_TE, false)); + trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx)); #endif addr = ptr; - mmu_idx = CPU_MMU_INDEX; entry = tlb_entry(env, mmu_idx, addr); if (unlikely(entry->ADDR_READ != (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { @@ -123,17 +122,16 @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, CPUTLBEntry *entry; int res; target_ulong addr; - int mmu_idx; + int mmu_idx = CPU_MMU_INDEX; TCGMemOpIdx oi; #if !defined(SOFTMMU_CODE_ACCESS) trace_guest_mem_before_exec( env_cpu(env), ptr, - trace_mem_build_info(SHIFT, true, MO_TE, false)); + trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx)); #endif addr = ptr; - mmu_idx = CPU_MMU_INDEX; entry = tlb_entry(env, mmu_idx, addr); if (unlikely(entry->ADDR_READ != (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { @@ -165,17 +163,16 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, { CPUTLBEntry *entry; target_ulong addr; - int mmu_idx; + int mmu_idx = CPU_MMU_INDEX; TCGMemOpIdx oi; #if !defined(SOFTMMU_CODE_ACCESS) trace_guest_mem_before_exec( env_cpu(env), ptr, - trace_mem_build_info(SHIFT, false, MO_TE, true)); + trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx)); #endif addr = ptr; - mmu_idx = CPU_MMU_INDEX; entry = tlb_entry(env, mmu_idx, addr); if (unlikely(tlb_addr_write(entry) != (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))) { diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h index 2378f2958c..93ad532f94 100644 --- a/include/exec/cpu_ldst_useronly_template.h +++ b/include/exec/cpu_ldst_useronly_template.h @@ -73,7 +73,7 @@ glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr) #else trace_guest_mem_before_exec( env_cpu(env), ptr, - trace_mem_build_info(SHIFT, false, MO_TE, false)); + trace_mem_build_info(SHIFT, false, MO_TE, false, MMU_USER_IDX)); return glue(glue(ld, USUFFIX), _p)(g2h(ptr)); #endif } @@ -105,7 +105,7 @@ glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr) #else trace_guest_mem_before_exec( env_cpu(env), ptr, - trace_mem_build_info(SHIFT, true, MO_TE, false)); + trace_mem_build_info(SHIFT, true, MO_TE, false, MMU_USER_IDX)); return glue(glue(lds, SUFFIX), _p)(g2h(ptr)); #endif } @@ -132,7 +132,7 @@ glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr, { trace_guest_mem_before_exec( env_cpu(env), ptr, - trace_mem_build_info(SHIFT, false, MO_TE, true)); + trace_mem_build_info(SHIFT, false, MO_TE, true, MMU_USER_IDX)); glue(glue(st, SUFFIX), _p)(g2h(ptr), v); } -- cgit 
v1.2.3-55-g7522 From cfbc3c6083dbdd0fdd9cc98965182e79431d3c63 Mon Sep 17 00:00:00 2001 From: Emilio G. Cota Date: Mon, 26 Nov 2018 17:14:43 -0500 Subject: cpu: introduce cpu_in_exclusive_context() Suggested-by: Alex Bennée Reviewed-by: Alex Bennée Signed-off-by: Emilio G. Cota [AJB: moved inside start/end_exclusive fns + cleanup] Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- accel/tcg/cpu-exec.c | 5 +---- cpus-common.c | 4 ++++ include/hw/core/cpu.h | 13 +++++++++++++ 3 files changed, 18 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index 48272c781b..81c33d6475 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -238,8 +238,6 @@ void cpu_exec_step_atomic(CPUState *cpu) uint32_t flags; uint32_t cflags = 1; uint32_t cf_mask = cflags & CF_HASH_MASK; - /* volatile because we modify it between setjmp and longjmp */ - volatile bool in_exclusive_region = false; if (sigsetjmp(cpu->jmp_env, 0) == 0) { tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask); @@ -253,7 +251,6 @@ void cpu_exec_step_atomic(CPUState *cpu) /* Since we got here, we know that parallel_cpus must be true. */ parallel_cpus = false; - in_exclusive_region = true; cc->cpu_exec_enter(cpu); /* execute the generated code */ trace_exec_tb(tb, pc); @@ -273,7 +270,7 @@ void cpu_exec_step_atomic(CPUState *cpu) assert_no_pages_locked(); } - if (in_exclusive_region) { + if (cpu_in_exclusive_context(cpu)) { /* We might longjump out of either the codegen or the * execution, so must make sure we only end the exclusive * region if we started it. diff --git a/cpus-common.c b/cpus-common.c index af3385a296..eaf590cb38 100644 --- a/cpus-common.c +++ b/cpus-common.c @@ -200,11 +200,15 @@ void start_exclusive(void) * section until end_exclusive resets pending_cpus to 0. */ qemu_mutex_unlock(&qemu_cpu_list_lock); + + current_cpu->in_exclusive_context = true; } /* Finish an exclusive operation. */ void end_exclusive(void) { + current_cpu->in_exclusive_context = false; + qemu_mutex_lock(&qemu_cpu_list_lock); atomic_set(&pending_cpus, 0); qemu_cond_broadcast(&exclusive_resume); diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 031f587e51..07f2ab0590 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -372,6 +372,7 @@ struct CPUState { bool unplug; bool crash_occurred; bool exit_request; + bool in_exclusive_context; uint32_t cflags_next_tb; /* updates protected by BQL */ uint32_t interrupt_request; @@ -783,6 +784,18 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data) */ void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data); +/** + * cpu_in_exclusive_context() + * @cpu: The vCPU to check + * + * Returns true if @cpu is an exclusive context, for example running + * something which has previously been queued via async_safe_run_on_cpu(). + */ +static inline bool cpu_in_exclusive_context(const CPUState *cpu) +{ + return cpu->in_exclusive_context; +} + /** * qemu_get_cpu: * @index: The CPUState@cpu_index value of the CPU to obtain. -- cgit v1.2.3-55-g7522 From 975c455346f9aaabe044252a3960be1a2b4ed27b Mon Sep 17 00:00:00 2001 From: Emilio G. Cota Date: Sat, 20 Oct 2018 20:05:49 -0400 Subject: plugin: add user-facing API Add the API first to ease review. Signed-off-by: Emilio G. 
Cota Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- include/qemu/qemu-plugin.h | 351 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 351 insertions(+) create mode 100644 include/qemu/qemu-plugin.h (limited to 'include') diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h new file mode 100644 index 0000000000..d9c1ca3b4c --- /dev/null +++ b/include/qemu/qemu-plugin.h @@ -0,0 +1,351 @@ +/* + * Copyright (C) 2017, Emilio G. Cota + * Copyright (C) 2019, Linaro + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef QEMU_PLUGIN_API_H +#define QEMU_PLUGIN_API_H + +#include +#include + +/* + * For best performance, build the plugin with -fvisibility=hidden so that + * QEMU_PLUGIN_LOCAL is implicit. Then, just mark qemu_plugin_install with + * QEMU_PLUGIN_EXPORT. For more info, see + * https://gcc.gnu.org/wiki/Visibility + */ +#if defined _WIN32 || defined __CYGWIN__ + #ifdef BUILDING_DLL + #define QEMU_PLUGIN_EXPORT __declspec(dllexport) + #else + #define QEMU_PLUGIN_EXPORT __declspec(dllimport) + #endif + #define QEMU_PLUGIN_LOCAL +#else + #if __GNUC__ >= 4 + #define QEMU_PLUGIN_EXPORT __attribute__((visibility("default"))) + #define QEMU_PLUGIN_LOCAL __attribute__((visibility("hidden"))) + #else + #define QEMU_PLUGIN_EXPORT + #define QEMU_PLUGIN_LOCAL + #endif +#endif + +typedef uint64_t qemu_plugin_id_t; + +/** + * qemu_plugin_install() - Install a plugin + * @id: this plugin's opaque ID + * @argc: number of arguments + * @argv: array of arguments (@argc elements) + * + * All plugins must export this symbol. + * + * Note: Calling qemu_plugin_uninstall() from this function is a bug. To raise + * an error during install, return !0. + * + * Note: @argv remains valid throughout the lifetime of the loaded plugin. + */ +QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, int argc, + char **argv); + +/* + * Prototypes for the various callback styles we will be registering + * in the following functions. + */ +typedef void (*qemu_plugin_simple_cb_t)(qemu_plugin_id_t id); + +typedef void (*qemu_plugin_udata_cb_t)(qemu_plugin_id_t id, void *userdata); + +typedef void (*qemu_plugin_vcpu_simple_cb_t)(qemu_plugin_id_t id, + unsigned int vcpu_index); + +typedef void (*qemu_plugin_vcpu_udata_cb_t)(unsigned int vcpu_index, + void *userdata); + +/** + * qemu_plugin_uninstall() - Uninstall a plugin + * @id: this plugin's opaque ID + * @cb: callback to be called once the plugin has been removed + * + * Do NOT assume that the plugin has been uninstalled once this function + * returns. Plugins are uninstalled asynchronously, and therefore the given + * plugin receives callbacks until @cb is called. + * + * Note: Calling this function from qemu_plugin_install() is a bug. + */ +void qemu_plugin_uninstall(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb); + +/** + * qemu_plugin_reset() - Reset a plugin + * @id: this plugin's opaque ID + * @cb: callback to be called once the plugin has been reset + * + * Unregisters all callbacks for the plugin given by @id. + * + * Do NOT assume that the plugin has been reset once this function returns. + * Plugins are reset asynchronously, and therefore the given plugin receives + * callbacks until @cb is called. 
+ */ +void qemu_plugin_reset(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb); + +/** + * qemu_plugin_register_vcpu_init_cb() - register a vCPU initialization callback + * @id: plugin ID + * @cb: callback function + * + * The @cb function is called every time a vCPU is initialized. + * + * See also: qemu_plugin_register_vcpu_exit_cb() + */ +void qemu_plugin_register_vcpu_init_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_simple_cb_t cb); + +/** + * qemu_plugin_register_vcpu_exit_cb() - register a vCPU exit callback + * @id: plugin ID + * @cb: callback function + * + * The @cb function is called every time a vCPU exits. + * + * See also: qemu_plugin_register_vcpu_init_cb() + */ +void qemu_plugin_register_vcpu_exit_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_simple_cb_t cb); + +/** + * qemu_plugin_register_vcpu_idle_cb() - register a vCPU idle callback + * @id: plugin ID + * @cb: callback function + * + * The @cb function is called every time a vCPU idles. + */ +void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_simple_cb_t cb); + +/** + * qemu_plugin_register_vcpu_resume_cb() - register a vCPU resume callback + * @id: plugin ID + * @cb: callback function + * + * The @cb function is called every time a vCPU resumes execution. + */ +void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_simple_cb_t cb); + +/* + * Opaque types that the plugin is given during the translation and + * instrumentation phase. + */ +struct qemu_plugin_tb; +struct qemu_plugin_insn; + +enum qemu_plugin_cb_flags { + QEMU_PLUGIN_CB_NO_REGS, /* callback does not access the CPU's regs */ + QEMU_PLUGIN_CB_R_REGS, /* callback reads the CPU's regs */ + QEMU_PLUGIN_CB_RW_REGS, /* callback reads and writes the CPU's regs */ +}; + +enum qemu_plugin_mem_rw { + QEMU_PLUGIN_MEM_R = 1, + QEMU_PLUGIN_MEM_W, + QEMU_PLUGIN_MEM_RW, +}; + +/** + * qemu_plugin_register_vcpu_tb_trans_cb() - register a translate cb + * @id: plugin ID + * @cb: callback function + * + * The @cb function is called every time a translation occurs. The @cb + * function is passed an opaque qemu_plugin_type which it can query + * for additional information including the list of translated + * instructions. At this point the plugin can register further + * callbacks to be triggered when the block or individual instruction + * executes. + */ +typedef void (*qemu_plugin_vcpu_tb_trans_cb_t)(qemu_plugin_id_t id, + struct qemu_plugin_tb *tb); + +void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_tb_trans_cb_t cb); + +/** + * qemu_plugin_register_vcpu_tb_trans_exec_cb() - register execution callback + * @tb: the opaque qemu_plugin_tb handle for the translation + * @cb: callback function + * @flags: does the plugin read or write the CPU's registers? + * @userdata: any plugin data to pass to the @cb? + * + * The @cb function is called every time a translated unit executes. + */ +void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb, + qemu_plugin_vcpu_udata_cb_t cb, + enum qemu_plugin_cb_flags flags, + void *userdata); + +enum qemu_plugin_op { + QEMU_PLUGIN_INLINE_ADD_U64, +}; + +/** + * qemu_plugin_register_vcpu_tb_trans_exec_inline() - execution inline op + * @tb: the opaque qemu_plugin_tb handle for the translation + * @op: the type of qemu_plugin_op (e.g. ADD_U64) + * @ptr: the target memory location for the op + * @imm: the op data (e.g. 1) + * + * Insert an inline op to every time a translated unit executes. 
+ * Useful if you just want to increment a single counter somewhere in + * memory. + */ +void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb, + enum qemu_plugin_op op, + void *ptr, uint64_t imm); + +/** + * qemu_plugin_register_vcpu_insn_exec_cb() - register insn execution cb + * @insn: the opaque qemu_plugin_insn handle for an instruction + * @cb: callback function + * @flags: does the plugin read or write the CPU's registers? + * @userdata: any plugin data to pass to the @cb? + * + * The @cb function is called every time an instruction is executed + */ +void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn, + qemu_plugin_vcpu_udata_cb_t cb, + enum qemu_plugin_cb_flags flags, + void *userdata); + +/** + * qemu_plugin_register_vcpu_insn_exec_inline() - insn execution inline op + * @insn: the opaque qemu_plugin_insn handle for an instruction + * @cb: callback function + * @op: the type of qemu_plugin_op (e.g. ADD_U64) + * @ptr: the target memory location for the op + * @imm: the op data (e.g. 1) + * + * Insert an inline op to every time an instruction executes. Useful + * if you just want to increment a single counter somewhere in memory. + */ +void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn, + enum qemu_plugin_op op, + void *ptr, uint64_t imm); + +/* + * Helpers to query information about the instructions in a block + */ +size_t qemu_plugin_tb_n_insns(const struct qemu_plugin_tb *tb); + +uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb); + +struct qemu_plugin_insn * +qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx); + +const void *qemu_plugin_insn_data(const struct qemu_plugin_insn *insn); + +size_t qemu_plugin_insn_size(const struct qemu_plugin_insn *insn); + +uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn); +void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn); + +/* + * Memory Instrumentation + * + * The anonymous qemu_plugin_meminfo_t and qemu_plugin_hwaddr types + * can be used in queries to QEMU to get more information about a + * given memory access. + */ +typedef uint32_t qemu_plugin_meminfo_t; +struct qemu_plugin_hwaddr; + +/* meminfo queries */ +unsigned int qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info); +bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info); +bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info); +bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info); + +/* + * qemu_plugin_get_hwaddr(): + * @vaddr: the virtual address of the memory operation + * + * For system emulation returns a qemu_plugin_hwaddr handle to query + * details about the actual physical address backing the virtual + * address. For linux-user guests it just returns NULL. + * + * This handle is *only* valid for the duration of the callback. Any + * information about the handle should be recovered before the + * callback returns. 
+ */ +struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info, + uint64_t vaddr); + +bool qemu_plugin_hwaddr_is_io(struct qemu_plugin_hwaddr *hwaddr); +uint64_t qemu_plugin_hwaddr_to_raddr(const struct qemu_plugin_hwaddr *haddr); + +typedef void +(*qemu_plugin_vcpu_mem_cb_t)(unsigned int vcpu_index, + qemu_plugin_meminfo_t info, uint64_t vaddr, + void *userdata); + +void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn, + qemu_plugin_vcpu_mem_cb_t cb, + enum qemu_plugin_cb_flags flags, + enum qemu_plugin_mem_rw rw, + void *userdata); + +void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn, + enum qemu_plugin_mem_rw rw, + enum qemu_plugin_op op, void *ptr, + uint64_t imm); + + + +typedef void +(*qemu_plugin_vcpu_syscall_cb_t)(qemu_plugin_id_t id, unsigned int vcpu_index, + int64_t num, uint64_t a1, uint64_t a2, + uint64_t a3, uint64_t a4, uint64_t a5, + uint64_t a6, uint64_t a7, uint64_t a8); + +void qemu_plugin_register_vcpu_syscall_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_syscall_cb_t cb); + +typedef void +(*qemu_plugin_vcpu_syscall_ret_cb_t)(qemu_plugin_id_t id, unsigned int vcpu_idx, + int64_t num, int64_t ret); + +void +qemu_plugin_register_vcpu_syscall_ret_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_syscall_ret_cb_t cb); + + +/** + * qemu_plugin_vcpu_for_each() - iterate over the existing vCPU + * @id: plugin ID + * @cb: callback function + * + * The @cb function is called once for each existing vCPU. + * + * See also: qemu_plugin_register_vcpu_init_cb() + */ +void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id, + qemu_plugin_vcpu_simple_cb_t cb); + +void qemu_plugin_register_flush_cb(qemu_plugin_id_t id, + qemu_plugin_simple_cb_t cb); + +void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id, + qemu_plugin_udata_cb_t cb, void *userdata); + +/* returns -1 in user-mode */ +int qemu_plugin_n_vcpus(void); + +/* returns -1 in user-mode */ +int qemu_plugin_n_max_vcpus(void); + +#endif /* QEMU_PLUGIN_API_H */ -- cgit v1.2.3-55-g7522 From 54cb65d8588b67853a8898eb73e82d76c1a1e2e1 Mon Sep 17 00:00:00 2001 From: Emilio G. Cota Date: Wed, 30 Aug 2017 18:39:53 -0400 Subject: plugin: add core code Signed-off-by: Emilio G. 
Cota [AJB: moved directory and merged various fixes] Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- Makefile | 9 +- Makefile.target | 2 + configure | 3 + include/hw/core/cpu.h | 6 + include/qemu/plugin.h | 255 +++++++++++++++++++++++++ plugins/Makefile.objs | 6 + plugins/core.c | 502 ++++++++++++++++++++++++++++++++++++++++++++++++++ plugins/loader.c | 362 ++++++++++++++++++++++++++++++++++++ plugins/plugin.h | 97 ++++++++++ 9 files changed, 1241 insertions(+), 1 deletion(-) create mode 100644 include/qemu/plugin.h create mode 100644 plugins/Makefile.objs create mode 100644 plugins/core.c create mode 100644 plugins/loader.c create mode 100644 plugins/plugin.h (limited to 'include') diff --git a/Makefile b/Makefile index 0e994a275d..94d2518551 100644 --- a/Makefile +++ b/Makefile @@ -853,7 +853,11 @@ endif ICON_SIZES=16x16 24x24 32x32 48x48 64x64 128x128 256x256 512x512 -install: all $(if $(BUILD_DOCS),install-doc) install-datadir install-localstatedir \ +install-includedir: + $(INSTALL_DIR) "$(DESTDIR)$(includedir)" + +install: all $(if $(BUILD_DOCS),install-doc) \ + install-datadir install-localstatedir install-includedir \ $(if $(INSTALL_BLOBS),$(edk2-decompressed)) \ recurse-install ifneq ($(TOOLS),) @@ -915,6 +919,9 @@ endif "$(DESTDIR)$(qemu_desktopdir)/qemu.desktop" ifdef CONFIG_GTK $(MAKE) -C po $@ +endif +ifeq ($(CONFIG_PLUGIN),y) + $(INSTALL_DATA) $(SRC_PATH)/include/qemu/qemu-plugin.h "$(DESTDIR)$(includedir)/qemu-plugin.h" endif $(INSTALL_DIR) "$(DESTDIR)$(qemu_datadir)/keymaps" set -e; for x in $(KEYMAPS); do \ diff --git a/Makefile.target b/Makefile.target index 5e916230c4..5de6c95efb 100644 --- a/Makefile.target +++ b/Makefile.target @@ -119,6 +119,8 @@ obj-y += disas.o obj-$(call notempty,$(TARGET_XML_FILES)) += gdbstub-xml.o LIBS := $(libs_cpu) $(LIBS) +obj-$(CONFIG_PLUGIN) += plugins/ + ######################################################### # Linux user emulator target diff --git a/configure b/configure index 145fcabbb3..d6fbcf45e4 100755 --- a/configure +++ b/configure @@ -3616,6 +3616,9 @@ glib_modules=gthread-2.0 if test "$modules" = yes; then glib_modules="$glib_modules gmodule-export-2.0" fi +if test "$plugins" = yes; then + glib_modules="$glib_modules gmodule-2.0" +fi # This workaround is required due to a bug in pkg-config file for glib as it # doesn't define GLIB_STATIC_COMPILATION for pkg-config --static diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 07f2ab0590..e1c383ba84 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -29,6 +29,7 @@ #include "qemu/rcu_queue.h" #include "qemu/queue.h" #include "qemu/thread.h" +#include "qemu/plugin.h" typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size, void *opaque); @@ -344,6 +345,7 @@ struct qemu_work_item; * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes * to @trace_dstate). * @trace_dstate: Dynamic tracing state of events for this vCPU (bitmask). + * @plugin_mask: Plugin event bitmap. Modified only via async work. * @ignore_memory_transaction_failures: Cached copy of the MachineState * flag of the same name: allows the board to suppress calling of the * CPU do_transaction_failed hook function. @@ -428,6 +430,10 @@ struct CPUState { DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS); DECLARE_BITMAP(trace_dstate, CPU_TRACE_DSTATE_MAX_EVENTS); + DECLARE_BITMAP(plugin_mask, QEMU_PLUGIN_EV_MAX); + + GArray *plugin_mem_cbs; + /* TODO Move common fields from CPUArchState here. 
*/ int cpu_index; int cluster_index; diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h new file mode 100644 index 0000000000..11687e8cdc --- /dev/null +++ b/include/qemu/plugin.h @@ -0,0 +1,255 @@ +/* + * Copyright (C) 2017, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef QEMU_PLUGIN_H +#define QEMU_PLUGIN_H + +#include "qemu/config-file.h" +#include "qemu/qemu-plugin.h" +#include "qemu/error-report.h" +#include "qemu/queue.h" +#include "qemu/option.h" + +/* + * Option parsing/processing. + * Note that we can load an arbitrary number of plugins. + */ +struct qemu_plugin_desc; +typedef QTAILQ_HEAD(, qemu_plugin_desc) QemuPluginList; + +#ifdef CONFIG_PLUGIN +extern QemuOptsList qemu_plugin_opts; + +static inline void qemu_plugin_add_opts(void) +{ + qemu_add_opts(&qemu_plugin_opts); +} + +void qemu_plugin_opt_parse(const char *optarg, QemuPluginList *head); +int qemu_plugin_load_list(QemuPluginList *head); +#else /* !CONFIG_PLUGIN */ +static inline void qemu_plugin_add_opts(void) +{ } + +static inline void qemu_plugin_opt_parse(const char *optarg, + QemuPluginList *head) +{ + error_report("plugin interface not enabled in this build"); + exit(1); +} + +static inline int qemu_plugin_load_list(QemuPluginList *head) +{ + return 0; +} +#endif /* !CONFIG_PLUGIN */ + +/* + * Events that plugins can subscribe to. + */ +enum qemu_plugin_event { + QEMU_PLUGIN_EV_VCPU_INIT, + QEMU_PLUGIN_EV_VCPU_EXIT, + QEMU_PLUGIN_EV_VCPU_TB_TRANS, + QEMU_PLUGIN_EV_VCPU_IDLE, + QEMU_PLUGIN_EV_VCPU_RESUME, + QEMU_PLUGIN_EV_VCPU_SYSCALL, + QEMU_PLUGIN_EV_VCPU_SYSCALL_RET, + QEMU_PLUGIN_EV_FLUSH, + QEMU_PLUGIN_EV_ATEXIT, + QEMU_PLUGIN_EV_MAX, /* total number of plugin events we support */ +}; + +union qemu_plugin_cb_sig { + qemu_plugin_simple_cb_t simple; + qemu_plugin_udata_cb_t udata; + qemu_plugin_vcpu_simple_cb_t vcpu_simple; + qemu_plugin_vcpu_udata_cb_t vcpu_udata; + qemu_plugin_vcpu_tb_trans_cb_t vcpu_tb_trans; + qemu_plugin_vcpu_mem_cb_t vcpu_mem; + qemu_plugin_vcpu_syscall_cb_t vcpu_syscall; + qemu_plugin_vcpu_syscall_ret_cb_t vcpu_syscall_ret; + void *generic; +}; + +enum plugin_dyn_cb_type { + PLUGIN_CB_INSN, + PLUGIN_CB_MEM, + PLUGIN_N_CB_TYPES, +}; + +enum plugin_dyn_cb_subtype { + PLUGIN_CB_REGULAR, + PLUGIN_CB_INLINE, + PLUGIN_N_CB_SUBTYPES, +}; + +/* + * A dynamic callback has an insertion point that is determined at run-time. + * Usually the insertion point is somewhere in the code cache; think for + * instance of a callback to be called upon the execution of a particular TB. + */ +struct qemu_plugin_dyn_cb { + union qemu_plugin_cb_sig f; + void *userp; + unsigned tcg_flags; + enum plugin_dyn_cb_subtype type; + /* @rw applies to mem callbacks only (both regular and inline) */ + enum qemu_plugin_mem_rw rw; + /* fields specific to each dyn_cb type go here */ + union { + struct { + enum qemu_plugin_op op; + uint64_t imm; + } inline_insn; + }; +}; + +struct qemu_plugin_insn { + GByteArray *data; + uint64_t vaddr; + void *haddr; + GArray *cbs[PLUGIN_N_CB_TYPES][PLUGIN_N_CB_SUBTYPES]; + bool calls_helpers; + bool mem_helper; +}; + +/* + * qemu_plugin_insn allocate and cleanup functions. We don't expect to + * cleanup many of these structures. They are reused for each fresh + * translation. 
+ */ + +static inline void qemu_plugin_insn_cleanup_fn(gpointer data) +{ + struct qemu_plugin_insn *insn = (struct qemu_plugin_insn *) data; + g_byte_array_free(insn->data, true); +} + +static inline struct qemu_plugin_insn *qemu_plugin_insn_alloc(void) +{ + int i, j; + struct qemu_plugin_insn *insn = g_new0(struct qemu_plugin_insn, 1); + insn->data = g_byte_array_sized_new(4); + + for (i = 0; i < PLUGIN_N_CB_TYPES; i++) { + for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) { + insn->cbs[i][j] = g_array_new(false, false, + sizeof(struct qemu_plugin_dyn_cb)); + } + } + return insn; +} + +struct qemu_plugin_tb { + GPtrArray *insns; + size_t n; + uint64_t vaddr; + uint64_t vaddr2; + void *haddr1; + void *haddr2; + GArray *cbs[PLUGIN_N_CB_SUBTYPES]; +}; + +/** + * qemu_plugin_tb_insn_get(): get next plugin record for translation. + * + */ +static inline +struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb) +{ + struct qemu_plugin_insn *insn; + int i, j; + + if (unlikely(tb->n == tb->insns->len)) { + struct qemu_plugin_insn *new_insn = qemu_plugin_insn_alloc(); + g_ptr_array_add(tb->insns, new_insn); + } + insn = g_ptr_array_index(tb->insns, tb->n++); + g_byte_array_set_size(insn->data, 0); + insn->calls_helpers = false; + insn->mem_helper = false; + + for (i = 0; i < PLUGIN_N_CB_TYPES; i++) { + for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) { + g_array_set_size(insn->cbs[i][j], 0); + } + } + + return insn; +} + +#ifdef CONFIG_PLUGIN + +void qemu_plugin_vcpu_init_hook(CPUState *cpu); +void qemu_plugin_vcpu_exit_hook(CPUState *cpu); +void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb); +void qemu_plugin_vcpu_idle_cb(CPUState *cpu); +void qemu_plugin_vcpu_resume_cb(CPUState *cpu); +void +qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, + uint64_t a2, uint64_t a3, uint64_t a4, uint64_t a5, + uint64_t a6, uint64_t a7, uint64_t a8); +void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret); + +void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t meminfo); + +void qemu_plugin_flush_cb(void); + +void qemu_plugin_atexit_cb(void); + +void qemu_plugin_add_dyn_cb_arr(GArray *arr); + +void qemu_plugin_disable_mem_helpers(CPUState *cpu); + +#else /* !CONFIG_PLUGIN */ + +static inline void qemu_plugin_vcpu_init_hook(CPUState *cpu) +{ } + +static inline void qemu_plugin_vcpu_exit_hook(CPUState *cpu) +{ } + +static inline void qemu_plugin_tb_trans_cb(CPUState *cpu, + struct qemu_plugin_tb *tb) +{ } + +static inline void qemu_plugin_vcpu_idle_cb(CPUState *cpu) +{ } + +static inline void qemu_plugin_vcpu_resume_cb(CPUState *cpu) +{ } + +static inline void +qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2, + uint64_t a3, uint64_t a4, uint64_t a5, uint64_t a6, + uint64_t a7, uint64_t a8) +{ } + +static inline +void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret) +{ } + +static inline void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, + uint32_t meminfo) +{ } + +static inline void qemu_plugin_flush_cb(void) +{ } + +static inline void qemu_plugin_atexit_cb(void) +{ } + +static inline +void qemu_plugin_add_dyn_cb_arr(GArray *arr) +{ } + +static inline void qemu_plugin_disable_mem_helpers(CPUState *cpu) +{ } + +#endif /* !CONFIG_PLUGIN */ + +#endif /* QEMU_PLUGIN_H */ diff --git a/plugins/Makefile.objs b/plugins/Makefile.objs new file mode 100644 index 0000000000..58940335bc --- /dev/null +++ b/plugins/Makefile.objs @@ -0,0 +1,6 @@ +# +# Plugin Support +# + +obj-y += loader.o 
+obj-y += core.o diff --git a/plugins/core.c b/plugins/core.c new file mode 100644 index 0000000000..9e1b9e7a91 --- /dev/null +++ b/plugins/core.c @@ -0,0 +1,502 @@ +/* + * QEMU Plugin Core code + * + * This is the core code that deals with injecting instrumentation into the code + * + * Copyright (C) 2017, Emilio G. Cota + * Copyright (C) 2019, Linaro + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/config-file.h" +#include "qapi/error.h" +#include "qemu/option.h" +#include "qemu/rcu_queue.h" +#include "qemu/xxhash.h" +#include "qemu/rcu.h" +#include "hw/core/cpu.h" +#include "exec/cpu-common.h" + +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "sysemu/sysemu.h" +#include "tcg/tcg.h" +#include "tcg/tcg-op.h" +#include "trace/mem-internal.h" /* mem_info macros */ +#include "plugin.h" + +struct qemu_plugin_cb { + struct qemu_plugin_ctx *ctx; + union qemu_plugin_cb_sig f; + void *udata; + QLIST_ENTRY(qemu_plugin_cb) entry; +}; + +struct qemu_plugin_state plugin; + +struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id) +{ + struct qemu_plugin_ctx *ctx; + qemu_plugin_id_t *id_p; + + id_p = g_hash_table_lookup(plugin.id_ht, &id); + ctx = container_of(id_p, struct qemu_plugin_ctx, id); + if (ctx == NULL) { + error_report("plugin: invalid plugin id %" PRIu64, id); + abort(); + } + return ctx; +} + +static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data) +{ + bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX); + cpu_tb_jmp_cache_clear(cpu); +} + +static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata) +{ + CPUState *cpu = container_of(k, CPUState, cpu_index); + run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask); + + if (cpu->created) { + async_run_on_cpu(cpu, plugin_cpu_update__async, mask); + } else { + plugin_cpu_update__async(cpu, mask); + } +} + +void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx, + enum qemu_plugin_event ev) +{ + struct qemu_plugin_cb *cb = ctx->callbacks[ev]; + + if (cb == NULL) { + return; + } + QLIST_REMOVE_RCU(cb, entry); + g_free(cb); + ctx->callbacks[ev] = NULL; + if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) { + clear_bit(ev, plugin.mask); + g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL); + } +} + +static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev) +{ + struct qemu_plugin_cb *cb, *next; + + switch (ev) { + case QEMU_PLUGIN_EV_VCPU_INIT: + case QEMU_PLUGIN_EV_VCPU_EXIT: + case QEMU_PLUGIN_EV_VCPU_IDLE: + case QEMU_PLUGIN_EV_VCPU_RESUME: + /* iterate safely; plugins might uninstall themselves at any time */ + QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { + qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple; + + func(cb->ctx->id, cpu->cpu_index); + } + break; + default: + g_assert_not_reached(); + } +} + +static void plugin_cb__simple(enum qemu_plugin_event ev) +{ + struct qemu_plugin_cb *cb, *next; + + switch (ev) { + case QEMU_PLUGIN_EV_FLUSH: + QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { + qemu_plugin_simple_cb_t func = cb->f.simple; + + func(cb->ctx->id); + } + break; + default: + g_assert_not_reached(); + } +} + +static void plugin_cb__udata(enum qemu_plugin_event ev) +{ + struct qemu_plugin_cb *cb, *next; + + switch (ev) { + case QEMU_PLUGIN_EV_ATEXIT: + 
QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { + qemu_plugin_udata_cb_t func = cb->f.udata; + + func(cb->ctx->id, cb->udata); + } + break; + default: + g_assert_not_reached(); + } +} + +static void +do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev, + void *func, void *udata) +{ + struct qemu_plugin_ctx *ctx; + + qemu_rec_mutex_lock(&plugin.lock); + ctx = plugin_id_to_ctx_locked(id); + /* if the plugin is on its way out, ignore this request */ + if (unlikely(ctx->uninstalling)) { + goto out_unlock; + } + if (func) { + struct qemu_plugin_cb *cb = ctx->callbacks[ev]; + + if (cb) { + cb->f.generic = func; + cb->udata = udata; + } else { + cb = g_new(struct qemu_plugin_cb, 1); + cb->ctx = ctx; + cb->f.generic = func; + cb->udata = udata; + ctx->callbacks[ev] = cb; + QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry); + if (!test_bit(ev, plugin.mask)) { + set_bit(ev, plugin.mask); + g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, + NULL); + } + } + } else { + plugin_unregister_cb__locked(ctx, ev); + } + out_unlock: + qemu_rec_mutex_unlock(&plugin.lock); +} + +void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev, + void *func) +{ + do_plugin_register_cb(id, ev, func, NULL); +} + +void +plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev, + void *func, void *udata) +{ + do_plugin_register_cb(id, ev, func, udata); +} + +void qemu_plugin_vcpu_init_hook(CPUState *cpu) +{ + bool success; + + qemu_rec_mutex_lock(&plugin.lock); + plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL); + success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index, + &cpu->cpu_index); + g_assert(success); + qemu_rec_mutex_unlock(&plugin.lock); + + plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT); +} + +void qemu_plugin_vcpu_exit_hook(CPUState *cpu) +{ + bool success; + + plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT); + + qemu_rec_mutex_lock(&plugin.lock); + success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index); + g_assert(success); + qemu_rec_mutex_unlock(&plugin.lock); +} + +struct plugin_for_each_args { + struct qemu_plugin_ctx *ctx; + qemu_plugin_vcpu_simple_cb_t cb; +}; + +static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata) +{ + struct plugin_for_each_args *args = udata; + int cpu_index = *(int *)k; + + args->cb(args->ctx->id, cpu_index); +} + +void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id, + qemu_plugin_vcpu_simple_cb_t cb) +{ + struct plugin_for_each_args args; + + if (cb == NULL) { + return; + } + qemu_rec_mutex_lock(&plugin.lock); + args.ctx = plugin_id_to_ctx_locked(id); + args.cb = cb; + g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args); + qemu_rec_mutex_unlock(&plugin.lock); +} + +/* Allocate and return a callback record */ +static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr) +{ + GArray *cbs = *arr; + + if (!cbs) { + cbs = g_array_sized_new(false, false, + sizeof(struct qemu_plugin_dyn_cb), 1); + *arr = cbs; + } + + g_array_set_size(cbs, cbs->len + 1); + return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1); +} + +void plugin_register_inline_op(GArray **arr, + enum qemu_plugin_mem_rw rw, + enum qemu_plugin_op op, void *ptr, + uint64_t imm) +{ + struct qemu_plugin_dyn_cb *dyn_cb; + + dyn_cb = plugin_get_dyn_cb(arr); + dyn_cb->userp = ptr; + dyn_cb->type = PLUGIN_CB_INLINE; + dyn_cb->rw = rw; + dyn_cb->inline_insn.op = op; + dyn_cb->inline_insn.imm = imm; +} + +static inline uint32_t cb_to_tcg_flags(enum qemu_plugin_cb_flags 
flags) +{ + uint32_t ret; + + switch (flags) { + case QEMU_PLUGIN_CB_RW_REGS: + ret = 0; + case QEMU_PLUGIN_CB_R_REGS: + ret = TCG_CALL_NO_WG; + break; + case QEMU_PLUGIN_CB_NO_REGS: + default: + ret = TCG_CALL_NO_RWG; + } + return ret; +} + +inline void +plugin_register_dyn_cb__udata(GArray **arr, + qemu_plugin_vcpu_udata_cb_t cb, + enum qemu_plugin_cb_flags flags, void *udata) +{ + struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr); + + dyn_cb->userp = udata; + dyn_cb->tcg_flags = cb_to_tcg_flags(flags); + dyn_cb->f.vcpu_udata = cb; + dyn_cb->type = PLUGIN_CB_REGULAR; +} + +void plugin_register_vcpu_mem_cb(GArray **arr, + void *cb, + enum qemu_plugin_cb_flags flags, + enum qemu_plugin_mem_rw rw, + void *udata) +{ + struct qemu_plugin_dyn_cb *dyn_cb; + + dyn_cb = plugin_get_dyn_cb(arr); + dyn_cb->userp = udata; + dyn_cb->tcg_flags = cb_to_tcg_flags(flags); + dyn_cb->type = PLUGIN_CB_REGULAR; + dyn_cb->rw = rw; + dyn_cb->f.generic = cb; +} + +void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb) +{ + struct qemu_plugin_cb *cb, *next; + enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS; + + /* no plugin_mask check here; caller should have checked */ + + QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { + qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans; + + func(cb->ctx->id, tb); + } +} + +void +qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2, + uint64_t a3, uint64_t a4, uint64_t a5, + uint64_t a6, uint64_t a7, uint64_t a8) +{ + struct qemu_plugin_cb *cb, *next; + enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL; + + if (!test_bit(ev, cpu->plugin_mask)) { + return; + } + + QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { + qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall; + + func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8); + } +} + +void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret) +{ + struct qemu_plugin_cb *cb, *next; + enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET; + + if (!test_bit(ev, cpu->plugin_mask)) { + return; + } + + QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) { + qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret; + + func(cb->ctx->id, cpu->cpu_index, num, ret); + } +} + +void qemu_plugin_vcpu_idle_cb(CPUState *cpu) +{ + plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE); +} + +void qemu_plugin_vcpu_resume_cb(CPUState *cpu) +{ + plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME); +} + +void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_simple_cb_t cb) +{ + plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb); +} + +void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_simple_cb_t cb) +{ + plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb); +} + +void qemu_plugin_register_flush_cb(qemu_plugin_id_t id, + qemu_plugin_simple_cb_t cb) +{ + plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb); +} + +static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp) +{ + g_array_free((GArray *) p, true); + return true; +} + +void qemu_plugin_flush_cb(void) +{ + qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL); + qht_reset(&plugin.dyn_cb_arr_ht); + + plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH); +} + +void exec_inline_op(struct qemu_plugin_dyn_cb *cb) +{ + uint64_t *val = cb->userp; + + switch (cb->inline_insn.op) { + case QEMU_PLUGIN_INLINE_ADD_U64: + *val += cb->inline_insn.imm; + break; + default: + 
g_assert_not_reached(); + } +} + +void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info) +{ + GArray *arr = cpu->plugin_mem_cbs; + size_t i; + + if (arr == NULL) { + return; + } + for (i = 0; i < arr->len; i++) { + struct qemu_plugin_dyn_cb *cb = + &g_array_index(arr, struct qemu_plugin_dyn_cb, i); + int w = !!(info & TRACE_MEM_ST) + 1; + + if (!(w & cb->rw)) { + break; + } + switch (cb->type) { + case PLUGIN_CB_REGULAR: + cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp); + break; + case PLUGIN_CB_INLINE: + exec_inline_op(cb); + break; + default: + g_assert_not_reached(); + } + } +} + +void qemu_plugin_atexit_cb(void) +{ + plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT); +} + +void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id, + qemu_plugin_udata_cb_t cb, + void *udata) +{ + plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata); +} + +/* + * Call this function after longjmp'ing to the main loop. It's possible that the + * last instruction of a TB might have used helpers, and therefore the + * "disable" instruction will never execute because it ended up as dead code. + */ +void qemu_plugin_disable_mem_helpers(CPUState *cpu) +{ + cpu->plugin_mem_cbs = NULL; +} + +static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp) +{ + return ap == bp; +} + +static void __attribute__((__constructor__)) plugin_init(void) +{ + int i; + + for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) { + QLIST_INIT(&plugin.cb_lists[i]); + } + qemu_rec_mutex_init(&plugin.lock); + plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal); + plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal); + QTAILQ_INIT(&plugin.ctxs); + qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16, + QHT_MODE_AUTO_RESIZE); + atexit(qemu_plugin_atexit_cb); +} diff --git a/plugins/loader.c b/plugins/loader.c new file mode 100644 index 0000000000..eaedff577c --- /dev/null +++ b/plugins/loader.c @@ -0,0 +1,362 @@ +/* + * QEMU Plugin Core Loader Code + * + * This is the code responsible for loading and unloading the plugins. + * Aside from the basic housekeeping tasks we also need to ensure any + * generated code is flushed when we remove a plugin so we cannot end + * up calling and unloaded helper function. + * + * Copyright (C) 2017, Emilio G. Cota + * Copyright (C) 2019, Linaro + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/config-file.h" +#include "qapi/error.h" +#include "qemu/option.h" +#include "qemu/rcu_queue.h" +#include "qemu/qht.h" +#include "qemu/bitmap.h" +#include "qemu/xxhash.h" +#include "qemu/plugin.h" +#include "hw/core/cpu.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "plugin.h" + +/* + * For convenience we use a bitmap for plugin.mask, but really all we need is a + * u32, which is what we store in TranslationBlock. 
+ */ +QEMU_BUILD_BUG_ON(QEMU_PLUGIN_EV_MAX > 32); + +struct qemu_plugin_desc { + char *path; + char **argv; + QTAILQ_ENTRY(qemu_plugin_desc) entry; + int argc; +}; + +struct qemu_plugin_parse_arg { + QemuPluginList *head; + struct qemu_plugin_desc *curr; +}; + +QemuOptsList qemu_plugin_opts = { + .name = "plugin", + .implied_opt_name = "file", + .head = QTAILQ_HEAD_INITIALIZER(qemu_plugin_opts.head), + .desc = { + /* do our own parsing to support multiple plugins */ + { /* end of list */ } + }, +}; + +typedef int (*qemu_plugin_install_func_t)(qemu_plugin_id_t, int, char **); + +extern struct qemu_plugin_state plugin; + +void qemu_plugin_add_dyn_cb_arr(GArray *arr) +{ + uint32_t hash = qemu_xxhash2((uint64_t)(uintptr_t)arr); + bool inserted; + + inserted = qht_insert(&plugin.dyn_cb_arr_ht, arr, hash, NULL); + g_assert(inserted); +} + +static struct qemu_plugin_desc *plugin_find_desc(QemuPluginList *head, + const char *path) +{ + struct qemu_plugin_desc *desc; + + QTAILQ_FOREACH(desc, head, entry) { + if (strcmp(desc->path, path) == 0) { + return desc; + } + } + return NULL; +} + +static int plugin_add(void *opaque, const char *name, const char *value, + Error **errp) +{ + struct qemu_plugin_parse_arg *arg = opaque; + struct qemu_plugin_desc *p; + + if (strcmp(name, "file") == 0) { + if (strcmp(value, "") == 0) { + error_setg(errp, "requires a non-empty argument"); + return 1; + } + p = plugin_find_desc(arg->head, value); + if (p == NULL) { + p = g_new0(struct qemu_plugin_desc, 1); + p->path = g_strdup(value); + QTAILQ_INSERT_TAIL(arg->head, p, entry); + } + arg->curr = p; + } else if (strcmp(name, "arg") == 0) { + if (arg->curr == NULL) { + error_setg(errp, "missing earlier '-plugin file=' option"); + return 1; + } + p = arg->curr; + p->argc++; + p->argv = g_realloc_n(p->argv, p->argc, sizeof(char *)); + p->argv[p->argc - 1] = g_strdup(value); + } else { + error_setg(errp, "-plugin: unexpected parameter '%s'; ignored", name); + } + return 0; +} + +void qemu_plugin_opt_parse(const char *optarg, QemuPluginList *head) +{ + struct qemu_plugin_parse_arg arg; + QemuOpts *opts; + + opts = qemu_opts_parse_noisily(qemu_find_opts("plugin"), optarg, true); + if (opts == NULL) { + exit(1); + } + arg.head = head; + arg.curr = NULL; + qemu_opt_foreach(opts, plugin_add, &arg, &error_fatal); + qemu_opts_del(opts); +} + +/* + * From: https://en.wikipedia.org/wiki/Xorshift + * This is faster than rand_r(), and gives us a wider range (RAND_MAX is only + * guaranteed to be >= INT_MAX). 
+ */ +static uint64_t xorshift64star(uint64_t x) +{ + x ^= x >> 12; /* a */ + x ^= x << 25; /* b */ + x ^= x >> 27; /* c */ + return x * UINT64_C(2685821657736338717); +} + +static int plugin_load(struct qemu_plugin_desc *desc) +{ + qemu_plugin_install_func_t install; + struct qemu_plugin_ctx *ctx; + gpointer sym; + int rc; + + ctx = qemu_memalign(qemu_dcache_linesize, sizeof(*ctx)); + memset(ctx, 0, sizeof(*ctx)); + ctx->desc = desc; + + ctx->handle = g_module_open(desc->path, G_MODULE_BIND_LOCAL); + if (ctx->handle == NULL) { + error_report("%s: %s", __func__, g_module_error()); + goto err_dlopen; + } + + if (!g_module_symbol(ctx->handle, "qemu_plugin_install", &sym)) { + error_report("%s: %s", __func__, g_module_error()); + goto err_symbol; + } + install = (qemu_plugin_install_func_t) sym; + /* symbol was found; it could be NULL though */ + if (install == NULL) { + error_report("%s: %s: qemu_plugin_install is NULL", + __func__, desc->path); + goto err_symbol; + } + + qemu_rec_mutex_lock(&plugin.lock); + + /* find an unused random id with &ctx as the seed */ + ctx->id = (uint64_t)(uintptr_t)ctx; + for (;;) { + void *existing; + + ctx->id = xorshift64star(ctx->id); + existing = g_hash_table_lookup(plugin.id_ht, &ctx->id); + if (likely(existing == NULL)) { + bool success; + + success = g_hash_table_insert(plugin.id_ht, &ctx->id, &ctx->id); + g_assert(success); + break; + } + } + QTAILQ_INSERT_TAIL(&plugin.ctxs, ctx, entry); + ctx->installing = true; + rc = install(ctx->id, desc->argc, desc->argv); + ctx->installing = false; + if (rc) { + error_report("%s: qemu_plugin_install returned error code %d", + __func__, rc); + /* + * we cannot rely on the plugin doing its own cleanup, so + * call a full uninstall if the plugin did not yet call it. + */ + if (!ctx->uninstalling) { + plugin_reset_uninstall(ctx->id, NULL, false); + } + } + + qemu_rec_mutex_unlock(&plugin.lock); + return rc; + + err_symbol: + err_dlopen: + qemu_vfree(ctx); + return 1; +} + +/* call after having removed @desc from the list */ +static void plugin_desc_free(struct qemu_plugin_desc *desc) +{ + int i; + + for (i = 0; i < desc->argc; i++) { + g_free(desc->argv[i]); + } + g_free(desc->argv); + g_free(desc->path); + g_free(desc); +} + +/** + * qemu_plugin_load_list - load a list of plugins + * @head: head of the list of descriptors of the plugins to be loaded + * + * Returns 0 if all plugins in the list are installed, !0 otherwise. + * + * Note: the descriptor of each successfully installed plugin is removed + * from the list given by @head. + */ +int qemu_plugin_load_list(QemuPluginList *head) +{ + struct qemu_plugin_desc *desc, *next; + + QTAILQ_FOREACH_SAFE(desc, head, entry, next) { + int err; + + err = plugin_load(desc); + if (err) { + return err; + } + QTAILQ_REMOVE(head, desc, entry); + } + return 0; +} + +struct qemu_plugin_reset_data { + struct qemu_plugin_ctx *ctx; + qemu_plugin_simple_cb_t cb; + bool reset; +}; + +static void plugin_reset_destroy__locked(struct qemu_plugin_reset_data *data) +{ + struct qemu_plugin_ctx *ctx = data->ctx; + enum qemu_plugin_event ev; + bool success; + + /* + * After updating the subscription lists there is no need to wait for an RCU + * grace period to elapse, because right now we either are in a "safe async" + * work environment (i.e. all vCPUs are asleep), or no vCPUs have yet been + * created. 
+ */ + for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) { + plugin_unregister_cb__locked(ctx, ev); + } + + if (data->reset) { + g_assert(ctx->resetting); + if (data->cb) { + data->cb(ctx->id); + } + ctx->resetting = false; + g_free(data); + return; + } + + g_assert(ctx->uninstalling); + /* we cannot dlclose if we are going to return to plugin code */ + if (ctx->installing) { + error_report("Calling qemu_plugin_uninstall from the install function " + "is a bug. Instead, return !0 from the install function."); + abort(); + } + + success = g_hash_table_remove(plugin.id_ht, &ctx->id); + g_assert(success); + QTAILQ_REMOVE(&plugin.ctxs, ctx, entry); + if (data->cb) { + data->cb(ctx->id); + } + if (!g_module_close(ctx->handle)) { + warn_report("%s: %s", __func__, g_module_error()); + } + plugin_desc_free(ctx->desc); + qemu_vfree(ctx); + g_free(data); +} + +static void plugin_reset_destroy(struct qemu_plugin_reset_data *data) +{ + qemu_rec_mutex_lock(&plugin.lock); + plugin_reset_destroy__locked(data); + qemu_rec_mutex_lock(&plugin.lock); +} + +static void plugin_flush_destroy(CPUState *cpu, run_on_cpu_data arg) +{ + struct qemu_plugin_reset_data *data = arg.host_ptr; + + g_assert(cpu_in_exclusive_context(cpu)); + tb_flush(cpu); + plugin_reset_destroy(data); +} + +void plugin_reset_uninstall(qemu_plugin_id_t id, + qemu_plugin_simple_cb_t cb, + bool reset) +{ + struct qemu_plugin_reset_data *data; + struct qemu_plugin_ctx *ctx; + + qemu_rec_mutex_lock(&plugin.lock); + ctx = plugin_id_to_ctx_locked(id); + if (ctx->uninstalling || (reset && ctx->resetting)) { + qemu_rec_mutex_unlock(&plugin.lock); + return; + } + ctx->resetting = reset; + ctx->uninstalling = !reset; + qemu_rec_mutex_unlock(&plugin.lock); + + data = g_new(struct qemu_plugin_reset_data, 1); + data->ctx = ctx; + data->cb = cb; + data->reset = reset; + /* + * Only flush the code cache if the vCPUs have been created. If so, + * current_cpu must be non-NULL. + */ + if (current_cpu) { + async_safe_run_on_cpu(current_cpu, plugin_flush_destroy, + RUN_ON_CPU_HOST_PTR(data)); + } else { + /* + * If current_cpu isn't set, then we don't have yet any vCPU threads + * and we therefore can remove the callbacks synchronously. + */ + plugin_reset_destroy(data); + } +} diff --git a/plugins/plugin.h b/plugins/plugin.h new file mode 100644 index 0000000000..5482168d79 --- /dev/null +++ b/plugins/plugin.h @@ -0,0 +1,97 @@ +/* + * Plugin Shared Internal Functions + * + * Copyright (C) 2019, Linaro + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef _PLUGIN_INTERNAL_H_ +#define _PLUGIN_INTERNAL_H_ + +#include + +/* global state */ +struct qemu_plugin_state { + QTAILQ_HEAD(, qemu_plugin_ctx) ctxs; + QLIST_HEAD(, qemu_plugin_cb) cb_lists[QEMU_PLUGIN_EV_MAX]; + /* + * Use the HT as a hash map by inserting k == v, which saves memory as + * documented by GLib. The parent struct is obtained with container_of(). + */ + GHashTable *id_ht; + /* + * Use the HT as a hash map. Note that we could use a list here, + * but with the HT we avoid adding a field to CPUState. + */ + GHashTable *cpu_ht; + DECLARE_BITMAP(mask, QEMU_PLUGIN_EV_MAX); + /* + * @lock protects the struct as well as ctx->uninstalling. + * The lock must be acquired by all API ops. + * The lock is recursive, which greatly simplifies things, e.g. + * callback registration from qemu_plugin_vcpu_for_each(). + */ + QemuRecMutex lock; + /* + * HT of callbacks invoked from helpers. 
All entries are freed when + * the code cache is flushed. + */ + struct qht dyn_cb_arr_ht; +}; + + +struct qemu_plugin_ctx { + GModule *handle; + qemu_plugin_id_t id; + struct qemu_plugin_cb *callbacks[QEMU_PLUGIN_EV_MAX]; + QTAILQ_ENTRY(qemu_plugin_ctx) entry; + /* + * keep a reference to @desc until uninstall, so that plugins do not have + * to strdup plugin args. + */ + struct qemu_plugin_desc *desc; + bool installing; + bool uninstalling; + bool resetting; +}; + +struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id); + +void plugin_register_inline_op(GArray **arr, + enum qemu_plugin_mem_rw rw, + enum qemu_plugin_op op, void *ptr, + uint64_t imm); + +void plugin_reset_uninstall(qemu_plugin_id_t id, + qemu_plugin_simple_cb_t cb, + bool reset); + +void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev, + void *func); + +void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx, + enum qemu_plugin_event ev); + +void +plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev, + void *func, void *udata); + +void +plugin_register_dyn_cb__udata(GArray **arr, + qemu_plugin_vcpu_udata_cb_t cb, + enum qemu_plugin_cb_flags flags, void *udata); + + +void plugin_register_vcpu_mem_cb(GArray **arr, + void *cb, + enum qemu_plugin_cb_flags flags, + enum qemu_plugin_mem_rw rw, + void *udata); + +void exec_inline_op(struct qemu_plugin_dyn_cb *cb); + +#endif /* _PLUGIN_INTERNAL_H_ */ -- cgit v1.2.3-55-g7522 From 5c5d69b0d523ad335b901800fc443aec77ec4688 Mon Sep 17 00:00:00 2001 From: Alex Bennée Date: Thu, 13 Jun 2019 14:58:58 +0100 Subject: plugin: add implementation of the api [AJB: split from the core code commit] Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- include/qemu/qemu-plugin.h | 3 - plugins/Makefile.objs | 1 + plugins/api.c | 277 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 278 insertions(+), 3 deletions(-) create mode 100644 plugins/api.c (limited to 'include') diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h index d9c1ca3b4c..b9a4a4b684 100644 --- a/include/qemu/qemu-plugin.h +++ b/include/qemu/qemu-plugin.h @@ -285,9 +285,6 @@ bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info); struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info, uint64_t vaddr); -bool qemu_plugin_hwaddr_is_io(struct qemu_plugin_hwaddr *hwaddr); -uint64_t qemu_plugin_hwaddr_to_raddr(const struct qemu_plugin_hwaddr *haddr); - typedef void (*qemu_plugin_vcpu_mem_cb_t)(unsigned int vcpu_index, qemu_plugin_meminfo_t info, uint64_t vaddr, diff --git a/plugins/Makefile.objs b/plugins/Makefile.objs index 58940335bc..95baabf3d2 100644 --- a/plugins/Makefile.objs +++ b/plugins/Makefile.objs @@ -4,3 +4,4 @@ obj-y += loader.o obj-y += core.o +obj-y += api.o diff --git a/plugins/api.c b/plugins/api.c new file mode 100644 index 0000000000..facf2a132d --- /dev/null +++ b/plugins/api.c @@ -0,0 +1,277 @@ +/* + * QEMU Plugin API + * + * This provides the API that is available to the plugins to interact + * with QEMU. We have to be careful not to expose internal details of + * how QEMU works so we abstract out things like translation and + * instructions to anonymous data types: + * + * qemu_plugin_tb + * qemu_plugin_insn + * + * Which can then be passed back into the API to do additional things. + * As such all the public functions in here are exported in + * qemu-plugin.h. 
+ * + * The general life-cycle of a plugin is: + * + * - plugin is loaded, public qemu_plugin_install called + * - the install func registers callbacks for events + * - usually an atexit_cb is registered to dump info at the end + * - when a registered event occurs the plugin is called + * - some events pass additional info + * - during translation the plugin can decide to instrument any + * instruction + * - when QEMU exits all the registered atexit callbacks are called + * + * Copyright (C) 2017, Emilio G. Cota + * Copyright (C) 2019, Linaro + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + */ + +#include "qemu/osdep.h" +#include "qemu/plugin.h" +#include "cpu.h" +#include "sysemu/sysemu.h" +#include "tcg/tcg.h" +#include "trace/mem-internal.h" /* mem_info macros */ +#include "plugin.h" +#ifndef CONFIG_USER_ONLY +#include "hw/boards.h" +#endif + +/* Uninstall and Reset handlers */ + +void qemu_plugin_uninstall(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb) +{ + plugin_reset_uninstall(id, cb, false); +} + +void qemu_plugin_reset(qemu_plugin_id_t id, qemu_plugin_simple_cb_t cb) +{ + plugin_reset_uninstall(id, cb, true); +} + +/* + * Plugin Register Functions + * + * This allows the plugin to register callbacks for various events + * during the translation. + */ + +void qemu_plugin_register_vcpu_init_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_simple_cb_t cb) +{ + plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_INIT, cb); +} + +void qemu_plugin_register_vcpu_exit_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_simple_cb_t cb) +{ + plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_EXIT, cb); +} + +void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb, + qemu_plugin_vcpu_udata_cb_t cb, + enum qemu_plugin_cb_flags flags, + void *udata) +{ + plugin_register_dyn_cb__udata(&tb->cbs[PLUGIN_CB_REGULAR], + cb, flags, udata); +} + +void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb, + enum qemu_plugin_op op, + void *ptr, uint64_t imm) +{ + plugin_register_inline_op(&tb->cbs[PLUGIN_CB_INLINE], 0, op, ptr, imm); +} + +void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn, + qemu_plugin_vcpu_udata_cb_t cb, + enum qemu_plugin_cb_flags flags, + void *udata) +{ + plugin_register_dyn_cb__udata(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], + cb, flags, udata); +} + +void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn, + enum qemu_plugin_op op, + void *ptr, uint64_t imm) +{ + plugin_register_inline_op(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE], + 0, op, ptr, imm); +} + + + +void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn, + qemu_plugin_vcpu_mem_cb_t cb, + enum qemu_plugin_cb_flags flags, + enum qemu_plugin_mem_rw rw, + void *udata) +{ + plugin_register_vcpu_mem_cb(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], + cb, flags, rw, udata); +} + +void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn, + enum qemu_plugin_mem_rw rw, + enum qemu_plugin_op op, void *ptr, + uint64_t imm) +{ + plugin_register_inline_op(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE], + rw, op, ptr, imm); +} + +void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_tb_trans_cb_t cb) +{ + plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_TB_TRANS, cb); +} + +void qemu_plugin_register_vcpu_syscall_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_syscall_cb_t cb) +{ + plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_SYSCALL, cb); +} 
+ +void +qemu_plugin_register_vcpu_syscall_ret_cb(qemu_plugin_id_t id, + qemu_plugin_vcpu_syscall_ret_cb_t cb) +{ + plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_SYSCALL_RET, cb); +} + +/* + * Plugin Queries + * + * These are queries that the plugin can make to gauge information + * from our opaque data types. We do not want to leak internal details + * here just information useful to the plugin. + */ + +/* + * Translation block information: + * + * A plugin can query the virtual address of the start of the block + * and the number of instructions in it. It can also get access to + * each translated instruction. + */ + +size_t qemu_plugin_tb_n_insns(const struct qemu_plugin_tb *tb) +{ + return tb->n; +} + +uint64_t qemu_plugin_tb_vaddr(const struct qemu_plugin_tb *tb) +{ + return tb->vaddr; +} + +struct qemu_plugin_insn * +qemu_plugin_tb_get_insn(const struct qemu_plugin_tb *tb, size_t idx) +{ + if (unlikely(idx >= tb->n)) { + return NULL; + } + return g_ptr_array_index(tb->insns, idx); +} + +/* + * Instruction information + * + * These queries allow the plugin to retrieve information about each + * instruction being translated. + */ + +const void *qemu_plugin_insn_data(const struct qemu_plugin_insn *insn) +{ + return insn->data->data; +} + +size_t qemu_plugin_insn_size(const struct qemu_plugin_insn *insn) +{ + return insn->data->len; +} + +uint64_t qemu_plugin_insn_vaddr(const struct qemu_plugin_insn *insn) +{ + return insn->vaddr; +} + +void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn) +{ + return insn->haddr; +} + +/* + * The memory queries allow the plugin to query information about a + * memory access. + */ + +unsigned qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info) +{ + return info & TRACE_MEM_SZ_SHIFT_MASK; +} + +bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info) +{ + return !!(info & TRACE_MEM_SE); +} + +bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info) +{ + return !!(info & TRACE_MEM_BE); +} + +bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info) +{ + return !!(info & TRACE_MEM_ST); +} + +/* + * Virtual Memory queries + */ + +struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info, + uint64_t vaddr) +{ + return NULL; +} + +/* + * Queries to the number and potential maximum number of vCPUs there + * will be. This helps the plugin dimension per-vcpu arrays. + */ + +#ifndef CONFIG_USER_ONLY +static MachineState * get_ms(void) +{ + return MACHINE(qdev_get_machine()); +} +#endif + +int qemu_plugin_n_vcpus(void) +{ +#ifdef CONFIG_USER_ONLY + return -1; +#else + return get_ms()->smp.cpus; +#endif +} + +int qemu_plugin_n_max_vcpus(void) +{ +#ifdef CONFIG_USER_ONLY + return -1; +#else + return get_ms()->smp.max_cpus; +#endif +} -- cgit v1.2.3-55-g7522 From 050ec8cc18f020b3231db3311241cd3534e7a15f Mon Sep 17 00:00:00 2001 From: Emilio G. Cota Date: Thu, 6 Dec 2018 19:04:07 -0500 Subject: queue: add QTAILQ_REMOVE_SEVERAL This is faster than removing elements one by one. Will gain a user soon. Signed-off-by: Emilio G. 
Cota Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- include/qemu/queue.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include') diff --git a/include/qemu/queue.h b/include/qemu/queue.h index 73bf4a984d..4764d93ea3 100644 --- a/include/qemu/queue.h +++ b/include/qemu/queue.h @@ -420,6 +420,16 @@ union { \ (elm)->field.tqe_circ.tql_prev = NULL; \ } while (/*CONSTCOND*/0) +/* remove @left, @right and all elements in between from @head */ +#define QTAILQ_REMOVE_SEVERAL(head, left, right, field) do { \ + if (((right)->field.tqe_next) != NULL) \ + (right)->field.tqe_next->field.tqe_circ.tql_prev = \ + (left)->field.tqe_circ.tql_prev; \ + else \ + (head)->tqh_circ.tql_prev = (left)->field.tqe_circ.tql_prev; \ + (left)->field.tqe_circ.tql_prev->tql_next = (right)->field.tqe_next; \ + } while (/*CONSTCOND*/0) + #define QTAILQ_FOREACH(var, head, field) \ for ((var) = ((head)->tqh_first); \ (var); \ -- cgit v1.2.3-55-g7522 From 8c01eb78c6c4706ae85f6f1183fe87f9afc62732 Mon Sep 17 00:00:00 2001 From: Emilio G. Cota Date: Wed, 6 Feb 2019 17:11:04 -0500 Subject: cputlb: document get_page_addr_code Suggested-by: Alex Bennée Signed-off-by: Emilio G. Cota Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- include/exec/exec-all.h | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 04795c49bf..e4206cb173 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -504,7 +504,15 @@ void mmap_lock(void); void mmap_unlock(void); bool have_mmap_lock(void); -static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr) +/** + * get_page_addr_code() - user-mode version + * @env: CPUArchState + * @addr: guest virtual address of guest code + * + * Returns @addr. + */ +static inline tb_page_addr_t get_page_addr_code(CPUArchState *env, + target_ulong addr) { return addr; } @@ -512,8 +520,18 @@ static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong static inline void mmap_lock(void) {} static inline void mmap_unlock(void) {} -/* cputlb.c */ -tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr); +/** + * get_page_addr_code() - full-system version + * @env: CPUArchState + * @addr: guest virtual address of guest code + * + * If we cannot translate and execute from the entire RAM page, or if + * the region is not backed by RAM, returns -1. Otherwise, returns the + * ram_addr_t corresponding to the guest code at @addr. + * + * Note: this function can trigger an exception. + */ +tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr); void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length); void tlb_set_dirty(CPUState *cpu, target_ulong vaddr); -- cgit v1.2.3-55-g7522 From 4b2190dabd1fe1db5e07732de311662382d40fb9 Mon Sep 17 00:00:00 2001 From: Emilio G. Cota Date: Sat, 3 Nov 2018 17:40:22 -0400 Subject: cputlb: introduce get_page_addr_code_hostp This will be used by plugins to get the host address of instructions. Signed-off-by: Emilio G. 
Cota Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- accel/tcg/cputlb.c | 14 +++++++++++++- include/exec/exec-all.h | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index a8f9069582..82a5783a2b 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1051,7 +1051,8 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, * NOTE: This function will trigger an exception if the page is * not executable. */ -tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) +tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, + void **hostp) { uintptr_t mmu_idx = cpu_mmu_index(env, true); uintptr_t index = tlb_index(env, mmu_idx, addr); @@ -1077,13 +1078,24 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) if (unlikely(entry->addr_code & TLB_MMIO)) { /* The region is not backed by RAM. */ + if (hostp) { + *hostp = NULL; + } return -1; } p = (void *)((uintptr_t)addr + entry->addend); + if (hostp) { + *hostp = p; + } return qemu_ram_addr_from_host_nofail(p); } +tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) +{ + return get_page_addr_code_hostp(env, addr, NULL); +} + static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) { diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index e4206cb173..eadcf29d0c 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -22,6 +22,7 @@ #include "cpu.h" #include "exec/tb-context.h" +#include "exec/cpu_ldst.h" #include "sysemu/cpus.h" /* allow to see translation results - the slowdown should be negligible, so we leave it */ @@ -516,6 +517,26 @@ static inline tb_page_addr_t get_page_addr_code(CPUArchState *env, { return addr; } + +/** + * get_page_addr_code_hostp() - user-mode version + * @env: CPUArchState + * @addr: guest virtual address of guest code + * + * Returns @addr. + * + * If @hostp is non-NULL, sets *@hostp to the host address where @addr's content + * is kept. + */ +static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, + target_ulong addr, + void **hostp) +{ + if (hostp) { + *hostp = g2h(addr); + } + return addr; +} #else static inline void mmap_lock(void) {} static inline void mmap_unlock(void) {} @@ -533,6 +554,23 @@ static inline void mmap_unlock(void) {} */ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr); +/** + * get_page_addr_code_hostp() - full-system version + * @env: CPUArchState + * @addr: guest virtual address of guest code + * + * See get_page_addr_code() (full-system version) for documentation on the + * return value. + * + * Sets *@hostp (when @hostp is non-NULL) as follows. + * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp + * to the host address where @addr's content is kept. + * + * Note: this function can trigger an exception. + */ +tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, + void **hostp); + void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length); void tlb_set_dirty(CPUState *cpu, target_ulong vaddr); -- cgit v1.2.3-55-g7522 From 38b47b19ec3adf6a96d68726dc29096b3aad780a Mon Sep 17 00:00:00 2001 From: Emilio G. Cota Date: Fri, 7 Dec 2018 15:33:56 -0500 Subject: plugin-gen: add module for TCG-related code We first inject empty instrumentation from translator_loop. 
After translation, we go through the plugins to see what they want to register for, filling in the empty instrumentation. If it turns out that some instrumentation remains unused, we remove it. This approach supports the following features: - Inlining TCG code for simple operations. Note that we do not export TCG ops to plugins. Instead, we give them a C API to insert inlined ops. So far we only support adding an immediate to a u64, e.g. to count events. - "Direct" callbacks. These are callbacks that do not go via a helper. Instead, the helper is defined at run-time, so that the plugin code is directly called from TCG. This makes direct callbacks as efficient as possible; they are therefore used for very frequent events, e.g. memory callbacks. - Passing the host address to memory callbacks. Most of this is implemented in a later patch though. - Instrumentation of memory accesses performed from helpers. See the corresponding comment, as well as a later patch. Signed-off-by: Emilio G. Cota [AJB: add alloc_tcg_plugin_context, use glib, rm hwaddr] Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- accel/tcg/Makefile.objs | 1 + accel/tcg/plugin-gen.c | 932 ++++++++++++++++++++++++++++++++++++++++++++ accel/tcg/plugin-helpers.h | 5 + include/exec/helper-gen.h | 1 + include/exec/helper-proto.h | 1 + include/exec/helper-tcg.h | 1 + include/exec/plugin-gen.h | 57 +++ tcg/tcg-op.h | 11 + tcg/tcg-opc.h | 3 + tcg/tcg.c | 22 ++ tcg/tcg.h | 20 + 11 files changed, 1054 insertions(+) create mode 100644 accel/tcg/plugin-gen.c create mode 100644 accel/tcg/plugin-helpers.h create mode 100644 include/exec/plugin-gen.h (limited to 'include') diff --git a/accel/tcg/Makefile.objs b/accel/tcg/Makefile.objs index d381a02f34..a92f2c454b 100644 --- a/accel/tcg/Makefile.objs +++ b/accel/tcg/Makefile.objs @@ -6,3 +6,4 @@ obj-y += translator.o obj-$(CONFIG_USER_ONLY) += user-exec.o obj-$(call lnot,$(CONFIG_SOFTMMU)) += user-exec-stub.o +obj-$(CONFIG_PLUGIN) += plugin-gen.o diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c new file mode 100644 index 0000000000..51580d51a0 --- /dev/null +++ b/accel/tcg/plugin-gen.c @@ -0,0 +1,932 @@ +/* + * plugin-gen.c - TCG-related bits of plugin infrastructure + * + * Copyright (C) 2018, Emilio G. Cota + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * We support instrumentation at an instruction granularity. That is, + * if a plugin wants to instrument the memory accesses performed by a + * particular instruction, it can just do that instead of instrumenting + * all memory accesses. Thus, in order to do this we first have to + * translate a TB, so that plugins can decide what/where to instrument. + * + * Injecting the desired instrumentation could be done with a second + * translation pass that combined the instrumentation requests, but that + * would be ugly and inefficient since we would decode the guest code twice. + * Instead, during TB translation we add "empty" instrumentation calls for all + * possible instrumentation events, and then once we collect the instrumentation + * requests from plugins, we either "fill in" those empty events or remove them + * if they have no requests. + * + * When "filling in" an event we first copy the empty callback's TCG ops. This + * might seem unnecessary, but it is done to support an arbitrary number + * of callbacks per event. Take for example a regular instruction callback. + * We first generate a callback to an empty helper function.
Then, if two + * plugins register one callback each for this instruction, we make two copies + * of the TCG ops generated for the empty callback, substituting the function + * pointer that points to the empty helper function with the plugins' desired + * callback functions. After that we remove the empty callback's ops. + * + * Note that the location in TCGOp.args[] of the pointer to a helper function + * varies across different guest and host architectures. Instead of duplicating + * the logic that figures this out, we rely on the fact that the empty + * callbacks point to empty functions that are unique pointers in the program. + * Thus, to find the right location we just have to look for a match in + * TCGOp.args[]. This is the main reason why we first copy an empty callback's + * TCG ops and then fill them in; regardless of whether we have one or many + * callbacks for that event, the logic to add all of them is the same. + * + * When generating more than one callback per event, we make a small + * optimization to avoid generating redundant operations. For instance, for the + * second and all subsequent callbacks of an event, we do not need to reload the + * CPU's index into a TCG temp, since the first callback did it already. + */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "tcg/tcg.h" +#include "tcg/tcg-op.h" +#include "trace/mem.h" +#include "exec/exec-all.h" +#include "exec/plugin-gen.h" +#include "exec/translator.h" + +#ifdef CONFIG_SOFTMMU +# define CONFIG_SOFTMMU_GATE 1 +#else +# define CONFIG_SOFTMMU_GATE 0 +#endif + +/* + * plugin_cb_start TCG op args[]: + * 0: enum plugin_gen_from + * 1: enum plugin_gen_cb + * 2: set to 1 for mem callback that is a write, 0 otherwise. + */ + +enum plugin_gen_from { + PLUGIN_GEN_FROM_TB, + PLUGIN_GEN_FROM_INSN, + PLUGIN_GEN_FROM_MEM, + PLUGIN_GEN_AFTER_INSN, + PLUGIN_GEN_N_FROMS, +}; + +enum plugin_gen_cb { + PLUGIN_GEN_CB_UDATA, + PLUGIN_GEN_CB_INLINE, + PLUGIN_GEN_CB_MEM, + PLUGIN_GEN_ENABLE_MEM_HELPER, + PLUGIN_GEN_DISABLE_MEM_HELPER, + PLUGIN_GEN_N_CBS, +}; + +/* + * These helpers are stubs that get dynamically switched out for calls + * direct to the plugin if they are subscribed to. + */ +void HELPER(plugin_vcpu_udata_cb)(uint32_t cpu_index, void *udata) +{ } + +void HELPER(plugin_vcpu_mem_cb)(unsigned int vcpu_index, + qemu_plugin_meminfo_t info, uint64_t vaddr, + void *userdata) +{ } + +static void do_gen_mem_cb(TCGv vaddr, uint32_t info) +{ + TCGv_i32 cpu_index = tcg_temp_new_i32(); + TCGv_i32 meminfo = tcg_const_i32(info); + TCGv_i64 vaddr64 = tcg_temp_new_i64(); + TCGv_ptr udata = tcg_const_ptr(NULL); + + tcg_gen_ld_i32(cpu_index, cpu_env, + -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); + tcg_gen_extu_tl_i64(vaddr64, vaddr); + + gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, vaddr64, udata); + + tcg_temp_free_ptr(udata); + tcg_temp_free_i64(vaddr64); + tcg_temp_free_i32(meminfo); + tcg_temp_free_i32(cpu_index); +} + +static void gen_empty_udata_cb(void) +{ + TCGv_i32 cpu_index = tcg_temp_new_i32(); + TCGv_ptr udata = tcg_const_ptr(NULL); /* will be overwritten later */ + + tcg_gen_ld_i32(cpu_index, cpu_env, + -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index)); + gen_helper_plugin_vcpu_udata_cb(cpu_index, udata); + + tcg_temp_free_ptr(udata); + tcg_temp_free_i32(cpu_index); +} + +/* + * For now we only support addi_i64. + * When we support more ops, we can generate one empty inline cb for each. 
+ */ +static void gen_empty_inline_cb(void) +{ + TCGv_i64 val = tcg_temp_new_i64(); + TCGv_ptr ptr = tcg_const_ptr(NULL); /* overwritten later */ + + tcg_gen_ld_i64(val, ptr, 0); + /* pass an immediate != 0 so that it doesn't get optimized away */ + tcg_gen_addi_i64(val, val, 0xdeadface); + tcg_gen_st_i64(val, ptr, 0); + tcg_temp_free_ptr(ptr); + tcg_temp_free_i64(val); +} + +static void gen_empty_mem_cb(TCGv addr, uint32_t info) +{ + do_gen_mem_cb(addr, info); +} + +/* + * Share the same function for enable/disable. When enabling, the NULL + * pointer will be overwritten later. + */ +static void gen_empty_mem_helper(void) +{ + TCGv_ptr ptr; + + ptr = tcg_const_ptr(NULL); + tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) - + offsetof(ArchCPU, env)); + tcg_temp_free_ptr(ptr); +} + +static inline +void gen_plugin_cb_start(enum plugin_gen_from from, + enum plugin_gen_cb type, unsigned wr) +{ + TCGOp *op; + + tcg_gen_plugin_cb_start(from, type, wr); + op = tcg_last_op(); + QSIMPLEQ_INSERT_TAIL(&tcg_ctx->plugin_ops, op, plugin_link); +} + +static void gen_wrapped(enum plugin_gen_from from, + enum plugin_gen_cb type, void (*func)(void)) +{ + gen_plugin_cb_start(from, type, 0); + func(); + tcg_gen_plugin_cb_end(); +} + +static inline void plugin_gen_empty_callback(enum plugin_gen_from from) +{ + switch (from) { + case PLUGIN_GEN_AFTER_INSN: + gen_wrapped(from, PLUGIN_GEN_DISABLE_MEM_HELPER, + gen_empty_mem_helper); + break; + case PLUGIN_GEN_FROM_INSN: + /* + * Note: plugin_gen_inject() relies on ENABLE_MEM_HELPER being + * the first callback of an instruction + */ + gen_wrapped(from, PLUGIN_GEN_ENABLE_MEM_HELPER, + gen_empty_mem_helper); + /* fall through */ + case PLUGIN_GEN_FROM_TB: + gen_wrapped(from, PLUGIN_GEN_CB_UDATA, gen_empty_udata_cb); + gen_wrapped(from, PLUGIN_GEN_CB_INLINE, gen_empty_inline_cb); + break; + default: + g_assert_not_reached(); + } +} + +union mem_gen_fn { + void (*mem_fn)(TCGv, uint32_t); + void (*inline_fn)(void); +}; + +static void gen_mem_wrapped(enum plugin_gen_cb type, + const union mem_gen_fn *f, TCGv addr, + uint32_t info, bool is_mem) +{ + int wr = !!(info & TRACE_MEM_ST); + + gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr); + if (is_mem) { + f->mem_fn(addr, info); + } else { + f->inline_fn(); + } + tcg_gen_plugin_cb_end(); +} + +void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) +{ + union mem_gen_fn fn; + + fn.mem_fn = gen_empty_mem_cb; + gen_mem_wrapped(PLUGIN_GEN_CB_MEM, &fn, addr, info, true); + + fn.inline_fn = gen_empty_inline_cb; + gen_mem_wrapped(PLUGIN_GEN_CB_INLINE, &fn, 0, info, false); +} + +static TCGOp *find_op(TCGOp *op, TCGOpcode opc) +{ + while (op) { + if (op->opc == opc) { + return op; + } + op = QTAILQ_NEXT(op, link); + } + return NULL; +} + +static TCGOp *rm_ops_range(TCGOp *begin, TCGOp *end) +{ + TCGOp *ret = QTAILQ_NEXT(end, link); + + QTAILQ_REMOVE_SEVERAL(&tcg_ctx->ops, begin, end, link); + return ret; +} + +/* remove all ops until (and including) plugin_cb_end */ +static TCGOp *rm_ops(TCGOp *op) +{ + TCGOp *end_op = find_op(op, INDEX_op_plugin_cb_end); + + tcg_debug_assert(end_op); + return rm_ops_range(op, end_op); +} + +static TCGOp *copy_op_nocheck(TCGOp **begin_op, TCGOp *op) +{ + *begin_op = QTAILQ_NEXT(*begin_op, link); + tcg_debug_assert(*begin_op); + op = tcg_op_insert_after(tcg_ctx, op, (*begin_op)->opc); + memcpy(op->args, (*begin_op)->args, sizeof(op->args)); + return op; +} + +static TCGOp *copy_op(TCGOp **begin_op, TCGOp *op, TCGOpcode opc) +{ + op = copy_op_nocheck(begin_op, op); + 
tcg_debug_assert((*begin_op)->opc == opc); + return op; +} + +static TCGOp *copy_extu_i32_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* mov_i32 */ + op = copy_op(begin_op, op, INDEX_op_mov_i32); + /* movi_i32 */ + op = copy_op(begin_op, op, INDEX_op_movi_i32); + } else { + /* extu_i32_i64 */ + op = copy_op(begin_op, op, INDEX_op_extu_i32_i64); + } + return op; +} + +static TCGOp *copy_mov_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* 2x mov_i32 */ + op = copy_op(begin_op, op, INDEX_op_mov_i32); + op = copy_op(begin_op, op, INDEX_op_mov_i32); + } else { + /* mov_i64 */ + op = copy_op(begin_op, op, INDEX_op_mov_i64); + } + return op; +} + +static TCGOp *copy_movi_i64(TCGOp **begin_op, TCGOp *op, uint64_t v) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* 2x movi_i32 */ + op = copy_op(begin_op, op, INDEX_op_movi_i32); + op->args[1] = v; + + op = copy_op(begin_op, op, INDEX_op_movi_i32); + op->args[1] = v >> 32; + } else { + /* movi_i64 */ + op = copy_op(begin_op, op, INDEX_op_movi_i64); + op->args[1] = v; + } + return op; +} + +static TCGOp *copy_const_ptr(TCGOp **begin_op, TCGOp *op, void *ptr) +{ + if (UINTPTR_MAX == UINT32_MAX) { + /* movi_i32 */ + op = copy_op(begin_op, op, INDEX_op_movi_i32); + op->args[1] = (uintptr_t)ptr; + } else { + /* movi_i64 */ + op = copy_movi_i64(begin_op, op, (uint64_t)(uintptr_t)ptr); + } + return op; +} + +static TCGOp *copy_const_i64(TCGOp **begin_op, TCGOp *op, uint64_t v) +{ + return copy_movi_i64(begin_op, op, v); +} + +static TCGOp *copy_extu_tl_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TARGET_LONG_BITS == 32) { + /* extu_i32_i64 */ + op = copy_extu_i32_i64(begin_op, op); + } else { + /* mov_i64 */ + op = copy_mov_i64(begin_op, op); + } + return op; +} + +static TCGOp *copy_ld_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* 2x ld_i32 */ + op = copy_op(begin_op, op, INDEX_op_ld_i32); + op = copy_op(begin_op, op, INDEX_op_ld_i32); + } else { + /* ld_i64 */ + op = copy_op(begin_op, op, INDEX_op_ld_i64); + } + return op; +} + +static TCGOp *copy_st_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* 2x st_i32 */ + op = copy_op(begin_op, op, INDEX_op_st_i32); + op = copy_op(begin_op, op, INDEX_op_st_i32); + } else { + /* st_i64 */ + op = copy_op(begin_op, op, INDEX_op_st_i64); + } + return op; +} + +static TCGOp *copy_add_i64(TCGOp **begin_op, TCGOp *op) +{ + if (TCG_TARGET_REG_BITS == 32) { + /* all 32-bit backends must implement add2_i32 */ + g_assert(TCG_TARGET_HAS_add2_i32); + op = copy_op(begin_op, op, INDEX_op_add2_i32); + } else { + op = copy_op(begin_op, op, INDEX_op_add_i64); + } + return op; +} + +static TCGOp *copy_st_ptr(TCGOp **begin_op, TCGOp *op) +{ + if (UINTPTR_MAX == UINT32_MAX) { + /* st_i32 */ + op = copy_op(begin_op, op, INDEX_op_st_i32); + } else { + /* st_i64 */ + op = copy_st_i64(begin_op, op); + } + return op; +} + +static TCGOp *copy_call(TCGOp **begin_op, TCGOp *op, void *empty_func, + void *func, unsigned tcg_flags, int *cb_idx) +{ + /* copy all ops until the call */ + do { + op = copy_op_nocheck(begin_op, op); + } while (op->opc != INDEX_op_call); + + /* fill in the op call */ + op->param1 = (*begin_op)->param1; + op->param2 = (*begin_op)->param2; + tcg_debug_assert(op->life == 0); + if (*cb_idx == -1) { + int i; + + /* + * Instead of working out the position of the callback in args[], just + * look for @empty_func, since it should be a unique pointer. 
+ */ + for (i = 0; i < MAX_OPC_PARAM_ARGS; i++) { + if ((uintptr_t)(*begin_op)->args[i] == (uintptr_t)empty_func) { + *cb_idx = i; + break; + } + } + tcg_debug_assert(i < MAX_OPC_PARAM_ARGS); + } + op->args[*cb_idx] = (uintptr_t)func; + op->args[*cb_idx + 1] = tcg_flags; + + return op; +} + +static TCGOp *append_udata_cb(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, int *cb_idx) +{ + /* const_ptr */ + op = copy_const_ptr(&begin_op, op, cb->userp); + + /* copy the ld_i32, but note that we only have to copy it once */ + begin_op = QTAILQ_NEXT(begin_op, link); + tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32); + if (*cb_idx == -1) { + op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32); + memcpy(op->args, begin_op->args, sizeof(op->args)); + } + + /* call */ + op = copy_call(&begin_op, op, HELPER(plugin_vcpu_udata_cb), + cb->f.vcpu_udata, cb->tcg_flags, cb_idx); + + return op; +} + +static TCGOp *append_inline_cb(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, + int *unused) +{ + /* const_ptr */ + op = copy_const_ptr(&begin_op, op, cb->userp); + + /* ld_i64 */ + op = copy_ld_i64(&begin_op, op); + + /* const_i64 */ + op = copy_const_i64(&begin_op, op, cb->inline_insn.imm); + + /* add_i64 */ + op = copy_add_i64(&begin_op, op); + + /* st_i64 */ + op = copy_st_i64(&begin_op, op); + + return op; +} + +static TCGOp *append_mem_cb(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, int *cb_idx) +{ + enum plugin_gen_cb type = begin_op->args[1]; + + tcg_debug_assert(type == PLUGIN_GEN_CB_MEM); + + /* const_i32 == movi_i32 ("info", so it remains as is) */ + op = copy_op(&begin_op, op, INDEX_op_movi_i32); + + /* const_ptr */ + op = copy_const_ptr(&begin_op, op, cb->userp); + + /* copy the ld_i32, but note that we only have to copy it once */ + begin_op = QTAILQ_NEXT(begin_op, link); + tcg_debug_assert(begin_op && begin_op->opc == INDEX_op_ld_i32); + if (*cb_idx == -1) { + op = tcg_op_insert_after(tcg_ctx, op, INDEX_op_ld_i32); + memcpy(op->args, begin_op->args, sizeof(op->args)); + } + + /* extu_tl_i64 */ + op = copy_extu_tl_i64(&begin_op, op); + + if (type == PLUGIN_GEN_CB_MEM) { + /* call */ + op = copy_call(&begin_op, op, HELPER(plugin_vcpu_mem_cb), + cb->f.vcpu_udata, cb->tcg_flags, cb_idx); + } + + return op; +} + +typedef TCGOp *(*inject_fn)(const struct qemu_plugin_dyn_cb *cb, + TCGOp *begin_op, TCGOp *op, int *intp); +typedef bool (*op_ok_fn)(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb); + +static bool op_ok(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb) +{ + return true; +} + +static bool op_rw(const TCGOp *op, const struct qemu_plugin_dyn_cb *cb) +{ + int w; + + w = op->args[2]; + return !!(cb->rw & (w + 1)); +} + +static inline +void inject_cb_type(const GArray *cbs, TCGOp *begin_op, inject_fn inject, + op_ok_fn ok) +{ + TCGOp *end_op; + TCGOp *op; + int cb_idx = -1; + int i; + + if (!cbs || cbs->len == 0) { + rm_ops(begin_op); + return; + } + + end_op = find_op(begin_op, INDEX_op_plugin_cb_end); + tcg_debug_assert(end_op); + + op = end_op; + for (i = 0; i < cbs->len; i++) { + struct qemu_plugin_dyn_cb *cb = + &g_array_index(cbs, struct qemu_plugin_dyn_cb, i); + + if (!ok(begin_op, cb)) { + continue; + } + op = inject(cb, begin_op, op, &cb_idx); + } + rm_ops_range(begin_op, end_op); +} + +static void +inject_udata_cb(const GArray *cbs, TCGOp *begin_op) +{ + inject_cb_type(cbs, begin_op, append_udata_cb, op_ok); +} + +static void +inject_inline_cb(const GArray *cbs, TCGOp *begin_op, op_ok_fn ok) +{ + 
inject_cb_type(cbs, begin_op, append_inline_cb, ok); +} + +static void +inject_mem_cb(const GArray *cbs, TCGOp *begin_op) +{ + inject_cb_type(cbs, begin_op, append_mem_cb, op_rw); +} + +/* we could change the ops in place, but we can reuse more code by copying */ +static void inject_mem_helper(TCGOp *begin_op, GArray *arr) +{ + TCGOp *orig_op = begin_op; + TCGOp *end_op; + TCGOp *op; + + end_op = find_op(begin_op, INDEX_op_plugin_cb_end); + tcg_debug_assert(end_op); + + /* const ptr */ + op = copy_const_ptr(&begin_op, end_op, arr); + + /* st_ptr */ + op = copy_st_ptr(&begin_op, op); + + rm_ops_range(orig_op, end_op); +} + +/* + * Tracking memory accesses performed from helpers requires extra work. + * If an instruction is emulated with helpers, we do two things: + * (1) copy the CB descriptors, and keep track of it so that they can be + * freed later on, and (2) point CPUState.plugin_mem_cbs to the descriptors, so + * that we can read them at run-time (i.e. when the helper executes). + * This run-time access is performed from qemu_plugin_vcpu_mem_cb. + * + * Note that plugin_gen_disable_mem_helpers undoes (2). Since it + * is possible that the code we generate after the instruction is + * dead, we also add checks before generating tb_exit etc. + */ +static void inject_mem_enable_helper(struct qemu_plugin_insn *plugin_insn, + TCGOp *begin_op) +{ + GArray *cbs[2]; + GArray *arr; + size_t n_cbs, i; + + cbs[0] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR]; + cbs[1] = plugin_insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE]; + + n_cbs = 0; + for (i = 0; i < ARRAY_SIZE(cbs); i++) { + n_cbs += cbs[i]->len; + } + + plugin_insn->mem_helper = plugin_insn->calls_helpers && n_cbs; + if (likely(!plugin_insn->mem_helper)) { + rm_ops(begin_op); + return; + } + + arr = g_array_sized_new(false, false, + sizeof(struct qemu_plugin_dyn_cb), n_cbs); + + for (i = 0; i < ARRAY_SIZE(cbs); i++) { + g_array_append_vals(arr, cbs[i]->data, cbs[i]->len); + } + + qemu_plugin_add_dyn_cb_arr(arr); + inject_mem_helper(begin_op, arr); +} + +static void inject_mem_disable_helper(struct qemu_plugin_insn *plugin_insn, + TCGOp *begin_op) +{ + if (likely(!plugin_insn->mem_helper)) { + rm_ops(begin_op); + return; + } + inject_mem_helper(begin_op, NULL); +} + +/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */ +void plugin_gen_disable_mem_helpers(void) +{ + TCGv_ptr ptr; + + if (likely(tcg_ctx->plugin_insn == NULL || + !tcg_ctx->plugin_insn->mem_helper)) { + return; + } + ptr = tcg_const_ptr(NULL); + tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) - + offsetof(ArchCPU, env)); + tcg_temp_free_ptr(ptr); + tcg_ctx->plugin_insn->mem_helper = false; +} + +static void plugin_gen_tb_udata(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op) +{ + inject_udata_cb(ptb->cbs[PLUGIN_CB_REGULAR], begin_op); +} + +static void plugin_gen_tb_inline(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op) +{ + inject_inline_cb(ptb->cbs[PLUGIN_CB_INLINE], begin_op, op_ok); +} + +static void plugin_gen_insn_udata(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + + inject_udata_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_REGULAR], begin_op); +} + +static void plugin_gen_insn_inline(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_inline_cb(insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE], + begin_op, op_ok); +} + +static void 
plugin_gen_mem_regular(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_mem_cb(insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR], begin_op); +} + +static void plugin_gen_mem_inline(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + const GArray *cbs; + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + + cbs = insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE]; + inject_inline_cb(cbs, begin_op, op_rw); +} + +static void plugin_gen_enable_mem_helper(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_mem_enable_helper(insn, begin_op); +} + +static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb, + TCGOp *begin_op, int insn_idx) +{ + struct qemu_plugin_insn *insn = g_ptr_array_index(ptb->insns, insn_idx); + inject_mem_disable_helper(insn, begin_op); +} + +static void plugin_inject_cb(const struct qemu_plugin_tb *ptb, TCGOp *begin_op, + int insn_idx) +{ + enum plugin_gen_from from = begin_op->args[0]; + enum plugin_gen_cb type = begin_op->args[1]; + + switch (from) { + case PLUGIN_GEN_FROM_TB: + switch (type) { + case PLUGIN_GEN_CB_UDATA: + plugin_gen_tb_udata(ptb, begin_op); + return; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_tb_inline(ptb, begin_op); + return; + default: + g_assert_not_reached(); + } + case PLUGIN_GEN_FROM_INSN: + switch (type) { + case PLUGIN_GEN_CB_UDATA: + plugin_gen_insn_udata(ptb, begin_op, insn_idx); + return; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_insn_inline(ptb, begin_op, insn_idx); + return; + case PLUGIN_GEN_ENABLE_MEM_HELPER: + plugin_gen_enable_mem_helper(ptb, begin_op, insn_idx); + return; + default: + g_assert_not_reached(); + } + case PLUGIN_GEN_FROM_MEM: + switch (type) { + case PLUGIN_GEN_CB_MEM: + plugin_gen_mem_regular(ptb, begin_op, insn_idx); + return; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_mem_inline(ptb, begin_op, insn_idx); + return; + default: + g_assert_not_reached(); + } + case PLUGIN_GEN_AFTER_INSN: + switch (type) { + case PLUGIN_GEN_DISABLE_MEM_HELPER: + plugin_gen_disable_mem_helper(ptb, begin_op, insn_idx); + return; + default: + g_assert_not_reached(); + } + default: + g_assert_not_reached(); + } +} + +/* #define DEBUG_PLUGIN_GEN_OPS */ +static void pr_ops(void) +{ +#ifdef DEBUG_PLUGIN_GEN_OPS + TCGOp *op; + int i = 0; + + QTAILQ_FOREACH(op, &tcg_ctx->ops, link) { + const char *name = ""; + const char *type = ""; + + if (op->opc == INDEX_op_plugin_cb_start) { + switch (op->args[0]) { + case PLUGIN_GEN_FROM_TB: + name = "tb"; + break; + case PLUGIN_GEN_FROM_INSN: + name = "insn"; + break; + case PLUGIN_GEN_FROM_MEM: + name = "mem"; + break; + case PLUGIN_GEN_AFTER_INSN: + name = "after insn"; + break; + default: + break; + } + switch (op->args[1]) { + case PLUGIN_GEN_CB_UDATA: + type = "udata"; + break; + case PLUGIN_GEN_CB_INLINE: + type = "inline"; + break; + case PLUGIN_GEN_CB_MEM: + type = "mem"; + break; + case PLUGIN_GEN_ENABLE_MEM_HELPER: + type = "enable mem helper"; + break; + case PLUGIN_GEN_DISABLE_MEM_HELPER: + type = "disable mem helper"; + break; + default: + break; + } + } + printf("op[%2i]: %s %s %s\n", i, tcg_op_defs[op->opc].name, name, type); + i++; + } +#endif +} + +static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb) +{ + TCGOp *op; + int insn_idx; + + pr_ops(); + insn_idx = -1; + QSIMPLEQ_FOREACH(op, &tcg_ctx->plugin_ops, plugin_link) { + enum 
plugin_gen_from from = op->args[0]; + enum plugin_gen_cb type = op->args[1]; + + tcg_debug_assert(op->opc == INDEX_op_plugin_cb_start); + /* ENABLE_MEM_HELPER is the first callback of an instruction */ + if (from == PLUGIN_GEN_FROM_INSN && + type == PLUGIN_GEN_ENABLE_MEM_HELPER) { + insn_idx++; + } + plugin_inject_cb(plugin_tb, op, insn_idx); + } + pr_ops(); +} + +bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb) +{ + struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; + bool ret = false; + + if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) { + ret = true; + + QSIMPLEQ_INIT(&tcg_ctx->plugin_ops); + ptb->vaddr = tb->pc; + ptb->vaddr2 = -1; + get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1); + ptb->haddr2 = NULL; + + plugin_gen_empty_callback(PLUGIN_GEN_FROM_TB); + } + return ret; +} + +void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db) +{ + struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; + struct qemu_plugin_insn *pinsn; + + pinsn = qemu_plugin_tb_insn_get(ptb); + tcg_ctx->plugin_insn = pinsn; + pinsn->vaddr = db->pc_next; + plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN); + + /* + * Detect page crossing to get the new host address. + * Note that we skip this when haddr1 == NULL, e.g. when we're + * fetching instructions from a region not backed by RAM. + */ + if (likely(ptb->haddr1 != NULL && ptb->vaddr2 == -1) && + unlikely((db->pc_next & TARGET_PAGE_MASK) != + (db->pc_first & TARGET_PAGE_MASK))) { + get_page_addr_code_hostp(cpu->env_ptr, db->pc_next, + &ptb->haddr2); + ptb->vaddr2 = db->pc_next; + } + if (likely(ptb->vaddr2 == -1)) { + pinsn->haddr = ptb->haddr1 + pinsn->vaddr - ptb->vaddr; + } else { + pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2; + } +} + +void plugin_gen_insn_end(void) +{ + plugin_gen_empty_callback(PLUGIN_GEN_AFTER_INSN); +} + +void plugin_gen_tb_end(CPUState *cpu) +{ + struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; + int i; + + /* collect instrumentation requests */ + qemu_plugin_tb_trans_cb(cpu, ptb); + + /* inject the instrumentation at the appropriate places */ + plugin_gen_inject(ptb); + + /* clean up */ + for (i = 0; i < PLUGIN_N_CB_SUBTYPES; i++) { + if (ptb->cbs[i]) { + g_array_set_size(ptb->cbs[i], 0); + } + } + ptb->n = 0; + tcg_ctx->plugin_insn = NULL; +} diff --git a/accel/tcg/plugin-helpers.h b/accel/tcg/plugin-helpers.h new file mode 100644 index 0000000000..1916ee7920 --- /dev/null +++ b/accel/tcg/plugin-helpers.h @@ -0,0 +1,5 @@ +#ifdef CONFIG_PLUGIN +/* Note: no TCG flags because those are overwritten later */ +DEF_HELPER_2(plugin_vcpu_udata_cb, void, i32, ptr) +DEF_HELPER_4(plugin_vcpu_mem_cb, void, i32, i32, i64, ptr) +#endif diff --git a/include/exec/helper-gen.h b/include/exec/helper-gen.h index 22381a1708..236ff40524 100644 --- a/include/exec/helper-gen.h +++ b/include/exec/helper-gen.h @@ -70,6 +70,7 @@ static inline void glue(gen_helper_, name)(dh_retvar_decl(ret) \ #include "trace/generated-helpers.h" #include "trace/generated-helpers-wrappers.h" #include "tcg-runtime.h" +#include "plugin-helpers.h" #undef DEF_HELPER_FLAGS_0 #undef DEF_HELPER_FLAGS_1 diff --git a/include/exec/helper-proto.h b/include/exec/helper-proto.h index 74943edb13..1c4ba9bc78 100644 --- a/include/exec/helper-proto.h +++ b/include/exec/helper-proto.h @@ -33,6 +33,7 @@ dh_ctype(ret) HELPER(name) (dh_ctype(t1), dh_ctype(t2), dh_ctype(t3), \ #include "helper.h" #include "trace/generated-helpers.h" #include "tcg-runtime.h" +#include "plugin-helpers.h" #undef DEF_HELPER_FLAGS_0 #undef DEF_HELPER_FLAGS_1 
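For orientation, the plugin_gen_tb_start/insn_start/insn_end/tb_end entry points defined above are meant to be driven from the generic translator loop; the actual wiring of translator_loop() arrives in a later patch of the series. A rough sketch of the intended call pattern follows; everything other than the plugin_gen_* calls is illustrative and not taken from this patch.

/*
 * Sketch only: how a translator loop is expected to call the
 * plugin_gen_* hooks.  The surrounding function, its parameters and
 * the loop condition are illustrative; the real integration lands in
 * translator.c later in the series.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/translator.h"
#include "exec/plugin-gen.h"

static void example_translate_tb(CPUState *cpu, TranslationBlock *tb,
                                 DisasContextBase *db, int max_insns)
{
    int i;
    /* false (and nothing is generated) when no plugin has subscribed */
    bool plugin_enabled = plugin_gen_tb_start(cpu, tb);

    for (i = 0; i < max_insns && db->is_jmp == DISAS_NEXT; i++) {
        if (plugin_enabled) {
            plugin_gen_insn_start(cpu, db);  /* emit empty per-insn callbacks */
        }

        /* the target's translate_insn(db, cpu) would run here and
         * eventually set db->is_jmp to terminate the block */

        if (plugin_enabled) {
            plugin_gen_insn_end();           /* emit the AFTER_INSN placeholder */
        }
    }

    if (plugin_enabled) {
        plugin_gen_tb_end(cpu);  /* collect requests, fill in or remove the ops */
    }
}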
diff --git a/include/exec/helper-tcg.h b/include/exec/helper-tcg.h index 268e0f804b..573c2ce2e9 100644 --- a/include/exec/helper-tcg.h +++ b/include/exec/helper-tcg.h @@ -55,6 +55,7 @@ #include "helper.h" #include "trace/generated-helpers.h" #include "tcg-runtime.h" +#include "plugin-helpers.h" #undef str #undef DEF_HELPER_FLAGS_0 diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h new file mode 100644 index 0000000000..de519883b1 --- /dev/null +++ b/include/exec/plugin-gen.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2017, Emilio G. Cota + * + * License: GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + * plugin-gen.h - TCG-dependent definitions for generating plugin code + * + * This header should be included only from plugin.c and C files that emit + * TCG code. + */ +#ifndef QEMU_PLUGIN_GEN_H +#define QEMU_PLUGIN_GEN_H + +#include "qemu/plugin.h" +#include "tcg/tcg.h" + +struct DisasContextBase; + +#ifdef CONFIG_PLUGIN + +bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb); +void plugin_gen_tb_end(CPUState *cpu); +void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db); +void plugin_gen_insn_end(void); + +void plugin_gen_disable_mem_helpers(void); +void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info); + +#else /* !CONFIG_PLUGIN */ + +static inline +bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb) +{ + return false; +} + +static inline +void plugin_gen_insn_start(CPUState *cpu, const struct DisasContextBase *db) +{ } + +static inline void plugin_gen_insn_end(void) +{ } + +static inline void plugin_gen_tb_end(CPUState *cpu) +{ } + +static inline void plugin_gen_disable_mem_helpers(void) +{ } + +static inline void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) +{ } + +#endif /* CONFIG_PLUGIN */ + +#endif /* QEMU_PLUGIN_GEN_H */ + diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h index 7c778f96f3..4af272daa5 100644 --- a/tcg/tcg-op.h +++ b/tcg/tcg-op.h @@ -833,6 +833,17 @@ void tcg_gen_goto_tb(unsigned idx); */ void tcg_gen_lookup_and_goto_ptr(void); +static inline void tcg_gen_plugin_cb_start(unsigned from, unsigned type, + unsigned wr) +{ + tcg_gen_op3(INDEX_op_plugin_cb_start, from, type, wr); +} + +static inline void tcg_gen_plugin_cb_end(void) +{ + tcg_emit_op(INDEX_op_plugin_cb_end); +} + #if TARGET_LONG_BITS == 32 #define tcg_temp_new() tcg_temp_new_i32() #define tcg_global_reg_new tcg_global_reg_new_i32 diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h index 242d608e6d..9288a04946 100644 --- a/tcg/tcg-opc.h +++ b/tcg/tcg-opc.h @@ -198,6 +198,9 @@ DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END | IMPL(TCG_TARGET_HAS_goto_ptr)) +DEF(plugin_cb_start, 0, 0, 3, TCG_OPF_NOT_PRESENT) +DEF(plugin_cb_end, 0, 0, 0, TCG_OPF_NOT_PRESENT) + DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1, diff --git a/tcg/tcg.c b/tcg/tcg.c index 16b2d0e0ec..5475d49ed1 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -736,6 +736,15 @@ void tcg_region_init(void) #endif } +static void alloc_tcg_plugin_context(TCGContext *s) +{ +#ifdef CONFIG_PLUGIN + s->plugin_tb = g_new0(struct qemu_plugin_tb, 1); + s->plugin_tb->insns = + g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn); +#endif +} + /* * All TCG threads except the parent (i.e. 
the one that called tcg_context_init * and registered the target's TCG globals) must register with this function @@ -780,6 +789,10 @@ void tcg_register_thread(void) g_assert(n < ms->smp.max_cpus); atomic_set(&tcg_ctxs[n], s); + if (n > 0) { + alloc_tcg_plugin_context(s); + } + tcg_ctx = s; qemu_mutex_lock(®ion.lock); err = tcg_region_initial_alloc__locked(tcg_ctx); @@ -976,6 +989,8 @@ void tcg_context_init(TCGContext *s) indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i]; } + alloc_tcg_plugin_context(s); + tcg_ctx = s; /* * In user-mode we simply share the init context among threads, since we @@ -1681,6 +1696,13 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) flags = info->flags; sizemask = info->sizemask; +#ifdef CONFIG_PLUGIN + /* detect non-plugin helpers */ + if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) { + tcg_ctx->plugin_insn->calls_helpers = true; + } +#endif + #if defined(__sparc__) && !defined(__arch64__) \ && !defined(CONFIG_TCG_INTERPRETER) /* We have 64-bit values in one register, but need to pass as two diff --git a/tcg/tcg.h b/tcg/tcg.h index a37181c899..d234b4c9a5 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -538,6 +538,9 @@ typedef struct TCGOp { /* Next and previous opcodes. */ QTAILQ_ENTRY(TCGOp) link; +#ifdef CONFIG_PLUGIN + QSIMPLEQ_ENTRY(TCGOp) plugin_link; +#endif /* Arguments for the opcode. */ TCGArg args[MAX_OPC_PARAM]; @@ -639,6 +642,23 @@ struct TCGContext { TCGLabel *exitreq_label; +#ifdef CONFIG_PLUGIN + /* + * We keep one plugin_tb struct per TCGContext. Note that on every TB + * translation we clear but do not free its contents; this way we + * avoid a lot of malloc/free churn, since after a few TB's it's + * unlikely that we'll need to allocate either more instructions or more + * space for instructions (for variable-instruction-length ISAs). + */ + struct qemu_plugin_tb *plugin_tb; + + /* descriptor of the instruction being translated */ + struct qemu_plugin_insn *plugin_insn; + + /* list to quickly access the injected ops */ + QSIMPLEQ_HEAD(, TCGOp) plugin_ops; +#endif + TCGTempSet free_temps[TCG_TYPE_COUNT * 2]; TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ -- cgit v1.2.3-55-g7522 From e6d86bed50d20101c565e149c33e07a5cc764c72 Mon Sep 17 00:00:00 2001 From: Emilio G. Cota Date: Sun, 21 Oct 2018 13:24:26 -0400 Subject: tcg: let plugins instrument virtual memory accesses To capture all memory accesses we need hook into all the various helper functions that are involved in memory operations as well as the injected inline helper calls. A later commit will allow us to resolve the actual guest HW addresses by replaying the lookup. Signed-off-by: Emilio G. 
Cota [AJB: drop haddr handling, just deal in vaddr] Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- accel/tcg/atomic_common.inc.c | 4 ++++ accel/tcg/atomic_template.h | 1 + accel/tcg/cpu-exec.c | 3 +++ include/exec/cpu-defs.h | 1 + include/exec/cpu_ldst_template.h | 28 ++++++++++++---------- include/exec/cpu_ldst_useronly_template.h | 32 +++++++++++++------------ tcg/tcg-op.c | 40 ++++++++++++++++++++++++------- tcg/tcg.h | 1 + 8 files changed, 74 insertions(+), 36 deletions(-) (limited to 'include') diff --git a/accel/tcg/atomic_common.inc.c b/accel/tcg/atomic_common.inc.c index a86098fb2d..344525b0bb 100644 --- a/accel/tcg/atomic_common.inc.c +++ b/accel/tcg/atomic_common.inc.c @@ -25,6 +25,8 @@ void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, uint16_t info) static inline void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, uint16_t info) { + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST); } static inline @@ -36,6 +38,7 @@ void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, uint16_t info) static inline void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, uint16_t info) { + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); } static inline @@ -47,4 +50,5 @@ void atomic_trace_st_pre(CPUArchState *env, target_ulong addr, uint16_t info) static inline void atomic_trace_st_post(CPUArchState *env, target_ulong addr, uint16_t info) { + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); } diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h index 84d3370bf0..837676231f 100644 --- a/accel/tcg/atomic_template.h +++ b/accel/tcg/atomic_template.h @@ -18,6 +18,7 @@ * License along with this library; if not, see . */ +#include "qemu/plugin.h" #include "trace/mem.h" #if DATA_SIZE == 16 diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index 81c33d6475..c01f59c743 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -268,6 +268,7 @@ void cpu_exec_step_atomic(CPUState *cpu) qemu_mutex_unlock_iothread(); } assert_no_pages_locked(); + qemu_plugin_disable_mem_helpers(cpu); } if (cpu_in_exclusive_context(cpu)) { @@ -701,6 +702,8 @@ int cpu_exec(CPUState *cpu) if (qemu_mutex_iothread_locked()) { qemu_mutex_unlock_iothread(); } + qemu_plugin_disable_mem_helpers(cpu); + assert_no_pages_locked(); } diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h index be946ba1ce..8c44abefa2 100644 --- a/include/exec/cpu-defs.h +++ b/include/exec/cpu-defs.h @@ -214,6 +214,7 @@ typedef struct CPUTLBCommon { * Since this is placed within CPUNegativeOffsetState, the smallest * negative offsets are at the end of the struct. 
*/ + typedef struct CPUTLB { CPUTLBCommon c; CPUTLBDesc d[NB_MMU_MODES]; diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h index 5750a26b9e..6f0d340797 100644 --- a/include/exec/cpu_ldst_template.h +++ b/include/exec/cpu_ldst_template.h @@ -28,6 +28,7 @@ #include "trace-root.h" #endif +#include "qemu/plugin.h" #include "trace/mem.h" #if DATA_SIZE == 8 @@ -86,11 +87,9 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, target_ulong addr; int mmu_idx = CPU_MMU_INDEX; TCGMemOpIdx oi; - #if !defined(SOFTMMU_CODE_ACCESS) - trace_guest_mem_before_exec( - env_cpu(env), ptr, - trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx)); + uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false, mmu_idx); + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); #endif addr = ptr; @@ -104,6 +103,9 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, uintptr_t hostaddr = addr + entry->addend; res = glue(glue(ld, USUFFIX), _p)((uint8_t *)hostaddr); } +#ifndef SOFTMMU_CODE_ACCESS + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +#endif return res; } @@ -124,11 +126,9 @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, target_ulong addr; int mmu_idx = CPU_MMU_INDEX; TCGMemOpIdx oi; - #if !defined(SOFTMMU_CODE_ACCESS) - trace_guest_mem_before_exec( - env_cpu(env), ptr, - trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx)); + uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false, mmu_idx); + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); #endif addr = ptr; @@ -142,6 +142,9 @@ glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, uintptr_t hostaddr = addr + entry->addend; res = glue(glue(lds, SUFFIX), _p)((uint8_t *)hostaddr); } +#ifndef SOFTMMU_CODE_ACCESS + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +#endif return res; } @@ -165,11 +168,9 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, target_ulong addr; int mmu_idx = CPU_MMU_INDEX; TCGMemOpIdx oi; - #if !defined(SOFTMMU_CODE_ACCESS) - trace_guest_mem_before_exec( - env_cpu(env), ptr, - trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx)); + uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true, mmu_idx); + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); #endif addr = ptr; @@ -183,6 +184,9 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, uintptr_t hostaddr = addr + entry->addend; glue(glue(st, SUFFIX), _p)((uint8_t *)hostaddr, v); } +#ifndef SOFTMMU_CODE_ACCESS + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +#endif } static inline void diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h index 93ad532f94..dbdc7a845d 100644 --- a/include/exec/cpu_ldst_useronly_template.h +++ b/include/exec/cpu_ldst_useronly_template.h @@ -64,18 +64,18 @@ static inline RES_TYPE glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr) { -#ifdef CODE_ACCESS RES_TYPE ret; +#ifdef CODE_ACCESS set_helper_retaddr(1); ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr)); clear_helper_retaddr(); - return ret; #else - trace_guest_mem_before_exec( - env_cpu(env), ptr, - trace_mem_build_info(SHIFT, false, MO_TE, false, MMU_USER_IDX)); - return glue(glue(ld, USUFFIX), _p)(g2h(ptr)); + uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, false, + MMU_USER_IDX); + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr)); #endif + return ret; } 
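To show the consumer side of the qemu_plugin_vcpu_mem_cb() calls added above: a plugin that wants these events registers a per-instruction memory callback at translation time. The following sketch is not part of the patch and uses the public qemu-plugin.h names (QEMU_PLUGIN_CB_NO_REGS, QEMU_PLUGIN_MEM_RW, the install entry point) as they exist in the merged API; treat them as assumptions relative to this exact point in the series.

/*
 * Sketch only: a plugin subscribing to the memory callbacks that the
 * cpu_ldst changes above now deliver.  The counters are plain (not
 * atomic), which is racy across vCPUs but acceptable for an
 * illustration.
 */
#include <qemu-plugin.h>

static uint64_t loads, stores;

static void mem_cb(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                   uint64_t vaddr, void *udata)
{
    if (qemu_plugin_mem_is_store(info)) {
        stores++;
    } else {
        loads++;
    }
}

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t i, n = qemu_plugin_tb_n_insns(tb);

    for (i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        qemu_plugin_register_vcpu_mem_cb(insn, mem_cb,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, NULL);
    }
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    return 0;
}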
#ifndef CODE_ACCESS @@ -96,18 +96,19 @@ glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env, static inline int glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr) { -#ifdef CODE_ACCESS int ret; +#ifdef CODE_ACCESS set_helper_retaddr(1); ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr)); clear_helper_retaddr(); - return ret; #else - trace_guest_mem_before_exec( - env_cpu(env), ptr, - trace_mem_build_info(SHIFT, true, MO_TE, false, MMU_USER_IDX)); - return glue(glue(lds, SUFFIX), _p)(g2h(ptr)); + uint16_t meminfo = trace_mem_build_info(SHIFT, true, MO_TE, false, + MMU_USER_IDX); + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); + ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr)); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); #endif + return ret; } #ifndef CODE_ACCESS @@ -130,10 +131,11 @@ static inline void glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr, RES_TYPE v) { - trace_guest_mem_before_exec( - env_cpu(env), ptr, - trace_mem_build_info(SHIFT, false, MO_TE, true, MMU_USER_IDX)); + uint16_t meminfo = trace_mem_build_info(SHIFT, false, MO_TE, true, + MMU_USER_IDX); + trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); glue(glue(st, SUFFIX), _p)(g2h(ptr), v); + qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); } static inline void diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index 1388bd344d..c245126f98 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -30,6 +30,7 @@ #include "tcg-mo.h" #include "trace-tcg.h" #include "trace/mem.h" +#include "exec/plugin-gen.h" /* Reduce the number of ifdefs below. This assumes that all uses of TCGV_HIGH and TCGV_LOW are properly protected by a conditional that @@ -2684,6 +2685,7 @@ void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx) tcg_debug_assert(idx == TB_EXIT_REQUESTED); } + plugin_gen_disable_mem_helpers(); tcg_gen_op1i(INDEX_op_exit_tb, val); } @@ -2696,6 +2698,7 @@ void tcg_gen_goto_tb(unsigned idx) tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0); tcg_ctx->goto_tb_issue_mask |= 1 << idx; #endif + plugin_gen_disable_mem_helpers(); /* When not chaining, we simply fall through to the "fallback" exit. 
*/ if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { tcg_gen_op1i(INDEX_op_goto_tb, idx); @@ -2705,7 +2708,10 @@ void tcg_gen_goto_tb(unsigned idx) void tcg_gen_lookup_and_goto_ptr(void) { if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { - TCGv_ptr ptr = tcg_temp_new_ptr(); + TCGv_ptr ptr; + + plugin_gen_disable_mem_helpers(); + ptr = tcg_temp_new_ptr(); gen_helper_lookup_tb_ptr(ptr, cpu_env); tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr)); tcg_temp_free_ptr(ptr); @@ -2788,14 +2794,24 @@ static void tcg_gen_req_mo(TCGBar type) } } +static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info) +{ +#ifdef CONFIG_PLUGIN + if (tcg_ctx->plugin_insn == NULL) { + return; + } + plugin_gen_empty_mem_callback(vaddr, info); +#endif +} + void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; + uint16_t info = trace_mem_get_info(memop, idx, 0); tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 0, 0); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, idx, 0)); + trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { @@ -2807,6 +2823,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) } gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); + plugin_gen_mem_callbacks(addr, info); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { @@ -2828,11 +2845,11 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i32 swap = NULL; + uint16_t info = trace_mem_get_info(memop, idx, 1); tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 0, 1); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, idx, 1)); + trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_new_i32(); @@ -2852,6 +2869,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) } gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); + plugin_gen_mem_callbacks(addr, info); if (swap) { tcg_temp_free_i32(swap); @@ -2861,6 +2879,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; + uint16_t info; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); @@ -2874,8 +2893,8 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 1, 0); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, idx, 0)); + info = trace_mem_get_info(memop, idx, 0); + trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { @@ -2887,6 +2906,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) } gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); + plugin_gen_mem_callbacks(addr, info); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { @@ -2914,6 +2934,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, 
TCGArg idx, MemOp memop) { TCGv_i64 swap = NULL; + uint16_t info; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop); @@ -2922,8 +2943,8 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 1, 1); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, idx, 1)); + info = trace_mem_get_info(memop, idx, 1); + trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_new_i64(); @@ -2947,6 +2968,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) } gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); + plugin_gen_mem_callbacks(addr, info); if (swap) { tcg_temp_free_i64(swap); diff --git a/tcg/tcg.h b/tcg/tcg.h index d234b4c9a5..a38659ea5b 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -29,6 +29,7 @@ #include "exec/memop.h" #include "exec/tb-context.h" #include "qemu/bitops.h" +#include "qemu/plugin.h" #include "qemu/queue.h" #include "tcg-mo.h" #include "tcg-target.h" -- cgit v1.2.3-55-g7522 From 235537fa7347a151ebd7a755e81819a52b3b2195 Mon Sep 17 00:00:00 2001 From: Alex Bennée Date: Wed, 19 Jun 2019 20:20:08 +0100 Subject: plugins: implement helpers for resolving hwaddr We need to keep a local per-cpu copy of the data as other threads may be running. Currently we can provide insight as to if the access was IO or not and give the offset into a given device (usually the main RAMBlock). We store enough information to get details such as the MemoryRegion which might be useful in later expansions to the API. Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- accel/tcg/cputlb.c | 42 +++++++++++++++++++++++++++++++++++++ include/qemu/plugin-memory.h | 40 ++++++++++++++++++++++++++++++++++++ include/qemu/qemu-plugin.h | 8 ++++++++ plugins/api.c | 49 ++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 139 insertions(+) create mode 100644 include/qemu/plugin-memory.h (limited to 'include') diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 82282d30d9..2c06b57272 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -34,6 +34,9 @@ #include "qemu/atomic.h" #include "qemu/atomic128.h" #include "translate-all.h" +#ifdef CONFIG_PLUGIN +#include "qemu/plugin-memory.h" +#endif /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ /* #define DEBUG_TLB */ @@ -1247,6 +1250,45 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, return (void *)((uintptr_t)addr + entry->addend); } + +#ifdef CONFIG_PLUGIN +/* + * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. + * This should be a hot path as we will have just looked this path up + * in the softmmu lookup code (or helper). We don't handle re-fills or + * checking the victim table. This is purely informational. + * + * This should never fail as the memory access being instrumented + * should have just filled the TLB. + */ + +bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, + bool is_store, struct qemu_plugin_hwaddr *data) +{ + CPUArchState *env = cpu->env_ptr; + CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); + uintptr_t index = tlb_index(env, mmu_idx, addr); + target_ulong tlb_addr = is_store ? 
tlb_addr_write(tlbe) : tlbe->addr_read; + + if (likely(tlb_hit(tlb_addr, addr))) { + /* We must have an iotlb entry for MMIO */ + if (tlb_addr & TLB_MMIO) { + CPUIOTLBEntry *iotlbentry; + iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; + data->is_io = true; + data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); + data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; + } else { + data->is_io = false; + data->v.ram.hostaddr = addr + tlbe->addend; + } + return true; + } + return false; +} + +#endif + /* Probe for a read-modify-write atomic operation. Do not allow unaligned * operations, or io operations to proceed. Return the host address. */ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, diff --git a/include/qemu/plugin-memory.h b/include/qemu/plugin-memory.h new file mode 100644 index 0000000000..fbbe99474b --- /dev/null +++ b/include/qemu/plugin-memory.h @@ -0,0 +1,40 @@ +/* + * Plugin Memory API + * + * Copyright (c) 2019 Linaro Ltd + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef _PLUGIN_MEMORY_H_ +#define _PLUGIN_MEMORY_H_ + +struct qemu_plugin_hwaddr { + bool is_io; + bool is_store; + union { + struct { + MemoryRegionSection *section; + hwaddr offset; + } io; + struct { + uint64_t hostaddr; + } ram; + } v; +}; + +/** + * tlb_plugin_lookup: query last TLB lookup + * @cpu: cpu environment + * + * This function can be used directly after a memory operation to + * query information about the access. It is used by the plugin + * infrastructure to expose more information about the address. + * + * It would only fail if not called from an instrumented memory access + * which would be an abuse of the API. + */ +bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, + bool is_store, struct qemu_plugin_hwaddr *data); + +#endif /* _PLUGIN_MEMORY_H_ */ diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h index b9a4a4b684..c213d1dd19 100644 --- a/include/qemu/qemu-plugin.h +++ b/include/qemu/qemu-plugin.h @@ -285,6 +285,14 @@ bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info); struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info, uint64_t vaddr); +/* + * The following additional queries can be run on the hwaddr structure + * to return information about it. For non-IO accesses the device + * offset will be into the appropriate block of RAM. 
+ */ +bool qemu_plugin_hwaddr_is_io(struct qemu_plugin_hwaddr *hwaddr); +uint64_t qemu_plugin_hwaddr_device_offset(const struct qemu_plugin_hwaddr *haddr); + typedef void (*qemu_plugin_vcpu_mem_cb_t)(unsigned int vcpu_index, qemu_plugin_meminfo_t info, uint64_t vaddr, diff --git a/plugins/api.c b/plugins/api.c index facf2a132d..33dac8e790 100644 --- a/plugins/api.c +++ b/plugins/api.c @@ -42,6 +42,7 @@ #include "trace/mem-internal.h" /* mem_info macros */ #include "plugin.h" #ifndef CONFIG_USER_ONLY +#include "qemu/plugin-memory.h" #include "hw/boards.h" #endif @@ -240,11 +241,59 @@ bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info) * Virtual Memory queries */ +#ifdef CONFIG_SOFTMMU +static __thread struct qemu_plugin_hwaddr hwaddr_info; + +struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info, + uint64_t vaddr) +{ + CPUState *cpu = current_cpu; + unsigned int mmu_idx = info >> TRACE_MEM_MMU_SHIFT; + hwaddr_info.is_store = info & TRACE_MEM_ST; + + if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx, + info & TRACE_MEM_ST, &hwaddr_info)) { + error_report("invalid use of qemu_plugin_get_hwaddr"); + return NULL; + } + + return &hwaddr_info; +} +#else struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info, uint64_t vaddr) { return NULL; } +#endif + +bool qemu_plugin_hwaddr_is_io(struct qemu_plugin_hwaddr *hwaddr) +{ +#ifdef CONFIG_SOFTMMU + return hwaddr->is_io; +#else + return false; +#endif +} + +uint64_t qemu_plugin_hwaddr_device_offset(const struct qemu_plugin_hwaddr *haddr) +{ +#ifdef CONFIG_SOFTMMU + if (haddr) { + if (!haddr->is_io) { + ram_addr_t ram_addr = qemu_ram_addr_from_host((void *) haddr->v.ram.hostaddr); + if (ram_addr == RAM_ADDR_INVALID) { + error_report("Bad ram pointer %"PRIx64"", haddr->v.ram.hostaddr); + abort(); + } + return ram_addr; + } else { + return haddr->v.io.offset; + } + } +#endif + return 0; +} /* * Queries to the number and potential maximum number of vCPUs there -- cgit v1.2.3-55-g7522 From c36f7a642cd81cff566ffe23e0a863ac4d7f1f91 Mon Sep 17 00:00:00 2001 From: Emilio G. Cota Date: Sun, 21 Oct 2018 13:27:44 -0400 Subject: *-user: plugin syscalls To avoid too much duplication add a wrapper that the existing trace and the new plugin calls can live in. We could move the -strace code here as well but that is left for a future series as the code is subtly different between the bsd and linux. Signed-off-by: Emilio G. 
Cota Reviewed-by: Richard Henderson [AJB: wrap in syscall-trace.h, expand commit msg] Signed-off-by: Alex Bennée --- bsd-user/syscall.c | 21 +++++++++++++++------ include/user/syscall-trace.h | 40 ++++++++++++++++++++++++++++++++++++++++ linux-user/syscall.c | 7 ++++--- 3 files changed, 59 insertions(+), 9 deletions(-) create mode 100644 include/user/syscall-trace.h (limited to 'include') diff --git a/bsd-user/syscall.c b/bsd-user/syscall.c index 84a983a9a1..0d45b654bb 100644 --- a/bsd-user/syscall.c +++ b/bsd-user/syscall.c @@ -26,6 +26,7 @@ #include "qemu.h" #include "qemu-common.h" +#include "user/syscall-trace.h" //#define DEBUG @@ -322,7 +323,8 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1, #ifdef DEBUG gemu_log("freebsd syscall %d\n", num); #endif - trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); + record_syscall_start(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0); + if(do_strace) print_freebsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); @@ -403,7 +405,8 @@ abi_long do_freebsd_syscall(void *cpu_env, int num, abi_long arg1, #endif if (do_strace) print_freebsd_syscall_ret(num, ret); - trace_guest_user_syscall_ret(cpu, num, ret); + + record_syscall_return(cpu, num, ret); return ret; efault: ret = -TARGET_EFAULT; @@ -421,7 +424,9 @@ abi_long do_netbsd_syscall(void *cpu_env, int num, abi_long arg1, #ifdef DEBUG gemu_log("netbsd syscall %d\n", num); #endif - trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0); + + record_syscall_start(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0); + if(do_strace) print_netbsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); @@ -479,7 +484,8 @@ abi_long do_netbsd_syscall(void *cpu_env, int num, abi_long arg1, #endif if (do_strace) print_netbsd_syscall_ret(num, ret); - trace_guest_user_syscall_ret(cpu, num, ret); + + record_syscall_return(cpu, num, ret); return ret; efault: ret = -TARGET_EFAULT; @@ -497,7 +503,9 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1, #ifdef DEBUG gemu_log("openbsd syscall %d\n", num); #endif - trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0); + + record_syscall_start(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, 0, 0); + if(do_strace) print_openbsd_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); @@ -555,7 +563,8 @@ abi_long do_openbsd_syscall(void *cpu_env, int num, abi_long arg1, #endif if (do_strace) print_openbsd_syscall_ret(num, ret); - trace_guest_user_syscall_ret(cpu, num, ret); + + record_syscall_return(cpu, num, ret); return ret; efault: ret = -TARGET_EFAULT; diff --git a/include/user/syscall-trace.h b/include/user/syscall-trace.h new file mode 100644 index 0000000000..9e60473643 --- /dev/null +++ b/include/user/syscall-trace.h @@ -0,0 +1,40 @@ +/* + * Common System Call Tracing Wrappers for *-user + * + * Copyright (c) 2019 Linaro + * Written by Alex Bennée + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef _SYSCALL_TRACE_H_ +#define _SYSCALL_TRACE_H_ + +/* + * These helpers just provide a common place for the various + * subsystems that want to track syscalls to put their hooks in. We + * could potentially unify the -strace code here as well. 
+ */ + +static inline void record_syscall_start(void *cpu, int num, + abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, + abi_long arg5, abi_long arg6, + abi_long arg7, abi_long arg8) +{ + trace_guest_user_syscall(cpu, num, + arg1, arg2, arg3, arg4, + arg5, arg6, arg7, arg8); + qemu_plugin_vcpu_syscall(cpu, num, + arg1, arg2, arg3, arg4, + arg5, arg6, arg7, arg8); +} + +static inline void record_syscall_return(void *cpu, int num, abi_long ret) +{ + trace_guest_user_syscall_ret(cpu, num, ret); + qemu_plugin_vcpu_syscall_ret(cpu, num, ret); +} + + +#endif /* _SYSCALL_TRACE_H_ */ diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 530c843303..f6751eecb7 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -112,6 +112,7 @@ #include "qemu.h" #include "qemu/guest-random.h" +#include "user/syscall-trace.h" #include "qapi/error.h" #include "fd-trans.h" @@ -11984,8 +11985,8 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, } #endif - trace_guest_user_syscall(cpu, num, arg1, arg2, arg3, arg4, - arg5, arg6, arg7, arg8); + record_syscall_start(cpu, num, arg1, + arg2, arg3, arg4, arg5, arg6, arg7, arg8); if (unlikely(do_strace)) { print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6); @@ -11997,6 +11998,6 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1, arg5, arg6, arg7, arg8); } - trace_guest_user_syscall_ret(cpu, num, ret); + record_syscall_return(cpu, num, ret); return ret; } -- cgit v1.2.3-55-g7522 From 763f7e12090fa8a4f4d1902a51c7d5ac7c454090 Mon Sep 17 00:00:00 2001 From: Emilio G. Cota Date: Fri, 7 Dec 2018 20:53:09 -0500 Subject: plugin-gen: add plugin_insn_append By adding it to plugin-gen's header file, we can export it as an inline, since tcg.h is included in the header (we need tcg_ctx). Signed-off-by: Emilio G. Cota [AJB: use g_byte_array] Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- include/exec/plugin-gen.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'include') diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h index de519883b1..4834a9e2f4 100644 --- a/include/exec/plugin-gen.h +++ b/include/exec/plugin-gen.h @@ -27,6 +27,17 @@ void plugin_gen_insn_end(void); void plugin_gen_disable_mem_helpers(void); void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info); +static inline void plugin_insn_append(const void *from, size_t size) +{ + struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn; + + if (insn == NULL) { + return; + } + + insn->data = g_byte_array_append(insn->data, from, size); +} + #else /* !CONFIG_PLUGIN */ static inline @@ -51,6 +62,9 @@ static inline void plugin_gen_disable_mem_helpers(void) static inline void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) { } +static inline void plugin_insn_append(const void *from, size_t size) +{ } + #endif /* CONFIG_PLUGIN */ #endif /* QEMU_PLUGIN_GEN_H */ -- cgit v1.2.3-55-g7522 From 7dec71d5ff06493184352cec5ec6332889128e3e Mon Sep 17 00:00:00 2001 From: Alex Bennée Date: Mon, 21 Oct 2019 16:09:10 +0100 Subject: cputlb: ensure _cmmu helper functions follow the naming standard We document this in docs/devel/load-stores.rst so let's follow it. The 32 bit and 64 bit access functions have historically not included the sign so we leave those as is. We also introduce some signed helpers which are used for loading immediate values in the translator.
Fixes: 282dffc8 Signed-off-by: Alex Bennée Message-Id: <20191021150910.23216-1-alex.bennee@linaro.org> Reviewed-by: Richard Henderson --- accel/tcg/cputlb.c | 24 +++++++++++++++++++++--- include/exec/cpu_ldst_template.h | 4 ++-- target/cris/translate_v10.inc.c | 3 +-- tcg/tcg.h | 20 ++++++++++++++------ 4 files changed, 38 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 2c06b57272..c7a986d7cb 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -1920,12 +1920,18 @@ static uint64_t full_ldub_cmmu(CPUArchState *env, target_ulong addr, return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_cmmu); } -uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, +uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return full_ldub_cmmu(env, addr, oi, retaddr); } +int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return (int8_t) full_ldub_cmmu(env, addr, oi, retaddr); +} + static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { @@ -1933,12 +1939,18 @@ static uint64_t full_le_lduw_cmmu(CPUArchState *env, target_ulong addr, full_le_lduw_cmmu); } -uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr, +uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return full_le_lduw_cmmu(env, addr, oi, retaddr); } +int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return (int16_t) full_le_lduw_cmmu(env, addr, oi, retaddr); +} + static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { @@ -1946,12 +1958,18 @@ static uint64_t full_be_lduw_cmmu(CPUArchState *env, target_ulong addr, full_be_lduw_cmmu); } -uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr, +uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { return full_be_lduw_cmmu(env, addr, oi, retaddr); } +int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr) +{ + return (int16_t) full_be_lduw_cmmu(env, addr, oi, retaddr); +} + static uint64_t full_le_ldul_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr) { diff --git a/include/exec/cpu_ldst_template.h b/include/exec/cpu_ldst_template.h index 6f0d340797..54b5e858ce 100644 --- a/include/exec/cpu_ldst_template.h +++ b/include/exec/cpu_ldst_template.h @@ -66,8 +66,8 @@ #ifdef SOFTMMU_CODE_ACCESS #define ADDR_READ addr_code #define MMUSUFFIX _cmmu -#define URETSUFFIX SUFFIX -#define SRETSUFFIX SUFFIX +#define URETSUFFIX USUFFIX +#define SRETSUFFIX glue(s, SUFFIX) #else #define ADDR_READ addr_read #define MMUSUFFIX _mmu diff --git a/target/cris/translate_v10.inc.c b/target/cris/translate_v10.inc.c index a87b8bb281..ae34a0d1a3 100644 --- a/target/cris/translate_v10.inc.c +++ b/target/cris/translate_v10.inc.c @@ -1202,8 +1202,7 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) case CRISV10_IND_BCC_M: cris_cc_mask(dc, 0); - imm = cpu_ldsw_code(env, dc->pc + 2); - simm = (int16_t)imm; + simm = cpu_ldsw_code(env, dc->pc + 2); simm += 4; LOG_DIS("bcc_m: b%s %x\n", cc_name(dc->cond), dc->pc + simm); diff --git a/tcg/tcg.h b/tcg/tcg.h index a38659ea5b..92ca10dffc 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -1290,16 +1290,22 @@ void 
helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, TCGMemOpIdx oi, uintptr_t retaddr); -uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, +uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr); -uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr, +int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr); +uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr); uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr); -uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); +uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr); uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, @@ -1316,7 +1322,8 @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, # define helper_ret_stw_mmu helper_be_stw_mmu # define helper_ret_stl_mmu helper_be_stl_mmu # define helper_ret_stq_mmu helper_be_stq_mmu -# define helper_ret_ldw_cmmu helper_be_ldw_cmmu +# define helper_ret_lduw_cmmu helper_be_lduw_cmmu +# define helper_ret_ldsw_cmmu helper_be_ldsw_cmmu # define helper_ret_ldl_cmmu helper_be_ldl_cmmu # define helper_ret_ldq_cmmu helper_be_ldq_cmmu #else @@ -1329,7 +1336,8 @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, # define helper_ret_stw_mmu helper_le_stw_mmu # define helper_ret_stl_mmu helper_le_stl_mmu # define helper_ret_stq_mmu helper_le_stq_mmu -# define helper_ret_ldw_cmmu helper_le_ldw_cmmu +# define helper_ret_lduw_cmmu helper_le_lduw_cmmu +# define helper_ret_ldsw_cmmu helper_le_ldsw_cmmu # define helper_ret_ldl_cmmu helper_le_ldl_cmmu # define helper_ret_ldq_cmmu helper_le_ldq_cmmu #endif -- cgit v1.2.3-55-g7522 From 409c1a0bf0f1e0bf1548004d87e8ba229022193d Mon Sep 17 00:00:00 2001 From: Emilio G. Cota Date: Tue, 27 Nov 2018 19:10:51 -0500 Subject: translator: add translator_ld{ub,sw,uw,l,q} We don't bother with replicating the fast path (tlb_hit) of the old cpu_ldst helpers as it has no measurable effect on performance. This probably indicates we should consider flattening the whole set of helpers but that is out of scope for this change. Suggested-by: Richard Henderson Signed-off-by: Emilio G. 
Cota [AJB: directly plumb into softmmu/user helpers] Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- include/exec/cpu_ldst.h | 11 +++++++++ include/exec/translator.h | 62 ++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 72 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h index 9151fdb042..fd499f7e2f 100644 --- a/include/exec/cpu_ldst.h +++ b/include/exec/cpu_ldst.h @@ -129,6 +129,11 @@ static inline void clear_helper_retaddr(void) #include "exec/cpu_ldst_useronly_template.h" #undef MEMSUFFIX +/* + * Code access is deprecated in favour of translator_ld* functions + * (see translator.h). However there are still users that need to be + * converted so for now these stay. + */ #define MEMSUFFIX _code #define CODE_ACCESS #define DATA_SIZE 1 @@ -427,6 +432,12 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, #undef CPU_MMU_INDEX #undef MEMSUFFIX +/* + * Code access is deprecated in favour of translator_ld* functions + * (see translator.h). However there are still users that need to be + * converted so for now these stay. + */ + #define CPU_MMU_INDEX (cpu_mmu_index(env, true)) #define MEMSUFFIX _code #define SOFTMMU_CODE_ACCESS diff --git a/include/exec/translator.h b/include/exec/translator.h index 180c51d509..459dd72aab 100644 --- a/include/exec/translator.h +++ b/include/exec/translator.h @@ -19,7 +19,10 @@ */ +#include "qemu/bswap.h" #include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "exec/plugin-gen.h" #include "tcg/tcg.h" @@ -142,4 +145,61 @@ void translator_loop(const TranslatorOps *ops, DisasContextBase *db, void translator_loop_temp_check(DisasContextBase *db); -#endif /* EXEC__TRANSLATOR_H */ +/* + * Translator Load Functions + * + * These are intended to replace the old cpu_ld*_code functions and + * are mandatory for front-ends that have been migrated to the common + * translator_loop. These functions are only intended to be called + * from the translation stage and should not be called from helper + * functions. Those functions should be converted to encode the + * relevant information at translation time.
+ */ + +#ifdef CONFIG_USER_ONLY + +#define DO_LOAD(type, name, shift) \ + do { \ + set_helper_retaddr(1); \ + ret = name ## _p(g2h(pc)); \ + clear_helper_retaddr(); \ + } while (0) + +#else + +#define DO_LOAD(type, name, shift) \ + do { \ + int mmu_idx = cpu_mmu_index(env, true); \ + TCGMemOpIdx oi = make_memop_idx(shift, mmu_idx); \ + ret = helper_ret_ ## name ## _cmmu(env, pc, oi, 0); \ + } while (0) + +#endif + +#define GEN_TRANSLATOR_LD(fullname, name, type, shift, swap_fn) \ + static inline type \ + fullname ## _swap(CPUArchState *env, abi_ptr pc, bool do_swap) \ + { \ + type ret; \ + DO_LOAD(type, name, shift); \ + \ + if (do_swap) { \ + ret = swap_fn(ret); \ + } \ + plugin_insn_append(&ret, sizeof(ret)); \ + return ret; \ + } \ + \ + static inline type fullname(CPUArchState *env, abi_ptr pc) \ + { \ + return fullname ## _swap(env, pc, false); \ + } + +GEN_TRANSLATOR_LD(translator_ldub, ldub, uint8_t, 0, /* no swap */ ) +GEN_TRANSLATOR_LD(translator_ldsw, ldsw, int16_t, 1, bswap16) +GEN_TRANSLATOR_LD(translator_lduw, lduw, uint16_t, 1, bswap16) +GEN_TRANSLATOR_LD(translator_ldl, ldl, uint32_t, 2, bswap32) +GEN_TRANSLATOR_LD(translator_ldq, ldq, uint64_t, 3, bswap64) +#undef GEN_TRANSLATOR_LD + +#endif /* EXEC__TRANSLATOR_H */ -- cgit v1.2.3-55-g7522 From 5901b2e15b673720b050fc88e7912e33f0e53604 Mon Sep 17 00:00:00 2001 From: Alex Bennée Date: Thu, 12 Sep 2019 17:24:27 +0100 Subject: plugin: expand the plugin_init function to include an info block This provides a limited amount of info to plugins about the guest system that will allow them to make some additional decisions on setup. Signed-off-by: Alex Bennée --- include/qemu/qemu-plugin.h | 26 ++++++++++++++++++++++++-- plugins/loader.c | 23 +++++++++++++++++++---- 2 files changed, 43 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h index c213d1dd19..784f1dfc3d 100644 --- a/include/qemu/qemu-plugin.h +++ b/include/qemu/qemu-plugin.h @@ -38,9 +38,27 @@ typedef uint64_t qemu_plugin_id_t; +typedef struct { + /* string describing architecture */ + const char *target_name; + /* is this a full system emulation? */ + bool system_emulation; + union { + /* + * smp_vcpus may change if vCPUs can be hot-plugged, max_vcpus + * is the system-wide limit. + */ + struct { + int smp_vcpus; + int max_vcpus; + } system; + }; +} qemu_info_t; + /** * qemu_plugin_install() - Install a plugin * @id: this plugin's opaque ID + * @info: a block describing some details about the guest * @argc: number of arguments * @argv: array of arguments (@argc elements) * @@ -49,10 +67,14 @@ typedef uint64_t qemu_plugin_id_t; * Note: Calling qemu_plugin_uninstall() from this function is a bug. To raise * an error during install, return !0. * + * Note: @info is only live during the call. Copy any information we + * want to keep. + * * Note: @argv remains valid throughout the lifetime of the loaded plugin. 
*/ -QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, int argc, - char **argv); +QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, + const qemu_info_t *info, + int argc, char **argv); /* * Prototypes for the various callback styles we will be registering diff --git a/plugins/loader.c b/plugins/loader.c index eaedff577c..ce724ed583 100644 --- a/plugins/loader.c +++ b/plugins/loader.c @@ -28,6 +28,10 @@ #include "hw/core/cpu.h" #include "cpu.h" #include "exec/exec-all.h" +#ifndef CONFIG_USER_ONLY +#include "hw/boards.h" +#endif + #include "plugin.h" /* @@ -58,7 +62,7 @@ QemuOptsList qemu_plugin_opts = { }, }; -typedef int (*qemu_plugin_install_func_t)(qemu_plugin_id_t, int, char **); +typedef int (*qemu_plugin_install_func_t)(qemu_plugin_id_t, const qemu_info_t *, int, char **); extern struct qemu_plugin_state plugin; @@ -145,7 +149,7 @@ static uint64_t xorshift64star(uint64_t x) return x * UINT64_C(2685821657736338717); } -static int plugin_load(struct qemu_plugin_desc *desc) +static int plugin_load(struct qemu_plugin_desc *desc, const qemu_info_t *info) { qemu_plugin_install_func_t install; struct qemu_plugin_ctx *ctx; @@ -193,7 +197,7 @@ static int plugin_load(struct qemu_plugin_desc *desc) } QTAILQ_INSERT_TAIL(&plugin.ctxs, ctx, entry); ctx->installing = true; - rc = install(ctx->id, desc->argc, desc->argv); + rc = install(ctx->id, info, desc->argc, desc->argv); ctx->installing = false; if (rc) { error_report("%s: qemu_plugin_install returned error code %d", @@ -241,11 +245,22 @@ static void plugin_desc_free(struct qemu_plugin_desc *desc) int qemu_plugin_load_list(QemuPluginList *head) { struct qemu_plugin_desc *desc, *next; + g_autofree qemu_info_t *info = g_new0(qemu_info_t, 1); + + info->target_name = TARGET_NAME; +#ifndef CONFIG_USER_ONLY + MachineState *ms = MACHINE(qdev_get_machine()); + info->system_emulation = true; + info->system.smp_vcpus = ms->smp.cpus; + info->system.max_vcpus = ms->smp.max_cpus; +#else + info->system_emulation = false; +#endif QTAILQ_FOREACH_SAFE(desc, head, entry, next) { int err; - err = plugin_load(desc, info); if (err) { return err; } -- cgit v1.2.3-55-g7522 From cbafa2362ab8d96af39d6b01a79ea4ed16d47dda Mon Sep 17 00:00:00 2001 From: Alex Bennée Date: Wed, 22 May 2019 10:27:14 +0100 Subject: plugin: add qemu_plugin_insn_disas helper Give the plugins access to the QEMU disassembler so they don't have to re-invent the wheel. We generate a warning when there are spare bytes in the decode buffer. This is usually due to the front end loading in more bytes than decoded. Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- disas.c | 110 +++++++++++++++++++++++++++++++++++++++++++ include/disas/disas.h | 2 + include/qemu/qemu-plugin.h | 9 ++++ plugins/api.c | 9 +++- plugins/qemu-plugins.symbols | 1 + 5 files changed, 130 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/disas.c b/disas.c index 3e2bfa572b..3937da6157 100644 --- a/disas.c +++ b/disas.c @@ -418,6 +418,7 @@ static bool cap_disas_monitor(disassemble_info *info, uint64_t pc, int count) # define cap_disas_target(i, p, s) false # define cap_disas_host(i, p, s) false # define cap_disas_monitor(i, p, c) false +# define cap_disas_plugin(i, p, c) false #endif /* CONFIG_CAPSTONE */ /* Disassemble this for me please... (debugging). */ @@ -475,6 +476,115 @@ void target_disas(FILE *out, CPUState *cpu, target_ulong code, } } +static __thread GString plugin_disas_output; + +static int plugin_printf(FILE *stream, const char *fmt, ...)
+{ + va_list va; + GString *s = &plugin_disas_output; + int initial_len = s->len; + + va_start(va, fmt); + g_string_append_vprintf(s, fmt, va); + va_end(va); + + return s->len - initial_len; +} + +static void plugin_print_address(bfd_vma addr, struct disassemble_info *info) +{ + /* does nothing */ +} + + +#ifdef CONFIG_CAPSTONE +/* Disassemble a single instruction directly into plugin output */ +static +bool cap_disas_plugin(disassemble_info *info, uint64_t pc, size_t size) +{ + uint8_t cap_buf[1024]; + csh handle; + cs_insn *insn; + size_t csize = 0; + int count; + GString *s = &plugin_disas_output; + + if (cap_disas_start(info, &handle) != CS_ERR_OK) { + return false; + } + insn = cap_insn; + + size_t tsize = MIN(sizeof(cap_buf) - csize, size); + const uint8_t *cbuf = cap_buf; + target_read_memory(pc, cap_buf, tsize, info); + + count = cs_disasm(handle, cbuf, size, 0, 1, &insn); + + if (count) { + g_string_printf(s, "%s %s", insn->mnemonic, insn->op_str); + } else { + g_string_printf(s, "cs_disasm failed"); + } + + cs_close(&handle); + return true; +} +#endif + +/* + * We should only be disassembling one instruction at a time here. If + * there is anything left over it usually indicates the front end has read more + * bytes than it needed. + */ +char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size) +{ + CPUClass *cc = CPU_GET_CLASS(cpu); + int count; + CPUDebug s; + GString *ds = g_string_set_size(&plugin_disas_output, 0); + + g_assert(ds == &plugin_disas_output); + + INIT_DISASSEMBLE_INFO(s.info, NULL, plugin_printf); + + s.cpu = cpu; + s.info.read_memory_func = target_read_memory; + s.info.buffer_vma = addr; + s.info.buffer_length = size; + s.info.print_address_func = plugin_print_address; + s.info.cap_arch = -1; + s.info.cap_mode = 0; + s.info.cap_insn_unit = 4; + s.info.cap_insn_split = 4; + +#ifdef TARGET_WORDS_BIGENDIAN + s.info.endian = BFD_ENDIAN_BIG; +#else + s.info.endian = BFD_ENDIAN_LITTLE; +#endif + + if (cc->disas_set_info) { + cc->disas_set_info(cpu, &s.info); + } + + if (s.info.cap_arch >= 0 && cap_disas_plugin(&s.info, addr, size)) { + return g_strdup(ds->str); + } + + if (s.info.print_insn == NULL) { + s.info.print_insn = print_insn_od_target; + } + + count = s.info.print_insn(addr, &s.info); + + /* The decoder probably read more than it needed; it's not critical */ + if (count < size) { + warn_report("%s: %zu bytes left over", __func__, size - count); + } + + return g_strdup(ds->str); +} + /* Disassemble this for me please... (debugging). */ void disas(FILE *out, void *code, unsigned long size) { diff --git a/include/disas/disas.h b/include/disas/disas.h index ba47e9197c..36c33f6f19 100644 --- a/include/disas/disas.h +++ b/include/disas/disas.h @@ -14,6 +14,8 @@ void target_disas(FILE *out, CPUState *cpu, target_ulong code, void monitor_disas(Monitor *mon, CPUState *cpu, target_ulong pc, int nb_insn, int is_physical); +char *plugin_disas(CPUState *cpu, uint64_t addr, size_t size); + /* Look up symbol for debugging purpose. Returns "" if unknown.
*/ const char *lookup_symbol(target_ulong orig_addr); #endif diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h index 784f1dfc3d..ddf267fbfe 100644 --- a/include/qemu/qemu-plugin.h +++ b/include/qemu/qemu-plugin.h @@ -351,6 +351,15 @@ qemu_plugin_register_vcpu_syscall_ret_cb(qemu_plugin_id_t id, qemu_plugin_vcpu_syscall_ret_cb_t cb); +/** + * qemu_plugin_insn_disas() - return disassembly string for instruction + * @insn: instruction reference + * + * Returns an allocated string containing the disassembly + */ + +char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn); + /** * qemu_plugin_vcpu_for_each() - iterate over the existing vCPU * @id: plugin ID diff --git a/plugins/api.c b/plugins/api.c index 33dac8e790..5adc4d25a1 100644 --- a/plugins/api.c +++ b/plugins/api.c @@ -39,7 +39,8 @@ #include "cpu.h" #include "sysemu/sysemu.h" #include "tcg/tcg.h" -#include "trace/mem-internal.h" /* mem_info macros */ +#include "exec/exec-all.h" +#include "disas/disas.h" #include "plugin.h" #ifndef CONFIG_USER_ONLY #include "qemu/plugin-memory.h" @@ -212,6 +213,12 @@ void *qemu_plugin_insn_haddr(const struct qemu_plugin_insn *insn) return insn->haddr; } +char *qemu_plugin_insn_disas(const struct qemu_plugin_insn *insn) +{ + CPUState *cpu = current_cpu; + return plugin_disas(cpu, insn->vaddr, insn->data->len); +} + /* * The memory queries allow the plugin to query information about a * memory access. diff --git a/plugins/qemu-plugins.symbols b/plugins/qemu-plugins.symbols index 40c0d1abd2..267ec381b4 100644 --- a/plugins/qemu-plugins.symbols +++ b/plugins/qemu-plugins.symbols @@ -25,6 +25,7 @@ qemu_plugin_insn_size; qemu_plugin_insn_vaddr; qemu_plugin_insn_haddr; + qemu_plugin_insn_disas; qemu_plugin_mem_size_shift; qemu_plugin_mem_is_sign_extended; qemu_plugin_mem_is_big_endian; -- cgit v1.2.3-55-g7522 From ca76a66975f018c323cb609c0c55a4c8d4acde3b Mon Sep 17 00:00:00 2001 From: Alex Bennée Date: Fri, 11 Oct 2019 16:34:05 +0100 Subject: plugin: add qemu_plugin_outs helper Having the plugins grab stdout and spew stuff there is a bit ugly and certainly makes the tests look ugly. Provide a hook back into QEMU which can be redirected as needed. Signed-off-by: Alex Bennée Reviewed-by: Aaron Lindsay --- include/qemu/log.h | 1 + include/qemu/qemu-plugin.h | 6 ++++++ plugins/api.c | 8 ++++++++ plugins/qemu-plugins.symbols | 1 + util/log.c | 3 +++ 5 files changed, 19 insertions(+) (limited to 'include') diff --git a/include/qemu/log.h b/include/qemu/log.h index b097a6cae1..a91105b2ad 100644 --- a/include/qemu/log.h +++ b/include/qemu/log.h @@ -45,6 +45,7 @@ static inline bool qemu_log_separate(void) /* LOG_TRACE (1 << 15) is defined in log-for-trace.h */ #define CPU_LOG_TB_OP_IND (1 << 16) #define CPU_LOG_TB_FPU (1 << 17) +#define CPU_LOG_PLUGIN (1 << 18) /* Lock output for a series of related logs. 
Since this is not needed * for a single qemu_log / qemu_log_mask / qemu_log_mask_and_addr, we diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h index ddf267fbfe..a00a7deb46 100644 --- a/include/qemu/qemu-plugin.h +++ b/include/qemu/qemu-plugin.h @@ -384,4 +384,10 @@ int qemu_plugin_n_vcpus(void); /* returns -1 in user-mode */ int qemu_plugin_n_max_vcpus(void); +/** + * qemu_plugin_outs() - output string via QEMU's logging system + * @string: a string + */ +void qemu_plugin_outs(const char *string); + #endif /* QEMU_PLUGIN_API_H */ diff --git a/plugins/api.c b/plugins/api.c index 5adc4d25a1..fa1d9f276d 100644 --- a/plugins/api.c +++ b/plugins/api.c @@ -331,3 +331,11 @@ int qemu_plugin_n_max_vcpus(void) return get_ms()->smp.max_cpus; #endif } + +/* + * Plugin output + */ +void qemu_plugin_outs(const char *string) +{ + qemu_log_mask(CPU_LOG_PLUGIN, "%s", string); +} diff --git a/plugins/qemu-plugins.symbols b/plugins/qemu-plugins.symbols index 267ec381b4..4bdb381f48 100644 --- a/plugins/qemu-plugins.symbols +++ b/plugins/qemu-plugins.symbols @@ -36,4 +36,5 @@ qemu_plugin_vcpu_for_each; qemu_plugin_n_vcpus; qemu_plugin_n_max_vcpus; + qemu_plugin_outs; }; diff --git a/util/log.c b/util/log.c index 1d1b33f7d9..1ca13059ee 100644 --- a/util/log.c +++ b/util/log.c @@ -273,6 +273,9 @@ const QEMULogItem qemu_log_items[] = { { CPU_LOG_TB_NOCHAIN, "nochain", "do not chain compiled TBs so that \"exec\" and \"cpu\" show\n" "complete traces" }, +#ifdef CONFIG_PLUGIN + { CPU_LOG_PLUGIN, "plugin", "output from TCG plugins\n"}, +#endif { 0, NULL, NULL }, }; -- cgit v1.2.3-55-g7522 From dc069b22a1c2fe988ad58fa9de7d768bf67cc9a0 Mon Sep 17 00:00:00 2001 From: Alex Bennée Date: Fri, 17 May 2019 12:36:10 +0100 Subject: include/exec: wrap cpu_ldst.h in CONFIG_TCG This gets around a build problem with --disable-tcg. Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- include/exec/exec-all.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include') diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index eadcf29d0c..d85e610e85 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -22,7 +22,9 @@ #include "cpu.h" #include "exec/tb-context.h" +#ifdef CONFIG_TCG #include "exec/cpu_ldst.h" +#endif #include "sysemu/cpus.h" /* allow to see translation results - the slowdown should be negligible, so we leave it */ -- cgit v1.2.3-55-g7522
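As a rough illustration of how the user-facing pieces above fit together from a plugin author's point of view, a minimal plugin might look like the sketch below. It only uses interfaces visible in this series (qemu_plugin_install() with the qemu_info_t block, and qemu_plugin_outs()); the header include path, build command, message wording and buffer handling are assumptions rather than part of the patches, and the output is only visible when the new "plugin" log item is enabled (e.g. -d plugin).

/*
 * hello.c - illustrative sketch only, not part of the series.
 * Assumed build and run commands:
 *   gcc -shared -fPIC -o libhello.so hello.c
 *   qemu-<target> -plugin ./libhello.so -d plugin ...
 */
#include <stdio.h>
#include <qemu-plugin.h>   /* assumed installed name of include/qemu/qemu-plugin.h */

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    char msg[128];

    /* id, argc and argv are unused in this minimal example */

    /* @info is only live during this call, so consume (or copy) it here */
    snprintf(msg, sizeof(msg), "target %s, %s\n",
             info->target_name,
             info->system_emulation ? "system emulation" : "user mode");

    /* route output through QEMU's logging rather than grabbing stdout */
    qemu_plugin_outs(msg);

    if (info->system_emulation) {
        snprintf(msg, sizeof(msg), "%d vCPUs booted (max %d)\n",
                 info->system.smp_vcpus, info->system.max_vcpus);
        qemu_plugin_outs(msg);
    }

    return 0; /* non-zero would make plugin_load() report an install error */
}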