author     Alex Bennée    2020-09-09 13:27:41 +0200
committer  Alex Bennée    2020-09-10 11:47:03 +0200
commit  c17a386b6afe608086aa4d260e29662865680b7f (patch)
tree    c00d1a6d5d98c3d8fcf2240086aeee94bb81b72b /contrib/plugins
parent  tests/acceptance: Add Test.fetch_asset(cancel_on_missing=True) (diff)
plugins: move the more involved plugins to contrib
We have an exploding complexity problem in the testing so let's just move the more involved plugins into contrib. tests/plugins still exists for the basic plugins that exercise the API. We restore the old pre-meson style Makefile for contrib as it also doubles as a guide for out-of-tree plugin builds.

While we are at it, add some examples to the documentation and a specific plugins build target.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20200909112742.25730-11-alex.bennee@linaro.org>
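Since the restored Makefile doubles as a guide for out-of-tree plugin builds, here is a rough sketch of what such a plugin looks like (a hypothetical empty.c, not part of this commit, using only qemu-plugin.h entry points that also appear in the plugins below):

/*
 * Hypothetical empty.c - a minimal out-of-tree plugin sketch. It only
 * exports the API version and an install hook, which is all the
 * contrib Makefile pattern needs to build a loadable libempty.so.
 */
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

/* atexit callback: report something when QEMU shuts down */
static void plugin_exit(qemu_plugin_id_t id, void *p)
{
    qemu_plugin_outs("empty plugin: done\n");
}

QEMU_PLUGIN_EXPORT
int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
                        int argc, char **argv)
{
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}

The resulting shared object is loaded with QEMU's -plugin option, just like the plugins added by this commit.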
Diffstat (limited to 'contrib/plugins')
-rw-r--r--  contrib/plugins/Makefile      42
-rw-r--r--  contrib/plugins/hotblocks.c  145
-rw-r--r--  contrib/plugins/hotpages.c   193
-rw-r--r--  contrib/plugins/howvec.c     362
-rw-r--r--  contrib/plugins/lockstep.c   340
5 files changed, 1082 insertions, 0 deletions
diff --git a/contrib/plugins/Makefile b/contrib/plugins/Makefile
new file mode 100644
index 0000000000..7801b08b0d
--- /dev/null
+++ b/contrib/plugins/Makefile
@@ -0,0 +1,42 @@
+# -*- Mode: makefile -*-
+#
+# This Makefile example is fairly independent from the main makefile
+# so users can take and adapt it for their build. We only really
+# include config-host.mak so we don't have to repeat probing for
+# cflags that the main configure has already done for us.
+#
+
+BUILD_DIR := $(CURDIR)/../..
+
+include $(BUILD_DIR)/config-host.mak
+
+VPATH += $(SRC_PATH)/contrib/plugins
+
+NAMES :=
+NAMES += hotblocks
+NAMES += hotpages
+NAMES += howvec
+NAMES += lockstep
+
+SONAMES := $(addsuffix .so,$(addprefix lib,$(NAMES)))
+
+# The main QEMU uses Glib extensively so it's perfectly fine to use it
+# in plugins (which many examples do).
+CFLAGS = $(GLIB_CFLAGS)
+CFLAGS += -fPIC
+CFLAGS += $(if $(findstring no-psabi,$(QEMU_CFLAGS)),-Wpsabi)
+CFLAGS += -I$(SRC_PATH)/include/qemu
+
+all: $(SONAMES)
+
+%.o: %.c
+ $(CC) $(CFLAGS) -c -o $@ $<
+
+lib%.so: %.o
+ $(CC) -shared -Wl,-soname,$@ -o $@ $^ $(LDLIBS)
+
+clean:
+ rm -f *.o *.so *.d
+ rm -Rf .libs
+
+.PHONY: all clean
diff --git a/contrib/plugins/hotblocks.c b/contrib/plugins/hotblocks.c
new file mode 100644
index 0000000000..3942a2ca54
--- /dev/null
+++ b/contrib/plugins/hotblocks.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2019, Alex Bennée <alex.bennee@linaro.org>
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include <inttypes.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <glib.h>
+
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+static bool do_inline;
+
+/* Plugins need to take care of their own locking */
+static GMutex lock;
+static GHashTable *hotblocks;
+static guint64 limit = 20;
+
+/*
+ * Counting Structure
+ *
+ * The internals of the TCG are not exposed to plugins so we can only
+ * get the starting PC for each block. We cheat this slightly by
+ * xor'ing the number of instructions to the hash to help
+ * differentiate.
+ */
+typedef struct {
+ uint64_t start_addr;
+ uint64_t exec_count;
+ int trans_count;
+ unsigned long insns;
+} ExecCount;
+
+static gint cmp_exec_count(gconstpointer a, gconstpointer b)
+{
+ ExecCount *ea = (ExecCount *) a;
+ ExecCount *eb = (ExecCount *) b;
+ return ea->exec_count > eb->exec_count ? -1 : 1;
+}
+
+static void plugin_exit(qemu_plugin_id_t id, void *p)
+{
+ g_autoptr(GString) report = g_string_new("collected ");
+ GList *counts, *it;
+ int i;
+
+ g_mutex_lock(&lock);
+ g_string_append_printf(report, "%d entries in the hash table\n",
+ g_hash_table_size(hotblocks));
+ counts = g_hash_table_get_values(hotblocks);
+ it = g_list_sort(counts, cmp_exec_count);
+
+ if (it) {
+ g_string_append_printf(report, "pc, tcount, icount, ecount\n");
+
+ for (i = 0; i < limit && it->next; i++, it = it->next) {
+ ExecCount *rec = (ExecCount *) it->data;
+ g_string_append_printf(report, "%#016"PRIx64", %d, %ld, %"PRId64"\n",
+ rec->start_addr, rec->trans_count,
+ rec->insns, rec->exec_count);
+ }
+
+ g_list_free(it);
+ }
+ g_mutex_unlock(&lock);
+
+ qemu_plugin_outs(report->str);
+}
+
+static void plugin_init(void)
+{
+ hotblocks = g_hash_table_new(NULL, g_direct_equal);
+}
+
+static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
+{
+ ExecCount *cnt;
+ uint64_t hash = (uint64_t) udata;
+
+ g_mutex_lock(&lock);
+ cnt = (ExecCount *) g_hash_table_lookup(hotblocks, (gconstpointer) hash);
+ /* should always succeed */
+ g_assert(cnt);
+ cnt->exec_count++;
+ g_mutex_unlock(&lock);
+}
+
+/*
+ * When do_inline is set the counter is incremented inline by TCG ops.
+ * Otherwise a helper is inserted which calls the vcpu_tb_exec
+ * callback.
+ */
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ ExecCount *cnt;
+ uint64_t pc = qemu_plugin_tb_vaddr(tb);
+ unsigned long insns = qemu_plugin_tb_n_insns(tb);
+ uint64_t hash = pc ^ insns;
+
+ g_mutex_lock(&lock);
+ cnt = (ExecCount *) g_hash_table_lookup(hotblocks, (gconstpointer) hash);
+ if (cnt) {
+ cnt->trans_count++;
+ } else {
+ cnt = g_new0(ExecCount, 1);
+ cnt->start_addr = pc;
+ cnt->trans_count = 1;
+ cnt->insns = insns;
+ g_hash_table_insert(hotblocks, (gpointer) hash, (gpointer) cnt);
+ }
+
+ g_mutex_unlock(&lock);
+
+ if (do_inline) {
+ qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
+ &cnt->exec_count, 1);
+ } else {
+ qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
+ QEMU_PLUGIN_CB_NO_REGS,
+ (void *)hash);
+ }
+}
+
+QEMU_PLUGIN_EXPORT
+int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
+ int argc, char **argv)
+{
+ if (argc && strcmp(argv[0], "inline") == 0) {
+ do_inline = true;
+ }
+
+ plugin_init();
+
+ qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+ qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+ return 0;
+}
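As an aside on the hashing scheme described in the hotblocks comment (keying the table on the start PC xor'ed with the instruction count), the following standalone sketch, not part of the commit, shows how two translations starting at the same PC but with different lengths end up as distinct GHashTable entries:

/*
 * Standalone illustration (not from the commit) of keying a GHashTable
 * on "pc ^ insn_count" as hotblocks.c does. Like the plugin, it stuffs
 * the 64-bit key straight into a pointer, so it assumes a 64-bit host.
 * Build with: gcc demo.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    GHashTable *blocks = g_hash_table_new(NULL, g_direct_equal);
    uint64_t pc = 0x400080;

    /* the same start address translated twice with different lengths */
    uint64_t key_a = pc ^ 4;    /* 4-instruction translation */
    uint64_t key_b = pc ^ 7;    /* 7-instruction re-translation */

    g_hash_table_insert(blocks, (gpointer) key_a, GINT_TO_POINTER(1));
    g_hash_table_insert(blocks, (gpointer) key_b, GINT_TO_POINTER(1));

    /* distinct keys, so two entries despite the shared start PC */
    printf("entries: %u\n", g_hash_table_size(blocks));

    g_hash_table_destroy(blocks);
    return 0;
}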
diff --git a/contrib/plugins/hotpages.c b/contrib/plugins/hotpages.c
new file mode 100644
index 0000000000..ecd6c18732
--- /dev/null
+++ b/contrib/plugins/hotpages.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2019, Alex Bennée <alex.bennee@linaro.org>
+ *
+ * Hot Pages - show which pages saw the most memory accesses.
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <inttypes.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <glib.h>
+
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+static uint64_t page_size = 4096;
+static uint64_t page_mask;
+static int limit = 50;
+static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;
+static bool track_io;
+
+enum sort_type {
+ SORT_RW = 0,
+ SORT_R,
+ SORT_W,
+ SORT_A
+};
+
+static int sort_by = SORT_RW;
+
+typedef struct {
+ uint64_t page_address;
+ int cpu_read;
+ int cpu_write;
+ uint64_t reads;
+ uint64_t writes;
+} PageCounters;
+
+static GMutex lock;
+static GHashTable *pages;
+
+static gint cmp_access_count(gconstpointer a, gconstpointer b)
+{
+ PageCounters *ea = (PageCounters *) a;
+ PageCounters *eb = (PageCounters *) b;
+ int r;
+ switch (sort_by) {
+ case SORT_RW:
+ r = (ea->reads + ea->writes) > (eb->reads + eb->writes) ? -1 : 1;
+ break;
+ case SORT_R:
+ r = ea->reads > eb->reads ? -1 : 1;
+ break;
+ case SORT_W:
+ r = ea->writes > eb->writes ? -1 : 1;
+ break;
+ case SORT_A:
+ r = ea->page_address > eb->page_address ? -1 : 1;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ return r;
+}
+
+
+static void plugin_exit(qemu_plugin_id_t id, void *p)
+{
+ g_autoptr(GString) report = g_string_new("Addr, RCPUs, Reads, WCPUs, Writes\n");
+ int i;
+ GList *counts;
+
+ counts = g_hash_table_get_values(pages);
+ if (counts && g_list_next(counts)) {
+ GList *it;
+
+ it = g_list_sort(counts, cmp_access_count);
+
+ for (i = 0; i < limit && it->next; i++, it = it->next) {
+ PageCounters *rec = (PageCounters *) it->data;
+ g_string_append_printf(report,
+ "%#016"PRIx64", 0x%04x, %"PRId64
+ ", 0x%04x, %"PRId64"\n",
+ rec->page_address,
+ rec->cpu_read, rec->reads,
+ rec->cpu_write, rec->writes);
+ }
+ g_list_free(it);
+ }
+
+ qemu_plugin_outs(report->str);
+}
+
+static void plugin_init(void)
+{
+ page_mask = (page_size - 1);
+ pages = g_hash_table_new(NULL, g_direct_equal);
+}
+
+static void vcpu_haddr(unsigned int cpu_index, qemu_plugin_meminfo_t meminfo,
+ uint64_t vaddr, void *udata)
+{
+ struct qemu_plugin_hwaddr *hwaddr = qemu_plugin_get_hwaddr(meminfo, vaddr);
+ uint64_t page;
+ PageCounters *count;
+
+ /* We only get a hwaddr for system emulation */
+ if (track_io) {
+ if (hwaddr && qemu_plugin_hwaddr_is_io(hwaddr)) {
+ page = vaddr;
+ } else {
+ return;
+ }
+ } else {
+ if (hwaddr && !qemu_plugin_hwaddr_is_io(hwaddr)) {
+ page = (uint64_t) qemu_plugin_hwaddr_device_offset(hwaddr);
+ } else {
+ page = vaddr;
+ }
+ }
+ page &= ~page_mask;
+
+ g_mutex_lock(&lock);
+ count = (PageCounters *) g_hash_table_lookup(pages, GUINT_TO_POINTER(page));
+
+ if (!count) {
+ count = g_new0(PageCounters, 1);
+ count->page_address = page;
+ g_hash_table_insert(pages, GUINT_TO_POINTER(page), (gpointer) count);
+ }
+ if (qemu_plugin_mem_is_store(meminfo)) {
+ count->writes++;
+ count->cpu_write |= (1 << cpu_index);
+ } else {
+ count->reads++;
+ count->cpu_read |= (1 << cpu_index);
+ }
+
+ g_mutex_unlock(&lock);
+}
+
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ size_t n = qemu_plugin_tb_n_insns(tb);
+ size_t i;
+
+ for (i = 0; i < n; i++) {
+ struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
+ qemu_plugin_register_vcpu_mem_cb(insn, vcpu_haddr,
+ QEMU_PLUGIN_CB_NO_REGS,
+ rw, NULL);
+ }
+}
+
+QEMU_PLUGIN_EXPORT
+int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
+ int argc, char **argv)
+{
+ int i;
+
+ for (i = 0; i < argc; i++) {
+ char *opt = argv[i];
+ if (g_strcmp0(opt, "reads") == 0) {
+ sort_by = SORT_R;
+ } else if (g_strcmp0(opt, "writes") == 0) {
+ sort_by = SORT_W;
+ } else if (g_strcmp0(opt, "address") == 0) {
+ sort_by = SORT_A;
+ } else if (g_strcmp0(opt, "io") == 0) {
+ track_io = true;
+ } else if (g_str_has_prefix(opt, "pagesize=")) {
+ page_size = g_ascii_strtoull(opt + 9, NULL, 10);
+ } else {
+ fprintf(stderr, "option parsing failed: %s\n", opt);
+ return -1;
+ }
+ }
+
+ plugin_init();
+
+ qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+ qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+ return 0;
+}
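The page bucketing above masks addresses with ~(page_size - 1); a tiny standalone check of that rounding (a hypothetical demo, assuming the plugin's default 4 KiB page size) would be:

/*
 * Standalone check (not from the commit) of the rounding hotpages.c
 * uses: with page_mask = page_size - 1, clearing those bits rounds an
 * address down to the start of its page.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t page_size = 4096;               /* the plugin's default */
    uint64_t page_mask = page_size - 1;      /* 0xfff for 4 KiB pages */
    uint64_t addr = 0x7f43a2c019d4;          /* arbitrary example address */
    uint64_t page = addr & ~page_mask;       /* round down to page start */

    assert(page == 0x7f43a2c01000);
    printf("%#" PRIx64 " is on page %#" PRIx64 "\n", addr, page);
    return 0;
}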
diff --git a/contrib/plugins/howvec.c b/contrib/plugins/howvec.c
new file mode 100644
index 0000000000..3b9a6939f2
--- /dev/null
+++ b/contrib/plugins/howvec.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (C) 2019, Alex Bennée <alex.bennee@linaro.org>
+ *
+ * How vectorised is this code?
+ *
+ * Attempt to measure the amount of vectorisation that has been done
+ * on some code by counting classes of instruction.
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include <inttypes.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <glib.h>
+
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+typedef enum {
+ COUNT_CLASS,
+ COUNT_INDIVIDUAL,
+ COUNT_NONE
+} CountType;
+
+static int limit = 50;
+static bool do_inline;
+static bool verbose;
+
+static GMutex lock;
+static GHashTable *insns;
+
+typedef struct {
+ const char *class;
+ const char *opt;
+ uint32_t mask;
+ uint32_t pattern;
+ CountType what;
+ uint64_t count;
+} InsnClassExecCount;
+
+typedef struct {
+ char *insn;
+ uint32_t opcode;
+ uint64_t count;
+ InsnClassExecCount *class;
+} InsnExecCount;
+
+/*
+ * Matchers for classes of instructions, order is important.
+ *
+ * Your most precise match must be before looser matches. If no match
+ * is found in the table we can create an individual entry.
+ *
+ * 31..28 27..24 23..20 19..16 15..12 11..8 7..4 3..0
+ */
+static InsnClassExecCount aarch64_insn_classes[] = {
+ /* "Reserved" */
+ { " UDEF", "udef", 0xffff0000, 0x00000000, COUNT_NONE},
+ { " SVE", "sve", 0x1e000000, 0x04000000, COUNT_CLASS},
+ { "Reserved", "res", 0x1e000000, 0x00000000, COUNT_CLASS},
+ /* Data Processing Immediate */
+ { " PCrel addr", "pcrel", 0x1f000000, 0x10000000, COUNT_CLASS},
+ { " Add/Sub (imm,tags)","asit", 0x1f800000, 0x11800000, COUNT_CLASS},
+ { " Add/Sub (imm)", "asi", 0x1f000000, 0x11000000, COUNT_CLASS},
+ { " Logical (imm)", "logi", 0x1f800000, 0x12000000, COUNT_CLASS},
+ { " Move Wide (imm)", "movwi", 0x1f800000, 0x12800000, COUNT_CLASS},
+ { " Bitfield", "bitf", 0x1f800000, 0x13000000, COUNT_CLASS},
+ { " Extract", "extr", 0x1f800000, 0x13800000, COUNT_CLASS},
+ { "Data Proc Imm", "dpri", 0x1c000000, 0x10000000, COUNT_CLASS},
+ /* Branches */
+ { " Cond Branch (imm)", "cndb", 0xfe000000, 0x54000000, COUNT_CLASS},
+ { " Exception Gen", "excp", 0xff000000, 0xd4000000, COUNT_CLASS},
+ { " NOP", "nop", 0xffffffff, 0xd503201f, COUNT_NONE},
+ { " Hints", "hint", 0xfffff000, 0xd5032000, COUNT_CLASS},
+ { " Barriers", "barr", 0xfffff000, 0xd5033000, COUNT_CLASS},
+ { " PSTATE", "psta", 0xfff8f000, 0xd5004000, COUNT_CLASS},
+ { " System Insn", "sins", 0xffd80000, 0xd5080000, COUNT_CLASS},
+ { " System Reg", "sreg", 0xffd00000, 0xd5100000, COUNT_CLASS},
+ { " Branch (reg)", "breg", 0xfe000000, 0xd6000000, COUNT_CLASS},
+ { " Branch (imm)", "bimm", 0x7c000000, 0x14000000, COUNT_CLASS},
+ { " Cmp & Branch", "cmpb", 0x7e000000, 0x34000000, COUNT_CLASS},
+ { " Tst & Branch", "tstb", 0x7e000000, 0x36000000, COUNT_CLASS},
+ { "Branches", "branch", 0x1c000000, 0x14000000, COUNT_CLASS},
+ /* Loads and Stores */
+ { " AdvSimd ldstmult", "advlsm", 0xbfbf0000, 0x0c000000, COUNT_CLASS},
+ { " AdvSimd ldstmult++","advlsmp",0xbfb00000, 0x0c800000, COUNT_CLASS},
+ { " AdvSimd ldst", "advlss", 0xbf9f0000, 0x0d000000, COUNT_CLASS},
+ { " AdvSimd ldst++", "advlssp",0xbf800000, 0x0d800000, COUNT_CLASS},
+ { " ldst excl", "ldstx", 0x3f000000, 0x08000000, COUNT_CLASS},
+ { " Prefetch", "prfm", 0xff000000, 0xd8000000, COUNT_CLASS},
+ { " Load Reg (lit)", "ldlit", 0x1b000000, 0x18000000, COUNT_CLASS},
+ { " ldst noalloc pair", "ldstnap",0x3b800000, 0x28000000, COUNT_CLASS},
+ { " ldst pair", "ldstp", 0x38000000, 0x28000000, COUNT_CLASS},
+ { " ldst reg", "ldstr", 0x3b200000, 0x38000000, COUNT_CLASS},
+ { " Atomic ldst", "atomic", 0x3b200c00, 0x38200000, COUNT_CLASS},
+ { " ldst reg (reg off)","ldstro", 0x3b200b00, 0x38200800, COUNT_CLASS},
+ { " ldst reg (pac)", "ldstpa", 0x3b200200, 0x38200800, COUNT_CLASS},
+ { " ldst reg (imm)", "ldsti", 0x3b000000, 0x39000000, COUNT_CLASS},
+ { "Loads & Stores", "ldst", 0x0a000000, 0x08000000, COUNT_CLASS},
+ /* Data Processing Register */
+ { "Data Proc Reg", "dprr", 0x0e000000, 0x0a000000, COUNT_CLASS},
+ /* Scalar FP */
+ { "Scalar FP ", "fpsimd", 0x0e000000, 0x0e000000, COUNT_CLASS},
+ /* Unclassified */
+ { "Unclassified", "unclas", 0x00000000, 0x00000000, COUNT_CLASS},
+};
+
+static InsnClassExecCount sparc32_insn_classes[] = {
+ { "Call", "call", 0xc0000000, 0x40000000, COUNT_CLASS},
+ { "Branch ICond", "bcc", 0xc1c00000, 0x00800000, COUNT_CLASS},
+ { "Branch Fcond", "fbcc", 0xc1c00000, 0x01800000, COUNT_CLASS},
+ { "SetHi", "sethi", 0xc1c00000, 0x01000000, COUNT_CLASS},
+ { "FPU ALU", "fpu", 0xc1f00000, 0x81a00000, COUNT_CLASS},
+ { "ALU", "alu", 0xc0000000, 0x80000000, COUNT_CLASS},
+ { "Load/Store", "ldst", 0xc0000000, 0xc0000000, COUNT_CLASS},
+ /* Unclassified */
+ { "Unclassified", "unclas", 0x00000000, 0x00000000, COUNT_INDIVIDUAL},
+};
+
+static InsnClassExecCount sparc64_insn_classes[] = {
+ { "SetHi & Branches", "op0", 0xc0000000, 0x00000000, COUNT_CLASS},
+ { "Call", "op1", 0xc0000000, 0x40000000, COUNT_CLASS},
+ { "Arith/Logical/Move", "op2", 0xc0000000, 0x80000000, COUNT_CLASS},
+ { "Arith/Logical/Move", "op3", 0xc0000000, 0xc0000000, COUNT_CLASS},
+ /* Unclassified */
+ { "Unclassified", "unclas", 0x00000000, 0x00000000, COUNT_INDIVIDUAL},
+};
+
+/* Default matcher for currently unclassified architectures */
+static InsnClassExecCount default_insn_classes[] = {
+ { "Unclassified", "unclas", 0x00000000, 0x00000000, COUNT_INDIVIDUAL},
+};
+
+typedef struct {
+ const char *qemu_target;
+ InsnClassExecCount *table;
+ int table_sz;
+} ClassSelector;
+
+static ClassSelector class_tables[] =
+{
+ { "aarch64", aarch64_insn_classes, ARRAY_SIZE(aarch64_insn_classes) },
+ { "sparc", sparc32_insn_classes, ARRAY_SIZE(sparc32_insn_classes) },
+ { "sparc64", sparc64_insn_classes, ARRAY_SIZE(sparc64_insn_classes) },
+ { NULL, default_insn_classes, ARRAY_SIZE(default_insn_classes) },
+};
+
+static InsnClassExecCount *class_table;
+static int class_table_sz;
+
+static gint cmp_exec_count(gconstpointer a, gconstpointer b)
+{
+ InsnExecCount *ea = (InsnExecCount *) a;
+ InsnExecCount *eb = (InsnExecCount *) b;
+ return ea->count > eb->count ? -1 : 1;
+}
+
+static void free_record(gpointer data)
+{
+ InsnExecCount *rec = (InsnExecCount *) data;
+ g_free(rec->insn);
+ g_free(rec);
+}
+
+static void plugin_exit(qemu_plugin_id_t id, void *p)
+{
+ g_autoptr(GString) report = g_string_new("Instruction Classes:\n");
+ int i;
+ GList *counts;
+ InsnClassExecCount *class = NULL;
+
+ for (i = 0; i < class_table_sz; i++) {
+ class = &class_table[i];
+ switch (class->what) {
+ case COUNT_CLASS:
+ if (class->count || verbose) {
+ g_string_append_printf(report, "Class: %-24s\t(%ld hits)\n",
+ class->class,
+ class->count);
+ }
+ break;
+ case COUNT_INDIVIDUAL:
+ g_string_append_printf(report, "Class: %-24s\tcounted individually\n",
+ class->class);
+ break;
+ case COUNT_NONE:
+ g_string_append_printf(report, "Class: %-24s\tnot counted\n",
+ class->class);
+ break;
+ default:
+ break;
+ }
+ }
+
+ counts = g_hash_table_get_values(insns);
+ if (counts && g_list_next(counts)) {
+ g_string_append_printf(report,"Individual Instructions:\n");
+ counts = g_list_sort(counts, cmp_exec_count);
+
+ for (i = 0; i < limit && g_list_next(counts);
+ i++, counts = g_list_next(counts)) {
+ InsnExecCount *rec = (InsnExecCount *) counts->data;
+ g_string_append_printf(report,
+ "Instr: %-24s\t(%ld hits)\t(op=%#08x/%s)\n",
+ rec->insn,
+ rec->count,
+ rec->opcode,
+ rec->class ?
+ rec->class->class : "un-categorised");
+ }
+ g_list_free(counts);
+ }
+
+ g_hash_table_destroy(insns);
+
+ qemu_plugin_outs(report->str);
+}
+
+static void plugin_init(void)
+{
+ insns = g_hash_table_new_full(NULL, g_direct_equal, NULL, &free_record);
+}
+
+static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata)
+{
+ uint64_t *count = (uint64_t *) udata;
+ (*count)++;
+}
+
+static uint64_t * find_counter(struct qemu_plugin_insn *insn)
+{
+ int i;
+ uint64_t *cnt = NULL;
+ uint32_t opcode;
+ InsnClassExecCount *class = NULL;
+
+ /*
+ * We only match the first 32 bits of the instruction which is
+ * fine for most RISCs but a bit limiting for CISC architectures.
+ * They would probably benefit from a more tailored plugin.
+ * However we can fall back to individual instruction counting.
+ */
+ opcode = *((uint32_t *)qemu_plugin_insn_data(insn));
+
+ for (i = 0; !cnt && i < class_table_sz; i++) {
+ class = &class_table[i];
+ uint32_t masked_bits = opcode & class->mask;
+ if (masked_bits == class->pattern) {
+ break;
+ }
+ }
+
+ g_assert(class);
+
+ switch (class->what) {
+ case COUNT_NONE:
+ return NULL;
+ case COUNT_CLASS:
+ return &class->count;
+ case COUNT_INDIVIDUAL:
+ {
+ InsnExecCount *icount;
+
+ g_mutex_lock(&lock);
+ icount = (InsnExecCount *) g_hash_table_lookup(insns,
+ GUINT_TO_POINTER(opcode));
+
+ if (!icount) {
+ icount = g_new0(InsnExecCount, 1);
+ icount->opcode = opcode;
+ icount->insn = qemu_plugin_insn_disas(insn);
+ icount->class = class;
+
+ g_hash_table_insert(insns, GUINT_TO_POINTER(opcode),
+ (gpointer) icount);
+ }
+ g_mutex_unlock(&lock);
+
+ return &icount->count;
+ }
+ default:
+ g_assert_not_reached();
+ }
+
+ return NULL;
+}
+
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ size_t n = qemu_plugin_tb_n_insns(tb);
+ size_t i;
+
+ for (i = 0; i < n; i++) {
+ uint64_t *cnt;
+ struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
+ cnt = find_counter(insn);
+
+ if (cnt) {
+ if (do_inline) {
+ qemu_plugin_register_vcpu_insn_exec_inline(
+ insn, QEMU_PLUGIN_INLINE_ADD_U64, cnt, 1);
+ } else {
+ qemu_plugin_register_vcpu_insn_exec_cb(
+ insn, vcpu_insn_exec_before, QEMU_PLUGIN_CB_NO_REGS, cnt);
+ }
+ }
+ }
+}
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
+ const qemu_info_t *info,
+ int argc, char **argv)
+{
+ int i;
+
+ /* Select a class table appropriate to the guest architecture */
+ for (i = 0; i < ARRAY_SIZE(class_tables); i++) {
+ ClassSelector *entry = &class_tables[i];
+ if (!entry->qemu_target ||
+ strcmp(entry->qemu_target, info->target_name) == 0) {
+ class_table = entry->table;
+ class_table_sz = entry->table_sz;
+ break;
+ }
+ }
+
+ for (i = 0; i < argc; i++) {
+ char *p = argv[i];
+ if (strcmp(p, "inline") == 0) {
+ do_inline = true;
+ } else if (strcmp(p, "verbose") == 0) {
+ verbose = true;
+ } else {
+ int j;
+ CountType type = COUNT_INDIVIDUAL;
+ if (*p == '!') {
+ type = COUNT_NONE;
+ p++;
+ }
+ for (j = 0; j < class_table_sz; j++) {
+ if (strcmp(p, class_table[j].opt) == 0) {
+ class_table[j].what = type;
+ break;
+ }
+ }
+ }
+ }
+
+ plugin_init();
+
+ qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+ qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+ return 0;
+}
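The class tables in howvec.c classify an opcode by testing (opcode & mask) == pattern against each row in order, most precise match first. A standalone sketch (not QEMU code) with a trimmed-down AArch64 subset illustrates the first-match behaviour:

/*
 * Standalone sketch (not QEMU code) of the per-instruction matching
 * howvec.c performs: walk the table in order and take the first row
 * where (opcode & mask) == pattern.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    const char *name;
    uint32_t mask;
    uint32_t pattern;
} Matcher;

/* trimmed-down subset of the aarch64 table: most precise match first */
static const Matcher table[] = {
    { "NOP",          0xffffffff, 0xd503201f },
    { "Hints",        0xfffff000, 0xd5032000 },
    { "Unclassified", 0x00000000, 0x00000000 },   /* catch-all row */
};

static const char *classify(uint32_t opcode)
{
    for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
        if ((opcode & table[i].mask) == table[i].pattern) {
            return table[i].name;
        }
    }
    return "Unclassified";   /* unreachable thanks to the catch-all */
}

int main(void)
{
    uint32_t samples[] = { 0xd503201f, 0xd503205f };  /* NOP, then WFE */
    for (size_t i = 0; i < 2; i++) {
        printf("%#010x -> %s\n", samples[i], classify(samples[i]));
    }
    return 0;
}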
diff --git a/contrib/plugins/lockstep.c b/contrib/plugins/lockstep.c
new file mode 100644
index 0000000000..a696673dff
--- /dev/null
+++ b/contrib/plugins/lockstep.c
@@ -0,0 +1,340 @@
+/*
+ * Lockstep Execution Plugin
+ *
+ * Allows you to execute two QEMU instances in lockstep and report
+ * when their execution diverges. This is mainly useful for developers
+ * who want to see where a change to TCG code generation has
+ * introduced a subtle and hard to find bug.
+ *
+ * Caveats:
+ * - single-threaded linux-user apps only with non-deterministic syscalls
+ * - no MTTCG enabled system emulation (icount may help)
+ *
+ * While icount makes things more deterministic it doesn't guarantee a
+ * particular run will execute the exact same sequence of blocks. An
+ * asynchronous event (for example an X11 graphics update) may cause a
+ * block to end early and a new partial block to start. This means
+ * serial-only test cases are a better bet. -d nochain may also help.
+ *
+ * This code is not thread safe!
+ *
+ * Copyright (c) 2020 Linaro Ltd
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include <glib.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <stdio.h>
+#include <errno.h>
+
+#include <qemu-plugin.h>
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+/* saved so we can uninstall later */
+static qemu_plugin_id_t our_id;
+
+static unsigned long bb_count;
+static unsigned long insn_count;
+
+/* Information about a translated block */
+typedef struct {
+ uint64_t pc;
+ uint64_t insns;
+} BlockInfo;
+
+/* Information about an execution state in the log */
+typedef struct {
+ BlockInfo *block;
+ unsigned long insn_count;
+ unsigned long block_count;
+} ExecInfo;
+
+/* The execution state we compare */
+typedef struct {
+ uint64_t pc;
+ unsigned long insn_count;
+} ExecState;
+
+typedef struct {
+ GSList *log_pos;
+ int distance;
+} DivergeState;
+
+/* list of translated block info */
+static GSList *blocks;
+
+/* execution log and points of divergence */
+static GSList *log, *divergence_log;
+
+static int socket_fd;
+static char *path_to_unlink;
+
+static bool verbose;
+
+static void plugin_cleanup(qemu_plugin_id_t id)
+{
+ /* Free our block data */
+ g_slist_free_full(blocks, &g_free);
+ g_slist_free_full(log, &g_free);
+ g_slist_free(divergence_log);
+
+ close(socket_fd);
+ if (path_to_unlink) {
+ unlink(path_to_unlink);
+ }
+}
+
+static void plugin_exit(qemu_plugin_id_t id, void *p)
+{
+ g_autoptr(GString) out = g_string_new("No divergence :-)\n");
+ g_string_append_printf(out, "Executed %ld/%d blocks\n",
+ bb_count, g_slist_length(log));
+ g_string_append_printf(out, "Executed ~%ld instructions\n", insn_count);
+ qemu_plugin_outs(out->str);
+
+ plugin_cleanup(id);
+}
+
+static void report_divergance(ExecState *us, ExecState *them)
+{
+ DivergeState divrec = { log, 0 };
+ g_autoptr(GString) out = g_string_new("");
+ bool diverged = false;
+
+ /*
+ * If we have diverged before, did we get back on track or are we
+ * totally losing it?
+ */
+ if (divergence_log) {
+ DivergeState *last = (DivergeState *) divergence_log->data;
+ GSList *entry;
+
+ for (entry = log; g_slist_next(entry); entry = g_slist_next(entry)) {
+ if (entry == last->log_pos) {
+ break;
+ }
+ divrec.distance++;
+ }
+
+ /*
+ * If the last two records are so close it is likely we will
+ * not recover synchronisation with the other end.
+ */
+ if (divrec.distance == 1 && last->distance == 1) {
+ diverged = true;
+ }
+ }
+ divergence_log = g_slist_prepend(divergence_log,
+ g_memdup(&divrec, sizeof(divrec)));
+
+ /* Output short log entry of going out of sync... */
+ if (verbose || divrec.distance == 1 || diverged) {
+ g_string_printf(out, "@ %#016lx vs %#016lx (%d/%d since last)\n",
+ us->pc, them->pc, g_slist_length(divergence_log),
+ divrec.distance);
+ qemu_plugin_outs(out->str);
+ }
+
+ if (diverged) {
+ int i;
+ GSList *entry;
+
+ g_string_printf(out, "Δ insn_count @ %#016lx (%ld) vs %#016lx (%ld)\n",
+ us->pc, us->insn_count, them->pc, them->insn_count);
+
+ for (entry = log, i = 0;
+ g_slist_next(entry) && i < 5;
+ entry = g_slist_next(entry), i++) {
+ ExecInfo *prev = (ExecInfo *) entry->data;
+ g_string_append_printf(out,
+ " previously @ %#016lx/%ld (%ld insns)\n",
+ prev->block->pc, prev->block->insns,
+ prev->insn_count);
+ }
+ qemu_plugin_outs(out->str);
+ qemu_plugin_outs("too much divergence... giving up.");
+ qemu_plugin_uninstall(our_id, plugin_cleanup);
+ }
+}
+
+static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
+{
+ BlockInfo *bi = (BlockInfo *) udata;
+ ExecState us, them;
+ ssize_t bytes;
+ ExecInfo *exec;
+
+ us.pc = bi->pc;
+ us.insn_count = insn_count;
+
+ /*
+ * Write our current position to the other end. If we fail the
+ * other end has probably died and we should shut down gracefully.
+ */
+ bytes = write(socket_fd, &us, sizeof(ExecState));
+ if (bytes < (ssize_t) sizeof(ExecState)) {
+ qemu_plugin_outs(bytes < 0 ?
+ "problem writing to socket" :
+ "wrote less than expected to socket");
+ qemu_plugin_uninstall(our_id, plugin_cleanup);
+ return;
+ }
+
+ /*
+ * Now read where our peer has reached. Again a failure probably
+ * indicates the other end died and we should close down cleanly.
+ */
+ bytes = read(socket_fd, &them, sizeof(ExecState));
+ if (bytes < (ssize_t) sizeof(ExecState)) {
+ qemu_plugin_outs(bytes < 0 ?
+ "problem reading from socket" :
+ "read less than expected");
+ qemu_plugin_uninstall(our_id, plugin_cleanup);
+ return;
+ }
+
+ /*
+ * Compare and report if we have diverged.
+ */
+ if (us.pc != them.pc) {
+ report_divergance(&us, &them);
+ }
+
+ /*
+ * Assume this block will execute fully and record it
+ * in the execution log.
+ */
+ insn_count += bi->insns;
+ bb_count++;
+ exec = g_new0(ExecInfo, 1);
+ exec->block = bi;
+ exec->insn_count = insn_count;
+ exec->block_count = bb_count;
+ log = g_slist_prepend(log, exec);
+}
+
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ BlockInfo *bi = g_new0(BlockInfo, 1);
+ bi->pc = qemu_plugin_tb_vaddr(tb);
+ bi->insns = qemu_plugin_tb_n_insns(tb);
+
+ /* save a reference so we can free later */
+ blocks = g_slist_prepend(blocks, bi);
+ qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
+ QEMU_PLUGIN_CB_NO_REGS, (void *)bi);
+}
+
+
+/*
+ * Instead of encoding master/slave status into what are essentially
+ * two peers we just take the simple approach of checking for the
+ * existence of the socket path and assuming that, if it's not there,
+ * we are the first process.
+ */
+static bool setup_socket(const char *path)
+{
+ struct sockaddr_un sockaddr;
+ int fd;
+
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd < 0) {
+ perror("create socket");
+ return false;
+ }
+
+ sockaddr.sun_family = AF_UNIX;
+ g_strlcpy(sockaddr.sun_path, path, sizeof(sockaddr.sun_path) - 1);
+ if (bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)) < 0) {
+ perror("bind socket");
+ close(fd);
+ return false;
+ }
+
+ /* remember to clean-up */
+ path_to_unlink = g_strdup(path);
+
+ if (listen(fd, 1) < 0) {
+ perror("listen socket");
+ close(fd);
+ return false;
+ }
+
+ socket_fd = accept(fd, NULL, NULL);
+ if (socket_fd < 0 && errno != EINTR) {
+ perror("accept socket");
+ return false;
+ }
+
+ qemu_plugin_outs("setup_socket::ready\n");
+
+ return true;
+}
+
+static bool connect_socket(const char *path)
+{
+ int fd;
+ struct sockaddr_un sockaddr;
+
+ fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ if (fd < 0) {
+ perror("create socket");
+ return false;
+ }
+
+ sockaddr.sun_family = AF_UNIX;
+ g_strlcpy(sockaddr.sun_path, path, sizeof(sockaddr.sun_path) - 1);
+
+ if (connect(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)) < 0) {
+ perror("failed to connect");
+ return false;
+ }
+
+ qemu_plugin_outs("connect_socket::ready\n");
+
+ socket_fd = fd;
+ return true;
+}
+
+static bool setup_unix_socket(const char *path)
+{
+ if (g_file_test(path, G_FILE_TEST_EXISTS)) {
+ return connect_socket(path);
+ } else {
+ return setup_socket(path);
+ }
+}
+
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
+ const qemu_info_t *info,
+ int argc, char **argv)
+{
+ int i;
+
+ if (!argc || !argv[0]) {
+ qemu_plugin_outs("Need a socket path to talk to other instance.");
+ return -1;
+ }
+
+ for (i = 0; i < argc; i++) {
+ char *p = argv[i];
+ if (strcmp(p, "verbose") == 0) {
+ verbose = true;
+ } else if (!setup_unix_socket(argv[0])) {
+ qemu_plugin_outs("Failed to setup socket for communications.");
+ return -1;
+ }
+ }
+
+ our_id = id;
+
+ qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+ qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+ return 0;
+}
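Finally, the rendezvous used by lockstep.c, where the first instance binds and listens on the socket path and the second sees the path already exists and connects, can be tried outside QEMU with a sketch along these lines (hypothetical scratch path /tmp/lockstep.sock; run it twice in separate terminals):

/*
 * Standalone sketch (not from the commit) of the lockstep rendezvous:
 * if the socket path does not exist yet we are the first peer, so we
 * bind and listen; otherwise we connect to whoever got there first.
 * Build with: gcc rendezvous.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
    const char *path = "/tmp/lockstep.sock";    /* hypothetical scratch path */
    struct sockaddr_un addr = { .sun_family = AF_UNIX };
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    g_strlcpy(addr.sun_path, path, sizeof(addr.sun_path));

    if (!g_file_test(path, G_FILE_TEST_EXISTS)) {
        /* first peer: bind, listen and wait for the other side */
        if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0 ||
            listen(fd, 1) < 0) {
            perror("bind/listen");
            return 1;
        }
        int peer = accept(fd, NULL, NULL);
        if (peer < 0) {
            perror("accept");
            return 1;
        }
        printf("second peer connected\n");
        close(peer);
        unlink(path);    /* clean up the rendezvous path, as the plugin does */
    } else {
        /* second peer: the path already exists, so just connect */
        if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
            perror("connect");
            return 1;
        }
        printf("connected to the first peer\n");
    }

    close(fd);
    return 0;
}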