84 files changed, 2307 insertions, 710 deletions
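The centrepiece of the series is the new cpus-common.c, which moves the run_on_cpu()/async_run_on_cpu() work queues and the start_exclusive()/end_exclusive() machinery out of cpus.c and the user-mode front ends so that system and user emulation share one implementation. As a rough orientation before the per-file hunks, here is a minimal, self-contained sketch of that work-queue pattern using plain pthreads; struct worker, queue_work() and run_on_worker() are hypothetical stand-ins, not QEMU API:

/*
 * Minimal sketch of the work-item queue that this series consolidates
 * into cpus-common.c (do_run_on_cpu / async_run_on_cpu /
 * process_queued_cpu_work).  Illustrative names, not QEMU's.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef void (*work_func)(void *data);

struct work_item {
    struct work_item *next;
    work_func func;
    void *data;
    bool free_item;   /* heap-allocated (async): consumer frees it */
    bool done;        /* stack-allocated (sync): producer waits on this */
};

struct worker {
    pthread_mutex_t work_mutex;
    pthread_cond_t work_cond;          /* signalled after a drain pass */
    struct work_item *first, *last;
};

/* Producer side: append an item to the singly linked queue. */
static void queue_work(struct worker *w, struct work_item *wi)
{
    pthread_mutex_lock(&w->work_mutex);
    if (!w->first) {
        w->first = wi;
    } else {
        w->last->next = wi;
    }
    w->last = wi;
    wi->next = NULL;
    wi->done = false;
    pthread_mutex_unlock(&w->work_mutex);
    /* the real code calls qemu_cpu_kick(cpu) here */
}

/* Synchronous variant: block until the consumer has run the item. */
void run_on_worker(struct worker *w, work_func func, void *data)
{
    struct work_item wi = { .func = func, .data = data, .free_item = false };

    queue_work(w, &wi);
    pthread_mutex_lock(&w->work_mutex);
    while (!wi.done) {
        pthread_cond_wait(&w->work_cond, &w->work_mutex);
    }
    pthread_mutex_unlock(&w->work_mutex);
}

/* Consumer side: drain the queue, dropping the lock around each
 * callback -- the same shape as process_queued_cpu_work(). */
void process_queued_work(struct worker *w)
{
    struct work_item *wi;

    pthread_mutex_lock(&w->work_mutex);
    while (w->first) {
        wi = w->first;
        w->first = wi->next;
        if (!w->first) {
            w->last = NULL;
        }
        pthread_mutex_unlock(&w->work_mutex);
        wi->func(wi->data);            /* run outside the queue lock */
        pthread_mutex_lock(&w->work_mutex);
        if (wi->free_item) {
            free(wi);
        } else {
            wi->done = true;           /* QEMU uses atomic_mb_set here */
        }
    }
    pthread_mutex_unlock(&w->work_mutex);
    pthread_cond_broadcast(&w->work_cond);
}

The committed do_run_on_cpu() differs in that it waits on qemu_work_cond with a caller-supplied mutex (the BQL in system mode), runs the function directly when qemu_cpu_is_self(), and kicks the target vCPU after queuing; async_safe_run_on_cpu() additionally wraps the callback in start_exclusive()/end_exclusive(), dropping the BQL first to avoid the deadlock described in the process_queued_cpu_work() comment below.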
diff --git a/Makefile.objs b/Makefile.objs index 571ac1101d..02fb8e76b8 100644 --- a/Makefile.objs +++ b/Makefile.objs @@ -89,7 +89,7 @@ endif ####################################################################### # Target-independent parts used in system and user emulation -common-obj-y += tcg-runtime.o +common-obj-y += tcg-runtime.o cpus-common.o common-obj-y += hw/ common-obj-y += qom/ common-obj-y += disas/ diff --git a/aio-posix.c b/aio-posix.c index 43162a9f29..4ef34dd175 100644 --- a/aio-posix.c +++ b/aio-posix.c @@ -431,11 +431,13 @@ bool aio_poll(AioContext *ctx, bool blocking) assert(npfd == 0); /* fill pollfds */ - QLIST_FOREACH(node, &ctx->aio_handlers, node) { - if (!node->deleted && node->pfd.events - && !aio_epoll_enabled(ctx) - && aio_node_check(ctx, node->is_external)) { - add_pollfd(node); + + if (!aio_epoll_enabled(ctx)) { + QLIST_FOREACH(node, &ctx->aio_handlers, node) { + if (!node->deleted && node->pfd.events + && aio_node_check(ctx, node->is_external)) { + add_pollfd(node); + } } } diff --git a/block/blkreplay.c b/block/blkreplay.c index 30f9d5ff6c..a741654d35 100755 --- a/block/blkreplay.c +++ b/block/blkreplay.c @@ -20,11 +20,6 @@ typedef struct Request { QEMUBH *bh; } Request; -/* Next request id. - This counter is global, because requests from different - block devices should not get overlapping ids. */ -static uint64_t request_id; - static int blkreplay_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { @@ -84,7 +79,7 @@ static void block_request_create(uint64_t reqid, BlockDriverState *bs, static int coroutine_fn blkreplay_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) { - uint64_t reqid = request_id++; + uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_preadv(bs->file, offset, bytes, qiov, flags); block_request_create(reqid, bs, qemu_coroutine_self()); qemu_coroutine_yield(); @@ -95,7 +90,7 @@ static int coroutine_fn blkreplay_co_preadv(BlockDriverState *bs, static int coroutine_fn blkreplay_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) { - uint64_t reqid = request_id++; + uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags); block_request_create(reqid, bs, qemu_coroutine_self()); qemu_coroutine_yield(); @@ -106,7 +101,7 @@ static int coroutine_fn blkreplay_co_pwritev(BlockDriverState *bs, static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int count, BdrvRequestFlags flags) { - uint64_t reqid = request_id++; + uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_pwrite_zeroes(bs->file, offset, count, flags); block_request_create(reqid, bs, qemu_coroutine_self()); qemu_coroutine_yield(); @@ -117,7 +112,7 @@ static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs, static int coroutine_fn blkreplay_co_pdiscard(BlockDriverState *bs, int64_t offset, int count) { - uint64_t reqid = request_id++; + uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_pdiscard(bs->file->bs, offset, count); block_request_create(reqid, bs, qemu_coroutine_self()); qemu_coroutine_yield(); @@ -127,7 +122,7 @@ static int coroutine_fn blkreplay_co_pdiscard(BlockDriverState *bs, static int coroutine_fn blkreplay_co_flush(BlockDriverState *bs) { - uint64_t reqid = request_id++; + uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_flush(bs->file->bs); block_request_create(reqid, bs, qemu_coroutine_self()); qemu_coroutine_yield(); diff --git 
a/block/linux-aio.c b/block/linux-aio.c index d4e19d444c..1685ec29a3 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -94,9 +94,12 @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb) laiocb->ret = ret; if (laiocb->co) { - /* Jump and continue completion for foreign requests, don't do - * anything for current request, it will be completed shortly. */ - if (laiocb->co != qemu_coroutine_self()) { + /* If the coroutine is already entered it must be in ioq_submit() and + * will notice laio->ret has been filled in when it eventually runs + * later. Coroutines cannot be entered recursively so avoid doing + * that! + */ + if (!qemu_coroutine_entered(laiocb->co)) { qemu_coroutine_enter(laiocb->co); } } else { diff --git a/bsd-user/main.c b/bsd-user/main.c index f58bb43579..d803d3e10c 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -67,23 +67,6 @@ int cpu_get_pic_interrupt(CPUX86State *env) } #endif -/* These are no-ops because we are not threadsafe. */ -static inline void cpu_exec_start(CPUArchState *env) -{ -} - -static inline void cpu_exec_end(CPUArchState *env) -{ -} - -static inline void start_exclusive(void) -{ -} - -static inline void end_exclusive(void) -{ -} - void fork_start(void) { } @@ -95,14 +78,6 @@ void fork_end(int child) } } -void cpu_list_lock(void) -{ -} - -void cpu_list_unlock(void) -{ -} - #ifdef TARGET_I386 /***********************************************************/ /* CPUX86 core interface */ @@ -172,7 +147,11 @@ void cpu_loop(CPUX86State *env) //target_siginfo_t info; for(;;) { + cpu_exec_start(cs); trapnr = cpu_exec(cs); + cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch(trapnr) { case 0x80: /* syscall from int $0x80 */ @@ -513,7 +492,10 @@ void cpu_loop(CPUSPARCState *env) //target_siginfo_t info; while (1) { + cpu_exec_start(cs); trapnr = cpu_exec(cs); + cpu_exec_end(cs); + process_queued_cpu_work(cs); switch (trapnr) { #ifndef TARGET_SPARC64 @@ -748,6 +730,7 @@ int main(int argc, char **argv) if (argc <= 1) usage(); + qemu_init_cpu_list(); module_call_init(MODULE_INIT_QOM); if ((envlist = envlist_create()) == NULL) { @@ -1955,6 +1955,61 @@ EOF /* * If we have stable libs the we don't want the libxc compat * layers, regardless of what CFLAGS we may have been given. + * + * Also, check if xengnttab_grant_copy_segment_t is defined and + * grant copy operation is implemented. 
+ */ +#undef XC_WANT_COMPAT_EVTCHN_API +#undef XC_WANT_COMPAT_GNTTAB_API +#undef XC_WANT_COMPAT_MAP_FOREIGN_API +#include <xenctrl.h> +#include <xenstore.h> +#include <xenevtchn.h> +#include <xengnttab.h> +#include <xenforeignmemory.h> +#include <stdint.h> +#include <xen/hvm/hvm_info_table.h> +#if !defined(HVM_MAX_VCPUS) +# error HVM_MAX_VCPUS not defined +#endif +int main(void) { + xc_interface *xc = NULL; + xenforeignmemory_handle *xfmem; + xenevtchn_handle *xe; + xengnttab_handle *xg; + xen_domain_handle_t handle; + xengnttab_grant_copy_segment_t* seg = NULL; + + xs_daemon_open(); + + xc = xc_interface_open(0, 0, 0); + xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0); + xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0); + xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000); + xc_hvm_create_ioreq_server(xc, 0, HVM_IOREQSRV_BUFIOREQ_ATOMIC, NULL); + xc_domain_create(xc, 0, handle, 0, NULL, NULL); + + xfmem = xenforeignmemory_open(0, 0); + xenforeignmemory_map(xfmem, 0, 0, 0, 0, 0); + + xe = xenevtchn_open(0, 0); + xenevtchn_fd(xe); + + xg = xengnttab_open(0, 0); + xengnttab_grant_copy(xg, 0, seg); + + return 0; +} +EOF + compile_prog "" "$xen_libs $xen_stable_libs" + then + xen_ctrl_version=480 + xen=yes + elif + cat > $TMPC <<EOF && +/* + * If we have stable libs the we don't want the libxc compat + * layers, regardless of what CFLAGS we may have been given. */ #undef XC_WANT_COMPAT_EVTCHN_API #undef XC_WANT_COMPAT_GNTTAB_API @@ -2933,7 +2988,7 @@ for i in $glib_modules; do if $pkg_config --atleast-version=$glib_req_ver $i; then glib_cflags=$($pkg_config --cflags $i) glib_libs=$($pkg_config --libs $i) - CFLAGS="$glib_cflags $CFLAGS" + QEMU_CFLAGS="$glib_cflags $QEMU_CFLAGS" LIBS="$glib_libs $LIBS" libs_qga="$glib_libs $libs_qga" else @@ -5140,7 +5195,6 @@ fi if test "$glib_subprocess" = "yes" ; then echo "CONFIG_HAS_GLIB_SUBPROCESS_TESTS=y" >> $config_host_mak fi -echo "GLIB_CFLAGS=$glib_cflags" >> $config_host_mak if test "$gtk" = "yes" ; then echo "CONFIG_GTK=y" >> $config_host_mak echo "CONFIG_GTKABI=$gtkabi" >> $config_host_mak diff --git a/cpu-exec.c b/cpu-exec.c index 9f4bd0b6dd..8823d23df7 100644 --- a/cpu-exec.c +++ b/cpu-exec.c @@ -204,20 +204,16 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles, TranslationBlock *orig_tb, bool ignore_icount) { TranslationBlock *tb; - bool old_tb_flushed; /* Should never happen. We only end up here when an existing TB is too long. */ if (max_cycles > CF_COUNT_MASK) max_cycles = CF_COUNT_MASK; - old_tb_flushed = cpu->tb_flushed; - cpu->tb_flushed = false; tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags, max_cycles | CF_NOCACHE | (ignore_icount ? CF_IGNORE_ICOUNT : 0)); - tb->orig_tb = cpu->tb_flushed ? 
NULL : orig_tb; - cpu->tb_flushed |= old_tb_flushed; + tb->orig_tb = orig_tb; /* execute the generated code */ trace_exec_tb_nocache(tb, tb->pc); cpu_tb_exec(cpu, tb); @@ -338,10 +334,7 @@ static inline TranslationBlock *tb_find(CPUState *cpu, tb_lock(); have_tb_lock = true; } - /* Check if translation buffer has been flushed */ - if (cpu->tb_flushed) { - cpu->tb_flushed = false; - } else if (!tb->invalid) { + if (!tb->invalid) { tb_add_jump(last_tb, tb_exit, tb); } } @@ -606,7 +599,6 @@ int cpu_exec(CPUState *cpu) break; } - atomic_mb_set(&cpu->tb_flushed, false); /* reset before first TB lookup */ for(;;) { cpu_handle_interrupt(cpu, &last_tb); tb = tb_find(cpu, last_tb, tb_exit); diff --git a/cpus-common.c b/cpus-common.c new file mode 100644 index 0000000000..3e114529c9 --- /dev/null +++ b/cpus-common.c @@ -0,0 +1,352 @@ +/* + * CPU thread main loop - common bits for user and system mode emulation + * + * Copyright (c) 2003-2005 Fabrice Bellard + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "exec/cpu-common.h" +#include "qom/cpu.h" +#include "sysemu/cpus.h" + +static QemuMutex qemu_cpu_list_lock; +static QemuCond exclusive_cond; +static QemuCond exclusive_resume; +static QemuCond qemu_work_cond; + +/* >= 1 if a thread is inside start_exclusive/end_exclusive. Written + * under qemu_cpu_list_lock, read with atomic operations. + */ +static int pending_cpus; + +void qemu_init_cpu_list(void) +{ + /* This is needed because qemu_init_cpu_list is also called by the + * child process in a fork. 
*/ + pending_cpus = 0; + + qemu_mutex_init(&qemu_cpu_list_lock); + qemu_cond_init(&exclusive_cond); + qemu_cond_init(&exclusive_resume); + qemu_cond_init(&qemu_work_cond); +} + +void cpu_list_lock(void) +{ + qemu_mutex_lock(&qemu_cpu_list_lock); +} + +void cpu_list_unlock(void) +{ + qemu_mutex_unlock(&qemu_cpu_list_lock); +} + +static bool cpu_index_auto_assigned; + +static int cpu_get_free_index(void) +{ + CPUState *some_cpu; + int cpu_index = 0; + + cpu_index_auto_assigned = true; + CPU_FOREACH(some_cpu) { + cpu_index++; + } + return cpu_index; +} + +static void finish_safe_work(CPUState *cpu) +{ + cpu_exec_start(cpu); + cpu_exec_end(cpu); +} + +void cpu_list_add(CPUState *cpu) +{ + qemu_mutex_lock(&qemu_cpu_list_lock); + if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) { + cpu->cpu_index = cpu_get_free_index(); + assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX); + } else { + assert(!cpu_index_auto_assigned); + } + QTAILQ_INSERT_TAIL(&cpus, cpu, node); + qemu_mutex_unlock(&qemu_cpu_list_lock); + + finish_safe_work(cpu); +} + +void cpu_list_remove(CPUState *cpu) +{ + qemu_mutex_lock(&qemu_cpu_list_lock); + if (!QTAILQ_IN_USE(cpu, node)) { + /* there is nothing to undo since cpu_exec_init() hasn't been called */ + qemu_mutex_unlock(&qemu_cpu_list_lock); + return; + } + + assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ))); + + QTAILQ_REMOVE(&cpus, cpu, node); + cpu->cpu_index = UNASSIGNED_CPU_INDEX; + qemu_mutex_unlock(&qemu_cpu_list_lock); +} + +struct qemu_work_item { + struct qemu_work_item *next; + run_on_cpu_func func; + void *data; + bool free, exclusive, done; +}; + +static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi) +{ + qemu_mutex_lock(&cpu->work_mutex); + if (cpu->queued_work_first == NULL) { + cpu->queued_work_first = wi; + } else { + cpu->queued_work_last->next = wi; + } + cpu->queued_work_last = wi; + wi->next = NULL; + wi->done = false; + qemu_mutex_unlock(&cpu->work_mutex); + + qemu_cpu_kick(cpu); +} + +void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data, + QemuMutex *mutex) +{ + struct qemu_work_item wi; + + if (qemu_cpu_is_self(cpu)) { + func(cpu, data); + return; + } + + wi.func = func; + wi.data = data; + wi.done = false; + wi.free = false; + wi.exclusive = false; + + queue_work_on_cpu(cpu, &wi); + while (!atomic_mb_read(&wi.done)) { + CPUState *self_cpu = current_cpu; + + qemu_cond_wait(&qemu_work_cond, mutex); + current_cpu = self_cpu; + } +} + +void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data) +{ + struct qemu_work_item *wi; + + wi = g_malloc0(sizeof(struct qemu_work_item)); + wi->func = func; + wi->data = data; + wi->free = true; + + queue_work_on_cpu(cpu, wi); +} + +/* Wait for pending exclusive operations to complete. The CPU list lock + must be held. */ +static inline void exclusive_idle(void) +{ + while (pending_cpus) { + qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock); + } +} + +/* Start an exclusive operation. + Must only be called from outside cpu_exec. */ +void start_exclusive(void) +{ + CPUState *other_cpu; + int running_cpus; + + qemu_mutex_lock(&qemu_cpu_list_lock); + exclusive_idle(); + + /* Make all other cpus stop executing. */ + atomic_set(&pending_cpus, 1); + + /* Write pending_cpus before reading other_cpu->running. 
*/ + smp_mb(); + running_cpus = 0; + CPU_FOREACH(other_cpu) { + if (atomic_read(&other_cpu->running)) { + other_cpu->has_waiter = true; + running_cpus++; + qemu_cpu_kick(other_cpu); + } + } + + atomic_set(&pending_cpus, running_cpus + 1); + while (pending_cpus > 1) { + qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock); + } + + /* Can release mutex, no one will enter another exclusive + * section until end_exclusive resets pending_cpus to 0. + */ + qemu_mutex_unlock(&qemu_cpu_list_lock); +} + +/* Finish an exclusive operation. */ +void end_exclusive(void) +{ + qemu_mutex_lock(&qemu_cpu_list_lock); + atomic_set(&pending_cpus, 0); + qemu_cond_broadcast(&exclusive_resume); + qemu_mutex_unlock(&qemu_cpu_list_lock); +} + +/* Wait for exclusive ops to finish, and begin cpu execution. */ +void cpu_exec_start(CPUState *cpu) +{ + atomic_set(&cpu->running, true); + + /* Write cpu->running before reading pending_cpus. */ + smp_mb(); + + /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1. + * After taking the lock we'll see cpu->has_waiter == true and run---not + * for long because start_exclusive kicked us. cpu_exec_end will + * decrement pending_cpus and signal the waiter. + * + * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1. + * This includes the case when an exclusive item is running now. + * Then we'll see cpu->has_waiter == false and wait for the item to + * complete. + * + * 3. pending_cpus == 0. Then start_exclusive is definitely going to + * see cpu->running == true, and it will kick the CPU. + */ + if (unlikely(atomic_read(&pending_cpus))) { + qemu_mutex_lock(&qemu_cpu_list_lock); + if (!cpu->has_waiter) { + /* Not counted in pending_cpus, let the exclusive item + * run. Since we have the lock, just set cpu->running to true + * while holding it; no need to check pending_cpus again. + */ + atomic_set(&cpu->running, false); + exclusive_idle(); + /* Now pending_cpus is zero. */ + atomic_set(&cpu->running, true); + } else { + /* Counted in pending_cpus, go ahead and release the + * waiter at cpu_exec_end. + */ + } + qemu_mutex_unlock(&qemu_cpu_list_lock); + } +} + +/* Mark cpu as not executing, and release pending exclusive ops. */ +void cpu_exec_end(CPUState *cpu) +{ + atomic_set(&cpu->running, false); + + /* Write cpu->running before reading pending_cpus. */ + smp_mb(); + + /* 1. start_exclusive saw cpu->running == true. Then it will increment + * pending_cpus and wait for exclusive_cond. After taking the lock + * we'll see cpu->has_waiter == true. + * + * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1. + * This includes the case when an exclusive item started after setting + * cpu->running to false and before we read pending_cpus. Then we'll see + * cpu->has_waiter == false and not touch pending_cpus. The next call to + * cpu_exec_start will run exclusive_idle if still necessary, thus waiting + * for the item to complete. + * + * 3. pending_cpus == 0. Then start_exclusive is definitely going to + * see cpu->running == false, and it can ignore this CPU until the + * next cpu_exec_start. 
+ */ + if (unlikely(atomic_read(&pending_cpus))) { + qemu_mutex_lock(&qemu_cpu_list_lock); + if (cpu->has_waiter) { + cpu->has_waiter = false; + atomic_set(&pending_cpus, pending_cpus - 1); + if (pending_cpus == 1) { + qemu_cond_signal(&exclusive_cond); + } + } + qemu_mutex_unlock(&qemu_cpu_list_lock); + } +} + +void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data) +{ + struct qemu_work_item *wi; + + wi = g_malloc0(sizeof(struct qemu_work_item)); + wi->func = func; + wi->data = data; + wi->free = true; + wi->exclusive = true; + + queue_work_on_cpu(cpu, wi); +} + +void process_queued_cpu_work(CPUState *cpu) +{ + struct qemu_work_item *wi; + + if (cpu->queued_work_first == NULL) { + return; + } + + qemu_mutex_lock(&cpu->work_mutex); + while (cpu->queued_work_first != NULL) { + wi = cpu->queued_work_first; + cpu->queued_work_first = wi->next; + if (!cpu->queued_work_first) { + cpu->queued_work_last = NULL; + } + qemu_mutex_unlock(&cpu->work_mutex); + if (wi->exclusive) { + /* Running work items outside the BQL avoids the following deadlock: + * 1) start_exclusive() is called with the BQL taken while another + * CPU is running; 2) cpu_exec in the other CPU tries to takes the + * BQL, so it goes to sleep; start_exclusive() is sleeping too, so + * neither CPU can proceed. + */ + qemu_mutex_unlock_iothread(); + start_exclusive(); + wi->func(cpu, wi->data); + end_exclusive(); + qemu_mutex_lock_iothread(); + } else { + wi->func(cpu, wi->data); + } + qemu_mutex_lock(&cpu->work_mutex); + if (wi->free) { + g_free(wi); + } else { + atomic_mb_set(&wi->done, true); + } + } + qemu_mutex_unlock(&cpu->work_mutex); + qemu_cond_broadcast(&qemu_work_cond); +} @@ -557,9 +557,8 @@ static const VMStateDescription vmstate_timers = { } }; -static void cpu_throttle_thread(void *opaque) +static void cpu_throttle_thread(CPUState *cpu, void *opaque) { - CPUState *cpu = opaque; double pct; double throttle_ratio; long sleeptime_ns; @@ -589,7 +588,7 @@ static void cpu_throttle_timer_tick(void *opaque) } CPU_FOREACH(cpu) { if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) { - async_run_on_cpu(cpu, cpu_throttle_thread, cpu); + async_run_on_cpu(cpu, cpu_throttle_thread, NULL); } } @@ -751,6 +750,7 @@ static int do_vm_stop(RunState state) } bdrv_drain_all(); + replay_disable_events(); ret = blk_flush_all(); return ret; @@ -903,79 +903,21 @@ static QemuThread io_thread; static QemuCond qemu_cpu_cond; /* system init */ static QemuCond qemu_pause_cond; -static QemuCond qemu_work_cond; void qemu_init_cpu_loop(void) { qemu_init_sigbus(); qemu_cond_init(&qemu_cpu_cond); qemu_cond_init(&qemu_pause_cond); - qemu_cond_init(&qemu_work_cond); qemu_cond_init(&qemu_io_proceeded_cond); qemu_mutex_init(&qemu_global_mutex); qemu_thread_get_self(&io_thread); } -void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data) +void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data) { - struct qemu_work_item wi; - - if (qemu_cpu_is_self(cpu)) { - func(data); - return; - } - - wi.func = func; - wi.data = data; - wi.free = false; - - qemu_mutex_lock(&cpu->work_mutex); - if (cpu->queued_work_first == NULL) { - cpu->queued_work_first = &wi; - } else { - cpu->queued_work_last->next = &wi; - } - cpu->queued_work_last = &wi; - wi.next = NULL; - wi.done = false; - qemu_mutex_unlock(&cpu->work_mutex); - - qemu_cpu_kick(cpu); - while (!atomic_mb_read(&wi.done)) { - CPUState *self_cpu = current_cpu; - - qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex); - current_cpu = self_cpu; - } -} - -void 
async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data) -{ - struct qemu_work_item *wi; - - if (qemu_cpu_is_self(cpu)) { - func(data); - return; - } - - wi = g_malloc0(sizeof(struct qemu_work_item)); - wi->func = func; - wi->data = data; - wi->free = true; - - qemu_mutex_lock(&cpu->work_mutex); - if (cpu->queued_work_first == NULL) { - cpu->queued_work_first = wi; - } else { - cpu->queued_work_last->next = wi; - } - cpu->queued_work_last = wi; - wi->next = NULL; - wi->done = false; - qemu_mutex_unlock(&cpu->work_mutex); - - qemu_cpu_kick(cpu); + do_run_on_cpu(cpu, func, data, &qemu_global_mutex); } static void qemu_kvm_destroy_vcpu(CPUState *cpu) @@ -990,34 +932,6 @@ static void qemu_tcg_destroy_vcpu(CPUState *cpu) { } -static void flush_queued_work(CPUState *cpu) -{ - struct qemu_work_item *wi; - - if (cpu->queued_work_first == NULL) { - return; - } - - qemu_mutex_lock(&cpu->work_mutex); - while (cpu->queued_work_first != NULL) { - wi = cpu->queued_work_first; - cpu->queued_work_first = wi->next; - if (!cpu->queued_work_first) { - cpu->queued_work_last = NULL; - } - qemu_mutex_unlock(&cpu->work_mutex); - wi->func(wi->data); - qemu_mutex_lock(&cpu->work_mutex); - if (wi->free) { - g_free(wi); - } else { - atomic_mb_set(&wi->done, true); - } - } - qemu_mutex_unlock(&cpu->work_mutex); - qemu_cond_broadcast(&qemu_work_cond); -} - static void qemu_wait_io_event_common(CPUState *cpu) { if (cpu->stop) { @@ -1025,7 +939,7 @@ static void qemu_wait_io_event_common(CPUState *cpu) cpu->stopped = true; qemu_cond_broadcast(&qemu_pause_cond); } - flush_queued_work(cpu); + process_queued_cpu_work(cpu); cpu->thread_kicked = false; } @@ -1544,7 +1458,9 @@ static int tcg_cpu_exec(CPUState *cpu) cpu->icount_decr.u16.low = decr; cpu->icount_extra = count; } + cpu_exec_start(cpu); ret = cpu_exec(cpu); + cpu_exec_end(cpu); #ifdef CONFIG_PROFILER tcg_time += profile_getclock() - ti; #endif diff --git a/docs/tcg-exclusive.promela b/docs/tcg-exclusive.promela new file mode 100644 index 0000000000..c91cfca9f7 --- /dev/null +++ b/docs/tcg-exclusive.promela @@ -0,0 +1,225 @@ +/* + * This model describes the implementation of exclusive sections in + * cpus-common.c (start_exclusive, end_exclusive, cpu_exec_start, + * cpu_exec_end). + * + * Author: Paolo Bonzini <pbonzini@redhat.com> + * + * This file is in the public domain. If you really want a license, + * the WTFPL will do. + * + * To verify it: + * spin -a docs/tcg-exclusive.promela + * gcc pan.c -O2 + * ./a.out -a + * + * Tunable processor macros: N_CPUS, N_EXCLUSIVE, N_CYCLES, USE_MUTEX, + * TEST_EXPENSIVE. + */ + +// Define the missing parameters for the model +#ifndef N_CPUS +#define N_CPUS 2 +#warning defaulting to 2 CPU processes +#endif + +// the expensive test is not so expensive for <= 2 CPUs +// If the mutex is used, it's also cheap (300 MB / 4 seconds) for 3 CPUs +// For 3 CPUs and the lock-free option it needs 1.5 GB of RAM +#if N_CPUS <= 2 || (N_CPUS <= 3 && defined USE_MUTEX) +#define TEST_EXPENSIVE +#endif + +#ifndef N_EXCLUSIVE +# if !defined N_CYCLES || N_CYCLES <= 1 || defined TEST_EXPENSIVE +# define N_EXCLUSIVE 2 +# warning defaulting to 2 concurrent exclusive sections +# else +# define N_EXCLUSIVE 1 +# warning defaulting to 1 concurrent exclusive sections +# endif +#endif +#ifndef N_CYCLES +# if N_EXCLUSIVE <= 1 || defined TEST_EXPENSIVE +# define N_CYCLES 2 +# warning defaulting to 2 CPU cycles +# else +# define N_CYCLES 1 +# warning defaulting to 1 CPU cycles +# endif +#endif + + +// synchronization primitives. 
condition variables require a +// process-local "cond_t saved;" variable. + +#define mutex_t byte +#define MUTEX_LOCK(m) atomic { m == 0 -> m = 1 } +#define MUTEX_UNLOCK(m) m = 0 + +#define cond_t int +#define COND_WAIT(c, m) { \ + saved = c; \ + MUTEX_UNLOCK(m); \ + c != saved -> MUTEX_LOCK(m); \ + } +#define COND_BROADCAST(c) c++ + +// this is the logic from cpus-common.c + +mutex_t mutex; +cond_t exclusive_cond; +cond_t exclusive_resume; +byte pending_cpus; + +byte running[N_CPUS]; +byte has_waiter[N_CPUS]; + +#define exclusive_idle() \ + do \ + :: pending_cpus -> COND_WAIT(exclusive_resume, mutex); \ + :: else -> break; \ + od + +#define start_exclusive() \ + MUTEX_LOCK(mutex); \ + exclusive_idle(); \ + pending_cpus = 1; \ + \ + i = 0; \ + do \ + :: i < N_CPUS -> { \ + if \ + :: running[i] -> has_waiter[i] = 1; pending_cpus++; \ + :: else -> skip; \ + fi; \ + i++; \ + } \ + :: else -> break; \ + od; \ + \ + do \ + :: pending_cpus > 1 -> COND_WAIT(exclusive_cond, mutex); \ + :: else -> break; \ + od; \ + MUTEX_UNLOCK(mutex); + +#define end_exclusive() \ + MUTEX_LOCK(mutex); \ + pending_cpus = 0; \ + COND_BROADCAST(exclusive_resume); \ + MUTEX_UNLOCK(mutex); + +#ifdef USE_MUTEX +// Simple version using mutexes +#define cpu_exec_start(id) \ + MUTEX_LOCK(mutex); \ + exclusive_idle(); \ + running[id] = 1; \ + MUTEX_UNLOCK(mutex); + +#define cpu_exec_end(id) \ + MUTEX_LOCK(mutex); \ + running[id] = 0; \ + if \ + :: pending_cpus -> { \ + pending_cpus--; \ + if \ + :: pending_cpus == 1 -> COND_BROADCAST(exclusive_cond); \ + :: else -> skip; \ + fi; \ + } \ + :: else -> skip; \ + fi; \ + MUTEX_UNLOCK(mutex); +#else +// Wait-free fast path, only needs mutex when concurrent with +// an exclusive section +#define cpu_exec_start(id) \ + running[id] = 1; \ + if \ + :: pending_cpus -> { \ + MUTEX_LOCK(mutex); \ + if \ + :: !has_waiter[id] -> { \ + running[id] = 0; \ + exclusive_idle(); \ + running[id] = 1; \ + } \ + :: else -> skip; \ + fi; \ + MUTEX_UNLOCK(mutex); \ + } \ + :: else -> skip; \ + fi; + +#define cpu_exec_end(id) \ + running[id] = 0; \ + if \ + :: pending_cpus -> { \ + MUTEX_LOCK(mutex); \ + if \ + :: has_waiter[id] -> { \ + has_waiter[id] = 0; \ + pending_cpus--; \ + if \ + :: pending_cpus == 1 -> COND_BROADCAST(exclusive_cond); \ + :: else -> skip; \ + fi; \ + } \ + :: else -> skip; \ + fi; \ + MUTEX_UNLOCK(mutex); \ + } \ + :: else -> skip; \ + fi +#endif + +// Promela processes + +byte done_cpu; +byte in_cpu; +active[N_CPUS] proctype cpu() +{ + byte id = _pid % N_CPUS; + byte cycles = 0; + cond_t saved; + + do + :: cycles == N_CYCLES -> break; + :: else -> { + cycles++; + cpu_exec_start(id) + in_cpu++; + done_cpu++; + in_cpu--; + cpu_exec_end(id) + } + od; +} + +byte done_exclusive; +byte in_exclusive; +active[N_EXCLUSIVE] proctype exclusive() +{ + cond_t saved; + byte i; + + start_exclusive(); + in_exclusive = 1; + done_exclusive++; + in_exclusive = 0; + end_exclusive(); +} + +#define LIVENESS (done_cpu == N_CPUS * N_CYCLES && done_exclusive == N_EXCLUSIVE) +#define SAFETY !(in_exclusive && in_cpu) + +never { /* ! ([] SAFETY && <> [] LIVENESS) */ + do + // once the liveness property is satisfied, this is not executable + // and the never clause is not accepted + :: ! 
LIVENESS -> accept_liveness: skip + :: 1 -> assert(SAFETY) + od; +} @@ -598,36 +598,11 @@ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx) } #endif -static bool cpu_index_auto_assigned; - -static int cpu_get_free_index(void) -{ - CPUState *some_cpu; - int cpu_index = 0; - - cpu_index_auto_assigned = true; - CPU_FOREACH(some_cpu) { - cpu_index++; - } - return cpu_index; -} - void cpu_exec_exit(CPUState *cpu) { CPUClass *cc = CPU_GET_CLASS(cpu); - cpu_list_lock(); - if (!QTAILQ_IN_USE(cpu, node)) { - /* there is nothing to undo since cpu_exec_init() hasn't been called */ - cpu_list_unlock(); - return; - } - - assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ))); - - QTAILQ_REMOVE(&cpus, cpu, node); - cpu->cpu_index = UNASSIGNED_CPU_INDEX; - cpu_list_unlock(); + cpu_list_remove(cpu); if (cc->vmsd != NULL) { vmstate_unregister(NULL, cc->vmsd, cpu); @@ -663,15 +638,7 @@ void cpu_exec_init(CPUState *cpu, Error **errp) object_ref(OBJECT(cpu->memory)); #endif - cpu_list_lock(); - if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) { - cpu->cpu_index = cpu_get_free_index(); - assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX); - } else { - assert(!cpu_index_auto_assigned); - } - QTAILQ_INSERT_TAIL(&cpus, cpu, node); - cpu_list_unlock(); + cpu_list_add(cpu); #ifndef CONFIG_USER_ONLY if (qdev_get_vmsd(DEVICE(cpu)) == NULL) { diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c index 342868904d..5aa350a1bf 100644 --- a/hw/block/xen_disk.c +++ b/hw/block/xen_disk.c @@ -119,6 +119,9 @@ struct XenBlkDev { unsigned int persistent_gnt_count; unsigned int max_grants; + /* Grant copy */ + gboolean feature_grant_copy; + /* qemu block driver */ DriveInfo *dinfo; BlockBackend *blk; @@ -489,6 +492,106 @@ static int ioreq_map(struct ioreq *ioreq) return 0; } +#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 480 + +static void ioreq_free_copy_buffers(struct ioreq *ioreq) +{ + int i; + + for (i = 0; i < ioreq->v.niov; i++) { + ioreq->page[i] = NULL; + } + + qemu_vfree(ioreq->pages); +} + +static int ioreq_init_copy_buffers(struct ioreq *ioreq) +{ + int i; + + if (ioreq->v.niov == 0) { + return 0; + } + + ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE); + + for (i = 0; i < ioreq->v.niov; i++) { + ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE; + ioreq->v.iov[i].iov_base = ioreq->page[i]; + } + + return 0; +} + +static int ioreq_grant_copy(struct ioreq *ioreq) +{ + xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev; + xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + int i, count, rc; + int64_t file_blk = ioreq->blkdev->file_blk; + + if (ioreq->v.niov == 0) { + return 0; + } + + count = ioreq->v.niov; + + for (i = 0; i < count; i++) { + if (ioreq->req.operation == BLKIF_OP_READ) { + segs[i].flags = GNTCOPY_dest_gref; + segs[i].dest.foreign.ref = ioreq->refs[i]; + segs[i].dest.foreign.domid = ioreq->domids[i]; + segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk; + segs[i].source.virt = ioreq->v.iov[i].iov_base; + } else { + segs[i].flags = GNTCOPY_source_gref; + segs[i].source.foreign.ref = ioreq->refs[i]; + segs[i].source.foreign.domid = ioreq->domids[i]; + segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk; + segs[i].dest.virt = ioreq->v.iov[i].iov_base; + } + segs[i].len = (ioreq->req.seg[i].last_sect + - ioreq->req.seg[i].first_sect + 1) * file_blk; + } + + rc = xengnttab_grant_copy(gnt, count, segs); + + if (rc) { + xen_be_printf(&ioreq->blkdev->xendev, 0, + "failed to copy data %d\n", rc); + 
ioreq->aio_errors++; + return -1; + } + + for (i = 0; i < count; i++) { + if (segs[i].status != GNTST_okay) { + xen_be_printf(&ioreq->blkdev->xendev, 3, + "failed to copy data %d for gref %d, domid %d\n", + segs[i].status, ioreq->refs[i], ioreq->domids[i]); + ioreq->aio_errors++; + rc = -1; + } + } + + return rc; +} +#else +static void ioreq_free_copy_buffers(struct ioreq *ioreq) +{ + abort(); +} + +static int ioreq_init_copy_buffers(struct ioreq *ioreq) +{ + abort(); +} + +static int ioreq_grant_copy(struct ioreq *ioreq) +{ + abort(); +} +#endif + static int ioreq_runio_qemu_aio(struct ioreq *ioreq); static void qemu_aio_complete(void *opaque, int ret) @@ -511,8 +614,31 @@ static void qemu_aio_complete(void *opaque, int ret) return; } + if (ioreq->blkdev->feature_grant_copy) { + switch (ioreq->req.operation) { + case BLKIF_OP_READ: + /* in case of failure ioreq->aio_errors is increased */ + if (ret == 0) { + ioreq_grant_copy(ioreq); + } + ioreq_free_copy_buffers(ioreq); + break; + case BLKIF_OP_WRITE: + case BLKIF_OP_FLUSH_DISKCACHE: + if (!ioreq->req.nr_segments) { + break; + } + ioreq_free_copy_buffers(ioreq); + break; + default: + break; + } + } + ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY; - ioreq_unmap(ioreq); + if (!ioreq->blkdev->feature_grant_copy) { + ioreq_unmap(ioreq); + } ioreq_finish(ioreq); switch (ioreq->req.operation) { case BLKIF_OP_WRITE: @@ -538,8 +664,18 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq) { struct XenBlkDev *blkdev = ioreq->blkdev; - if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) { - goto err_no_map; + if (ioreq->blkdev->feature_grant_copy) { + ioreq_init_copy_buffers(ioreq); + if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE || + ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) && + ioreq_grant_copy(ioreq)) { + ioreq_free_copy_buffers(ioreq); + goto err; + } + } else { + if (ioreq->req.nr_segments && ioreq_map(ioreq)) { + goto err; + } } ioreq->aio_inflight++; @@ -582,6 +718,9 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq) } default: /* unknown operation (shouldn't happen -- parse catches this) */ + if (!ioreq->blkdev->feature_grant_copy) { + ioreq_unmap(ioreq); + } goto err; } @@ -590,8 +729,6 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq) return 0; err: - ioreq_unmap(ioreq); -err_no_map: ioreq_finish(ioreq); ioreq->status = BLKIF_RSP_ERROR; return -1; @@ -1034,6 +1171,12 @@ static int blk_connect(struct XenDevice *xendev) xen_be_bind_evtchn(&blkdev->xendev); + blkdev->feature_grant_copy = + (xengnttab_grant_copy(blkdev->xendev.gnttabdev, 0, NULL) == 0); + + xen_be_printf(&blkdev->xendev, 3, "grant copy operation %s\n", + blkdev->feature_grant_copy ? 
"enabled" : "disabled"); + xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, " "remote port %d, local port %d\n", blkdev->xendev.protocol, blkdev->ring_ref, diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index a91a1798cb..023de526f6 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -21,6 +21,7 @@ */ #include "qemu/osdep.h" #include "hw/i386/amd_iommu.h" +#include "qemu/error-report.h" #include "trace.h" /* used AMD-Vi MMIO registers */ @@ -1066,13 +1067,18 @@ static const MemoryRegionOps mmio_mem_ops = { } }; -static void amdvi_iommu_notify_started(MemoryRegion *iommu) +static void amdvi_iommu_notify_flag_changed(MemoryRegion *iommu, + IOMMUNotifierFlag old, + IOMMUNotifierFlag new) { AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu); - hw_error("device %02x.%02x.%x requires iommu notifier which is not " - "currently supported", as->bus_num, PCI_SLOT(as->devfn), - PCI_FUNC(as->devfn)); + if (new & IOMMU_NOTIFIER_MAP) { + error_report("device %02x.%02x.%x requires iommu notifier which is not " + "currently supported", as->bus_num, PCI_SLOT(as->devfn), + PCI_FUNC(as->devfn)); + exit(1); + } } static void amdvi_init(AMDVIState *s) @@ -1080,7 +1086,7 @@ static void amdvi_init(AMDVIState *s) amdvi_iotlb_reset(s); s->iommu_ops.translate = amdvi_translate; - s->iommu_ops.notify_started = amdvi_iommu_notify_started; + s->iommu_ops.notify_flag_changed = amdvi_iommu_notify_flag_changed; s->devtab_len = 0; s->cmdbuf_len = 0; s->cmdbuf_head = 0; diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index d6e02c821a..9f4e64af1a 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -1974,14 +1974,20 @@ static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr, return ret; } -static void vtd_iommu_notify_started(MemoryRegion *iommu) +static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu, + IOMMUNotifierFlag old, + IOMMUNotifierFlag new) { VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); - hw_error("Device at bus %s addr %02x.%d requires iommu notifier which " - "is currently not supported by intel-iommu emulation", - vtd_as->bus->qbus.name, PCI_SLOT(vtd_as->devfn), - PCI_FUNC(vtd_as->devfn)); + if (new & IOMMU_NOTIFIER_MAP) { + error_report("Device at bus %s addr %02x.%d requires iommu " + "notifier which is currently not supported by " + "intel-iommu emulation", + vtd_as->bus->qbus.name, PCI_SLOT(vtd_as->devfn), + PCI_FUNC(vtd_as->devfn)); + exit(1); + } } static const VMStateDescription vtd_vmstate = { @@ -2348,7 +2354,7 @@ static void vtd_init(IntelIOMMUState *s) memset(s->womask, 0, DMAR_REG_SIZE); s->iommu_ops.translate = vtd_iommu_translate; - s->iommu_ops.notify_started = vtd_iommu_notify_started; + s->iommu_ops.notify_flag_changed = vtd_iommu_notify_flag_changed; s->root = 0; s->root_extended = false; s->dmar_enabled = false; diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c index f57fed1cb0..c016e63fc2 100644 --- a/hw/i386/kvm/apic.c +++ b/hw/i386/kvm/apic.c @@ -125,7 +125,7 @@ static void kvm_apic_vapic_base_update(APICCommonState *s) } } -static void kvm_apic_put(void *data) +static void kvm_apic_put(CPUState *cs, void *data) { APICCommonState *s = data; struct kvm_lapic_state kapic; @@ -146,10 +146,9 @@ static void kvm_apic_post_load(APICCommonState *s) run_on_cpu(CPU(s->cpu), kvm_apic_put, s); } -static void do_inject_external_nmi(void *data) +static void do_inject_external_nmi(CPUState *cpu, void *data) { APICCommonState *s = data; - CPUState *cpu = CPU(s->cpu); uint32_t 
lvt; int ret; diff --git a/hw/i386/kvmvapic.c b/hw/i386/kvmvapic.c index a1cd9b5a29..74a549becf 100644 --- a/hw/i386/kvmvapic.c +++ b/hw/i386/kvmvapic.c @@ -483,7 +483,7 @@ typedef struct VAPICEnableTPRReporting { bool enable; } VAPICEnableTPRReporting; -static void vapic_do_enable_tpr_reporting(void *data) +static void vapic_do_enable_tpr_reporting(CPUState *cpu, void *data) { VAPICEnableTPRReporting *info = data; @@ -734,10 +734,10 @@ static void vapic_realize(DeviceState *dev, Error **errp) nb_option_roms++; } -static void do_vapic_enable(void *data) +static void do_vapic_enable(CPUState *cs, void *data) { VAPICROMState *s = data; - X86CPU *cpu = X86_CPU(first_cpu); + X86CPU *cpu = X86_CPU(cs); static const uint8_t enabled = 1; cpu_physical_memory_write(s->vapic_paddr + offsetof(VAPICState, enabled), diff --git a/hw/input/ps2.c b/hw/input/ps2.c index a8aa36f5c0..0d14de08a6 100644 --- a/hw/input/ps2.c +++ b/hw/input/ps2.c @@ -22,6 +22,7 @@ * THE SOFTWARE. */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "hw/hw.h" #include "hw/input/ps2.h" #include "ui/console.h" @@ -94,12 +95,10 @@ typedef struct { typedef struct { PS2State common; int scan_enabled; - /* QEMU uses translated PC scancodes internally. To avoid multiple - conversions we do the translation (if any) in the PS/2 emulation - not the keyboard controller. */ int translate; int scancode_set; /* 1=XT, 2=AT, 3=PS/2 */ int ledstate; + bool need_high_bit; } PS2KbdState; typedef struct { @@ -116,26 +115,430 @@ typedef struct { uint8_t mouse_buttons; } PS2MouseState; -/* Table to convert from PC scancodes to raw scancodes. */ -static const unsigned char ps2_raw_keycode[128] = { - 0, 118, 22, 30, 38, 37, 46, 54, 61, 62, 70, 69, 78, 85, 102, 13, - 21, 29, 36, 45, 44, 53, 60, 67, 68, 77, 84, 91, 90, 20, 28, 27, - 35, 43, 52, 51, 59, 66, 75, 76, 82, 14, 18, 93, 26, 34, 33, 42, - 50, 49, 58, 65, 73, 74, 89, 124, 17, 41, 88, 5, 6, 4, 12, 3, - 11, 2, 10, 1, 9, 119, 126, 108, 117, 125, 123, 107, 115, 116, 121, 105, -114, 122, 112, 113, 127, 96, 97, 120, 7, 15, 23, 31, 39, 47, 55, 63, - 71, 79, 86, 94, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 87, 111, - 19, 25, 57, 81, 83, 92, 95, 98, 99, 100, 101, 103, 104, 106, 109, 110 +/* Table to convert from QEMU codes to scancodes. */ +static const uint16_t qcode_to_keycode_set1[Q_KEY_CODE__MAX] = { + [0 ... 
Q_KEY_CODE__MAX - 1] = 0, + + [Q_KEY_CODE_A] = 0x1e, + [Q_KEY_CODE_B] = 0x30, + [Q_KEY_CODE_C] = 0x2e, + [Q_KEY_CODE_D] = 0x20, + [Q_KEY_CODE_E] = 0x12, + [Q_KEY_CODE_F] = 0x21, + [Q_KEY_CODE_G] = 0x22, + [Q_KEY_CODE_H] = 0x23, + [Q_KEY_CODE_I] = 0x17, + [Q_KEY_CODE_J] = 0x24, + [Q_KEY_CODE_K] = 0x25, + [Q_KEY_CODE_L] = 0x26, + [Q_KEY_CODE_M] = 0x32, + [Q_KEY_CODE_N] = 0x31, + [Q_KEY_CODE_O] = 0x18, + [Q_KEY_CODE_P] = 0x19, + [Q_KEY_CODE_Q] = 0x10, + [Q_KEY_CODE_R] = 0x13, + [Q_KEY_CODE_S] = 0x1f, + [Q_KEY_CODE_T] = 0x14, + [Q_KEY_CODE_U] = 0x16, + [Q_KEY_CODE_V] = 0x2f, + [Q_KEY_CODE_W] = 0x11, + [Q_KEY_CODE_X] = 0x2d, + [Q_KEY_CODE_Y] = 0x15, + [Q_KEY_CODE_Z] = 0x2c, + [Q_KEY_CODE_0] = 0x0b, + [Q_KEY_CODE_1] = 0x02, + [Q_KEY_CODE_2] = 0x03, + [Q_KEY_CODE_3] = 0x04, + [Q_KEY_CODE_4] = 0x05, + [Q_KEY_CODE_5] = 0x06, + [Q_KEY_CODE_6] = 0x07, + [Q_KEY_CODE_7] = 0x08, + [Q_KEY_CODE_8] = 0x09, + [Q_KEY_CODE_9] = 0x0a, + [Q_KEY_CODE_GRAVE_ACCENT] = 0x29, + [Q_KEY_CODE_MINUS] = 0x0c, + [Q_KEY_CODE_EQUAL] = 0x0d, + [Q_KEY_CODE_BACKSLASH] = 0x2b, + [Q_KEY_CODE_BACKSPACE] = 0x0e, + [Q_KEY_CODE_SPC] = 0x39, + [Q_KEY_CODE_TAB] = 0x0f, + [Q_KEY_CODE_CAPS_LOCK] = 0x3a, + [Q_KEY_CODE_SHIFT] = 0x2a, + [Q_KEY_CODE_CTRL] = 0x1d, + [Q_KEY_CODE_META_L] = 0xe05b, + [Q_KEY_CODE_ALT] = 0x38, + [Q_KEY_CODE_SHIFT_R] = 0x36, + [Q_KEY_CODE_CTRL_R] = 0xe01d, + [Q_KEY_CODE_META_R] = 0xe05c, + [Q_KEY_CODE_ALT_R] = 0xe038, + [Q_KEY_CODE_MENU] = 0xe05d, + [Q_KEY_CODE_RET] = 0x1c, + [Q_KEY_CODE_ESC] = 0x01, + [Q_KEY_CODE_F1] = 0x3b, + [Q_KEY_CODE_F2] = 0x3c, + [Q_KEY_CODE_F3] = 0x3d, + [Q_KEY_CODE_F4] = 0x3e, + [Q_KEY_CODE_F5] = 0x3f, + [Q_KEY_CODE_F6] = 0x40, + [Q_KEY_CODE_F7] = 0x41, + [Q_KEY_CODE_F8] = 0x42, + [Q_KEY_CODE_F9] = 0x43, + [Q_KEY_CODE_F10] = 0x44, + [Q_KEY_CODE_F11] = 0x57, + [Q_KEY_CODE_F12] = 0x58, + /* special handling for Q_KEY_CODE_PRINT */ + [Q_KEY_CODE_SCROLL_LOCK] = 0x46, + /* special handling for Q_KEY_CODE_PAUSE */ + [Q_KEY_CODE_BRACKET_LEFT] = 0x1a, + [Q_KEY_CODE_INSERT] = 0xe052, + [Q_KEY_CODE_HOME] = 0xe047, + [Q_KEY_CODE_PGUP] = 0xe049, + [Q_KEY_CODE_DELETE] = 0xe053, + [Q_KEY_CODE_END] = 0xe04f, + [Q_KEY_CODE_PGDN] = 0xe051, + [Q_KEY_CODE_UP] = 0xe048, + [Q_KEY_CODE_LEFT] = 0xe04b, + [Q_KEY_CODE_DOWN] = 0xe050, + [Q_KEY_CODE_RIGHT] = 0xe04d, + [Q_KEY_CODE_NUM_LOCK] = 0x45, + [Q_KEY_CODE_KP_DIVIDE] = 0xe035, + [Q_KEY_CODE_KP_MULTIPLY] = 0x37, + [Q_KEY_CODE_KP_SUBTRACT] = 0x4a, + [Q_KEY_CODE_KP_ADD] = 0x4e, + [Q_KEY_CODE_KP_ENTER] = 0xe01c, + [Q_KEY_CODE_KP_DECIMAL] = 0x53, + [Q_KEY_CODE_KP_0] = 0x52, + [Q_KEY_CODE_KP_1] = 0x4f, + [Q_KEY_CODE_KP_2] = 0x50, + [Q_KEY_CODE_KP_3] = 0x51, + [Q_KEY_CODE_KP_4] = 0x4b, + [Q_KEY_CODE_KP_5] = 0x4c, + [Q_KEY_CODE_KP_6] = 0x4d, + [Q_KEY_CODE_KP_7] = 0x47, + [Q_KEY_CODE_KP_8] = 0x48, + [Q_KEY_CODE_KP_9] = 0x49, + [Q_KEY_CODE_BRACKET_RIGHT] = 0x1b, + [Q_KEY_CODE_SEMICOLON] = 0x27, + [Q_KEY_CODE_APOSTROPHE] = 0x28, + [Q_KEY_CODE_COMMA] = 0x33, + [Q_KEY_CODE_DOT] = 0x34, + [Q_KEY_CODE_SLASH] = 0x35, + +#if 0 + [Q_KEY_CODE_POWER] = 0x0e5e, + [Q_KEY_CODE_SLEEP] = 0x0e5f, + [Q_KEY_CODE_WAKE] = 0x0e63, + + [Q_KEY_CODE_AUDIONEXT] = 0xe019, + [Q_KEY_CODE_AUDIOPREV] = 0xe010, + [Q_KEY_CODE_AUDIOSTOP] = 0xe024, + [Q_KEY_CODE_AUDIOPLAY] = 0xe022, + [Q_KEY_CODE_AUDIOMUTE] = 0xe020, + [Q_KEY_CODE_VOLUMEUP] = 0xe030, + [Q_KEY_CODE_VOLUMEDOWN] = 0xe02e, + [Q_KEY_CODE_MEDIASELECT] = 0xe06d, + [Q_KEY_CODE_MAIL] = 0xe06c, + [Q_KEY_CODE_CALCULATOR] = 0xe021, + [Q_KEY_CODE_COMPUTER] = 0xe06b, + [Q_KEY_CODE_AC_SEARCH] = 0xe065, + [Q_KEY_CODE_AC_HOME] = 0xe032, + 
[Q_KEY_CODE_AC_BACK] = 0xe06a, + [Q_KEY_CODE_AC_FORWARD] = 0xe069, + [Q_KEY_CODE_AC_STOP] = 0xe068, + [Q_KEY_CODE_AC_REFRESH] = 0xe067, + [Q_KEY_CODE_AC_BOOKMARKS] = 0xe066, +#endif + + [Q_KEY_CODE_ASTERISK] = 0x37, + [Q_KEY_CODE_LESS] = 0x56, + [Q_KEY_CODE_RO] = 0x73, + [Q_KEY_CODE_KP_COMMA] = 0x7e, }; -static const unsigned char ps2_raw_keycode_set3[128] = { - 0, 8, 22, 30, 38, 37, 46, 54, 61, 62, 70, 69, 78, 85, 102, 13, - 21, 29, 36, 45, 44, 53, 60, 67, 68, 77, 84, 91, 90, 17, 28, 27, - 35, 43, 52, 51, 59, 66, 75, 76, 82, 14, 18, 92, 26, 34, 33, 42, - 50, 49, 58, 65, 73, 74, 89, 126, 25, 41, 20, 7, 15, 23, 31, 39, - 47, 2, 63, 71, 79, 118, 95, 108, 117, 125, 132, 107, 115, 116, 124, 105, -114, 122, 112, 113, 127, 96, 97, 86, 94, 15, 23, 31, 39, 47, 55, 63, - 71, 79, 86, 94, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 87, 111, - 19, 25, 57, 81, 83, 92, 95, 98, 99, 100, 101, 103, 104, 106, 109, 110 + +static const uint16_t qcode_to_keycode_set2[Q_KEY_CODE__MAX] = { + [0 ... Q_KEY_CODE__MAX - 1] = 0, + + [Q_KEY_CODE_A] = 0x1c, + [Q_KEY_CODE_B] = 0x32, + [Q_KEY_CODE_C] = 0x21, + [Q_KEY_CODE_D] = 0x23, + [Q_KEY_CODE_E] = 0x24, + [Q_KEY_CODE_F] = 0x2b, + [Q_KEY_CODE_G] = 0x34, + [Q_KEY_CODE_H] = 0x33, + [Q_KEY_CODE_I] = 0x43, + [Q_KEY_CODE_J] = 0x3b, + [Q_KEY_CODE_K] = 0x42, + [Q_KEY_CODE_L] = 0x4b, + [Q_KEY_CODE_M] = 0x3a, + [Q_KEY_CODE_N] = 0x31, + [Q_KEY_CODE_O] = 0x44, + [Q_KEY_CODE_P] = 0x4d, + [Q_KEY_CODE_Q] = 0x15, + [Q_KEY_CODE_R] = 0x2d, + [Q_KEY_CODE_S] = 0x1b, + [Q_KEY_CODE_T] = 0x2c, + [Q_KEY_CODE_U] = 0x3c, + [Q_KEY_CODE_V] = 0x2a, + [Q_KEY_CODE_W] = 0x1d, + [Q_KEY_CODE_X] = 0x22, + [Q_KEY_CODE_Y] = 0x35, + [Q_KEY_CODE_Z] = 0x1a, + [Q_KEY_CODE_0] = 0x45, + [Q_KEY_CODE_1] = 0x16, + [Q_KEY_CODE_2] = 0x1e, + [Q_KEY_CODE_3] = 0x26, + [Q_KEY_CODE_4] = 0x25, + [Q_KEY_CODE_5] = 0x2e, + [Q_KEY_CODE_6] = 0x36, + [Q_KEY_CODE_7] = 0x3d, + [Q_KEY_CODE_8] = 0x3e, + [Q_KEY_CODE_9] = 0x46, + [Q_KEY_CODE_GRAVE_ACCENT] = 0x0e, + [Q_KEY_CODE_MINUS] = 0x4e, + [Q_KEY_CODE_EQUAL] = 0x55, + [Q_KEY_CODE_BACKSLASH] = 0x5d, + [Q_KEY_CODE_BACKSPACE] = 0x66, + [Q_KEY_CODE_SPC] = 0x29, + [Q_KEY_CODE_TAB] = 0x0d, + [Q_KEY_CODE_CAPS_LOCK] = 0x58, + [Q_KEY_CODE_SHIFT] = 0x12, + [Q_KEY_CODE_CTRL] = 0x14, + [Q_KEY_CODE_META_L] = 0xe01f, + [Q_KEY_CODE_ALT] = 0x11, + [Q_KEY_CODE_SHIFT_R] = 0x59, + [Q_KEY_CODE_CTRL_R] = 0xe014, + [Q_KEY_CODE_META_R] = 0xe027, + [Q_KEY_CODE_ALT_R] = 0xe011, + [Q_KEY_CODE_MENU] = 0xe02f, + [Q_KEY_CODE_RET] = 0x5a, + [Q_KEY_CODE_ESC] = 0x76, + [Q_KEY_CODE_F1] = 0x05, + [Q_KEY_CODE_F2] = 0x06, + [Q_KEY_CODE_F3] = 0x04, + [Q_KEY_CODE_F4] = 0x0c, + [Q_KEY_CODE_F5] = 0x03, + [Q_KEY_CODE_F6] = 0x0b, + [Q_KEY_CODE_F7] = 0x83, + [Q_KEY_CODE_F8] = 0x0a, + [Q_KEY_CODE_F9] = 0x01, + [Q_KEY_CODE_F10] = 0x09, + [Q_KEY_CODE_F11] = 0x78, + [Q_KEY_CODE_F12] = 0x07, + /* special handling for Q_KEY_CODE_PRINT */ + [Q_KEY_CODE_SCROLL_LOCK] = 0x7e, + /* special handling for Q_KEY_CODE_PAUSE */ + [Q_KEY_CODE_BRACKET_LEFT] = 0x54, + [Q_KEY_CODE_INSERT] = 0xe070, + [Q_KEY_CODE_HOME] = 0xe06c, + [Q_KEY_CODE_PGUP] = 0xe07d, + [Q_KEY_CODE_DELETE] = 0xe071, + [Q_KEY_CODE_END] = 0xe069, + [Q_KEY_CODE_PGDN] = 0xe07a, + [Q_KEY_CODE_UP] = 0xe075, + [Q_KEY_CODE_LEFT] = 0xe06b, + [Q_KEY_CODE_DOWN] = 0xe072, + [Q_KEY_CODE_RIGHT] = 0xe074, + [Q_KEY_CODE_NUM_LOCK] = 0x77, + [Q_KEY_CODE_KP_DIVIDE] = 0xe04a, + [Q_KEY_CODE_KP_MULTIPLY] = 0x7c, + [Q_KEY_CODE_KP_SUBTRACT] = 0x7b, + [Q_KEY_CODE_KP_ADD] = 0x79, + [Q_KEY_CODE_KP_ENTER] = 0xe05a, + [Q_KEY_CODE_KP_DECIMAL] = 0x71, + [Q_KEY_CODE_KP_0] = 0x70, + [Q_KEY_CODE_KP_1] = 
0x69, + [Q_KEY_CODE_KP_2] = 0x72, + [Q_KEY_CODE_KP_3] = 0x7a, + [Q_KEY_CODE_KP_4] = 0x6b, + [Q_KEY_CODE_KP_5] = 0x73, + [Q_KEY_CODE_KP_6] = 0x74, + [Q_KEY_CODE_KP_7] = 0x6c, + [Q_KEY_CODE_KP_8] = 0x75, + [Q_KEY_CODE_KP_9] = 0x7d, + [Q_KEY_CODE_BRACKET_RIGHT] = 0x5b, + [Q_KEY_CODE_SEMICOLON] = 0x4c, + [Q_KEY_CODE_APOSTROPHE] = 0x52, + [Q_KEY_CODE_COMMA] = 0x41, + [Q_KEY_CODE_DOT] = 0x49, + [Q_KEY_CODE_SLASH] = 0x4a, + +#if 0 + [Q_KEY_CODE_POWER] = 0x0e37, + [Q_KEY_CODE_SLEEP] = 0x0e3f, + [Q_KEY_CODE_WAKE] = 0x0e5e, + + [Q_KEY_CODE_AUDIONEXT] = 0xe04d, + [Q_KEY_CODE_AUDIOPREV] = 0xe015, + [Q_KEY_CODE_AUDIOSTOP] = 0xe03b, + [Q_KEY_CODE_AUDIOPLAY] = 0xe034, + [Q_KEY_CODE_AUDIOMUTE] = 0xe023, + [Q_KEY_CODE_VOLUMEUP] = 0xe032, + [Q_KEY_CODE_VOLUMEDOWN] = 0xe021, + [Q_KEY_CODE_MEDIASELECT] = 0xe050, + [Q_KEY_CODE_MAIL] = 0xe048, + [Q_KEY_CODE_CALCULATOR] = 0xe02b, + [Q_KEY_CODE_COMPUTER] = 0xe040, + [Q_KEY_CODE_AC_SEARCH] = 0xe010, + [Q_KEY_CODE_AC_HOME] = 0xe03a, + [Q_KEY_CODE_AC_BACK] = 0xe038, + [Q_KEY_CODE_AC_FORWARD] = 0xe030, + [Q_KEY_CODE_AC_STOP] = 0xe028, + [Q_KEY_CODE_AC_REFRESH] = 0xe020, + [Q_KEY_CODE_AC_BOOKMARKS] = 0xe018, +#endif + + [Q_KEY_CODE_ALTGR] = 0x08, + [Q_KEY_CODE_ALTGR_R] = 0xe008, + [Q_KEY_CODE_ASTERISK] = 0x7c, + [Q_KEY_CODE_LESS] = 0x61, + [Q_KEY_CODE_SYSRQ] = 0x7f, + [Q_KEY_CODE_RO] = 0x51, + [Q_KEY_CODE_KP_COMMA] = 0x6d, +}; + +static const uint16_t qcode_to_keycode_set3[Q_KEY_CODE__MAX] = { + [0 ... Q_KEY_CODE__MAX - 1] = 0, + + [Q_KEY_CODE_A] = 0x1c, + [Q_KEY_CODE_B] = 0x32, + [Q_KEY_CODE_C] = 0x21, + [Q_KEY_CODE_D] = 0x23, + [Q_KEY_CODE_E] = 0x24, + [Q_KEY_CODE_F] = 0x2b, + [Q_KEY_CODE_G] = 0x34, + [Q_KEY_CODE_H] = 0x33, + [Q_KEY_CODE_I] = 0x43, + [Q_KEY_CODE_J] = 0x3b, + [Q_KEY_CODE_K] = 0x42, + [Q_KEY_CODE_L] = 0x4b, + [Q_KEY_CODE_M] = 0x3a, + [Q_KEY_CODE_N] = 0x31, + [Q_KEY_CODE_O] = 0x44, + [Q_KEY_CODE_P] = 0x4d, + [Q_KEY_CODE_Q] = 0x15, + [Q_KEY_CODE_R] = 0x2d, + [Q_KEY_CODE_S] = 0x1b, + [Q_KEY_CODE_T] = 0x2c, + [Q_KEY_CODE_U] = 0x3c, + [Q_KEY_CODE_V] = 0x2a, + [Q_KEY_CODE_W] = 0x1d, + [Q_KEY_CODE_X] = 0x22, + [Q_KEY_CODE_Y] = 0x35, + [Q_KEY_CODE_Z] = 0x1a, + [Q_KEY_CODE_0] = 0x45, + [Q_KEY_CODE_1] = 0x16, + [Q_KEY_CODE_2] = 0x1e, + [Q_KEY_CODE_3] = 0x26, + [Q_KEY_CODE_4] = 0x25, + [Q_KEY_CODE_5] = 0x2e, + [Q_KEY_CODE_6] = 0x36, + [Q_KEY_CODE_7] = 0x3d, + [Q_KEY_CODE_8] = 0x3e, + [Q_KEY_CODE_9] = 0x46, + [Q_KEY_CODE_GRAVE_ACCENT] = 0x0e, + [Q_KEY_CODE_MINUS] = 0x4e, + [Q_KEY_CODE_EQUAL] = 0x55, + [Q_KEY_CODE_BACKSLASH] = 0x5c, + [Q_KEY_CODE_BACKSPACE] = 0x66, + [Q_KEY_CODE_SPC] = 0x29, + [Q_KEY_CODE_TAB] = 0x0d, + [Q_KEY_CODE_CAPS_LOCK] = 0x14, + [Q_KEY_CODE_SHIFT] = 0x12, + [Q_KEY_CODE_CTRL] = 0x11, + [Q_KEY_CODE_META_L] = 0x8b, + [Q_KEY_CODE_ALT] = 0x19, + [Q_KEY_CODE_SHIFT_R] = 0x59, + [Q_KEY_CODE_CTRL_R] = 0x58, + [Q_KEY_CODE_META_R] = 0x8c, + [Q_KEY_CODE_ALT_R] = 0x39, + [Q_KEY_CODE_MENU] = 0x8d, + [Q_KEY_CODE_RET] = 0x5a, + [Q_KEY_CODE_ESC] = 0x08, + [Q_KEY_CODE_F1] = 0x07, + [Q_KEY_CODE_F2] = 0x0f, + [Q_KEY_CODE_F3] = 0x17, + [Q_KEY_CODE_F4] = 0x1f, + [Q_KEY_CODE_F5] = 0x27, + [Q_KEY_CODE_F6] = 0x2f, + [Q_KEY_CODE_F7] = 0x37, + [Q_KEY_CODE_F8] = 0x3f, + [Q_KEY_CODE_F9] = 0x47, + [Q_KEY_CODE_F10] = 0x4f, + [Q_KEY_CODE_F11] = 0x56, + [Q_KEY_CODE_F12] = 0x5e, + [Q_KEY_CODE_PRINT] = 0x57, + [Q_KEY_CODE_SCROLL_LOCK] = 0x5f, + [Q_KEY_CODE_PAUSE] = 0x62, + [Q_KEY_CODE_BRACKET_LEFT] = 0x54, + [Q_KEY_CODE_INSERT] = 0x67, + [Q_KEY_CODE_HOME] = 0x6e, + [Q_KEY_CODE_PGUP] = 0x6f, + [Q_KEY_CODE_DELETE] = 0x64, + [Q_KEY_CODE_END] = 0x65, + [Q_KEY_CODE_PGDN] = 0x6d, 
+ [Q_KEY_CODE_UP] = 0x63, + [Q_KEY_CODE_LEFT] = 0x61, + [Q_KEY_CODE_DOWN] = 0x60, + [Q_KEY_CODE_RIGHT] = 0x6a, + [Q_KEY_CODE_NUM_LOCK] = 0x76, + [Q_KEY_CODE_KP_DIVIDE] = 0x4a, + [Q_KEY_CODE_KP_MULTIPLY] = 0x7e, + [Q_KEY_CODE_KP_SUBTRACT] = 0x4e, + [Q_KEY_CODE_KP_ADD] = 0x7c, + [Q_KEY_CODE_KP_ENTER] = 0x79, + [Q_KEY_CODE_KP_DECIMAL] = 0x71, + [Q_KEY_CODE_KP_0] = 0x70, + [Q_KEY_CODE_KP_1] = 0x69, + [Q_KEY_CODE_KP_2] = 0x72, + [Q_KEY_CODE_KP_3] = 0x7a, + [Q_KEY_CODE_KP_4] = 0x6b, + [Q_KEY_CODE_KP_5] = 0x73, + [Q_KEY_CODE_KP_6] = 0x74, + [Q_KEY_CODE_KP_7] = 0x6c, + [Q_KEY_CODE_KP_8] = 0x75, + [Q_KEY_CODE_KP_9] = 0x7d, + [Q_KEY_CODE_BRACKET_RIGHT] = 0x5b, + [Q_KEY_CODE_SEMICOLON] = 0x4c, + [Q_KEY_CODE_APOSTROPHE] = 0x52, + [Q_KEY_CODE_COMMA] = 0x41, + [Q_KEY_CODE_DOT] = 0x49, + [Q_KEY_CODE_SLASH] = 0x4a, +}; + +static uint8_t translate_table[256] = { + 0xff, 0x43, 0x41, 0x3f, 0x3d, 0x3b, 0x3c, 0x58, + 0x64, 0x44, 0x42, 0x40, 0x3e, 0x0f, 0x29, 0x59, + 0x65, 0x38, 0x2a, 0x70, 0x1d, 0x10, 0x02, 0x5a, + 0x66, 0x71, 0x2c, 0x1f, 0x1e, 0x11, 0x03, 0x5b, + 0x67, 0x2e, 0x2d, 0x20, 0x12, 0x05, 0x04, 0x5c, + 0x68, 0x39, 0x2f, 0x21, 0x14, 0x13, 0x06, 0x5d, + 0x69, 0x31, 0x30, 0x23, 0x22, 0x15, 0x07, 0x5e, + 0x6a, 0x72, 0x32, 0x24, 0x16, 0x08, 0x09, 0x5f, + 0x6b, 0x33, 0x25, 0x17, 0x18, 0x0b, 0x0a, 0x60, + 0x6c, 0x34, 0x35, 0x26, 0x27, 0x19, 0x0c, 0x61, + 0x6d, 0x73, 0x28, 0x74, 0x1a, 0x0d, 0x62, 0x6e, + 0x3a, 0x36, 0x1c, 0x1b, 0x75, 0x2b, 0x63, 0x76, + 0x55, 0x56, 0x77, 0x78, 0x79, 0x7a, 0x0e, 0x7b, + 0x7c, 0x4f, 0x7d, 0x4b, 0x47, 0x7e, 0x7f, 0x6f, + 0x52, 0x53, 0x50, 0x4c, 0x4d, 0x48, 0x01, 0x45, + 0x57, 0x4e, 0x51, 0x4a, 0x37, 0x49, 0x46, 0x54, + 0x80, 0x81, 0x82, 0x41, 0x54, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, }; void ps2_queue(void *opaque, int b) @@ -152,44 +555,130 @@ void ps2_queue(void *opaque, int b) s->update_irq(s->update_arg, 1); } -/* - keycode is expressed as follow: - bit 7 - 0 key pressed, 1 = key released - bits 6-0 - translated scancode set 2 - */ +/* keycode is the untranslated scancode in the current scancode set. 
*/ static void ps2_put_keycode(void *opaque, int keycode) { PS2KbdState *s = opaque; trace_ps2_put_keycode(opaque, keycode); qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); - /* XXX: add support for scancode set 1 */ - if (!s->translate && keycode < 0xe0 && s->scancode_set > 1) { - if (keycode & 0x80) { - ps2_queue(&s->common, 0xf0); - } - if (s->scancode_set == 2) { - keycode = ps2_raw_keycode[keycode & 0x7f]; - } else if (s->scancode_set == 3) { - keycode = ps2_raw_keycode_set3[keycode & 0x7f]; + + if (s->translate) { + if (keycode == 0xf0) { + s->need_high_bit = true; + } else if (s->need_high_bit) { + ps2_queue(&s->common, translate_table[keycode] | 0x80); + s->need_high_bit = false; + } else { + ps2_queue(&s->common, translate_table[keycode]); } - } - ps2_queue(&s->common, keycode); + } else { + ps2_queue(&s->common, keycode); + } } static void ps2_keyboard_event(DeviceState *dev, QemuConsole *src, InputEvent *evt) { PS2KbdState *s = (PS2KbdState *)dev; - int scancodes[3], i, count; InputKeyEvent *key = evt->u.key.data; + int qcode; + uint16_t keycode; qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); - count = qemu_input_key_value_to_scancode(key->key, - key->down, - scancodes); - for (i = 0; i < count; i++) { - ps2_put_keycode(s, scancodes[i]); + assert(evt->type == INPUT_EVENT_KIND_KEY); + qcode = qemu_input_key_value_to_qcode(key->key); + + if (s->scancode_set == 1) { + if (qcode == Q_KEY_CODE_PAUSE) { + if (key->down) { + ps2_put_keycode(s, 0xe1); + ps2_put_keycode(s, 0x1d); + ps2_put_keycode(s, 0x45); + ps2_put_keycode(s, 0x91); + ps2_put_keycode(s, 0x9d); + ps2_put_keycode(s, 0xc5); + } + } else if (qcode == Q_KEY_CODE_PRINT) { + if (key->down) { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x2a); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x37); + } else { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xb7); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xaa); + } + } else { + keycode = qcode_to_keycode_set1[qcode]; + if (keycode) { + if (keycode & 0xff00) { + ps2_put_keycode(s, keycode >> 8); + } + if (!key->down) { + keycode |= 0x80; + } + ps2_put_keycode(s, keycode & 0xff); + } else { + qemu_log_mask(LOG_UNIMP, + "ps2: ignoring key with qcode %d\n", qcode); + } + } + } else if (s->scancode_set == 2) { + if (qcode == Q_KEY_CODE_PAUSE) { + if (key->down) { + ps2_put_keycode(s, 0xe1); + ps2_put_keycode(s, 0x14); + ps2_put_keycode(s, 0x77); + ps2_put_keycode(s, 0xe1); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x14); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x77); + } + } else if (qcode == Q_KEY_CODE_PRINT) { + if (key->down) { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x12); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0x7c); + } else { + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x7c); + ps2_put_keycode(s, 0xe0); + ps2_put_keycode(s, 0xf0); + ps2_put_keycode(s, 0x12); + } + } else { + keycode = qcode_to_keycode_set2[qcode]; + if (keycode) { + if (keycode & 0xff00) { + ps2_put_keycode(s, keycode >> 8); + } + if (!key->down) { + ps2_put_keycode(s, 0xf0); + } + ps2_put_keycode(s, keycode & 0xff); + } else { + qemu_log_mask(LOG_UNIMP, + "ps2: ignoring key with qcode %d\n", qcode); + } + } + } else if (s->scancode_set == 3) { + keycode = qcode_to_keycode_set3[qcode]; + if (keycode) { + /* FIXME: break code should be configured on a key by key basis */ + if (!key->down) { + ps2_put_keycode(s, 0xf0); + } + ps2_put_keycode(s, keycode); + } else { + qemu_log_mask(LOG_UNIMP, + "ps2: ignoring key with 
qcode %d\n", qcode); + } } } @@ -290,22 +779,19 @@ void ps2_write_keyboard(void *opaque, int val) ps2_queue(&s->common, KBD_REPLY_POR); break; default: - ps2_queue(&s->common, KBD_REPLY_ACK); + ps2_queue(&s->common, KBD_REPLY_RESEND); break; } break; case KBD_CMD_SCANCODE: if (val == 0) { - if (s->scancode_set == 1) - ps2_put_keycode(s, 0x43); - else if (s->scancode_set == 2) - ps2_put_keycode(s, 0x41); - else if (s->scancode_set == 3) - ps2_put_keycode(s, 0x3f); - } else { - if (val >= 1 && val <= 3) - s->scancode_set = val; ps2_queue(&s->common, KBD_REPLY_ACK); + ps2_put_keycode(s, s->scancode_set); + } else if (val >= 1 && val <= 3) { + s->scancode_set = val; + ps2_queue(&s->common, KBD_REPLY_ACK); + } else { + ps2_queue(&s->common, KBD_REPLY_RESEND); } s->common.write_cmd = -1; break; @@ -690,6 +1176,23 @@ static const VMStateDescription vmstate_ps2_keyboard_ledstate = { } }; +static bool ps2_keyboard_need_high_bit_needed(void *opaque) +{ + PS2KbdState *s = opaque; + return s->need_high_bit != 0; /* 0 is the usual state */ +} + +static const VMStateDescription vmstate_ps2_keyboard_need_high_bit = { + .name = "ps2kbd/need_high_bit", + .version_id = 1, + .minimum_version_id = 1, + .needed = ps2_keyboard_need_high_bit_needed, + .fields = (VMStateField[]) { + VMSTATE_BOOL(need_high_bit, PS2KbdState), + VMSTATE_END_OF_LIST() + } +}; + static int ps2_kbd_post_load(void* opaque, int version_id) { PS2KbdState *s = (PS2KbdState*)opaque; @@ -726,6 +1229,7 @@ static const VMStateDescription vmstate_ps2_keyboard = { }, .subsections = (const VMStateDescription*[]) { &vmstate_ps2_keyboard_ledstate, + &vmstate_ps2_keyboard_need_high_bit, NULL } }; diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c index 31791b0986..fd9208fde0 100644 --- a/hw/intc/ioapic.c +++ b/hw/intc/ioapic.c @@ -416,7 +416,7 @@ static void ioapic_realize(DeviceState *dev, Error **errp) } static Property ioapic_properties[] = { - DEFINE_PROP_UINT8("version", IOAPICCommonState, version, 0x11), + DEFINE_PROP_UINT8("version", IOAPICCommonState, version, 0x20), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c index 22c584eb8d..8e16f651ea 100644 --- a/hw/ppc/ppce500_spin.c +++ b/hw/ppc/ppce500_spin.c @@ -54,11 +54,6 @@ typedef struct SpinState { SpinInfo spin[MAX_CPUS]; } SpinState; -typedef struct spin_kick { - PowerPCCPU *cpu; - SpinInfo *spin; -} SpinKick; - static void spin_reset(void *opaque) { SpinState *s = opaque; @@ -89,16 +84,15 @@ static void mmubooke_create_initial_mapping(CPUPPCState *env, env->tlb_dirty = true; } -static void spin_kick(void *data) +static void spin_kick(CPUState *cs, void *data) { - SpinKick *kick = data; - CPUState *cpu = CPU(kick->cpu); - CPUPPCState *env = &kick->cpu->env; - SpinInfo *curspin = kick->spin; + PowerPCCPU *cpu = POWERPC_CPU(cs); + CPUPPCState *env = &cpu->env; + SpinInfo *curspin = data; hwaddr map_size = 64 * 1024 * 1024; hwaddr map_start; - cpu_synchronize_state(cpu); + cpu_synchronize_state(cs); stl_p(&curspin->pir, env->spr[SPR_BOOKE_PIR]); env->nip = ldq_p(&curspin->addr) & (map_size - 1); env->gpr[3] = ldq_p(&curspin->r3); @@ -112,10 +106,10 @@ static void spin_kick(void *data) map_start = ldq_p(&curspin->addr) & ~(map_size - 1); mmubooke_create_initial_mapping(env, 0, map_start, map_size); - cpu->halted = 0; - cpu->exception_index = -1; - cpu->stopped = false; - qemu_cpu_kick(cpu); + cs->halted = 0; + cs->exception_index = -1; + cs->stopped = false; + qemu_cpu_kick(cs); } static void spin_write(void *opaque, hwaddr addr, uint64_t value, @@ 
-153,12 +147,7 @@ static void spin_write(void *opaque, hwaddr addr, uint64_t value, if (!(ldq_p(&curspin->addr) & 1)) { /* run CPU */ - SpinKick kick = { - .cpu = POWERPC_CPU(cpu), - .spin = curspin, - }; - - run_on_cpu(cpu, spin_kick, &kick); + run_on_cpu(cpu, spin_kick, curspin); } } diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 648576e6bd..14b6821a94 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -2132,10 +2132,8 @@ static void spapr_machine_finalizefn(Object *obj) g_free(spapr->kvm_type); } -static void ppc_cpu_do_nmi_on_cpu(void *arg) +static void ppc_cpu_do_nmi_on_cpu(CPUState *cs, void *arg) { - CPUState *cs = arg; - cpu_synchronize_state(cs); ppc_cpu_do_system_reset(cs); } @@ -2145,7 +2143,7 @@ static void spapr_nmi(NMIState *n, int cpu_index, Error **errp) CPUState *cs; CPU_FOREACH(cs) { - async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, cs); + async_run_on_cpu(cs, ppc_cpu_do_nmi_on_cpu, NULL); } } diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 290a7122d4..c5e7e8c995 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -13,19 +13,18 @@ #include "kvm_ppc.h" struct SPRSyncState { - CPUState *cs; int spr; target_ulong value; target_ulong mask; }; -static void do_spr_sync(void *arg) +static void do_spr_sync(CPUState *cs, void *arg) { struct SPRSyncState *s = arg; - PowerPCCPU *cpu = POWERPC_CPU(s->cs); + PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; - cpu_synchronize_state(s->cs); + cpu_synchronize_state(cs); env->spr[s->spr] &= ~s->mask; env->spr[s->spr] |= s->value; } @@ -34,7 +33,6 @@ static void set_spr(CPUState *cs, int spr, target_ulong value, target_ulong mask) { struct SPRSyncState s = { - .cs = cs, .spr = spr, .value = value, .mask = mask @@ -909,17 +907,17 @@ static target_ulong cas_get_option_vector(int vector, target_ulong table) } typedef struct { - PowerPCCPU *cpu; uint32_t cpu_version; Error *err; } SetCompatState; -static void do_set_compat(void *arg) +static void do_set_compat(CPUState *cs, void *arg) { + PowerPCCPU *cpu = POWERPC_CPU(cs); SetCompatState *s = arg; - cpu_synchronize_state(CPU(s->cpu)); - ppc_set_compat(s->cpu, s->cpu_version, &s->err); + cpu_synchronize_state(cs); + ppc_set_compat(cpu, s->cpu_version, &s->err); } #define get_compat_level(cpuver) ( \ @@ -1015,7 +1013,6 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu_, if (old_cpu_version != cpu_version) { CPU_FOREACH(cs) { SetCompatState s = { - .cpu = POWERPC_CPU(cs), .cpu_version = cpu_version, .err = NULL, }; diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c index f20b0b884f..ae30bbe30f 100644 --- a/hw/ppc/spapr_iommu.c +++ b/hw/ppc/spapr_iommu.c @@ -156,14 +156,17 @@ static uint64_t spapr_tce_get_min_page_size(MemoryRegion *iommu) return 1ULL << tcet->page_shift; } -static void spapr_tce_notify_started(MemoryRegion *iommu) +static void spapr_tce_notify_flag_changed(MemoryRegion *iommu, + IOMMUNotifierFlag old, + IOMMUNotifierFlag new) { - spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), true); -} + struct sPAPRTCETable *tbl = container_of(iommu, sPAPRTCETable, iommu); -static void spapr_tce_notify_stopped(MemoryRegion *iommu) -{ - spapr_tce_set_need_vfio(container_of(iommu, sPAPRTCETable, iommu), false); + if (old == IOMMU_NOTIFIER_NONE && new != IOMMU_NOTIFIER_NONE) { + spapr_tce_set_need_vfio(tbl, true); + } else if (old != IOMMU_NOTIFIER_NONE && new == IOMMU_NOTIFIER_NONE) { + spapr_tce_set_need_vfio(tbl, false); + } } static int spapr_tce_table_post_load(void *opaque, int version_id) @@ -246,8 +249,7 
@@ static const VMStateDescription vmstate_spapr_tce_table = { static MemoryRegionIOMMUOps spapr_iommu_ops = { .translate = spapr_tce_translate_iommu, .get_min_page_size = spapr_tce_get_min_page_size, - .notify_started = spapr_tce_notify_started, - .notify_stopped = spapr_tce_notify_stopped, + .notify_flag_changed = spapr_tce_notify_flag_changed, }; static int spapr_tce_table_realize(DeviceState *dev) diff --git a/hw/vfio/common.c b/hw/vfio/common.c index b313e7c2c6..29188a12fc 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -293,11 +293,10 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section) section->offset_within_address_space & (1ULL << 63); } -static void vfio_iommu_map_notify(Notifier *n, void *data) +static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) { VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); VFIOContainer *container = giommu->container; - IOMMUTLBEntry *iotlb = data; hwaddr iova = iotlb->iova + giommu->iommu_offset; MemoryRegion *mr; hwaddr xlat; @@ -454,6 +453,7 @@ static void vfio_listener_region_add(MemoryListener *listener, section->offset_within_region; giommu->container = container; giommu->n.notify = vfio_iommu_map_notify; + giommu->n.notifier_flags = IOMMU_NOTIFIER_ALL; QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next); memory_region_register_iommu_notifier(giommu->iommu, &giommu->n); diff --git a/include/block/block_int.h b/include/block/block_int.h index ef3c047cb3..3e79228eb0 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -722,7 +722,7 @@ void commit_active_start(const char *job_id, BlockDriverState *bs, * @errp: Error object. * * Start a mirroring operation on @bs. Clusters that are allocated - * in @bs will be written to @bs until the job is cancelled or + * in @bs will be written to @target until the job is cancelled or * manually completed. At the end of a successful mirroring job, * @bs will be switched to read from @target. */ diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h index 952bcfeb4c..869ba41b0c 100644 --- a/include/exec/cpu-common.h +++ b/include/exec/cpu-common.h @@ -23,6 +23,11 @@ typedef struct CPUListState { FILE *file; } CPUListState; +/* The CPU list lock nests outside tb_lock/tb_unlock. */ +void qemu_init_cpu_list(void); +void cpu_list_lock(void); +void cpu_list_unlock(void); + #if !defined(CONFIG_USER_ONLY) enum device_endian { diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 008e09a3c1..336a57cde6 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -56,17 +56,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc, target_ulong cs_base, uint32_t flags, int cflags); -#if defined(CONFIG_USER_ONLY) -void cpu_list_lock(void); -void cpu_list_unlock(void); -#else -static inline void cpu_list_unlock(void) -{ -} -static inline void cpu_list_lock(void) -{ -} -#endif void cpu_exec_init(CPUState *cpu, Error **errp); void QEMU_NORETURN cpu_loop_exit(CPUState *cpu); diff --git a/include/exec/memory.h b/include/exec/memory.h index 3e4d4164cd..10d7eacc40 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -67,6 +67,27 @@ struct IOMMUTLBEntry { IOMMUAccessFlags perm; }; +/* + * Bitmap for different IOMMUNotifier capabilities. Each notifier can + * register with one or multiple IOMMU Notifier capability bit(s). 
+ */ +typedef enum { + IOMMU_NOTIFIER_NONE = 0, + /* Notify cache invalidations */ + IOMMU_NOTIFIER_UNMAP = 0x1, + /* Notify entry changes (newly created entries) */ + IOMMU_NOTIFIER_MAP = 0x2, +} IOMMUNotifierFlag; + +#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP) + +struct IOMMUNotifier { + void (*notify)(struct IOMMUNotifier *notifier, IOMMUTLBEntry *data); + IOMMUNotifierFlag notifier_flags; + QLIST_ENTRY(IOMMUNotifier) node; +}; +typedef struct IOMMUNotifier IOMMUNotifier; + /* New-style MMIO accessors can indicate that the transaction failed. * A zero (MEMTX_OK) response means success; anything else is a failure * of some kind. The memory subsystem will bitwise-OR together results @@ -153,10 +174,10 @@ struct MemoryRegionIOMMUOps { IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write); /* Returns minimum supported page size */ uint64_t (*get_min_page_size)(MemoryRegion *iommu); - /* Called when the first notifier is set */ - void (*notify_started)(MemoryRegion *iommu); - /* Called when the last notifier is removed */ - void (*notify_stopped)(MemoryRegion *iommu); + /* Called when IOMMU Notifier flag changed */ + void (*notify_flag_changed)(MemoryRegion *iommu, + IOMMUNotifierFlag old_flags, + IOMMUNotifierFlag new_flags); }; typedef struct CoalescedMemoryRange CoalescedMemoryRange; @@ -201,7 +222,8 @@ struct MemoryRegion { const char *name; unsigned ioeventfd_nb; MemoryRegionIoeventfd *ioeventfds; - NotifierList iommu_notify; + QLIST_HEAD(, IOMMUNotifier) iommu_notify; + IOMMUNotifierFlag iommu_notify_flags; }; /** @@ -607,6 +629,15 @@ uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr); /** * memory_region_notify_iommu: notify a change in an IOMMU translation entry. * + * The notification type will be decided by entry.perm bits: + * + * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE. + * - For MAP (newly added entry) notifies: set entry.perm to the + * permission of the page (which is definitely !IOMMU_NONE). + * + * Note: for any IOMMU implementation, an in-place mapping change + * should be notified with an UNMAP followed by a MAP. + * * @mr: the memory region that was changed * @entry: the new entry in the IOMMU translation table. The entry * replaces all old entries for the same virtual I/O address range. @@ -620,11 +651,12 @@ void memory_region_notify_iommu(MemoryRegion *mr, * IOMMU translation entries. * * @mr: the memory region to observe - * @n: the notifier to be added; the notifier receives a pointer to an - * #IOMMUTLBEntry as the opaque value; the pointer ceases to be - * valid on exit from the notifier. + * @n: the IOMMUNotifier to be added; the notify callback receives a + * pointer to an #IOMMUTLBEntry as the opaque value; the pointer + * ceases to be valid on exit from the notifier. 
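Taken together, the memory.h hunks above replace the generic Notifier with a typed IOMMUNotifier that declares which events it wants. As a minimal sketch of how a consumer would register under the new API (assuming QEMU's internal headers; the callback and helper names are illustrative, not part of this series):

/* Hypothetical consumer that only wants newly created mappings. */
static void my_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
{
    /* For MAP notifies, entry->perm is guaranteed != IOMMU_NONE. */
}

static IOMMUNotifier my_notifier;

static void my_register_notifier(MemoryRegion *iommu_mr)
{
    my_notifier.notify = my_map_notify;
    /* Registering a flag subset lets the IOMMU implementation skip
     * unneeded work; transitions are reported via notify_flag_changed. */
    my_notifier.notifier_flags = IOMMU_NOTIFIER_MAP;
    memory_region_register_iommu_notifier(iommu_mr, &my_notifier);
}

VFIO, as converted above, registers with IOMMU_NOTIFIER_ALL because it must mirror both maps and unmaps into the host IOMMU.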
*/ -void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n); +void memory_region_register_iommu_notifier(MemoryRegion *mr, + IOMMUNotifier *n); /** * memory_region_iommu_replay: replay existing IOMMU translations to @@ -636,7 +668,8 @@ void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n); * @is_write: Whether to treat the replay as a translate "write" * through the iommu */ -void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write); +void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n, + bool is_write); /** * memory_region_unregister_iommu_notifier: unregister a notifier for @@ -646,7 +679,8 @@ void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write); * needs to be called * @n: the notifier to be removed. */ -void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n); +void memory_region_unregister_iommu_notifier(MemoryRegion *mr, + IOMMUNotifier *n); /** * memory_region_name: get a memory region's name @@ -1154,12 +1188,11 @@ MemoryRegionSection memory_region_find(MemoryRegion *mr, hwaddr addr, uint64_t size); /** - * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory + * memory_global_dirty_log_sync: synchronize the dirty log for all memory * - * Synchronizes the dirty page log for an entire address space. - * @as: the address space that contains the memory being synchronized + * Synchronizes the dirty page log for all address spaces. */ -void address_space_sync_dirty_bitmap(AddressSpace *as); +void memory_global_dirty_log_sync(void); /** * memory_region_transaction_begin: Start a transaction. diff --git a/include/exec/tb-context.h b/include/exec/tb-context.h index dce95d92d6..c7f17f26e0 100644 --- a/include/exec/tb-context.h +++ b/include/exec/tb-context.h @@ -38,7 +38,7 @@ struct TBContext { QemuMutex tb_lock; /* statistics */ - int tb_flush_count; + unsigned tb_flush_count; int tb_phys_invalidate_count; }; diff --git a/include/hw/compat.h b/include/hw/compat.h index a1d6694492..46412b229a 100644 --- a/include/hw/compat.h +++ b/include/hw/compat.h @@ -6,6 +6,10 @@ .driver = "virtio-pci",\ .property = "page-per-vq",\ .value = "on",\ + },{\ + .driver = "ioapic",\ + .property = "version",\ + .value = "0x11",\ }, #define HW_COMPAT_2_6 \ diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h index 94dfae387a..c17602e533 100644 --- a/include/hw/vfio/vfio-common.h +++ b/include/hw/vfio/vfio-common.h @@ -93,7 +93,7 @@ typedef struct VFIOGuestIOMMU { VFIOContainer *container; MemoryRegion *iommu; hwaddr iommu_offset; - Notifier n; + IOMMUNotifier n; QLIST_ENTRY(VFIOGuestIOMMU) giommu_next; } VFIOGuestIOMMU; diff --git a/include/hw/xen/xen_common.h b/include/hw/xen/xen_common.h index bd39287b8f..8e1580d526 100644 --- a/include/hw/xen/xen_common.h +++ b/include/hw/xen/xen_common.h @@ -424,4 +424,18 @@ static inline int xen_domain_create(xc_interface *xc, uint32_t ssidref, #endif #endif +/* Xen before 4.8 */ + +#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 480 + + +typedef void *xengnttab_grant_copy_segment_t; + +static inline int xengnttab_grant_copy(xengnttab_handle *xgt, uint32_t count, + xengnttab_grant_copy_segment_t *segs) +{ + return -ENOSYS; +} +#endif + #endif /* QEMU_HW_XEN_COMMON_H */ diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h index 338d3a65b3..157698bfa9 100644 --- a/include/qemu/compiler.h +++ b/include/qemu/compiler.h @@ -1,4 +1,8 @@ -/* public domain */ +/* compiler.h: macros to abstract away compiler specifics + * + 
* This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ #ifndef COMPILER_H #define COMPILER_H diff --git a/include/qemu/coroutine.h b/include/qemu/coroutine.h index 29a20782f0..e6a60d55fd 100644 --- a/include/qemu/coroutine.h +++ b/include/qemu/coroutine.h @@ -92,6 +92,19 @@ Coroutine *coroutine_fn qemu_coroutine_self(void); */ bool qemu_in_coroutine(void); +/** + * Return true if the coroutine is currently entered + * + * A coroutine is "entered" if it has not yielded from the current + * qemu_coroutine_enter() call used to run it. This does not mean that the + * coroutine is currently executing code since it may have transferred control + * to another coroutine using qemu_coroutine_enter(). + * + * When several coroutines enter each other there may be no way to know which + * ones have already been entered. In such situations this function can be + * used to avoid recursively entering coroutines. + */ +bool qemu_coroutine_entered(Coroutine *co); /** diff --git a/include/qom/cpu.h b/include/qom/cpu.h index ce0c406f27..22b54d6d93 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -232,13 +232,8 @@ struct kvm_run; #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS) /* work queue */ -struct qemu_work_item { - struct qemu_work_item *next; - void (*func)(void *data); - void *data; - int done; - bool free; -}; +typedef void (*run_on_cpu_func)(CPUState *cpu, void *data); +struct qemu_work_item; /** * CPUState: @@ -247,7 +242,9 @@ struct qemu_work_item { * @nr_threads: Number of threads within this CPU. * @numa_node: NUMA node this CPU is belonging to. * @host_tid: Host thread ID. - * @running: #true if CPU is currently running (usermode). + * @running: #true if CPU is currently running (lockless). + * @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end; + * valid under cpu_list_lock. * @created: Indicates whether the CPU thread has been successfully created. * @interrupt_request: Indicates a pending interrupt request. * @halted: Nonzero if the CPU is in suspended state. @@ -257,7 +254,6 @@ struct qemu_work_item { * @crash_occurred: Indicates the OS reported a crash (panic) for this CPU * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this * CPU and return to its top level loop. - * @tb_flushed: Indicates the translation buffer has been flushed. * @singlestep_enabled: Flags for single-stepping. * @icount_extra: Instructions until next timer event. * @icount_decr: Number of cycles left, with interrupt flag in high bit. @@ -301,7 +297,7 @@ struct CPUState { #endif int thread_id; uint32_t host_tid; - bool running; + bool running, has_waiter; struct QemuCond *halt_cond; bool thread_kicked; bool created; @@ -310,7 +306,6 @@ struct CPUState { bool unplug; bool crash_occurred; bool exit_request; - bool tb_flushed; uint32_t interrupt_request; int singlestep_enabled; int64_t icount_extra; @@ -543,6 +538,18 @@ static inline int cpu_asidx_from_attrs(CPUState *cpu, MemTxAttrs attrs) #endif /** + * cpu_list_add: + * @cpu: The CPU to be added to the list of CPUs. + */ +void cpu_list_add(CPUState *cpu); + +/** + * cpu_list_remove: + * @cpu: The CPU to be removed from the list of CPUs. + */ +void cpu_list_remove(CPUState *cpu); + +/** * cpu_reset: * @cpu: The CPU whose state is to be reset. */ @@ -616,6 +623,18 @@ void qemu_cpu_kick(CPUState *cpu); bool cpu_is_stopped(CPUState *cpu); /** + * do_run_on_cpu: + * @cpu: The vCPU to run on. + * @func: The function to be executed. 
+ * @data: Data to pass to the function. + * @mutex: Mutex to release while waiting for @func to run. + * + * Used internally in the implementation of run_on_cpu. + */ +void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data, + QemuMutex *mutex); + +/** * run_on_cpu: * @cpu: The vCPU to run on. * @func: The function to be executed. @@ -623,7 +642,7 @@ bool cpu_is_stopped(CPUState *cpu); * * Schedules the function @func for execution on the vCPU @cpu. */ -void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data); +void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data); /** * async_run_on_cpu: @@ -633,7 +652,21 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data); * * Schedules the function @func for execution on the vCPU @cpu asynchronously. */ -void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data); +void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data); + +/** + * async_safe_run_on_cpu: + * @cpu: The vCPU to run on. + * @func: The function to be executed. + * @data: Data to pass to the function. + * + * Schedules the function @func for execution on the vCPU @cpu asynchronously, + * while all other vCPUs are sleeping. + * + * Unlike run_on_cpu and async_run_on_cpu, the function is run outside the + * BQL. + */ +void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data); /** * qemu_get_cpu: @@ -794,6 +827,49 @@ void cpu_remove(CPUState *cpu); void cpu_remove_sync(CPUState *cpu); /** + * process_queued_cpu_work() - process all items on CPU work queue + * @cpu: The CPU which work queue to process. + */ +void process_queued_cpu_work(CPUState *cpu); + +/** + * cpu_exec_start: + * @cpu: The CPU for the current thread. + * + * Record that a CPU has started execution and can be interrupted with + * cpu_exit. + */ +void cpu_exec_start(CPUState *cpu); + +/** + * cpu_exec_end: + * @cpu: The CPU for the current thread. + * + * Record that a CPU has stopped execution and exclusive sections + * can be executed without interrupting it. + */ +void cpu_exec_end(CPUState *cpu); + +/** + * start_exclusive: + * + * Wait for a concurrent exclusive section to end, and then start + * a section of work that is run while other CPUs are not running + * between cpu_exec_start and cpu_exec_end. CPUs that are running + * cpu_exec are exited immediately. CPUs that call cpu_exec_start + * during the exclusive section go to sleep until this CPU calls + * end_exclusive. + */ +void start_exclusive(void); + +/** + * end_exclusive: + * + * Concludes an exclusive execution section started by start_exclusive. + */ +void end_exclusive(void); + +/** * qemu_init_vcpu: * @cpu: The vCPU to initialize. * diff --git a/include/sysemu/replay.h b/include/sysemu/replay.h index 0a88393d2b..f80d6d28e8 100644 --- a/include/sysemu/replay.h +++ b/include/sysemu/replay.h @@ -105,6 +105,8 @@ bool replay_checkpoint(ReplayCheckpoint checkpoint); /*! Disables storing events in the queue */ void replay_disable_events(void); +/*! Enables storing events in the queue */ +void replay_enable_events(void); /*! Returns true when saving events is enabled */ bool replay_events_enabled(void); /*! Adds bottom half event to the queue */ @@ -115,6 +117,8 @@ void replay_input_event(QemuConsole *src, InputEvent *evt); void replay_input_sync_event(void); /*! Adds block layer event to the queue */ void replay_block_event(QEMUBH *bh, uint64_t id); +/*! 
Returns ID for the next block event */ +uint64_t blkreplay_next_id(void); /* Character device */ diff --git a/include/ui/console.h b/include/ui/console.h index d9c13d20b1..e2589e2134 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -387,6 +387,7 @@ QemuConsole *qemu_console_lookup_by_device_name(const char *device_id, bool qemu_console_is_visible(QemuConsole *con); bool qemu_console_is_graphic(QemuConsole *con); bool qemu_console_is_fixedsize(QemuConsole *con); +bool qemu_console_is_gl_blocked(QemuConsole *con); char *qemu_console_get_label(QemuConsole *con); int qemu_console_get_index(QemuConsole *con); uint32_t qemu_console_get_head(QemuConsole *con); diff --git a/include/ui/spice-display.h b/include/ui/spice-display.h index 42e0fdf775..184d4c373a 100644 --- a/include/ui/spice-display.h +++ b/include/ui/spice-display.h @@ -119,7 +119,10 @@ struct SimpleSpiceDisplay { /* opengl rendering */ QEMUBH *gl_unblock_bh; QEMUTimer *gl_unblock_timer; - int dmabuf_fd; + ConsoleGLState *gls; + int gl_updates; + bool have_scanout; + bool have_surface; #endif }; diff --git a/iothread.c b/iothread.c index fb08a60b4b..fbeb8deb38 100644 --- a/iothread.c +++ b/iothread.c @@ -75,6 +75,9 @@ static void iothread_instance_finalize(Object *obj) iothread_stop(obj, NULL); qemu_cond_destroy(&iothread->init_done_cond); qemu_mutex_destroy(&iothread->init_done_lock); + if (!iothread->ctx) { + return; + } aio_context_unref(iothread->ctx); } @@ -1847,10 +1847,8 @@ void kvm_flush_coalesced_mmio_buffer(void) s->coalesced_flush_in_progress = false; } -static void do_kvm_cpu_synchronize_state(void *arg) +static void do_kvm_cpu_synchronize_state(CPUState *cpu, void *arg) { - CPUState *cpu = arg; - if (!cpu->kvm_vcpu_dirty) { kvm_arch_get_registers(cpu); cpu->kvm_vcpu_dirty = true; @@ -1860,34 +1858,30 @@ static void do_kvm_cpu_synchronize_state(void *arg) void kvm_cpu_synchronize_state(CPUState *cpu) { if (!cpu->kvm_vcpu_dirty) { - run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu); + run_on_cpu(cpu, do_kvm_cpu_synchronize_state, NULL); } } -static void do_kvm_cpu_synchronize_post_reset(void *arg) +static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, void *arg) { - CPUState *cpu = arg; - kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE); cpu->kvm_vcpu_dirty = false; } void kvm_cpu_synchronize_post_reset(CPUState *cpu) { - run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, cpu); + run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, NULL); } -static void do_kvm_cpu_synchronize_post_init(void *arg) +static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, void *arg) { - CPUState *cpu = arg; - kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE); cpu->kvm_vcpu_dirty = false; } void kvm_cpu_synchronize_post_init(CPUState *cpu) { - run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, cpu); + run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, NULL); } int kvm_cpu_exec(CPUState *cpu) @@ -2216,7 +2210,7 @@ struct kvm_set_guest_debug_data { int err; }; -static void kvm_invoke_set_guest_debug(void *data) +static void kvm_invoke_set_guest_debug(CPUState *unused_cpu, void *data) { struct kvm_set_guest_debug_data *dbg_data = data; @@ -2234,7 +2228,6 @@ int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP; } kvm_arch_update_guest_debug(cpu, &data.dbg); - data.cpu = cpu; run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data); return data.err; diff --git a/linux-user/main.c b/linux-user/main.c index 424f2b6f7b..9e4b430d66 100644 --- 
a/linux-user/main.c +++ b/linux-user/main.c @@ -107,21 +107,11 @@ int cpu_get_pic_interrupt(CPUX86State *env) /***********************************************************/ /* Helper routines for implementing atomic operations. */ -/* To implement exclusive operations we force all cpus to syncronise. - We don't require a full sync, only that no cpus are executing guest code. - The alternative is to map target atomic ops onto host equivalents, - which requires quite a lot of per host/target work. */ -static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER; -static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER; -static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER; -static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER; -static int pending_cpus; - /* Make sure everything is in a consistent state for calling fork(). */ void fork_start(void) { + cpu_list_lock(); qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock); - pthread_mutex_lock(&exclusive_lock); mmap_fork_start(); } @@ -137,93 +127,15 @@ void fork_end(int child) QTAILQ_REMOVE(&cpus, cpu, node); } } - pending_cpus = 0; - pthread_mutex_init(&exclusive_lock, NULL); - pthread_mutex_init(&cpu_list_mutex, NULL); - pthread_cond_init(&exclusive_cond, NULL); - pthread_cond_init(&exclusive_resume, NULL); qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock); + qemu_init_cpu_list(); gdbserver_fork(thread_cpu); } else { - pthread_mutex_unlock(&exclusive_lock); qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock); + cpu_list_unlock(); } } -/* Wait for pending exclusive operations to complete. The exclusive lock - must be held. */ -static inline void exclusive_idle(void) -{ - while (pending_cpus) { - pthread_cond_wait(&exclusive_resume, &exclusive_lock); - } -} - -/* Start an exclusive operation. - Must only be called from outside cpu_exec. */ -static inline void start_exclusive(void) -{ - CPUState *other_cpu; - - pthread_mutex_lock(&exclusive_lock); - exclusive_idle(); - - pending_cpus = 1; - /* Make all other cpus stop executing. */ - CPU_FOREACH(other_cpu) { - if (other_cpu->running) { - pending_cpus++; - cpu_exit(other_cpu); - } - } - if (pending_cpus > 1) { - pthread_cond_wait(&exclusive_cond, &exclusive_lock); - } -} - -/* Finish an exclusive operation. */ -static inline void __attribute__((unused)) end_exclusive(void) -{ - pending_cpus = 0; - pthread_cond_broadcast(&exclusive_resume); - pthread_mutex_unlock(&exclusive_lock); -} - -/* Wait for exclusive ops to finish, and begin cpu execution. */ -static inline void cpu_exec_start(CPUState *cpu) -{ - pthread_mutex_lock(&exclusive_lock); - exclusive_idle(); - cpu->running = true; - pthread_mutex_unlock(&exclusive_lock); -} - -/* Mark cpu as not executing, and release pending exclusive ops. 
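Note that the rewritten fork_start() above takes cpu_list_lock() before tb_lock, matching the ordering rule added to cpu-common.h ("The CPU list lock nests outside tb_lock/tb_unlock"). A sketch of the only legal nesting, assuming QEMU's internal headers (not code from this series):

static void locked_region_sketch(void)
{
    cpu_list_lock();                           /* outer: CPU list lock */
    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);  /* inner: tb_lock */
    /* ... work needing both the CPU list and the TB state stable ... */
    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
    cpu_list_unlock();
}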
*/ -static inline void cpu_exec_end(CPUState *cpu) -{ - pthread_mutex_lock(&exclusive_lock); - cpu->running = false; - if (pending_cpus > 1) { - pending_cpus--; - if (pending_cpus == 1) { - pthread_cond_signal(&exclusive_cond); - } - } - exclusive_idle(); - pthread_mutex_unlock(&exclusive_lock); -} - -void cpu_list_lock(void) -{ - pthread_mutex_lock(&cpu_list_mutex); -} - -void cpu_list_unlock(void) -{ - pthread_mutex_unlock(&cpu_list_mutex); -} - - #ifdef TARGET_I386 /***********************************************************/ /* CPUX86 core interface */ @@ -296,6 +208,8 @@ void cpu_loop(CPUX86State *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch(trapnr) { case 0x80: /* linux syscall from int $0x80 */ @@ -737,6 +651,8 @@ void cpu_loop(CPUARMState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch(trapnr) { case EXCP_UDEF: { @@ -1073,6 +989,7 @@ void cpu_loop(CPUARMState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); switch (trapnr) { case EXCP_SWI: @@ -1161,6 +1078,8 @@ void cpu_loop(CPUUniCore32State *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch (trapnr) { case UC32_EXCP_PRIV: { @@ -1366,6 +1285,7 @@ void cpu_loop (CPUSPARCState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); /* Compute PSR before exposing state. */ if (env->cc_op != CC_OP_FLAGS) { @@ -1638,6 +1558,8 @@ void cpu_loop(CPUPPCState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch(trapnr) { case POWERPC_EXCP_NONE: /* Just go on */ @@ -2484,6 +2406,8 @@ void cpu_loop(CPUMIPSState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch(trapnr) { case EXCP_SYSCALL: env->active_tc.PC += 4; @@ -2724,6 +2648,7 @@ void cpu_loop(CPUOpenRISCState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); gdbsig = 0; switch (trapnr) { @@ -2818,6 +2743,7 @@ void cpu_loop(CPUSH4State *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); switch (trapnr) { case 0x160: @@ -2884,6 +2810,8 @@ void cpu_loop(CPUCRISState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch (trapnr) { case 0xaa: { @@ -2949,6 +2877,8 @@ void cpu_loop(CPUMBState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch (trapnr) { case 0xaa: { @@ -3066,6 +2996,8 @@ void cpu_loop(CPUM68KState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch(trapnr) { case EXCP_ILLEGAL: { @@ -3209,6 +3141,7 @@ void cpu_loop(CPUAlphaState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); /* All of the traps imply a transition through PALcode, which implies an REI instruction has been executed. Which means @@ -3401,6 +3334,8 @@ void cpu_loop(CPUS390XState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch (trapnr) { case EXCP_INTERRUPT: /* Just indicate that signals should be handled asap. 
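With the pthread-based machinery removed, every target loop in linux-user/main.c gains the same iteration shape, now backed by the generic implementation in cpus-common.c. Schematically (a sketch; real loops go on to dispatch on trapnr):

static void cpu_loop_iteration_sketch(CPUState *cs)
{
    int trapnr;

    cpu_exec_start(cs);          /* may be interrupted by cpu_exit() */
    trapnr = cpu_exec(cs);
    cpu_exec_end(cs);            /* lets pending exclusive sections run */
    process_queued_cpu_work(cs); /* drain run_on_cpu work for this vCPU */

    (void)trapnr;                /* target-specific trap handling follows */
}

Exclusive work itself is bracketed by start_exclusive()/end_exclusive(), which per the qom/cpu.h documentation earlier force all other CPUs out of the cpu_exec_start()/cpu_exec_end() window.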
*/ @@ -3710,6 +3645,8 @@ void cpu_loop(CPUTLGState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); + process_queued_cpu_work(cs); + switch (trapnr) { case TILEGX_EXCP_SYSCALL: { @@ -3769,6 +3706,16 @@ void cpu_loop(CPUTLGState *env) THREAD CPUState *thread_cpu; +bool qemu_cpu_is_self(CPUState *cpu) +{ + return thread_cpu == cpu; +} + +void qemu_cpu_kick(CPUState *cpu) +{ + cpu_exit(cpu); +} + void task_settid(TaskState *ts) { if (ts->ts_tid == 0) { @@ -4211,6 +4158,7 @@ int main(int argc, char **argv, char **envp) int ret; int execfd; + qemu_init_cpu_list(); module_call_init(MODULE_INIT_QOM); if ((envlist = envlist_create()) == NULL) { @@ -158,14 +158,10 @@ static bool memory_listener_match(MemoryListener *listener, /* No need to ref/unref .mr, the FlatRange keeps it alive. */ #define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback, _args...) \ - MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) { \ - .mr = (fr)->mr, \ - .address_space = (as), \ - .offset_within_region = (fr)->offset_in_region, \ - .size = (fr)->addr.size, \ - .offset_within_address_space = int128_get64((fr)->addr.start), \ - .readonly = (fr)->readonly, \ - }), ##_args) + do { \ + MemoryRegionSection mrs = section_from_flat_range(fr, as); \ + MEMORY_LISTENER_CALL(callback, dir, &mrs, ##_args); \ + } while(0) struct CoalescedMemoryRange { AddrRange addr; @@ -245,6 +241,19 @@ typedef struct AddressSpaceOps AddressSpaceOps; #define FOR_EACH_FLAT_RANGE(var, view) \ for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var) +static inline MemoryRegionSection +section_from_flat_range(FlatRange *fr, AddressSpace *as) +{ + return (MemoryRegionSection) { + .mr = fr->mr, + .address_space = as, + .offset_within_region = fr->offset_in_region, + .size = fr->addr.size, + .offset_within_address_space = int128_get64(fr->addr.start), + .readonly = fr->readonly, + }; +} + static bool flatrange_equal(FlatRange *a, FlatRange *b) { return a->mr == b->mr @@ -1413,7 +1422,8 @@ void memory_region_init_iommu(MemoryRegion *mr, memory_region_init(mr, owner, name, size); mr->iommu_ops = ops, mr->terminates = true; /* then re-forwards */ - notifier_list_init(&mr->iommu_notify); + QLIST_INIT(&mr->iommu_notify); + mr->iommu_notify_flags = IOMMU_NOTIFIER_NONE; } static void memory_region_finalize(Object *obj) @@ -1508,13 +1518,31 @@ bool memory_region_is_logging(MemoryRegion *mr, uint8_t client) return memory_region_get_dirty_log_mask(mr) & (1 << client); } -void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n) +static void memory_region_update_iommu_notify_flags(MemoryRegion *mr) { - if (mr->iommu_ops->notify_started && - QLIST_EMPTY(&mr->iommu_notify.notifiers)) { - mr->iommu_ops->notify_started(mr); + IOMMUNotifierFlag flags = IOMMU_NOTIFIER_NONE; + IOMMUNotifier *iommu_notifier; + + QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) { + flags |= iommu_notifier->notifier_flags; + } + + if (flags != mr->iommu_notify_flags && + mr->iommu_ops->notify_flag_changed) { + mr->iommu_ops->notify_flag_changed(mr, mr->iommu_notify_flags, + flags); } - notifier_list_add(&mr->iommu_notify, n); + + mr->iommu_notify_flags = flags; +} + +void memory_region_register_iommu_notifier(MemoryRegion *mr, + IOMMUNotifier *n) +{ + /* We need to register for at least one bitfield */ + assert(n->notifier_flags != IOMMU_NOTIFIER_NONE); + QLIST_INSERT_HEAD(&mr->iommu_notify, n, node); + memory_region_update_iommu_notify_flags(mr); } uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr) @@ -1526,7 
+1554,8 @@ uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr) return TARGET_PAGE_SIZE; } -void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write) +void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n, + bool is_write) { hwaddr addr, granularity; IOMMUTLBEntry iotlb; @@ -1547,20 +1576,32 @@ void memory_region_iommu_replay(MemoryRegion *mr, Notifier *n, bool is_write) } } -void memory_region_unregister_iommu_notifier(MemoryRegion *mr, Notifier *n) +void memory_region_unregister_iommu_notifier(MemoryRegion *mr, + IOMMUNotifier *n) { - notifier_remove(n); - if (mr->iommu_ops->notify_stopped && - QLIST_EMPTY(&mr->iommu_notify.notifiers)) { - mr->iommu_ops->notify_stopped(mr); - } + QLIST_REMOVE(n, node); + memory_region_update_iommu_notify_flags(mr); } void memory_region_notify_iommu(MemoryRegion *mr, IOMMUTLBEntry entry) { + IOMMUNotifier *iommu_notifier; + IOMMUNotifierFlag request_flags; + assert(memory_region_is_iommu(mr)); - notifier_list_notify(&mr->iommu_notify, &entry); + + if (entry.perm & IOMMU_RW) { + request_flags = IOMMU_NOTIFIER_MAP; + } else { + request_flags = IOMMU_NOTIFIER_UNMAP; + } + + QLIST_FOREACH(iommu_notifier, &mr->iommu_notify, node) { + if (iommu_notifier->notifier_flags & request_flags) { + iommu_notifier->notify(iommu_notifier, &entry); + } + } } void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client) @@ -2124,16 +2165,27 @@ bool memory_region_present(MemoryRegion *container, hwaddr addr) return mr && mr != container; } -void address_space_sync_dirty_bitmap(AddressSpace *as) +void memory_global_dirty_log_sync(void) { + MemoryListener *listener; + AddressSpace *as; FlatView *view; FlatRange *fr; - view = address_space_get_flatview(as); - FOR_EACH_FLAT_RANGE(fr, view) { - MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync); + QTAILQ_FOREACH(listener, &memory_listeners, link) { + if (!listener->log_sync) { + continue; + } + /* Global listeners are being phased out. 
*/ + assert(listener->address_space_filter); + as = listener->address_space_filter; + view = address_space_get_flatview(as); + FOR_EACH_FLAT_RANGE(fr, view) { + MemoryRegionSection mrs = section_from_flat_range(fr, as); + listener->log_sync(listener, &mrs); + } + flatview_unref(view); } - flatview_unref(view); } void memory_global_dirty_log_start(void) diff --git a/migration/ram.c b/migration/ram.c index a6e1c63a08..c8ec9f268f 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -626,7 +626,7 @@ static void migration_bitmap_sync(void) } trace_migration_bitmap_sync_start(); - address_space_sync_dirty_bitmap(&address_space_memory); + memory_global_dirty_log_sync(); qemu_mutex_lock(&migration_bitmap_mutex); rcu_read_lock(); diff --git a/replay/Makefile.objs b/replay/Makefile.objs index fcb3f74d60..c8ad3ebb89 100644 --- a/replay/Makefile.objs +++ b/replay/Makefile.objs @@ -4,3 +4,4 @@ common-obj-y += replay-events.o common-obj-y += replay-time.o common-obj-y += replay-input.o common-obj-y += replay-char.o +common-obj-y += replay-snapshot.o diff --git a/replay/replay-events.c b/replay/replay-events.c index 3807245ae7..c513913671 100644 --- a/replay/replay-events.c +++ b/replay/replay-events.c @@ -279,7 +279,7 @@ static Event *replay_read_event(int checkpoint) /* Called with replay mutex locked */ void replay_read_events(int checkpoint) { - while (replay_data_kind == EVENT_ASYNC) { + while (replay_state.data_kind == EVENT_ASYNC) { Event *event = replay_read_event(checkpoint); if (!event) { break; @@ -309,3 +309,11 @@ bool replay_events_enabled(void) { return events_enabled; } + +uint64_t blkreplay_next_id(void) +{ + if (replay_events_enabled()) { + return replay_state.block_request_id++; + } + return 0; +} diff --git a/replay/replay-internal.c b/replay/replay-internal.c index 5835e8def3..bea7b4aa6b 100644 --- a/replay/replay-internal.c +++ b/replay/replay-internal.c @@ -16,11 +16,8 @@ #include "qemu/error-report.h" #include "sysemu/sysemu.h" -unsigned int replay_data_kind = -1; -static unsigned int replay_has_unread_data; - /* Mutex to protect reading and writing events to the log. - replay_data_kind and replay_has_unread_data are also protected + data_kind and has_unread_data are also protected by this mutex. It also protects replay events queue which stores events to be written or read to the log. */ @@ -150,15 +147,16 @@ void replay_check_error(void) void replay_fetch_data_kind(void) { if (replay_file) { - if (!replay_has_unread_data) { - replay_data_kind = replay_get_byte(); - if (replay_data_kind == EVENT_INSTRUCTION) { + if (!replay_state.has_unread_data) { + replay_state.data_kind = replay_get_byte(); + if (replay_state.data_kind == EVENT_INSTRUCTION) { replay_state.instructions_count = replay_get_dword(); } replay_check_error(); - replay_has_unread_data = 1; - if (replay_data_kind >= EVENT_COUNT) { - error_report("Replay: unknown event kind %d", replay_data_kind); + replay_state.has_unread_data = 1; + if (replay_state.data_kind >= EVENT_COUNT) { + error_report("Replay: unknown event kind %d", + replay_state.data_kind); exit(1); } } @@ -167,7 +165,7 @@ void replay_fetch_data_kind(void) void replay_finish_event(void) { - replay_has_unread_data = 0; + replay_state.has_unread_data = 0; replay_fetch_data_kind(); } diff --git a/replay/replay-internal.h b/replay/replay-internal.h index efbf14c8a7..9117e442d0 100644 --- a/replay/replay-internal.h +++ b/replay/replay-internal.h @@ -62,11 +62,19 @@ typedef struct ReplayState { uint64_t current_step; /*! 
Number of instructions to be executed before other events happen. */ int instructions_count; + /*! Type of the currently executed event. */ + unsigned int data_kind; + /*! Flag which indicates that event is not processed yet. */ + unsigned int has_unread_data; + /*! Temporary variable for saving current log offset. */ + uint64_t file_offset; + /*! Next block operation id. + This counter is global, because requests from different + block devices should not get overlapping ids. */ + uint64_t block_request_id; } ReplayState; extern ReplayState replay_state; -extern unsigned int replay_data_kind; - /* File for replay writing */ extern FILE *replay_file; @@ -98,7 +106,7 @@ void replay_check_error(void); the next event from the log. */ void replay_finish_event(void); /*! Reads data type from the file and stores it in the - replay_data_kind variable. */ + data_kind variable. */ void replay_fetch_data_kind(void); /*! Saves queued events (like instructions and sound). */ @@ -119,8 +127,6 @@ void replay_read_next_clock(unsigned int kind); void replay_init_events(void); /*! Clears internal data structures for events handling */ void replay_finish_events(void); -/*! Enables storing events in the queue */ -void replay_enable_events(void); /*! Flushes events queue */ void replay_flush_events(void); /*! Clears events list before loading new VM state */ @@ -155,4 +161,11 @@ void replay_event_char_read_save(void *opaque); /*! Reads char event read from the file. */ void *replay_event_char_read_load(void); +/* VMState-related functions */ + +/* Registers replay VMState. + Should be called before virtual devices initialization + to make cached timers available for post_load functions. */ +void replay_vmstate_register(void); + #endif diff --git a/replay/replay-snapshot.c b/replay/replay-snapshot.c new file mode 100644 index 0000000000..498059734d --- /dev/null +++ b/replay/replay-snapshot.c @@ -0,0 +1,61 @@ +/* + * replay-snapshot.c + * + * Copyright (c) 2010-2016 Institute for System Programming + * of the Russian Academy of Sciences. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "sysemu/replay.h" +#include "replay-internal.h" +#include "sysemu/sysemu.h" +#include "monitor/monitor.h" +#include "qapi/qmp/qstring.h" +#include "qemu/error-report.h" +#include "migration/vmstate.h" + +static void replay_pre_save(void *opaque) +{ + ReplayState *state = opaque; + state->file_offset = ftell(replay_file); +} + +static int replay_post_load(void *opaque, int version_id) +{ + ReplayState *state = opaque; + fseek(replay_file, state->file_offset, SEEK_SET); + /* If this was a vmstate, saved in recording mode, + we need to initialize replay data fields. 
*/ + replay_fetch_data_kind(); + + return 0; +} + +static const VMStateDescription vmstate_replay = { + .name = "replay", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = replay_pre_save, + .post_load = replay_post_load, + .fields = (VMStateField[]) { + VMSTATE_INT64_ARRAY(cached_clock, ReplayState, REPLAY_CLOCK_COUNT), + VMSTATE_UINT64(current_step, ReplayState), + VMSTATE_INT32(instructions_count, ReplayState), + VMSTATE_UINT32(data_kind, ReplayState), + VMSTATE_UINT32(has_unread_data, ReplayState), + VMSTATE_UINT64(file_offset, ReplayState), + VMSTATE_UINT64(block_request_id, ReplayState), + VMSTATE_END_OF_LIST() + }, +}; + +void replay_vmstate_register(void) +{ + vmstate_register(NULL, 0, &vmstate_replay, &replay_state); +} diff --git a/replay/replay-time.c b/replay/replay-time.c index fffe072c55..f70382a88f 100644 --- a/replay/replay-time.c +++ b/replay/replay-time.c @@ -31,7 +31,7 @@ int64_t replay_save_clock(ReplayClockKind kind, int64_t clock) void replay_read_next_clock(ReplayClockKind kind) { - unsigned int read_kind = replay_data_kind - EVENT_CLOCK; + unsigned int read_kind = replay_state.data_kind - EVENT_CLOCK; assert(read_kind == kind); diff --git a/replay/replay.c b/replay/replay.c index 167fd2942d..c797aeae8a 100644 --- a/replay/replay.c +++ b/replay/replay.c @@ -38,15 +38,15 @@ bool replay_next_event_is(int event) /* nothing to skip - not all instructions used */ if (replay_state.instructions_count != 0) { - assert(replay_data_kind == EVENT_INSTRUCTION); + assert(replay_state.data_kind == EVENT_INSTRUCTION); return event == EVENT_INSTRUCTION; } while (true) { - if (event == replay_data_kind) { + if (event == replay_state.data_kind) { res = true; } - switch (replay_data_kind) { + switch (replay_state.data_kind) { case EVENT_SHUTDOWN: replay_finish_event(); qemu_system_shutdown_request(); @@ -85,7 +85,7 @@ void replay_account_executed_instructions(void) replay_state.instructions_count -= count; replay_state.current_step += count; if (replay_state.instructions_count == 0) { - assert(replay_data_kind == EVENT_INSTRUCTION); + assert(replay_state.data_kind == EVENT_INSTRUCTION); replay_finish_event(); /* Wake up iothread. This is required because timers will not expire until clock counters @@ -188,7 +188,7 @@ bool replay_checkpoint(ReplayCheckpoint checkpoint) if (replay_mode == REPLAY_MODE_PLAY) { if (replay_next_event_is(EVENT_CHECKPOINT + checkpoint)) { replay_finish_event(); - } else if (replay_data_kind != EVENT_ASYNC) { + } else if (replay_state.data_kind != EVENT_ASYNC) { res = false; goto out; } @@ -196,7 +196,7 @@ bool replay_checkpoint(ReplayCheckpoint checkpoint) /* replay_read_events may leave some unread events. 
Return false if not all of the events associated with checkpoint were processed */ - res = replay_data_kind != EVENT_ASYNC; + res = replay_state.data_kind != EVENT_ASYNC; } else if (replay_mode == REPLAY_MODE_RECORD) { replay_put_event(EVENT_CHECKPOINT + checkpoint); replay_save_events(checkpoint); @@ -237,9 +237,10 @@ static void replay_enable(const char *fname, int mode) replay_filename = g_strdup(fname); replay_mode = mode; - replay_data_kind = -1; + replay_state.data_kind = -1; replay_state.instructions_count = 0; replay_state.current_step = 0; + replay_state.has_unread_data = 0; /* skip file header for RECORD and check it for PLAY */ if (replay_mode == REPLAY_MODE_RECORD) { @@ -291,6 +292,7 @@ void replay_configure(QemuOpts *opts) exit(1); } + replay_vmstate_register(); replay_enable(fname, mode); out: diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index dde3f5f9d9..3afa19a766 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2407,7 +2407,7 @@ sub process { # we have e.g. CONFIG_LINUX and CONFIG_WIN32 for common cases # where they might be necessary. if ($line =~ m@^.\s*\#\s*if.*\b__@) { - ERROR("architecture specific defines should be avoided\n" . $herecurr); + WARN("architecture specific defines should be avoided\n" . $herecurr); } # Check that the storage class is at the beginning of a declaration diff --git a/stubs/replay.c b/stubs/replay.c index de9fa1ec98..d9a6da99d2 100644 --- a/stubs/replay.c +++ b/stubs/replay.c @@ -67,3 +67,8 @@ void replay_char_read_all_save_buf(uint8_t *buf, int offset) void replay_block_event(QEMUBH *bh, uint64_t id) { } + +uint64_t blkreplay_next_id(void) +{ + return 0; +} diff --git a/target-cris/cpu.c b/target-cris/cpu.c index c5a656bb62..d680cfb52b 100644 --- a/target-cris/cpu.c +++ b/target-cris/cpu.c @@ -246,6 +246,16 @@ static void crisv11_cpu_class_init(ObjectClass *oc, void *data) cc->gdb_read_register = crisv10_cpu_gdb_read_register; } +static void crisv17_cpu_class_init(ObjectClass *oc, void *data) +{ + CPUClass *cc = CPU_CLASS(oc); + CRISCPUClass *ccc = CRIS_CPU_CLASS(oc); + + ccc->vr = 17; + cc->do_interrupt = crisv10_cpu_do_interrupt; + cc->gdb_read_register = crisv10_cpu_gdb_read_register; +} + static void crisv32_cpu_class_init(ObjectClass *oc, void *data) { CRISCPUClass *ccc = CRIS_CPU_CLASS(oc); @@ -273,6 +283,10 @@ static const TypeInfo cris_cpu_model_type_infos[] = { .parent = TYPE_CRIS_CPU, .class_init = crisv11_cpu_class_init, }, { + .name = TYPE("crisv17"), + .parent = TYPE_CRIS_CPU, + .class_init = crisv17_cpu_class_init, + }, { .name = TYPE("crisv32"), .parent = TYPE_CRIS_CPU, .class_init = crisv32_cpu_class_init, diff --git a/target-cris/crisv10-decode.h b/target-cris/crisv10-decode.h index 587fbdd278..bdb4b6d318 100644 --- a/target-cris/crisv10-decode.h +++ b/target-cris/crisv10-decode.h @@ -92,6 +92,7 @@ #define CRISV10_IND_JUMP_M 4 #define CRISV10_IND_DIP 5 #define CRISV10_IND_JUMP_R 6 +#define CRISV17_IND_ADDC 6 #define CRISV10_IND_BOUND 7 #define CRISV10_IND_BCC_M 7 #define CRISV10_IND_MOVE_M_SPR 8 diff --git a/target-cris/translate.c b/target-cris/translate.c index f4a8d7d000..b5ab0a5fb2 100644 --- a/target-cris/translate.c +++ b/target-cris/translate.c @@ -140,14 +140,14 @@ static void gen_BUG(DisasContext *dc, const char *file, int line) cpu_abort(CPU(dc->cpu), "%s:%d\n", file, line); } -static const char *regnames[] = +static const char *regnames_v32[] = { "$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7", "$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$sp", "$acr", }; -static const char 
*pregnames[] = +static const char *pregnames_v32[] = { "$bz", "$vr", "$pid", "$srs", "$wz", "$exs", "$eda", "$mof", @@ -3327,12 +3327,20 @@ void cris_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, { CRISCPU *cpu = CRIS_CPU(cs); CPUCRISState *env = &cpu->env; + const char **regnames; + const char **pregnames; int i; - uint32_t srs; if (!env || !f) { return; } + if (env->pregs[PR_VR] < 32) { + pregnames = pregnames_v10; + regnames = regnames_v10; + } else { + pregnames = pregnames_v32; + regnames = regnames_v32; + } cpu_fprintf(f, "PC=%x CCS=%x btaken=%d btarget=%x\n" "cc_op=%d cc_src=%d cc_dest=%d cc_result=%x cc_mask=%x\n", @@ -3354,14 +3362,16 @@ void cris_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, cpu_fprintf(f, "\n"); } } - srs = env->pregs[PR_SRS]; - cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs); - if (srs < ARRAY_SIZE(env->sregs)) { - for (i = 0; i < 16; i++) { - cpu_fprintf(f, "s%2.2d=%8.8x ", - i, env->sregs[srs][i]); - if ((i + 1) % 4 == 0) { - cpu_fprintf(f, "\n"); + if (env->pregs[PR_VR] >= 32) { + uint32_t srs = env->pregs[PR_SRS]; + cpu_fprintf(f, "\nsupport function regs bank %x:\n", srs); + if (srs < ARRAY_SIZE(env->sregs)) { + for (i = 0; i < 16; i++) { + cpu_fprintf(f, "s%2.2d=%8.8x ", + i, env->sregs[srs][i]); + if ((i + 1) % 4 == 0) { + cpu_fprintf(f, "\n"); + } } } } @@ -3406,12 +3416,12 @@ void cris_initialize_tcg(void) for (i = 0; i < 16; i++) { cpu_R[i] = tcg_global_mem_new(cpu_env, offsetof(CPUCRISState, regs[i]), - regnames[i]); + regnames_v32[i]); } for (i = 0; i < 16; i++) { cpu_PR[i] = tcg_global_mem_new(cpu_env, offsetof(CPUCRISState, pregs[i]), - pregnames[i]); + pregnames_v32[i]); } } diff --git a/target-cris/translate_v10.c b/target-cris/translate_v10.c index 4707a18e77..4a0b485d8e 100644 --- a/target-cris/translate_v10.c +++ b/target-cris/translate_v10.c @@ -1094,6 +1094,29 @@ static unsigned int dec10_ind(CPUCRISState *env, DisasContext *dc) insn_len = dec10_bdap_m(env, dc, size); break; default: + /* + * ADDC for v17: + * + * Instruction format: ADDC [Rs],Rd + * + * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+ + * |Destination(Rd)| 1 0 0 1 1 0 1 0 | Source(Rs)| + * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+--+--+ + * + * Instruction format: ADDC [Rs+],Rd + * + * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+ + * |Destination(Rd)| 1 1 0 1 1 0 1 0 | Source(Rs)| + * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-+ + */ + if (dc->opcode == CRISV17_IND_ADDC && dc->size == 2 && + env->pregs[PR_VR] == 17) { + LOG_DIS("addc op=%d %d\n", dc->src, dc->dst); + cris_cc_mask(dc, CC_MASK_NZVC); + insn_len += dec10_ind_alu(env, dc, CC_OP_ADDC, size); + break; + } + LOG_DIS("pc=%x var-ind.%d %d r%d r%d\n", dc->pc, size, dc->opcode, dc->src, dc->dst); cpu_abort(CPU(dc->cpu), "Unhandled opcode"); diff --git a/target-i386/helper.c b/target-i386/helper.c index 1c250b8245..9bc961bff3 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -1113,7 +1113,6 @@ out: typedef struct MCEInjectionParams { Monitor *mon; - X86CPU *cpu; int bank; uint64_t status; uint64_t mcg_status; @@ -1122,14 +1121,14 @@ typedef struct MCEInjectionParams { int flags; } MCEInjectionParams; -static void do_inject_x86_mce(void *data) +static void do_inject_x86_mce(CPUState *cs, void *data) { MCEInjectionParams *params = data; - CPUX86State *cenv = &params->cpu->env; - CPUState *cpu = CPU(params->cpu); + X86CPU *cpu = X86_CPU(cs); + CPUX86State *cenv = &cpu->env; uint64_t *banks 
= cenv->mce_banks + 4 * params->bank; - cpu_synchronize_state(cpu); + cpu_synchronize_state(cs); /* * If there is an MCE exception being processed, ignore this SRAO MCE @@ -1149,7 +1148,7 @@ static void do_inject_x86_mce(void *data) if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) { monitor_printf(params->mon, "CPU %d: Uncorrected error reporting disabled\n", - cpu->cpu_index); + cs->cpu_index); return; } @@ -1161,7 +1160,7 @@ static void do_inject_x86_mce(void *data) monitor_printf(params->mon, "CPU %d: Uncorrected error reporting disabled for" " bank %d\n", - cpu->cpu_index, params->bank); + cs->cpu_index, params->bank); return; } @@ -1170,7 +1169,7 @@ static void do_inject_x86_mce(void *data) monitor_printf(params->mon, "CPU %d: Previous MCE still in progress, raising" " triple fault\n", - cpu->cpu_index); + cs->cpu_index); qemu_log_mask(CPU_LOG_RESET, "Triple fault\n"); qemu_system_reset_request(); return; @@ -1182,7 +1181,7 @@ static void do_inject_x86_mce(void *data) banks[3] = params->misc; cenv->mcg_status = params->mcg_status; banks[1] = params->status; - cpu_interrupt(cpu, CPU_INTERRUPT_MCE); + cpu_interrupt(cs, CPU_INTERRUPT_MCE); } else if (!(banks[1] & MCI_STATUS_VAL) || !(banks[1] & MCI_STATUS_UC)) { if (banks[1] & MCI_STATUS_VAL) { @@ -1204,7 +1203,6 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, CPUX86State *cenv = &cpu->env; MCEInjectionParams params = { .mon = mon, - .cpu = cpu, .bank = bank, .status = status, .mcg_status = mcg_status, @@ -1245,7 +1243,6 @@ void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, if (other_cs == cs) { continue; } - params.cpu = X86_CPU(other_cs); run_on_cpu(other_cs, do_inject_x86_mce, &params); } } diff --git a/target-i386/kvm.c b/target-i386/kvm.c index 30b63b7a75..ee1f53e569 100644 --- a/target-i386/kvm.c +++ b/target-i386/kvm.c @@ -150,10 +150,8 @@ static int kvm_get_tsc(CPUState *cs) return 0; } -static inline void do_kvm_synchronize_tsc(void *arg) +static inline void do_kvm_synchronize_tsc(CPUState *cpu, void *arg) { - CPUState *cpu = arg; - kvm_get_tsc(cpu); } @@ -163,7 +161,7 @@ void kvm_synchronize_all_tsc(void) if (kvm_enabled()) { CPU_FOREACH(cpu) { - run_on_cpu(cpu, do_kvm_synchronize_tsc, cpu); + run_on_cpu(cpu, do_kvm_synchronize_tsc, NULL); } } } diff --git a/target-s390x/cpu.c b/target-s390x/cpu.c index 2f3c8e245d..35ae2cec4b 100644 --- a/target-s390x/cpu.c +++ b/target-s390x/cpu.c @@ -164,7 +164,7 @@ static void s390_cpu_machine_reset_cb(void *opaque) { S390CPU *cpu = opaque; - run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, CPU(cpu)); + run_on_cpu(CPU(cpu), s390_do_cpu_full_reset, NULL); } #endif @@ -220,7 +220,7 @@ static void s390_cpu_realizefn(DeviceState *dev, Error **errp) s390_cpu_gdb_init(cs); qemu_init_vcpu(cs); #if !defined(CONFIG_USER_ONLY) - run_on_cpu(cs, s390_do_cpu_full_reset, cs); + run_on_cpu(cs, s390_do_cpu_full_reset, NULL); #else cpu_reset(cs); #endif diff --git a/target-s390x/cpu.h b/target-s390x/cpu.h index 5645e063af..4fb34b598d 100644 --- a/target-s390x/cpu.h +++ b/target-s390x/cpu.h @@ -502,17 +502,14 @@ static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb, #define decode_basedisp_rs decode_basedisp_s /* helper functions for run_on_cpu() */ -static inline void s390_do_cpu_reset(void *arg) +static inline void s390_do_cpu_reset(CPUState *cs, void *arg) { - CPUState *cs = arg; S390CPUClass *scc = S390_CPU_GET_CLASS(cs); scc->cpu_reset(cs); } -static inline void s390_do_cpu_full_reset(void *arg) +static inline void s390_do_cpu_full_reset(CPUState *cs, 
void *arg) { - CPUState *cs = arg; - cpu_reset(cs); } diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c index 4b847a3be4..fd929e8351 100644 --- a/target-s390x/kvm.c +++ b/target-s390x/kvm.c @@ -1385,7 +1385,6 @@ static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb) } typedef struct SigpInfo { - S390CPU *cpu; uint64_t param; int cc; uint64_t *status_reg; @@ -1398,38 +1397,40 @@ static void set_sigp_status(SigpInfo *si, uint64_t status) si->cc = SIGP_CC_STATUS_STORED; } -static void sigp_start(void *arg) +static void sigp_start(CPUState *cs, void *arg) { + S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg; - if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) { + if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) { si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; return; } - s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu); + s390_cpu_set_state(CPU_STATE_OPERATING, cpu); si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } -static void sigp_stop(void *arg) +static void sigp_stop(CPUState *cs, void *arg) { + S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg; struct kvm_s390_irq irq = { .type = KVM_S390_SIGP_STOP, }; - if (s390_cpu_get_state(si->cpu) != CPU_STATE_OPERATING) { + if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) { si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; return; } /* disabled wait - sleeping in user space */ - if (CPU(si->cpu)->halted) { - s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu); + if (cs->halted) { + s390_cpu_set_state(CPU_STATE_STOPPED, cpu); } else { /* execute the stop function */ - si->cpu->env.sigp_order = SIGP_STOP; - kvm_s390_vcpu_interrupt(si->cpu, &irq); + cpu->env.sigp_order = SIGP_STOP; + kvm_s390_vcpu_interrupt(cpu, &irq); } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } @@ -1496,56 +1497,58 @@ static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch) return 0; } -static void sigp_stop_and_store_status(void *arg) +static void sigp_stop_and_store_status(CPUState *cs, void *arg) { + S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg; struct kvm_s390_irq irq = { .type = KVM_S390_SIGP_STOP, }; /* disabled wait - sleeping in user space */ - if (s390_cpu_get_state(si->cpu) == CPU_STATE_OPERATING && - CPU(si->cpu)->halted) { - s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu); + if (s390_cpu_get_state(cpu) == CPU_STATE_OPERATING && cs->halted) { + s390_cpu_set_state(CPU_STATE_STOPPED, cpu); } - switch (s390_cpu_get_state(si->cpu)) { + switch (s390_cpu_get_state(cpu)) { case CPU_STATE_OPERATING: - si->cpu->env.sigp_order = SIGP_STOP_STORE_STATUS; - kvm_s390_vcpu_interrupt(si->cpu, &irq); + cpu->env.sigp_order = SIGP_STOP_STORE_STATUS; + kvm_s390_vcpu_interrupt(cpu, &irq); /* store will be performed when handling the stop intercept */ break; case CPU_STATE_STOPPED: /* already stopped, just store the status */ - cpu_synchronize_state(CPU(si->cpu)); - kvm_s390_store_status(si->cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true); + cpu_synchronize_state(cs); + kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true); break; } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } -static void sigp_store_status_at_address(void *arg) +static void sigp_store_status_at_address(CPUState *cs, void *arg) { + S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg; uint32_t address = si->param & 0x7ffffe00u; /* cpu has to be stopped */ - if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) { + if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) { set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); return; } - cpu_synchronize_state(CPU(si->cpu)); + cpu_synchronize_state(cs); - if 
(kvm_s390_store_status(si->cpu, address, false)) { + if (kvm_s390_store_status(cpu, address, false)) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } -static void sigp_store_adtl_status(void *arg) +static void sigp_store_adtl_status(CPUState *cs, void *arg) { + S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg; if (!s390_has_feat(S390_FEAT_VECTOR)) { @@ -1554,7 +1557,7 @@ static void sigp_store_adtl_status(void *arg) } /* cpu has to be stopped */ - if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) { + if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) { set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); return; } @@ -1565,31 +1568,32 @@ static void sigp_store_adtl_status(void *arg) return; } - cpu_synchronize_state(CPU(si->cpu)); + cpu_synchronize_state(cs); - if (kvm_s390_store_adtl_status(si->cpu, si->param)) { + if (kvm_s390_store_adtl_status(cpu, si->param)) { set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER); return; } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } -static void sigp_restart(void *arg) +static void sigp_restart(CPUState *cs, void *arg) { + S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg; struct kvm_s390_irq irq = { .type = KVM_S390_RESTART, }; - switch (s390_cpu_get_state(si->cpu)) { + switch (s390_cpu_get_state(cpu)) { case CPU_STATE_STOPPED: /* the restart irq has to be delivered prior to any other pending irq */ - cpu_synchronize_state(CPU(si->cpu)); - do_restart_interrupt(&si->cpu->env); - s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu); + cpu_synchronize_state(cs); + do_restart_interrupt(&cpu->env); + s390_cpu_set_state(CPU_STATE_OPERATING, cpu); break; case CPU_STATE_OPERATING: - kvm_s390_vcpu_interrupt(si->cpu, &irq); + kvm_s390_vcpu_interrupt(cpu, &irq); break; } si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; @@ -1597,20 +1601,18 @@ static void sigp_restart(void *arg) int kvm_s390_cpu_restart(S390CPU *cpu) { - SigpInfo si = { - .cpu = cpu, - }; + SigpInfo si = {}; run_on_cpu(CPU(cpu), sigp_restart, &si); DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env); return 0; } -static void sigp_initial_cpu_reset(void *arg) +static void sigp_initial_cpu_reset(CPUState *cs, void *arg) { + S390CPU *cpu = S390_CPU(cs); + S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); SigpInfo *si = arg; - CPUState *cs = CPU(si->cpu); - S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu); cpu_synchronize_state(cs); scc->initial_cpu_reset(cs); @@ -1618,11 +1620,11 @@ static void sigp_initial_cpu_reset(void *arg) si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } -static void sigp_cpu_reset(void *arg) +static void sigp_cpu_reset(CPUState *cs, void *arg) { + S390CPU *cpu = S390_CPU(cs); + S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); SigpInfo *si = arg; - CPUState *cs = CPU(si->cpu); - S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu); cpu_synchronize_state(cs); scc->cpu_reset(cs); @@ -1630,12 +1632,13 @@ static void sigp_cpu_reset(void *arg) si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } -static void sigp_set_prefix(void *arg) +static void sigp_set_prefix(CPUState *cs, void *arg) { + S390CPU *cpu = S390_CPU(cs); SigpInfo *si = arg; uint32_t addr = si->param & 0x7fffe000u; - cpu_synchronize_state(CPU(si->cpu)); + cpu_synchronize_state(cs); if (!address_space_access_valid(&address_space_memory, addr, sizeof(struct LowCore), false)) { @@ -1644,13 +1647,13 @@ static void sigp_set_prefix(void *arg) } /* cpu has to be stopped */ - if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) { + if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) { set_sigp_status(si, SIGP_STAT_INCORRECT_STATE); return; 
} - si->cpu->env.psa = addr; - cpu_synchronize_post_init(CPU(si->cpu)); + cpu->env.psa = addr; + cpu_synchronize_post_init(cs); si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } @@ -1658,7 +1661,6 @@ static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order, uint64_t param, uint64_t *status_reg) { SigpInfo si = { - .cpu = dst_cpu, .param = param, .status_reg = status_reg, }; diff --git a/target-s390x/misc_helper.c b/target-s390x/misc_helper.c index 86da1947b9..4df2ec6c7d 100644 --- a/target-s390x/misc_helper.c +++ b/target-s390x/misc_helper.c @@ -126,7 +126,7 @@ static int modified_clear_reset(S390CPU *cpu) pause_all_vcpus(); cpu_synchronize_all_states(); CPU_FOREACH(t) { - run_on_cpu(t, s390_do_cpu_full_reset, t); + run_on_cpu(t, s390_do_cpu_full_reset, NULL); } s390_cmma_reset(); subsystem_reset(); @@ -145,7 +145,7 @@ static int load_normal_reset(S390CPU *cpu) pause_all_vcpus(); cpu_synchronize_all_states(); CPU_FOREACH(t) { - run_on_cpu(t, s390_do_cpu_reset, t); + run_on_cpu(t, s390_do_cpu_reset, NULL); } s390_cmma_reset(); subsystem_reset(); diff --git a/tests/libqos/virtio.c b/tests/libqos/virtio.c index 37ff860c16..105bccecaa 100644 --- a/tests/libqos/virtio.c +++ b/tests/libqos/virtio.c @@ -147,7 +147,7 @@ void qvring_init(const QGuestAllocator *alloc, QVirtQueue *vq, uint64_t addr) for (i = 0; i < vq->size - 1; i++) { /* vq->desc[i].addr */ - writew(vq->desc + (16 * i), 0); + writeq(vq->desc + (16 * i), 0); /* vq->desc[i].next */ writew(vq->desc + (16 * i) + 14, i + 1); } diff --git a/tests/tcg/cris/Makefile b/tests/tcg/cris/Makefile index d34bfd8f7a..6b3dba446c 100644 --- a/tests/tcg/cris/Makefile +++ b/tests/tcg/cris/Makefile @@ -23,6 +23,7 @@ SYS = sys.o TESTCASES += check_abs.tst TESTCASES += check_addc.tst TESTCASES += check_addcm.tst +TESTCASES += check_addcv17.tst TESTCASES += check_addo.tst TESTCASES += check_addoq.tst TESTCASES += check_addi.tst @@ -108,14 +109,12 @@ TESTCASES += check_stat4.ctst TESTCASES += check_openpf1.ctst TESTCASES += check_openpf2.ctst TESTCASES += check_openpf3.ctst -TESTCASES += check_openpf4.ctst TESTCASES += check_openpf5.ctst TESTCASES += check_mapbrk.ctst TESTCASES += check_mmap1.ctst TESTCASES += check_mmap2.ctst TESTCASES += check_mmap3.ctst TESTCASES += check_sigalrm.ctst -TESTCASES += check_time1.ctst TESTCASES += check_time2.ctst TESTCASES += check_settls1.ctst @@ -136,13 +135,27 @@ all: build %.ctst: %.o $(CC) $(CFLAGS) $(LDLIBS) $< -o $@ + +sysv10.o: sys.c + $(CC) $(CFLAGS) -mcpu=v10 -c $< -o $@ + +crtv10.o: crt.s + $(AS) $(ASFLAGS) -mcpu=v10 -c $< -o $@ + +check_addcv17.tst: ASFLAGS += -mcpu=v10 +check_addcv17.tst: CRT := crtv10.o +check_addcv17.tst: SYS := sysv10.o +check_addcv17.tst: crtv10.o sysv10.o + build: $(CRT) $(SYS) $(TESTCASES) check: $(CRT) $(SYS) $(TESTCASES) @echo -e "\nQEMU simulator." for case in $(TESTCASES); do \ echo -n "$$case "; \ - $(SIM) ./$$case; \ + SIMARGS=; \ + case $$case in *v17*) SIMARGS="-cpu crisv17";; esac; \ + $(SIM) $$SIMARGS ./$$case; \ done check-g: $(CRT) $(SYS) $(TESTCASES) @echo -e "\nGDB simulator." 
@@ -152,4 +165,4 @@ check-g: $(CRT) $(SYS) $(TESTCASES) done clean: - $(RM) -fr $(TESTCASES) $(CRT) $(SYS) + $(RM) -fr $(TESTCASES) *.o diff --git a/tests/tcg/cris/check_abs.c b/tests/tcg/cris/check_abs.c index 9770a8d9ef..08b67b6ef0 100644 --- a/tests/tcg/cris/check_abs.c +++ b/tests/tcg/cris/check_abs.c @@ -4,14 +4,14 @@ #include "sys.h" #include "crisutils.h" -static inline int cris_abs(int n) +static always_inline int cris_abs(int n) { int r; asm ("abs\t%1, %0\n" : "=r" (r) : "r" (n)); return r; } -static inline void +static always_inline void verify_abs(int val, int res, const int n, const int z, const int v, const int c) { diff --git a/tests/tcg/cris/check_addc.c b/tests/tcg/cris/check_addc.c index facd1bea2d..fc3fb1faa8 100644 --- a/tests/tcg/cris/check_addc.c +++ b/tests/tcg/cris/check_addc.c @@ -4,7 +4,7 @@ #include "sys.h" #include "crisutils.h" -static inline int cris_addc(int a, const int b) +static always_inline int cris_addc(int a, const int b) { asm ("addc\t%1, %0\n" : "+r" (a) : "r" (b)); return a; diff --git a/tests/tcg/cris/check_addcm.c b/tests/tcg/cris/check_addcm.c index 7928bc9999..b355ba164f 100644 --- a/tests/tcg/cris/check_addcm.c +++ b/tests/tcg/cris/check_addcm.c @@ -5,14 +5,14 @@ #include "crisutils.h" /* need to avoid acr as source here. */ -static inline int cris_addc_m(int a, const int *b) +static always_inline int cris_addc_m(int a, const int *b) { asm volatile ("addc [%1], %0\n" : "+r" (a) : "r" (b)); return a; } /* 'b' is a crisv32 constrain to avoid postinc with $acr. */ -static inline int cris_addc_pi_m(int a, int **b) +static always_inline int cris_addc_pi_m(int a, int **b) { asm volatile ("addc [%1+], %0\n" : "+r" (a), "+b" (*b)); return a; diff --git a/tests/tcg/cris/check_addcv17.s b/tests/tcg/cris/check_addcv17.s new file mode 100644 index 0000000000..52ef7a9716 --- /dev/null +++ b/tests/tcg/cris/check_addcv17.s @@ -0,0 +1,65 @@ +# mach: crisv17 + + .include "testutils.inc" + + .macro addc Rs Rd inc=0 +# Create the instruction manually since there is no assembler support yet + .word (\Rd << 12) | \Rs | (\inc << 10) | 0x09a0 + .endm + + start + + .data +mem1: + .dword 0x0 +mem2: + .dword 0x12345678 + + .text + move.d mem1,r4 + clearf nzvc + addc 4 3 + test_cc 0 1 0 0 + checkr3 0 + + move.d mem1,r4 + clearf nzvc + ax + addc 4 3 + test_cc 0 0 0 0 + checkr3 0 + + move.d mem1,r4 + clearf nzvc + setf c + addc 4 3 + test_cc 0 0 0 0 + checkr3 1 + + move.d mem2,r4 + moveq 2, r3 + clearf nzvc + setf c + addc 4 3 + test_cc 0 0 0 0 + checkr3 1234567b + + move.d mem2,r5 + clearf nzvc + cmp.d r4,r5 + test_cc 0 1 0 0 + + move.d mem2,r4 + moveq 2, r3 + clearf nzvc + addc 4 3 inc=1 + test_cc 0 0 0 0 + checkr3 1234567a + + move.d mem2,r5 + clearf nzvc + addq 4,r5 + cmp.d r4,r5 + test_cc 0 1 0 0 + + quit diff --git a/tests/tcg/cris/check_bound.c b/tests/tcg/cris/check_bound.c index e8831754ec..d956ab9ade 100644 --- a/tests/tcg/cris/check_bound.c +++ b/tests/tcg/cris/check_bound.c @@ -4,21 +4,21 @@ #include "sys.h" #include "crisutils.h" -static inline int cris_bound_b(int v, int b) +static always_inline int cris_bound_b(int v, int b) { int r = v; asm ("bound.b\t%1, %0\n" : "+r" (r) : "ri" (b)); return r; } -static inline int cris_bound_w(int v, int b) +static always_inline int cris_bound_w(int v, int b) { int r = v; asm ("bound.w\t%1, %0\n" : "+r" (r) : "ri" (b)); return r; } -static inline int cris_bound_d(int v, int b) +static always_inline int cris_bound_d(int v, int b) { int r = v; asm ("bound.d\t%1, %0\n" : "+r" (r) : "ri" (b)); diff --git 
a/tests/tcg/cris/check_ftag.c b/tests/tcg/cris/check_ftag.c index 908773a38a..aaa5c97115 100644 --- a/tests/tcg/cris/check_ftag.c +++ b/tests/tcg/cris/check_ftag.c @@ -4,22 +4,22 @@ #include "sys.h" #include "crisutils.h" -static inline void cris_ftag_i(unsigned int x) +static always_inline void cris_ftag_i(unsigned int x) { register unsigned int v asm("$r10") = x; asm ("ftagi\t[%0]\n" : : "r" (v) ); } -static inline void cris_ftag_d(unsigned int x) +static always_inline void cris_ftag_d(unsigned int x) { register unsigned int v asm("$r10") = x; asm ("ftagd\t[%0]\n" : : "r" (v) ); } -static inline void cris_fidx_i(unsigned int x) +static always_inline void cris_fidx_i(unsigned int x) { register unsigned int v asm("$r10") = x; asm ("fidxi\t[%0]\n" : : "r" (v) ); } -static inline void cris_fidx_d(unsigned int x) +static always_inline void cris_fidx_d(unsigned int x) { register unsigned int v asm("$r10") = x; asm ("fidxd\t[%0]\n" : : "r" (v) ); diff --git a/tests/tcg/cris/check_int64.c b/tests/tcg/cris/check_int64.c index fc600176e2..69caec1bb2 100644 --- a/tests/tcg/cris/check_int64.c +++ b/tests/tcg/cris/check_int64.c @@ -5,12 +5,12 @@ #include "crisutils.h" -static inline int64_t add64(const int64_t a, const int64_t b) +static always_inline int64_t add64(const int64_t a, const int64_t b) { return a + b; } -static inline int64_t sub64(const int64_t a, const int64_t b) +static always_inline int64_t sub64(const int64_t a, const int64_t b) { return a - b; } diff --git a/tests/tcg/cris/check_lz.c b/tests/tcg/cris/check_lz.c index 69c2e6d4ec..bf051a6b55 100644 --- a/tests/tcg/cris/check_lz.c +++ b/tests/tcg/cris/check_lz.c @@ -3,7 +3,7 @@ #include <stdint.h> #include "sys.h" -static inline int cris_lz(int x) +static always_inline int cris_lz(int x) { int r; asm ("lz\t%1, %0\n" : "=r" (r) : "r" (x)); diff --git a/tests/tcg/cris/check_openpf4.c b/tests/tcg/cris/check_openpf4.c deleted file mode 100644 index 8bbee41a64..0000000000 --- a/tests/tcg/cris/check_openpf4.c +++ /dev/null @@ -1,5 +0,0 @@ -/* Basic file operations, now *with* sysroot. -#sim: --sysroot=@exedir@ -*/ -#define PREFIX "/" -#include "check_openpf3.c" diff --git a/tests/tcg/cris/check_swap.c b/tests/tcg/cris/check_swap.c index f851cbcef1..9a68c1e5d7 100644 --- a/tests/tcg/cris/check_swap.c +++ b/tests/tcg/cris/check_swap.c @@ -9,7 +9,7 @@ #define B 2 #define R 1 -static inline int cris_swap(const int mode, int x) +static always_inline int cris_swap(const int mode, int x) { switch (mode) { diff --git a/tests/tcg/cris/check_time1.c b/tests/tcg/cris/check_time1.c deleted file mode 100644 index 3fcf0e1535..0000000000 --- a/tests/tcg/cris/check_time1.c +++ /dev/null @@ -1,46 +0,0 @@ -/* Basic time functionality test: check that milliseconds are - incremented for each syscall (does not work on host). 
*/ -#include <stdio.h> -#include <time.h> -#include <sys/time.h> -#include <string.h> -#include <stdlib.h> - -void err (const char *s) -{ - perror (s); - abort (); -} - -int -main (void) -{ - struct timeval t_m = {0, 0}; - struct timezone t_z = {0, 0}; - struct timeval t_m1 = {0, 0}; - int i; - - if (gettimeofday (&t_m, &t_z) != 0) - err ("gettimeofday"); - - for (i = 1; i < 10000; i++) - if (gettimeofday (&t_m1, NULL) != 0) - err ("gettimeofday 1"); - else - if (t_m1.tv_sec * 1000000 + t_m1.tv_usec - != (t_m.tv_sec * 1000000 + t_m.tv_usec + i * 1000)) - { - fprintf (stderr, "t0 (%ld, %ld), i %d, t1 (%ld, %ld)\n", - t_m.tv_sec, t_m.tv_usec, i, t_m1.tv_sec, t_m1.tv_usec); - abort (); - } - - if (time (NULL) != t_m1.tv_sec) - { - fprintf (stderr, "time != gettod\n"); - abort (); - } - - printf ("pass\n"); - exit (0); -} diff --git a/tests/tcg/cris/crisutils.h b/tests/tcg/cris/crisutils.h index 3456b9d50d..bbbe6c5540 100644 --- a/tests/tcg/cris/crisutils.h +++ b/tests/tcg/cris/crisutils.h @@ -13,57 +13,57 @@ void _err(void) { _fail(tst_cc_loc); } -static inline void cris_tst_cc_n1(void) +static always_inline void cris_tst_cc_n1(void) { asm volatile ("bpl _err\n" "nop\n"); } -static inline void cris_tst_cc_n0(void) +static always_inline void cris_tst_cc_n0(void) { asm volatile ("bmi _err\n" "nop\n"); } -static inline void cris_tst_cc_z1(void) +static always_inline void cris_tst_cc_z1(void) { asm volatile ("bne _err\n" "nop\n"); } -static inline void cris_tst_cc_z0(void) +static always_inline void cris_tst_cc_z0(void) { asm volatile ("beq _err\n" "nop\n"); } -static inline void cris_tst_cc_v1(void) +static always_inline void cris_tst_cc_v1(void) { asm volatile ("bvc _err\n" "nop\n"); } -static inline void cris_tst_cc_v0(void) +static always_inline void cris_tst_cc_v0(void) { asm volatile ("bvs _err\n" "nop\n"); } -static inline void cris_tst_cc_c1(void) +static always_inline void cris_tst_cc_c1(void) { asm volatile ("bcc _err\n" "nop\n"); } -static inline void cris_tst_cc_c0(void) +static always_inline void cris_tst_cc_c0(void) { asm volatile ("bcs _err\n" "nop\n"); } -static inline void cris_tst_mov_cc(int n, int z) +static always_inline void cris_tst_mov_cc(int n, int z) { if (n) cris_tst_cc_n1(); else cris_tst_cc_n0(); if (z) cris_tst_cc_z1(); else cris_tst_cc_z0(); asm volatile ("" : : "g" (_err)); } -static inline void cris_tst_cc(const int n, const int z, +static always_inline void cris_tst_cc(const int n, const int z, const int v, const int c) { if (n) cris_tst_cc_n1(); else cris_tst_cc_n0(); diff --git a/tests/tcg/cris/sys.c b/tests/tcg/cris/sys.c index 551c5dd7cb..21f08c0747 100644 --- a/tests/tcg/cris/sys.c +++ b/tests/tcg/cris/sys.c @@ -33,19 +33,27 @@ void *memset (void *s, int c, size_t n) { } void exit (int status) { - asm volatile ("moveq 1, $r9\n" /* NR_exit. */ - "break 13\n"); + register unsigned int callno asm ("r9") = 1; /* NR_exit */ + + asm volatile ("break 13\n" + : + : "r" (callno) + : "memory" ); while(1) ; } ssize_t write (int fd, const void *buf, size_t count) { - int r; - asm ("move.d %0, $r10\n" - "move.d %1, $r11\n" - "move.d %2, $r12\n" - "moveq 4, $r9\n" /* NR_write. 
*/ - "break 13\n" : : "r" (fd), "r" (buf), "r" (count) : "memory"); - asm ("move.d $r10, %0\n" : "=r" (r)); + register unsigned int callno asm ("r9") = 4; /* NR_write */ + register unsigned int r10 asm ("r10") = fd; + register const void *r11 asm ("r11") = buf; + register size_t r12 asm ("r12") = count; + register unsigned int r asm ("r10"); + + asm volatile ("break 13\n" + : "=r" (r) + : "r" (callno), "0" (r10), "r" (r11), "r" (r12) + : "memory"); + return r; } diff --git a/tests/tcg/cris/sys.h b/tests/tcg/cris/sys.h index c5f88e1a29..3dd47bb673 100644 --- a/tests/tcg/cris/sys.h +++ b/tests/tcg/cris/sys.h @@ -3,6 +3,8 @@ #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) +#define always_inline inline __attribute__((always_inline)) + #define CURRENT_LOCATION __FILE__ ":" TOSTRING(__LINE__) #define err() \ diff --git a/tests/test-coroutine.c b/tests/test-coroutine.c index 6431dd6d7c..abd97c23c1 100644 --- a/tests/test-coroutine.c +++ b/tests/test-coroutine.c @@ -53,6 +53,47 @@ static void test_self(void) } /* + * Check that qemu_coroutine_entered() works + */ + +static void coroutine_fn verify_entered_step_2(void *opaque) +{ + Coroutine *caller = (Coroutine *)opaque; + + g_assert(qemu_coroutine_entered(caller)); + g_assert(qemu_coroutine_entered(qemu_coroutine_self())); + qemu_coroutine_yield(); + + /* Once more to check it still works after yielding */ + g_assert(qemu_coroutine_entered(caller)); + g_assert(qemu_coroutine_entered(qemu_coroutine_self())); + qemu_coroutine_yield(); +} + +static void coroutine_fn verify_entered_step_1(void *opaque) +{ + Coroutine *self = qemu_coroutine_self(); + Coroutine *coroutine; + + g_assert(qemu_coroutine_entered(self)); + + coroutine = qemu_coroutine_create(verify_entered_step_2, self); + g_assert(!qemu_coroutine_entered(coroutine)); + qemu_coroutine_enter(coroutine); + g_assert(!qemu_coroutine_entered(coroutine)); + qemu_coroutine_enter(coroutine); +} + +static void test_entered(void) +{ + Coroutine *coroutine; + + coroutine = qemu_coroutine_create(verify_entered_step_1, NULL); + g_assert(!qemu_coroutine_entered(coroutine)); + qemu_coroutine_enter(coroutine); +} + +/* * Check that coroutines may nest multiple levels */ @@ -389,6 +430,7 @@ int main(int argc, char **argv) g_test_add_func("/basic/yield", test_yield); g_test_add_func("/basic/nesting", test_nesting); g_test_add_func("/basic/self", test_self); + g_test_add_func("/basic/entered", test_entered); g_test_add_func("/basic/in_coroutine", test_in_coroutine); g_test_add_func("/basic/order", test_order); if (g_test_perf()) { diff --git a/translate-all.c b/translate-all.c index e9bc90c654..8ca393c9d0 100644 --- a/translate-all.c +++ b/translate-all.c @@ -834,12 +834,19 @@ static void page_flush_tb(void) } /* flush all the translation blocks */ -/* XXX: tb_flush is currently not thread safe */ -void tb_flush(CPUState *cpu) +static void do_tb_flush(CPUState *cpu, void *data) { - if (!tcg_enabled()) { - return; + unsigned tb_flush_req = (unsigned) (uintptr_t) data; + + tb_lock(); + + /* If it's already been done on request of another CPU, + * just retry. 
+ */ + if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_req) { + goto done; } + #if defined(DEBUG_FLUSH) printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer), @@ -858,7 +865,6 @@ void tb_flush(CPUState *cpu) for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) { atomic_set(&cpu->tb_jmp_cache[i], NULL); } - atomic_mb_set(&cpu->tb_flushed, true); } tcg_ctx.tb_ctx.nb_tbs = 0; @@ -868,7 +874,19 @@ void tb_flush(CPUState *cpu) tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer; /* XXX: flush processor icache at this point if cache flush is expensive */ - tcg_ctx.tb_ctx.tb_flush_count++; + atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count, + tcg_ctx.tb_ctx.tb_flush_count + 1); + +done: + tb_unlock(); +} + +void tb_flush(CPUState *cpu) +{ + if (tcg_enabled()) { + uintptr_t tb_flush_req = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count); + async_safe_run_on_cpu(cpu, do_tb_flush, (void *) tb_flush_req); + } } #ifdef DEBUG_TB_CHECK @@ -1175,9 +1193,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu, buffer_overflow: /* flush must be done */ tb_flush(cpu); - /* cannot fail at this point */ - tb = tb_alloc(pc); - assert(tb != NULL); + mmap_unlock(); + cpu_loop_exit(cpu); } gen_code_buf = tcg_ctx.code_gen_ptr; @@ -1775,7 +1792,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf) qht_statistics_destroy(&hst); cpu_fprintf(f, "\nStatistics:\n"); - cpu_fprintf(f, "TB flush count %d\n", tcg_ctx.tb_ctx.tb_flush_count); + cpu_fprintf(f, "TB flush count %u\n", + atomic_read(&tcg_ctx.tb_ctx.tb_flush_count)); cpu_fprintf(f, "TB invalidate count %d\n", tcg_ctx.tb_ctx.tb_phys_invalidate_count); cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count); diff --git a/ui/console.c b/ui/console.c index 3940762851..fa3e658edd 100644 --- a/ui/console.c +++ b/ui/console.c @@ -123,6 +123,7 @@ struct QemuConsole { DisplaySurface *surface; int dcls; DisplayChangeListener *gl; + bool gl_block; /* Graphic console state. 
*/ Object *device; @@ -264,10 +265,10 @@ void graphic_hw_update(QemuConsole *con) void graphic_hw_gl_block(QemuConsole *con, bool block) { - if (!con) { - con = active_console; - } - if (con && con->hw_ops->gl_block) { + assert(con != NULL); + + con->gl_block = block; + if (con->hw_ops->gl_block) { con->hw_ops->gl_block(con->hw, block); } } @@ -1879,6 +1880,12 @@ bool qemu_console_is_fixedsize(QemuConsole *con) return con && (con->console_type != TEXT_CONSOLE); } +bool qemu_console_is_gl_blocked(QemuConsole *con) +{ + assert(con != NULL); + return con->gl_block; +} + char *qemu_console_get_label(QemuConsole *con) { if (con->console_type == GRAPHIC_CONSOLE) { @@ -2101,6 +2108,13 @@ void qemu_console_resize(QemuConsole *s, int width, int height) DisplaySurface *surface; assert(s->console_type == GRAPHIC_CONSOLE); + + if (s->surface && + pixman_image_get_width(s->surface->image) == width && + pixman_image_get_height(s->surface->image) == height) { + return; + } + surface = qemu_create_displaysurface(width, height); dpy_gfx_replace_surface(s, surface); } diff --git a/ui/spice-display.c b/ui/spice-display.c index 99132b69b6..5e6f78a219 100644 --- a/ui/spice-display.c +++ b/ui/spice-display.c @@ -850,6 +850,74 @@ static void qemu_spice_gl_block_timer(void *opaque) fprintf(stderr, "WARNING: spice: no gl-draw-done within one second\n"); } +static void spice_gl_refresh(DisplayChangeListener *dcl) +{ + SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); + uint64_t cookie; + + if (!ssd->ds || qemu_console_is_gl_blocked(ssd->dcl.con)) { + return; + } + + graphic_hw_update(dcl->con); + if (ssd->gl_updates && ssd->have_surface) { + qemu_spice_gl_block(ssd, true); + cookie = (uintptr_t)qxl_cookie_new(QXL_COOKIE_TYPE_GL_DRAW_DONE, 0); + spice_qxl_gl_draw_async(&ssd->qxl, 0, 0, + surface_width(ssd->ds), + surface_height(ssd->ds), + cookie); + ssd->gl_updates = 0; + } +} + +static void spice_gl_update(DisplayChangeListener *dcl, + int x, int y, int w, int h) +{ + SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); + + surface_gl_update_texture(ssd->gls, ssd->ds, x, y, w, h); + ssd->gl_updates++; +} + +static void spice_gl_switch(DisplayChangeListener *dcl, + struct DisplaySurface *new_surface) +{ + SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); + EGLint stride, fourcc; + int fd; + + if (ssd->ds) { + surface_gl_destroy_texture(ssd->gls, ssd->ds); + } + ssd->ds = new_surface; + if (ssd->ds) { + surface_gl_create_texture(ssd->gls, ssd->ds); + fd = egl_get_fd_for_texture(ssd->ds->texture, + &stride, &fourcc); + if (fd < 0) { + surface_gl_destroy_texture(ssd->gls, ssd->ds); + return; + } + + dprint(1, "%s: %dx%d (stride %d/%d, fourcc 0x%x)\n", __func__, + surface_width(ssd->ds), surface_height(ssd->ds), + surface_stride(ssd->ds), stride, fourcc); + + /* note: spice server will close the fd */ + spice_qxl_gl_scanout(&ssd->qxl, fd, + surface_width(ssd->ds), + surface_height(ssd->ds), + stride, fourcc, false); + ssd->have_surface = true; + ssd->have_scanout = false; + + qemu_spice_gl_monitor_config(ssd, 0, 0, + surface_width(ssd->ds), + surface_height(ssd->ds)); + } +} + static QEMUGLContext qemu_spice_gl_create_context(DisplayChangeListener *dcl, QEMUGLParams *params) { @@ -887,6 +955,8 @@ static void qemu_spice_gl_scanout(DisplayChangeListener *dcl, /* note: spice server will close the fd */ spice_qxl_gl_scanout(&ssd->qxl, fd, backing_width, backing_height, stride, fourcc, y_0_top); + ssd->have_surface = false; + ssd->have_scanout = (tex_id != 0); 
qemu_spice_gl_monitor_config(ssd, x, y, w, h); } @@ -897,6 +967,10 @@ static void qemu_spice_gl_update(DisplayChangeListener *dcl, SimpleSpiceDisplay *ssd = container_of(dcl, SimpleSpiceDisplay, dcl); uint64_t cookie; + if (!ssd->have_scanout) { + return; + } + dprint(2, "%s: %dx%d+%d+%d\n", __func__, w, h, x, y); qemu_spice_gl_block(ssd, true); cookie = (uintptr_t)qxl_cookie_new(QXL_COOKIE_TYPE_GL_DRAW_DONE, 0); @@ -904,13 +978,13 @@ static void qemu_spice_gl_update(DisplayChangeListener *dcl, } static const DisplayChangeListenerOps display_listener_gl_ops = { - .dpy_name = "spice-egl", - .dpy_gfx_update = display_update, - .dpy_gfx_switch = display_switch, - .dpy_gfx_check_format = qemu_pixman_check_format, - .dpy_refresh = display_refresh, - .dpy_mouse_set = display_mouse_set, - .dpy_cursor_define = display_mouse_define, + .dpy_name = "spice-egl", + .dpy_gfx_update = spice_gl_update, + .dpy_gfx_switch = spice_gl_switch, + .dpy_gfx_check_format = console_gl_check_format, + .dpy_refresh = spice_gl_refresh, + .dpy_mouse_set = display_mouse_set, + .dpy_cursor_define = display_mouse_define, .dpy_gl_ctx_create = qemu_spice_gl_create_context, .dpy_gl_ctx_destroy = qemu_egl_destroy_context, @@ -933,10 +1007,12 @@ static void qemu_spice_display_init_one(QemuConsole *con) #ifdef HAVE_SPICE_GL if (display_opengl) { ssd->dcl.ops = &display_listener_gl_ops; - ssd->dmabuf_fd = -1; ssd->gl_unblock_bh = qemu_bh_new(qemu_spice_gl_unblock_bh, ssd); ssd->gl_unblock_timer = timer_new_ms(QEMU_CLOCK_REALTIME, qemu_spice_gl_block_timer, ssd); + ssd->gls = console_gl_init_context(); + ssd->have_surface = false; + ssd->have_scanout = false; } #endif ssd->dcl.con = con; diff --git a/ui/vnc-enc-tight.c b/ui/vnc-enc-tight.c index 49df85e763..1e53b1cf84 100644 --- a/ui/vnc-enc-tight.c +++ b/ui/vnc-enc-tight.c @@ -707,10 +707,8 @@ check_solid_tile32(VncState *vs, int x, int y, int w, int h, static bool check_solid_tile(VncState *vs, int x, int y, int w, int h, uint32_t* color, bool samecolor) { - switch (VNC_SERVER_FB_BYTES) { - case 4: - return check_solid_tile32(vs, x, y, w, h, color, samecolor); - } + QEMU_BUILD_BUG_ON(VNC_SERVER_FB_BYTES != 4); + return check_solid_tile32(vs, x, y, w, h, color, samecolor); } static void find_best_solid_area(VncState *vs, int x, int y, int w, int h, diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c index 3cbf225487..737bffa984 100644 --- a/util/qemu-coroutine.c +++ b/util/qemu-coroutine.c @@ -146,3 +146,8 @@ void coroutine_fn qemu_coroutine_yield(void) self->caller = NULL; qemu_coroutine_switch(self, to, COROUTINE_YIELD); } + +bool qemu_coroutine_entered(Coroutine *co) +{ + return co->caller; +} @@ -784,6 +784,7 @@ void vm_start(void) if (runstate_is_running()) { qapi_event_send_stop(&error_abort); } else { + replay_enable_events(); cpu_enable_ticks(); runstate_set(RUN_STATE_RUNNING); vm_state_notify(1, RUN_STATE_RUNNING); @@ -3019,6 +3020,7 @@ int main(int argc, char **argv, char **envp) Error *err = NULL; bool list_data_dirs = false; + qemu_init_cpu_list(); qemu_init_cpu_loop(); qemu_mutex_lock_iothread();
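
The recurring change across the hunks above is the new run_on_cpu() helper signature: callbacks now receive the CPUState they were queued on as a first argument instead of fishing it out of the opaque payload, which is why MCEInjectionParams and SigpInfo lose their cpu members and several call sites now pass NULL. A minimal sketch of the two conventions, assuming this tree's internal headers; old_helper(), new_helper() and inject_one() are hypothetical names, not code from the series:

#include "qemu/osdep.h"     /* QEMU-internal; sketch assumes this tree */
#include "qom/cpu.h"        /* CPUState, run_on_cpu() */

static void inject_one(CPUState *cs, int bank)
{
    /* stand-in for the real per-CPU work done by a helper */
    (void)cs;
    (void)bank;
}

/* Old convention: void (*)(void *), so the target CPU had to be
 * smuggled through the payload alongside the real parameters. */
typedef struct {
    CPUState *cpu;          /* redundant copy of the target CPU */
    int bank;
} OldArgs;

static void old_helper(void *arg)
{
    OldArgs *a = arg;
    inject_one(a->cpu, a->bank);
}

/* New convention: run_on_cpu() hands the helper its own CPU, and the
 * payload shrinks to the actual parameters. */
static void new_helper(CPUState *cs, void *arg)
{
    int *bank = arg;
    inject_one(cs, *bank);
}

Queued as run_on_cpu(cs, new_helper, &bank), the new shape removes the possibility of the payload's cpu field disagreeing with the CPU the work item actually runs on.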
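
The translate-all.c hunks turn tb_flush() into a queued safe-work item guarded by a generation counter, so that when several vCPUs overflow the code buffer at once only the first request actually flushes. A sketch of the idiom under hypothetical names (the tree's real counter is tcg_ctx.tb_ctx.tb_flush_count and the lock is tb_lock()):

static unsigned flush_count;        /* bumped once per completed flush */

static void do_flush(CPUState *cpu, void *data)
{
    unsigned requested = (unsigned)(uintptr_t)data;

    lock();                         /* hypothetical; tb_lock() in-tree */
    /* If another vCPU flushed after this request was queued, the
     * generation has moved on and the request degrades to a no-op. */
    if (flush_count == requested) {
        /* ... invalidate all TBs and reset the code buffer ... */
        atomic_mb_set(&flush_count, flush_count + 1);
    }
    unlock();
}

void request_flush(CPUState *cpu)
{
    uintptr_t gen = atomic_mb_read(&flush_count);
    async_safe_run_on_cpu(cpu, do_flush, (void *)gen);
}

Snapshotting the counter before queuing is the point: a stale request can no longer flush a second time and discard translations produced after another CPU's flush already ran.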
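
The tests/libqos/virtio.c fix (writew to writeq) follows from the virtio ring descriptor layout: the addr field is 64 bits wide, so clearing it with writew() only touched its low 16 bits. Per the virtio specification:

#include <stdint.h>

struct vring_desc {
    uint64_t addr;      /* offset 0:  guest address, needs writeq()  */
    uint32_t len;       /* offset 8                                  */
    uint16_t flags;     /* offset 12                                 */
    uint16_t next;      /* offset 14: 16-bit, writew() is correct    */
};

Each descriptor is 16 bytes, which is where the 16 * i stride in qvring_init() comes from.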
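
Finally, the util/qemu-coroutine.c hunk adds qemu_coroutine_entered(), which simply reports whether the coroutine's caller link is currently set, i.e. whether it sits between an enter and the matching yield or termination. A hedged usage sketch (wake_coroutine() is a hypothetical wake-up path, not an API added by this series):

static void wake_coroutine(Coroutine *co)
{
    /* Entering a coroutine that is already running would recurse into
     * it, so only enter when it is parked at a yield point. */
    if (!qemu_coroutine_entered(co)) {
        qemu_coroutine_enter(co);
    }
}

This is exactly the property the new /basic/entered test pins down: false before the first enter, true while running, false again after each yield.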