| author | Claudio Fontana | 2021-02-04 17:39:25 +0100 |
|---|---|---|
| committer | Richard Henderson | 2021-02-05 21:24:15 +0100 |
| commit | b86f59c71552591a17dd21ba8f09654bfa19a31e (patch) | |
| tree | c73b9240fa6c4dbf46fdf1b67d65f7d283997589 /accel/tcg/tcg-accel-ops-rr.c | |
| parent | accel: extend AccelState and AccelClass to user-mode (diff) | |
accel: replace struct CpusAccel with AccelOpsClass
This will allow us to centralize the registration of
the cpus.c module accelerator operations (in accel/accel-softmmu.c),
and trigger it automatically using object hierarchy lookup from the
new accel_init_interfaces() initialization step, depending just on
which accelerators are available in the code.
Rename all tcg-cpus.c, kvm-cpus.c, etc. to tcg-accel-ops.c,
kvm-accel-ops.c, etc., matching the object type names.
Signed-off-by: Claudio Fontana <cfontana@suse.de>
Message-Id: <20210204163931.7358-18-cfontana@suse.de>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
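
For context, the object-hierarchy lookup the message describes works roughly as sketched below: each accelerator's ops are a QOM class whose name is derived from the accelerator class name plus a fixed suffix, so accel/accel-softmmu.c can resolve and register them generically. This is a minimal sketch reconstructed from this series; ACCEL_OPS_SUFFIX, ops_init and cpus_register_accel() are as of this patchset and should be treated as illustrative.

```c
/*
 * Sketch: generic AccelOpsClass resolution in accel/accel-softmmu.c.
 * Identifiers follow this patch series; treat details as illustrative.
 */
static void accel_init_ops_interfaces(AccelClass *ac)
{
    const char *ac_name = object_class_get_name(OBJECT_CLASS(ac));
    /* e.g. the TCG accelerator class maps to the "...-ops" class name */
    char *ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name);
    AccelOpsClass *ops = ACCEL_OPS_CLASS(object_class_by_name(ops_name));

    g_free(ops_name);
    /* every softmmu accelerator must provide an ops class */
    g_assert(ops != NULL);
    if (ops->ops_init) {
        ops->ops_init(ops);     /* let the accelerator adjust its ops */
    }
    cpus_register_accel(ops);   /* hand the vCPU ops table to cpus.c */
}
```

The point of the lookup is that cpus.c no longer needs a per-accelerator if/else chain: only the ops classes actually compiled in are registered with QOM, so availability falls out of the build configuration.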
Diffstat (limited to 'accel/tcg/tcg-accel-ops-rr.c')
-rw-r--r-- | accel/tcg/tcg-accel-ops-rr.c | 298
1 file changed, 298 insertions, 0 deletions
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
new file mode 100644
index 0000000000..4a66055e0d
--- /dev/null
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -0,0 +1,298 @@
+/*
+ * QEMU TCG Single Threaded vCPUs implementation
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "sysemu/tcg.h"
+#include "sysemu/replay.h"
+#include "qemu/main-loop.h"
+#include "qemu/guest-random.h"
+#include "exec/exec-all.h"
+#include "hw/boards.h"
+
+#include "tcg-accel-ops.h"
+#include "tcg-accel-ops-rr.h"
+#include "tcg-accel-ops-icount.h"
+
+/* Kick all RR vCPUs */
+void rr_kick_vcpu_thread(CPUState *unused)
+{
+    CPUState *cpu;
+
+    CPU_FOREACH(cpu) {
+        cpu_exit(cpu);
+    }
+}
+
+/*
+ * TCG vCPU kick timer
+ *
+ * The kick timer is responsible for moving single threaded vCPU
+ * emulation on to the next vCPU. If more than one vCPU is running a
+ * timer event will force a cpu->exit so the next vCPU can get
+ * scheduled.
+ *
+ * The timer is removed if all vCPUs are idle and restarted again once
+ * idleness is complete.
+ */
+
+static QEMUTimer *rr_kick_vcpu_timer;
+static CPUState *rr_current_cpu;
+
+#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
+
+static inline int64_t rr_next_kick_time(void)
+{
+    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
+}
+
+/* Kick the currently round-robin scheduled vCPU to next */
+static void rr_kick_next_cpu(void)
+{
+    CPUState *cpu;
+    do {
+        cpu = qatomic_mb_read(&rr_current_cpu);
+        if (cpu) {
+            cpu_exit(cpu);
+        }
+    } while (cpu != qatomic_mb_read(&rr_current_cpu));
+}
+
+static void rr_kick_thread(void *opaque)
+{
+    timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
+    rr_kick_next_cpu();
+}
+
+static void rr_start_kick_timer(void)
+{
+    if (!rr_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
+        rr_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                          rr_kick_thread, NULL);
+    }
+    if (rr_kick_vcpu_timer && !timer_pending(rr_kick_vcpu_timer)) {
+        timer_mod(rr_kick_vcpu_timer, rr_next_kick_time());
+    }
+}
+
+static void rr_stop_kick_timer(void)
+{
+    if (rr_kick_vcpu_timer && timer_pending(rr_kick_vcpu_timer)) {
+        timer_del(rr_kick_vcpu_timer);
+    }
+}
+
+static void rr_wait_io_event(void)
+{
+    CPUState *cpu;
+
+    while (all_cpu_threads_idle()) {
+        rr_stop_kick_timer();
+        qemu_cond_wait_iothread(first_cpu->halt_cond);
+    }
+
+    rr_start_kick_timer();
+
+    CPU_FOREACH(cpu) {
+        qemu_wait_io_event_common(cpu);
+    }
+}
+
+/*
+ * Destroy any remaining vCPUs which have been unplugged and have
+ * finished running
+ */
+static void rr_deal_with_unplugged_cpus(void)
+{
+    CPUState *cpu;
+
+    CPU_FOREACH(cpu) {
+        if (cpu->unplug && !cpu_can_run(cpu)) {
+            tcg_cpus_destroy(cpu);
+            break;
+        }
+    }
+}
+
+/*
+ * In the single-threaded case each vCPU is simulated in turn. If
+ * there is more than a single vCPU we create a simple timer to kick
+ * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
+ * This is done explicitly rather than relying on side-effects
+ * elsewhere.
+ */
+
+static void *rr_cpu_thread_fn(void *arg)
+{
+    CPUState *cpu = arg;
+
+    assert(tcg_enabled());
+    rcu_register_thread();
+    tcg_register_thread();
+
+    qemu_mutex_lock_iothread();
+    qemu_thread_get_self(cpu->thread);
+
+    cpu->thread_id = qemu_get_thread_id();
+    cpu->can_do_io = 1;
+    cpu_thread_signal_created(cpu);
+    qemu_guest_random_seed_thread_part2(cpu->random_seed);
+
+    /* wait for initial kick-off after machine start */
+    while (first_cpu->stopped) {
+        qemu_cond_wait_iothread(first_cpu->halt_cond);
+
+        /* process any pending work */
+        CPU_FOREACH(cpu) {
+            current_cpu = cpu;
+            qemu_wait_io_event_common(cpu);
+        }
+    }
+
+    rr_start_kick_timer();
+
+    cpu = first_cpu;
+
+    /* process any pending work */
+    cpu->exit_request = 1;
+
+    while (1) {
+        qemu_mutex_unlock_iothread();
+        replay_mutex_lock();
+        qemu_mutex_lock_iothread();
+
+        if (icount_enabled()) {
+            /* Account partial waits to QEMU_CLOCK_VIRTUAL. */
+            icount_account_warp_timer();
+            /*
+             * Run the timers here. This is much more efficient than
+             * waking up the I/O thread and waiting for completion.
+             */
+            icount_handle_deadline();
+        }
+
+        replay_mutex_unlock();
+
+        if (!cpu) {
+            cpu = first_cpu;
+        }
+
+        while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
+
+            qatomic_mb_set(&rr_current_cpu, cpu);
+            current_cpu = cpu;
+
+            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
+                              (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
+
+            if (cpu_can_run(cpu)) {
+                int r;
+
+                qemu_mutex_unlock_iothread();
+                if (icount_enabled()) {
+                    icount_prepare_for_run(cpu);
+                }
+                r = tcg_cpus_exec(cpu);
+                if (icount_enabled()) {
+                    icount_process_data(cpu);
+                }
+                qemu_mutex_lock_iothread();
+
+                if (r == EXCP_DEBUG) {
+                    cpu_handle_guest_debug(cpu);
+                    break;
+                } else if (r == EXCP_ATOMIC) {
+                    qemu_mutex_unlock_iothread();
+                    cpu_exec_step_atomic(cpu);
+                    qemu_mutex_lock_iothread();
+                    break;
+                }
+            } else if (cpu->stop) {
+                if (cpu->unplug) {
+                    cpu = CPU_NEXT(cpu);
+                }
+                break;
+            }
+
+            cpu = CPU_NEXT(cpu);
+        } /* while (cpu && !cpu->exit_request).. */
+
+        /* Does not need qatomic_mb_set because a spurious wakeup is okay. */
+        qatomic_set(&rr_current_cpu, NULL);
+
+        if (cpu && cpu->exit_request) {
+            qatomic_mb_set(&cpu->exit_request, 0);
+        }
+
+        if (icount_enabled() && all_cpu_threads_idle()) {
+            /*
+             * When all cpus are sleeping (e.g. in WFI), to avoid a deadlock
+             * in the main_loop, wake it up in order to start the warp timer.
+             */
+            qemu_notify_event();
+        }
+
+        rr_wait_io_event();
+        rr_deal_with_unplugged_cpus();
+    }
+
+    rcu_unregister_thread();
+    return NULL;
+}
+
+void rr_start_vcpu_thread(CPUState *cpu)
+{
+    char thread_name[VCPU_THREAD_NAME_SIZE];
+    static QemuCond *single_tcg_halt_cond;
+    static QemuThread *single_tcg_cpu_thread;
+
+    g_assert(tcg_enabled());
+    parallel_cpus = false;
+
+    if (!single_tcg_cpu_thread) {
+        cpu->thread = g_malloc0(sizeof(QemuThread));
+        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+        qemu_cond_init(cpu->halt_cond);
+
+        /* share a single thread for all cpus with TCG */
+        snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
+        qemu_thread_create(cpu->thread, thread_name,
+                           rr_cpu_thread_fn,
+                           cpu, QEMU_THREAD_JOINABLE);
+
+        single_tcg_halt_cond = cpu->halt_cond;
+        single_tcg_cpu_thread = cpu->thread;
+#ifdef _WIN32
+        cpu->hThread = qemu_thread_get_handle(cpu->thread);
+#endif
+    } else {
+        /* we share the thread */
+        cpu->thread = single_tcg_cpu_thread;
+        cpu->halt_cond = single_tcg_halt_cond;
+        cpu->thread_id = first_cpu->thread_id;
+        cpu->can_do_io = 1;
+        cpu->created = true;
+    }
+}
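
For reference, the rr_* entry points defined above are consumed by tcg-accel-ops.c, which fills in the TCG AccelOpsClass. A condensed sketch based on this series follows; the mttcg_* and icount_* symbols are assumed from the sibling tcg-accel-ops-mttcg.c and tcg-accel-ops-icount.c files, and the real ops_init also installs a default interrupt handler.

```c
/*
 * Sketch: how tcg-accel-ops.c selects between the multi-threaded
 * (MTTCG) and the single-threaded round-robin hooks from this file.
 * Condensed from this patch series; treat details as illustrative.
 */
static void tcg_accel_ops_init(AccelOpsClass *ops)
{
    if (qemu_tcg_mttcg_enabled()) {
        /* one thread per vCPU */
        ops->create_vcpu_thread = mttcg_start_vcpu_thread;
        ops->kick_vcpu_thread = mttcg_kick_vcpu_thread;
    } else {
        /* one shared thread, round-robin: the hooks in this file */
        ops->create_vcpu_thread = rr_start_vcpu_thread;
        ops->kick_vcpu_thread = rr_kick_vcpu_thread;
        if (icount_enabled()) {
            ops->handle_interrupt = icount_handle_interrupt;
        }
    }
}
```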