Diffstat (limited to 'cpus.c')
-rw-r--r-- | cpus.c | 128 |
1 file changed, 25 insertions, 103 deletions
@@ -191,8 +191,12 @@ int64_t cpu_icount_to_ns(int64_t icount)
     return icount << icount_time_shift;
 }
 
-/* return the host CPU cycle counter and handle stop/restart */
-/* Caller must hold the BQL */
+/* return the time elapsed in VM between vm_start and vm_stop. Unless
+ * icount is active, cpu_get_ticks() uses units of the host CPU cycle
+ * counter.
+ *
+ * Caller must hold the BQL
+ */
 int64_t cpu_get_ticks(void)
 {
     int64_t ticks;
@@ -219,17 +223,19 @@ int64_t cpu_get_ticks(void)
 
 static int64_t cpu_get_clock_locked(void)
 {
-    int64_t ticks;
+    int64_t time;
 
-    ticks = timers_state.cpu_clock_offset;
+    time = timers_state.cpu_clock_offset;
     if (timers_state.cpu_ticks_enabled) {
-        ticks += get_clock();
+        time += get_clock();
     }
 
-    return ticks;
+    return time;
 }
 
-/* return the host CPU monotonic timer and handle stop/restart */
+/* Return the monotonic time elapsed in VM, i.e.,
+ * the time between vm_start and vm_stop
+ */
 int64_t cpu_get_clock(void)
 {
     int64_t ti;
@@ -244,7 +250,7 @@ int64_t cpu_get_clock(void)
 }
 
 /* enable cpu_get_ticks()
- * Caller must hold BQL which server as mutex for vm_clock_seqlock.
+ * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
  */
 void cpu_enable_ticks(void)
 {
@@ -260,7 +266,7 @@ void cpu_enable_ticks(void)
 
 /* disable cpu_get_ticks() : the clock is stopped. You must not call
  * cpu_get_ticks() after that.
- * Caller must hold BQL which server as mutex for vm_clock_seqlock.
+ * Caller must hold BQL which serves as mutex for vm_clock_seqlock.
  */
 void cpu_disable_ticks(void)
 {
@@ -551,9 +557,8 @@ static const VMStateDescription vmstate_timers = {
     }
 };
 
-static void cpu_throttle_thread(void *opaque)
+static void cpu_throttle_thread(CPUState *cpu, void *opaque)
 {
-    CPUState *cpu = opaque;
     double pct;
     double throttle_ratio;
     long sleeptime_ns;
@@ -583,7 +588,7 @@ static void cpu_throttle_timer_tick(void *opaque)
     }
     CPU_FOREACH(cpu) {
         if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
-            async_run_on_cpu(cpu, cpu_throttle_thread, cpu);
+            async_run_on_cpu(cpu, cpu_throttle_thread, NULL);
         }
     }
 
@@ -745,7 +750,8 @@ static int do_vm_stop(RunState state)
     }
 
     bdrv_drain_all();
-    ret = blk_flush_all();
+    replay_disable_events();
+    ret = bdrv_flush_all();
 
     return ret;
 }
@@ -897,79 +903,21 @@ static QemuThread io_thread;
 static QemuCond qemu_cpu_cond;
 /* system init */
 static QemuCond qemu_pause_cond;
-static QemuCond qemu_work_cond;
 
 void qemu_init_cpu_loop(void)
 {
     qemu_init_sigbus();
     qemu_cond_init(&qemu_cpu_cond);
     qemu_cond_init(&qemu_pause_cond);
-    qemu_cond_init(&qemu_work_cond);
     qemu_cond_init(&qemu_io_proceeded_cond);
     qemu_mutex_init(&qemu_global_mutex);
 
     qemu_thread_get_self(&io_thread);
 }
 
-void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
 {
-    struct qemu_work_item wi;
-
-    if (qemu_cpu_is_self(cpu)) {
-        func(data);
-        return;
-    }
-
-    wi.func = func;
-    wi.data = data;
-    wi.free = false;
-
-    qemu_mutex_lock(&cpu->work_mutex);
-    if (cpu->queued_work_first == NULL) {
-        cpu->queued_work_first = &wi;
-    } else {
-        cpu->queued_work_last->next = &wi;
-    }
-    cpu->queued_work_last = &wi;
-    wi.next = NULL;
-    wi.done = false;
-    qemu_mutex_unlock(&cpu->work_mutex);
-
-    qemu_cpu_kick(cpu);
-    while (!atomic_mb_read(&wi.done)) {
-        CPUState *self_cpu = current_cpu;
-
-        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
-        current_cpu = self_cpu;
-    }
-}
-
-void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
-{
-    struct qemu_work_item *wi;
-
-    if (qemu_cpu_is_self(cpu)) {
-        func(data);
-        return;
-    }
-
-    wi = g_malloc0(sizeof(struct qemu_work_item));
-    wi->func = func;
-    wi->data = data;
-    wi->free = true;
-
-    qemu_mutex_lock(&cpu->work_mutex);
-    if (cpu->queued_work_first == NULL) {
-        cpu->queued_work_first = wi;
-    } else {
-        cpu->queued_work_last->next = wi;
-    }
-    cpu->queued_work_last = wi;
-    wi->next = NULL;
-    wi->done = false;
-    qemu_mutex_unlock(&cpu->work_mutex);
-
-    qemu_cpu_kick(cpu);
+    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
 }
 
 static void qemu_kvm_destroy_vcpu(CPUState *cpu)
@@ -984,34 +932,6 @@ static void qemu_tcg_destroy_vcpu(CPUState *cpu)
 {
 }
 
-static void flush_queued_work(CPUState *cpu)
-{
-    struct qemu_work_item *wi;
-
-    if (cpu->queued_work_first == NULL) {
-        return;
-    }
-
-    qemu_mutex_lock(&cpu->work_mutex);
-    while (cpu->queued_work_first != NULL) {
-        wi = cpu->queued_work_first;
-        cpu->queued_work_first = wi->next;
-        if (!cpu->queued_work_first) {
-            cpu->queued_work_last = NULL;
-        }
-        qemu_mutex_unlock(&cpu->work_mutex);
-        wi->func(wi->data);
-        qemu_mutex_lock(&cpu->work_mutex);
-        if (wi->free) {
-            g_free(wi);
-        } else {
-            atomic_mb_set(&wi->done, true);
-        }
-    }
-    qemu_mutex_unlock(&cpu->work_mutex);
-    qemu_cond_broadcast(&qemu_work_cond);
-}
-
 static void qemu_wait_io_event_common(CPUState *cpu)
 {
     if (cpu->stop) {
@@ -1019,7 +939,7 @@ static void qemu_wait_io_event_common(CPUState *cpu)
         cpu->stopped = true;
         qemu_cond_broadcast(&qemu_pause_cond);
     }
-    flush_queued_work(cpu);
+    process_queued_cpu_work(cpu);
     cpu->thread_kicked = false;
 }
 
@@ -1488,7 +1408,7 @@ int vm_stop_force_state(RunState state)
         bdrv_drain_all();
         /* Make sure to return an error if the flush in a previous vm_stop()
          * failed. */
-        return blk_flush_all();
+        return bdrv_flush_all();
     }
 }
 
@@ -1538,7 +1458,9 @@ static int tcg_cpu_exec(CPUState *cpu)
        cpu->icount_decr.u16.low = decr;
        cpu->icount_extra = count;
     }
+    cpu_exec_start(cpu);
     ret = cpu_exec(cpu);
+    cpu_exec_end(cpu);
 #ifdef CONFIG_PROFILER
     tcg_time += profile_getclock() - ti;
 #endif
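
The s/server/serves/ comment fixes above document the locking scheme for the clock state: updates to timers_state are serialized by the BQL acting as the writer-side mutex of vm_clock_seqlock, while readers take no lock and simply retry on a torn read. As a rough sketch of the reader side, assuming QEMU's seqlock_read_begin()/seqlock_read_retry() helpers from include/qemu/seqlock.h (the full body of cpu_get_clock() is truncated in the hunk above, so this is an inference, not part of this patch):

/* Lock-free reader of vm_clock_seqlock: loop until a consistent
 * snapshot of timers_state is observed.  Safe to call without the
 * BQL; only writers must hold it. */
int64_t cpu_get_clock(void)
{
    int64_t ti;
    unsigned start;

    do {
        start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
        ti = cpu_get_clock_locked();   /* offset, plus get_clock() while running */
    } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));

    return ti;
}

The writer side (cpu_enable_ticks()/cpu_disable_ticks()) presumably wraps its updates in the matching seqlock write begin/end calls under the BQL, which is why both comments insist the caller hold it.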
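The bulk of the deletions fold run_on_cpu(), async_run_on_cpu() and flush_queued_work() into shared helpers (do_run_on_cpu(), process_queued_cpu_work()) that now live outside this file, and retype the callbacks as run_on_cpu_func, which receives the target CPUState directly instead of smuggling it through the opaque pointer. For a caller, the migration looks roughly like the sketch below; my_work_old/my_work_new are made-up names standing in for helpers such as cpu_throttle_thread:

/* Before this patch: the helper took only an opaque pointer, so the
 * CPU itself had to be passed as the data argument. */
static void my_work_old(void *opaque)
{
    CPUState *cpu = opaque;        /* recover the CPU from opaque */
    /* ... act on cpu ... */
}
/* scheduled as: async_run_on_cpu(cpu, my_work_old, cpu); */

/* After this patch: run_on_cpu_func passes the target CPU explicitly,
 * freeing the second argument for an actual payload (unused here). */
static void my_work_new(CPUState *cpu, void *data)
{
    /* ... act on cpu; data carries optional payload ... */
}
/* scheduled as: async_run_on_cpu(cpu, my_work_new, NULL); */

Passing the CPU explicitly is what lets the cpu_throttle_thread call site above change its data argument from cpu to NULL.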