author | Peter Maydell | 2018-10-02 19:27:18 +0200 |
---|---|---|
committer | Peter Maydell | 2018-10-02 19:27:18 +0200 |
commit | dafd95053611aa14dda40266857608d12ddce658 (patch) | |
tree | b414d9e2871c2a701ed3c42a15cfd7d289a9db7e /cpus.c | |
parent | Merge remote-tracking branch 'remotes/dgibson/tags/libfdt-20181002' into staging (diff) | |
parent | hw/scsi/mptendian: Avoid taking address of fields in packed structs (diff) | |
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* configure fix for environment variables (Daniel)
* fix memory leaks (Alex)
* x86_64 MTTCG fixes (Emilio)
* introduce atomic64 (Emilio)
* Fix for virtio hang (Fam, myself)
* SH serial port fix (Geert)
* Deprecate rotation_rate for scsi-block (Fam)
* Extend memory-backend-file availability to all POSIX hosts (Hikaru)
* Memory API cleanups and fixes (Igor, Li Qiang, Peter, Philippe)
* MSI/IOMMU fix (Jan)
* Socket reconnection fixes (Marc-André)
* icount fixes (Emilio, myself)
* QSP fixes for Coverity (myself)
* Some record/replay improvements (Pavel)
* Packed struct fixes (Peter)
* Windows dump fixes and elf2dmp (Viktor)
* kvmclock fix (Yongji)
# gpg: Signature made Tue 02 Oct 2018 18:13:12 BST
# gpg: using RSA key BFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg: aka "Paolo Bonzini <pbonzini@redhat.com>"
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
# Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83
* remotes/bonzini/tags/for-upstream: (80 commits)
hw/scsi/mptendian: Avoid taking address of fields in packed structs
cpus: fix TCG kick timer leak
docs/devel/memory.txt: Document _with_attrs accessors
hw/nvram/fw_cfg: Use memberwise copy of MemoryRegionOps struct
memory: Remove old_mmio accessors
memory: Fix access_with_adjusted_size(small size) on big-endian memory regions
memory: Refactor common shifting code from accessors
memory: Use MAKE_64BIT_MASK()
virtio: do not take address of packed members
replay: replay BH for IDE trim operation
hostmem-file: make available memory-backend-file on POSIX-based hosts
target/i386: fix translation for icount mode
hvf: drop unused variable
qom/object: add some interface asserts
accel/tcg: Remove dead code
lsi53c895a: convert to trace-events
scsi-block: Deprecate rotation_rate
kvmclock: run KVM_KVMCLOCK_CTRL ioctl in vcpu thread
MAINTAINERS: add myself as elf2dmp maintainer
contrib: add elf2dmp tool
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'cpus.c')
-rw-r--r-- | cpus.c | 84 |
1 file changed, 52 insertions, 32 deletions
```diff
diff --git a/cpus.c b/cpus.c
--- a/cpus.c
+++ b/cpus.c
@@ -245,21 +245,27 @@ static int64_t cpu_get_icount_executed(CPUState *cpu)
  * account executed instructions. This is done by the TCG vCPU
  * thread so the main-loop can see time has moved forward.
  */
-void cpu_update_icount(CPUState *cpu)
+static void cpu_update_icount_locked(CPUState *cpu)
 {
     int64_t executed = cpu_get_icount_executed(cpu);
     cpu->icount_budget -= executed;
 
-#ifndef CONFIG_ATOMIC64
+    atomic_set_i64(&timers_state.qemu_icount,
+                   timers_state.qemu_icount + executed);
+}
+
+/*
+ * Update the global shared timer_state.qemu_icount to take into
+ * account executed instructions. This is done by the TCG vCPU
+ * thread so the main-loop can see time has moved forward.
+ */
+void cpu_update_icount(CPUState *cpu)
+{
     seqlock_write_lock(&timers_state.vm_clock_seqlock,
                        &timers_state.vm_clock_lock);
-#endif
-    atomic_set__nocheck(&timers_state.qemu_icount,
-                        timers_state.qemu_icount + executed);
-#ifndef CONFIG_ATOMIC64
+    cpu_update_icount_locked(cpu);
     seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                          &timers_state.vm_clock_lock);
-#endif
 }
 
 static int64_t cpu_get_icount_raw_locked(void)
@@ -272,16 +278,17 @@ static int64_t cpu_get_icount_raw_locked(void)
             exit(1);
         }
         /* Take into account what has run */
-        cpu_update_icount(cpu);
+        cpu_update_icount_locked(cpu);
     }
-    /* The read is protected by the seqlock, so __nocheck is okay. */
-    return atomic_read__nocheck(&timers_state.qemu_icount);
+    /* The read is protected by the seqlock, but needs atomic64 to avoid UB */
+    return atomic_read_i64(&timers_state.qemu_icount);
 }
 
 static int64_t cpu_get_icount_locked(void)
 {
     int64_t icount = cpu_get_icount_raw_locked();
-    return atomic_read__nocheck(&timers_state.qemu_icount_bias) + cpu_icount_to_ns(icount);
+    return atomic_read_i64(&timers_state.qemu_icount_bias) +
+           cpu_icount_to_ns(icount);
 }
 
 int64_t cpu_get_icount_raw(void)
@@ -454,9 +461,9 @@ static void icount_adjust(void)
                    timers_state.icount_time_shift + 1);
     }
     last_delta = delta;
-    atomic_set__nocheck(&timers_state.qemu_icount_bias,
-                        cur_icount - (timers_state.qemu_icount
-                                      << timers_state.icount_time_shift));
+    atomic_set_i64(&timers_state.qemu_icount_bias,
+                   cur_icount - (timers_state.qemu_icount
+                                 << timers_state.icount_time_shift));
     seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                          &timers_state.vm_clock_lock);
 }
@@ -516,8 +523,8 @@ static void icount_warp_rt(void)
             int64_t delta = clock - cur_icount;
             warp_delta = MIN(warp_delta, delta);
         }
-        atomic_set__nocheck(&timers_state.qemu_icount_bias,
-                            timers_state.qemu_icount_bias + warp_delta);
+        atomic_set_i64(&timers_state.qemu_icount_bias,
+                       timers_state.qemu_icount_bias + warp_delta);
     }
     timers_state.vm_clock_warp_start = -1;
     seqlock_write_unlock(&timers_state.vm_clock_seqlock,
@@ -548,8 +555,8 @@ void qtest_clock_warp(int64_t dest)
 
         seqlock_write_lock(&timers_state.vm_clock_seqlock,
                            &timers_state.vm_clock_lock);
-        atomic_set__nocheck(&timers_state.qemu_icount_bias,
-                            timers_state.qemu_icount_bias + warp);
+        atomic_set_i64(&timers_state.qemu_icount_bias,
+                       timers_state.qemu_icount_bias + warp);
         seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                              &timers_state.vm_clock_lock);
 
@@ -576,18 +583,29 @@ void qemu_start_warp_timer(void)
         return;
     }
 
-    /* warp clock deterministically in record/replay mode */
-    if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
-        return;
-    }
+    if (replay_mode != REPLAY_MODE_PLAY) {
+        if (!all_cpu_threads_idle()) {
+            return;
+        }
 
-    if (!all_cpu_threads_idle()) {
-        return;
-    }
+        if (qtest_enabled()) {
+            /* When testing, qtest commands advance icount. */
+            return;
+        }
 
-    if (qtest_enabled()) {
-        /* When testing, qtest commands advance icount. */
-        return;
+        replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
+    } else {
+        /* warp clock deterministically in record/replay mode */
+        if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
+            /* vCPU is sleeping and warp can't be started.
+               It is probably a race condition: notification sent
+               to vCPU was processed in advance and vCPU went to sleep.
+               Therefore we have to wake it up for doing someting. */
+            if (replay_has_checkpoint()) {
+                qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
+            }
+            return;
+        }
     }
 
     /* We want to use the earliest deadline from ALL vm_clocks */
@@ -620,8 +638,8 @@ void qemu_start_warp_timer(void)
              */
            seqlock_write_lock(&timers_state.vm_clock_seqlock,
                                &timers_state.vm_clock_lock);
-            atomic_set__nocheck(&timers_state.qemu_icount_bias,
-                                timers_state.qemu_icount_bias + deadline);
+            atomic_set_i64(&timers_state.qemu_icount_bias,
+                           timers_state.qemu_icount_bias + deadline);
             seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                                  &timers_state.vm_clock_lock);
             qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
@@ -823,6 +841,7 @@ int cpu_throttle_get_percentage(void)
 void cpu_ticks_init(void)
 {
     seqlock_init(&timers_state.vm_clock_seqlock);
+    qemu_spin_init(&timers_state.vm_clock_lock);
     vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
     throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
                                   cpu_throttle_timer_tick, NULL);
@@ -964,6 +983,8 @@ static void start_tcg_kick_timer(void)
     if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
         tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                            kick_tcg_thread, NULL);
+    }
+    if (tcg_kick_vcpu_timer && !timer_pending(tcg_kick_vcpu_timer)) {
         timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
     }
 }
@@ -971,9 +992,8 @@ static void start_tcg_kick_timer(void)
 static void stop_tcg_kick_timer(void)
 {
     assert(!mttcg_enabled);
-    if (tcg_kick_vcpu_timer) {
+    if (tcg_kick_vcpu_timer && timer_pending(tcg_kick_vcpu_timer)) {
         timer_del(tcg_kick_vcpu_timer);
-        tcg_kick_vcpu_timer = NULL;
     }
 }
```