author		Zachary Amsden		2010-08-20 10:07:23 +0200
committer	Avi Kivity		2010-10-24 10:51:23 +0200
commit		e48672fa25e879f7ae21785c7efd187738139593 (patch)
tree		bec27bad04ba6e933e72439cc565d2c752a31928 /arch/x86/kvm/x86.c
parent		KVM: x86: Warn about unstable TSC (diff)
KVM: x86: Unify TSC logic
Move the TSC control logic from the vendor backends into x86.c by adding
adjust_tsc_offset to x86 ops. Now all TSC decisions can be done in one place.

Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
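For readers unfamiliar with the x86 ops table, here is a minimal sketch of what "adding adjust_tsc_offset to x86 ops" means from the x86.c side. The struct excerpt and the example backend body are illustrative only, under the assumption that each vendor backend (svm.c, vmx.c) folds the delta into its own TSC-offset state; the field used in the example body is hypothetical, not taken from this commit.

/* Sketch: the new callback in the shared ops table (illustrative excerpt). */
struct kvm_x86_ops {
	/* ... existing callbacks ... */
	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
};

/*
 * Hypothetical backend implementation: add the requested delta to whatever
 * hardware TSC-offset field the vendor code maintains, so the generic code
 * in x86.c never needs to know how the offset is stored.
 */
static void example_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
{
	vcpu->arch.example_tsc_offset += adjustment;	/* hypothetical field */
}

With the hook in place, the generic code can compensate for cross-CPU TSC drift in one spot (the kvm_arch_vcpu_load hunk below) instead of duplicating that policy in each backend.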
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	17 ++++++++++++++---
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a8dee58e8716..468fafaed1ae 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -973,9 +973,9 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
 		return 1;
 	}
 
-	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
+	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
 		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
-		vcpu->hv_clock_tsc_khz = this_tsc_khz;
+		vcpu->hw_tsc_khz = this_tsc_khz;
 	}
 
 	/* With all the info we got, fill in the values */
@@ -1866,13 +1866,24 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	kvm_x86_ops->vcpu_load(vcpu, cpu);
-	kvm_request_guest_time_update(vcpu);
+	if (unlikely(vcpu->cpu != cpu)) {
+		/* Make sure TSC doesn't go backwards */
+		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
+				native_read_tsc() - vcpu->arch.last_host_tsc;
+		if (tsc_delta < 0)
+			mark_tsc_unstable("KVM discovered backwards TSC");
+		if (check_tsc_unstable())
+			kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
+		kvm_migrate_timers(vcpu);
+		vcpu->cpu = cpu;
+	}
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_x86_ops->vcpu_put(vcpu);
 	kvm_put_guest_fpu(vcpu);
+	vcpu->arch.last_host_tsc = native_read_tsc();
 }
 
 static int is_efer_nx(void)
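Taken together, the two hooks patched above keep the guest's view of the TSC monotonic across a physical-CPU migration. The summary below is a sketch in the diff's own terms, not a literal excerpt; guest_tsc is shorthand for how the VMX/SVM hardware applies the offset.

/*
 * vcpu_put (old pcpu):   vcpu->arch.last_host_tsc = native_read_tsc();
 * vcpu_load (new pcpu):  tsc_delta = native_read_tsc() - last_host_tsc;
 *
 * A negative tsc_delta means the new CPU's TSC reads lower than the old
 * CPU's did, i.e. the host TSCs are not synchronized, so the clocksource
 * is marked unstable.  On unstable-TSC hosts the guest-visible counter,
 *
 *	guest_tsc = host_tsc + tsc_offset,
 *
 * is rebased by passing -tsc_delta to the new adjust_tsc_offset hook: the
 * guest resumes counting from roughly the value it saw at vcpu_put time
 * and never observes the counter jumping backwards.
 */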