path: root/arch/x86/kvm/vmx.c
author     Avi Kivity                2012-05-13 18:53:24 +0200
committer  Marcelo Tosatti           2012-05-16 21:03:19 +0200
commit     b2da15ac26a0c00fc0d399a2bc5cf3c4e15f0b4f (patch)
tree       7cf2f30e20976877b7bcef1b76a7441f474f062d /arch/x86/kvm/vmx.c
parent     KVM: VMX: Fix %ds/%es clobber (diff)
download   kernel-qcow2-linux-b2da15ac26a0c00fc0d399a2bc5cf3c4e15f0b4f.tar.gz
           kernel-qcow2-linux-b2da15ac26a0c00fc0d399a2bc5cf3c4e15f0b4f.tar.xz
           kernel-qcow2-linux-b2da15ac26a0c00fc0d399a2bc5cf3c4e15f0b4f.zip
KVM: VMX: Optimize %ds, %es reload
On x86_64, we can defer %ds and %es reload to the heavyweight context
switch, since nothing in the lightweight paths uses the host %ds or %es
(they are ignored by the processor). Furthermore, we can avoid the load
if the segments are null, by letting the hardware load the null segments
for us. This is the expected case.

On i386, we could avoid the reload entirely, since the entry.S paths take
care of the reload, except for the SYSEXIT path, which leaves %ds and %es
set to __USER_DS. So we set them to the same values as well.

Saves about 70 cycles out of 1600 (around 4%; noisy measurements).

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
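To make the diff below easier to follow, here is the introduced pattern
reduced to a minimal sketch. It is illustrative only: the two function
names are invented for this summary, while savesegment(), loadsegment(),
unlikely(), and the HOST_DS_SELECTOR/HOST_ES_SELECTOR VMCS fields are the
real kernel/VMX names the patch uses. It shows the x86_64 half of the
change: the host selectors are saved cheaply on every lightweight switch
and reloaded on the heavyweight path only when they are non-null, because
VM exit has already loaded the null selectors programmed into the VMCS
host-state fields.

#ifdef CONFIG_X86_64
/* Illustrative sketch; host_segments_save()/host_segments_restore()
 * are hypothetical names, not functions from the patch. */
static u16 host_ds_sel, host_es_sel;

static void host_segments_save(void)          /* lightweight path */
{
	savesegment(ds, host_ds_sel);         /* only reads the selector */
	savesegment(es, host_es_sel);
}

static void host_segments_restore(void)       /* heavyweight path */
{
	/*
	 * Expected case: both selectors are null. VM exit has already
	 * loaded null into %ds/%es from the VMCS host-state fields, so
	 * the relatively expensive segment loads can be skipped; the
	 * bitwise OR folds the two checks into a single branch.
	 */
	if (unlikely(host_ds_sel | host_es_sel)) {
		loadsegment(ds, host_ds_sel);
		loadsegment(es, host_es_sel);
	}
}
#endif

On i386, the same spot in the patch instead does an unconditional
loadsegment(ds, __USER_DS) and loadsegment(es, __USER_DS), matching the
values every kernel exit path except SYSEXIT would restore anyway.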
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx.c | 36 +++++++++++++++++++++++++++++++-----
1 files changed, 31 insertions, 5 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f2ee016e1004..32eb58866292 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -393,6 +393,9 @@ struct vcpu_vmx {
struct {
int loaded;
u16 fs_sel, gs_sel, ldt_sel;
+#ifdef CONFIG_X86_64
+ u16 ds_sel, es_sel;
+#endif
int gs_ldt_reload_needed;
int fs_reload_needed;
} host_state;
@@ -1418,6 +1421,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
}
#ifdef CONFIG_X86_64
+ savesegment(ds, vmx->host_state.ds_sel);
+ savesegment(es, vmx->host_state.es_sel);
+#endif
+
+#ifdef CONFIG_X86_64
vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
@@ -1457,6 +1465,19 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
}
if (vmx->host_state.fs_reload_needed)
loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+ if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
+ loadsegment(ds, vmx->host_state.ds_sel);
+ loadsegment(es, vmx->host_state.es_sel);
+ }
+#else
+ /*
+ * The sysexit path does not restore ds/es, so we must set them to
+ * a reasonable value ourselves.
+ */
+ loadsegment(ds, __USER_DS);
+ loadsegment(es, __USER_DS);
+#endif
reload_tss();
#ifdef CONFIG_X86_64
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
@@ -3640,8 +3661,18 @@ static void vmx_set_constant_host_state(void)
vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
+#ifdef CONFIG_X86_64
+ /*
+ * Load null selectors, so we can avoid reloading them in
+ * __vmx_load_host_state(), in case userspace uses the null selectors
+ * too (the expected case).
+ */
+ vmcs_write16(HOST_DS_SELECTOR, 0);
+ vmcs_write16(HOST_ES_SELECTOR, 0);
+#else
vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
+#endif
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
@@ -6102,10 +6133,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u16 _ds, _es;
- savesegment(ds, _ds);
- savesegment(es, _es);
if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
if (vmcs12->idt_vectoring_info_field &
@@ -6266,8 +6294,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
}
}
- loadsegment(ds, _ds);
- loadsegment(es, _es);
vmx->loaded_vmcs->launched = 1;
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
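
A quick way to see why null selectors are "the expected case" the patch
optimizes for: 64-bit Linux userspace normally runs with %ds and %es set
to the null selector. The following user-space check is a sketch under
that assumption (GCC/Clang inline asm, x86_64 only); on typical systems
it prints 0 for both, which is exactly the state the VMCS host fields
above let the hardware restore for free.

/* Print the %ds/%es selectors of the current 64-bit process. */
#include <stdio.h>

int main(void)
{
	unsigned short ds, es;

	__asm__("mov %%ds, %0" : "=r"(ds));
	__asm__("mov %%es, %0" : "=r"(es));
	printf("ds=%#x es=%#x\n", ds, es);  /* usually ds=0 es=0 */
	return 0;
}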