author	Sean Christopherson	2019-01-25 16:41:13 +0100
committer	Paolo Bonzini	2019-02-20 22:48:14 +0100
commit	c823dd5c0f3fafa595ed51cc72c6e006efc20ad3 (patch)
tree	6a969b379e2d39612074d3cb65074927e54f3abb /arch/x86/kvm/vmx/vmx.c
parent	KVM: VMX: Move vCPU-run code to a proper assembly routine (diff)
KVM: VMX: Fold __vmx_vcpu_run() back into vmx_vcpu_run()
...now that the code is no longer tagged with STACK_FRAME_NON_STANDARD.

Arguably, providing __vmx_vcpu_run() to break up vmx_vcpu_run() is
valuable on its own, but the previous split was purposely made as small
as possible to limit the effects of STACK_FRAME_NON_STANDARD.  In other
words, the current split is now completely arbitrary and likely not the
most logical.

This also allows renaming ____vmx_vcpu_run() to __vmx_vcpu_run() in a
future patch.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
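A side note on the asm() statement this patch moves (see the diff below): per the GCC documentation, a register used as an input operand may not also appear in the clobber list, so the code binds throwaway compound literals, (int){0}, as outputs ("=D", "=S", ...) in the input registers instead, telling the compiler those registers are dead after the call. A minimal standalone userspace sketch of the idiom (an illustration only, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		int out;

		asm("addl $1, %1 \n\t"	/* destroy the input register in place */
		    "movl %1, %0"	/* copy the result to the real output */
		    : "=a"(out),	/* result lands in EAX */
		      "=D"((int){0})	/* EDI: input below, dead on exit */
		    : "D"(41)		/* argument arrives in EDI */
		    : "cc");

		printf("%d\n", out);
		return 0;
	}

Built with gcc on x86, this prints 42; the dummy "=D" output is what stops the compiler from assuming EDI still holds 41 after the asm.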
Diffstat (limited to 'arch/x86/kvm/vmx/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx/vmx.c	59
1 file changed, 27 insertions(+), 32 deletions(-)
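Also visible in the hunks below is the CR2 handling that moves into vmx_vcpu_run(): the read is cheap, so the write is skipped whenever the cached guest value already matches the hardware register. A toy userspace stand-in for that pattern, with stubbed accessors (illustration only, not kernel code):

	#include <stdio.h>

	static unsigned long hw_cr2;	/* stand-in for the real CR2 register */

	static unsigned long read_cr2(void) { return hw_cr2; }	/* cheap read */
	static void write_cr2(unsigned long v) { hw_cr2 = v; }	/* "expensive" write */

	int main(void)
	{
		unsigned long guest_cr2 = 0xdeadbeef;	/* models vcpu->arch.cr2 */

		if (guest_cr2 != read_cr2())	/* skip the costly write when equal */
			write_cr2(guest_cr2);

		/* ... guest would run here and might fault, changing CR2 ... */

		guest_cr2 = read_cr2();		/* cache the value on VM-exit */
		printf("cached CR2: %#lx\n", guest_cr2);
		return 0;
	}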
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0e6e2dd6265e..e61bb8da9767 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6371,37 +6371,6 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
}
}
-static void __vmx_vcpu_run(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
-{
- if (static_branch_unlikely(&vmx_l1d_should_flush))
- vmx_l1d_flush(vcpu);
-
- if (vcpu->arch.cr2 != read_cr2())
- write_cr2(vcpu->arch.cr2);
-
- asm(
- "call ____vmx_vcpu_run \n\t"
- : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
-#ifdef CONFIG_X86_64
- "=D"((int){0}), "=S"((int){0})
- : "D"(vmx), "S"(&vcpu->arch.regs),
-#else
- "=a"((int){0}), "=d"((int){0})
- : "a"(vmx), "d"(&vcpu->arch.regs),
-#endif
- "b"(vmx->loaded_vmcs->launched)
- : "cc", "memory"
-#ifdef CONFIG_X86_64
- , "rax", "rcx", "rdx"
- , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-#else
- , "ecx", "edi", "esi"
-#endif
- );
-
- vcpu->arch.cr2 = read_cr2();
-}
-
static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6469,7 +6438,33 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
*/
x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
- __vmx_vcpu_run(vcpu, vmx);
+ if (static_branch_unlikely(&vmx_l1d_should_flush))
+ vmx_l1d_flush(vcpu);
+
+ if (vcpu->arch.cr2 != read_cr2())
+ write_cr2(vcpu->arch.cr2);
+
+ asm(
+ "call ____vmx_vcpu_run \n\t"
+ : ASM_CALL_CONSTRAINT, "=b"(vmx->fail),
+#ifdef CONFIG_X86_64
+ "=D"((int){0}), "=S"((int){0})
+ : "D"(vmx), "S"(&vcpu->arch.regs),
+#else
+ "=a"((int){0}), "=d"((int){0})
+ : "a"(vmx), "d"(&vcpu->arch.regs),
+#endif
+ "b"(vmx->loaded_vmcs->launched)
+ : "cc", "memory"
+#ifdef CONFIG_X86_64
+ , "rax", "rcx", "rdx"
+ , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+#else
+ , "ecx", "edi", "esi"
+#endif
+ );
+
+ vcpu->arch.cr2 = read_cr2();
/*
* We do not use IBRS in the kernel. If this vCPU has used the