author     Liran Alon       2018-06-23 01:35:13 +0200
committer  Paolo Bonzini    2018-08-06 17:58:48 +0200
commit     abfc52c612ddb101549846688bc87dfedbbfd4f4 (patch)
tree       78559764cb68ee8fc9c37e9e95cd7259c4023a8c /arch/x86
parent     KVM: VMX: Mark vmcs header as shadow in case alloc_vmcs_cpu() allocate shadow... (diff)
KVM: nVMX: Separate logic allocating shadow vmcs to a function
No functionality change. This is done as a preparation for VMCS
shadowing virtualization.

Signed-off-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kvm/vmx.c  37
1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 098926a07cf7..cbe31b9d2b81 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7905,10 +7905,35 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
return 0;
}
+/*
+ * Allocate a shadow VMCS and associate it with the currently loaded
+ * VMCS, unless such a shadow VMCS already exists. The newly allocated
+ * VMCS is also VMCLEARed, so that it is ready for use.
+ */
+static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
+
+ /*
+ * We should allocate a shadow vmcs for vmcs01 only when L1
+ * executes VMXON and free it when L1 executes VMXOFF.
+ * As it is invalid to execute VMXON twice, we shouldn't reach
+ * here when vmcs01 already has an allocated shadow vmcs.
+ */
+ WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
+
+ if (!loaded_vmcs->shadow_vmcs) {
+ loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
+ if (loaded_vmcs->shadow_vmcs)
+ vmcs_clear(loaded_vmcs->shadow_vmcs);
+ }
+ return loaded_vmcs->shadow_vmcs;
+}
+
static int enter_vmx_operation(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct vmcs *shadow_vmcs;
int r;
r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
@@ -7923,14 +7948,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
if (!vmx->nested.cached_shadow_vmcs12)
goto out_cached_shadow_vmcs12;
- if (enable_shadow_vmcs) {
- shadow_vmcs = alloc_vmcs(true);
- if (!shadow_vmcs)
- goto out_shadow_vmcs;
- /* init shadow vmcs */
- vmcs_clear(shadow_vmcs);
- vmx->vmcs01.shadow_vmcs = shadow_vmcs;
- }
+ if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
+ goto out_shadow_vmcs;
hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
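
For readers who want to experiment with this control flow outside a kernel
tree, here is a minimal userspace C sketch of the lazy-allocate-and-warn
pattern that alloc_shadow_vmcs() implements: allocate the shadow structure
only on first request, warn when a path that must not re-enter does so
anyway, and return the cached pointer thereafter. All types and helpers
below (alloc_vmcs_page(), vmcs_clear_page(), the trimmed-down structs) are
simplified stand-ins invented for illustration, not the kernel's
definitions.

/* Userspace analogue of the lazy shadow-VMCS allocation in the patch. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct vmcs { unsigned char data[4096]; };   /* stand-in for a VMCS page */

struct loaded_vmcs {
	struct vmcs *vmcs;
	struct vmcs *shadow_vmcs;            /* lazily allocated, may stay NULL */
};

/* Stand-in for alloc_vmcs(true): return a zeroed, page-sized region. */
static struct vmcs *alloc_vmcs_page(void)
{
	return calloc(1, sizeof(struct vmcs));
}

/* Stand-in for vmcs_clear(): put the VMCS into a cleared state. */
static void vmcs_clear_page(struct vmcs *vmcs)
{
	memset(vmcs, 0, sizeof(*vmcs));
}

static struct vmcs *alloc_shadow(struct loaded_vmcs *loaded_vmcs, int is_vmcs01)
{
	/*
	 * Mirrors the WARN_ON in the patch: vmcs01's shadow VMCS is created
	 * at VMXON and freed at VMXOFF, so it must not already exist here.
	 */
	if (is_vmcs01 && loaded_vmcs->shadow_vmcs)
		fprintf(stderr, "warning: vmcs01 already has a shadow vmcs\n");

	if (!loaded_vmcs->shadow_vmcs) {
		loaded_vmcs->shadow_vmcs = alloc_vmcs_page();
		if (loaded_vmcs->shadow_vmcs)
			vmcs_clear_page(loaded_vmcs->shadow_vmcs);
	}
	return loaded_vmcs->shadow_vmcs;     /* NULL signals allocation failure */
}

int main(void)
{
	struct loaded_vmcs vmcs01 = { 0 };

	/* First call allocates; callers treat NULL as out-of-memory. */
	assert(alloc_shadow(&vmcs01, 1) != NULL);
	/* A second call warns and returns the cached shadow unchanged. */
	assert(alloc_shadow(&vmcs01, 1) == vmcs01.shadow_vmcs);

	free(vmcs01.shadow_vmcs);
	return 0;
}

As in the patch itself, a NULL return is the only failure signal, which is
what lets the caller in enter_vmx_operation() collapse allocation and error
handling into the single "if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))"
check seen in the second hunk above.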