author		Ladi Prosek		2017-10-11 16:54:40 +0200
committer	Paolo Bonzini		2017-10-12 14:01:55 +0200
commit		0234bf885236a41ef05376039f2a8ebe7028a388 (patch)
tree		ce9efe96403c64ef235b626357111d1151f94834 /arch/x86/kvm/vmx.c
parent		KVM: SVM: limit kvm_handle_page_fault to #PF handling (diff)
KVM: x86: introduce ISA specific SMM entry/exit callbacks
Entering and exiting SMM may require ISA specific handling under certain
circumstances. This commit adds two new callbacks with empty implementations.
Actual functionality will be added in following commits.

* pre_enter_smm() is to be called when injecting an SMM, before any SMM
  related vcpu state has been changed

* pre_leave_smm() is to be called when emulating the RSM instruction, when
  the vcpu is in real mode and before any SMM related vcpu state has been
  restored

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
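For orientation, a minimal sketch of the corresponding hook declarations in
struct kvm_x86_ops (outside the file shown on this page); the signatures are
inferred from the vmx_pre_enter_smm()/vmx_pre_leave_smm() stubs in the diff
below, and the comment wording is illustrative rather than quoted from the
patch:

	/* ISA specific SMM entry/exit hooks.  The stubs introduced by this
	 * commit simply return 0; vendor specific behaviour is filled in by
	 * later patches in the series.
	 */
	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);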
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c9214e3a01df..1305bb65688b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11916,6 +11916,18 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 			~FEATURE_CONTROL_LMCE;
 }
 
+static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+	/* TODO: Implement */
+	return 0;
+}
+
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+{
+	/* TODO: Implement */
+	return 0;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -12041,6 +12053,9 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 #endif
 
 	.setup_mce = vmx_setup_mce,
+
+	.pre_enter_smm = vmx_pre_enter_smm,
+	.pre_leave_smm = vmx_pre_leave_smm,
 };
 
 static int __init vmx_init(void)
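As a usage sketch only (this diff is limited to vmx.c, so the call sites are
not shown here), the common x86 code would be expected to invoke the hooks
roughly as follows; the surrounding names (buf, smbase, the
X86EMUL_UNHANDLEABLE error path) are illustrative assumptions rather than
lines from this commit:

	/* When injecting an SMM: let the vendor module act before any SMM
	 * related vcpu state has been changed.
	 */
	kvm_x86_ops->pre_enter_smm(vcpu, buf);

	/* When emulating RSM, while the vcpu is still in real mode and before
	 * any SMM related vcpu state has been restored.  Treating a non-zero
	 * return as a failure is an assumption; the stubs always return 0.
	 */
	if (kvm_x86_ops->pre_leave_smm(vcpu, smbase))
		return X86EMUL_UNHANDLEABLE;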