author		Paolo Bonzini <pbonzini@redhat.com>	2018-02-22 16:43:17 +0100
committer	Ingo Molnar <mingo@kernel.org>	2018-02-23 08:24:35 +0100
commit		ecb586bd29c99fb4de599dec388658e74388daad (patch)
tree		5c309f49fee88e9690ead6beac4c9b3e70aa7d3c /arch/x86/kvm/svm.c
parent		objtool, retpolines: Integrate objtool with retpoline support more closely (diff)
KVM/x86: Remove indirect MSR op calls from SPEC_CTRL
Having a paravirt indirect call in the IBRS restore path is not a
good idea, since we are trying to protect from speculative execution
of bogus indirect branch targets. It is also slower, so use
native_wrmsrl() on the vmentry path too.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: KarimAllah Ahmed <karahmed@amazon.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kvm@vger.kernel.org
Cc: stable@vger.kernel.org
Fixes: d28b387fb74da95d69d2615732f50cceb38e9a4d
Link: http://lkml.kernel.org/r/20180222154318.20361-2-pbonzini@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
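For context, a minimal sketch of the distinction the patch cares about; this is
not the kernel's actual macro expansion, and the pv_msr_ops structure and the
pv_wrmsrl/native_wrmsrl_sketch names below are hypothetical stand-ins. With
CONFIG_PARAVIRT, wrmsrl() reaches the hardware through a boot-time-patched
function pointer, i.e. an indirect call, while native_wrmsrl() emits the wrmsr
instruction inline at the call site.

	#include <stdint.h>

	/*
	 * Paravirt-style write: the MSR write dispatches through a
	 * function pointer patched at boot.  The dispatch itself is an
	 * indirect branch, the very thing the SPEC_CTRL/IBRS restore
	 * path must not execute before the MSR write takes effect.
	 */
	struct pv_msr_ops {
		void (*write_msr)(uint32_t msr, uint32_t low, uint32_t high);
	};
	extern struct pv_msr_ops pv_msr_ops;	/* hypothetical stand-in */

	static inline void pv_wrmsrl(uint32_t msr, uint64_t val)
	{
		pv_msr_ops.write_msr(msr, (uint32_t)val, (uint32_t)(val >> 32));
	}

	/*
	 * Native-style write: the wrmsr instruction is emitted directly,
	 * so no branch, indirect or otherwise, is involved.
	 */
	static inline void native_wrmsrl_sketch(uint32_t msr, uint64_t val)
	{
		asm volatile("wrmsr"
			     : /* no outputs */
			     : "c" (msr), "a" ((uint32_t)val),
			       "d" ((uint32_t)(val >> 32))
			     : "memory");
	}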
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--	arch/x86/kvm/svm.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b3e488a74828..1598beeda11c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -49,6 +49,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include <asm/virtext.h>
@@ -5355,7 +5356,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5465,10 +5466,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * save it.
 	 */
 	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
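Note on the read side of this hunk: rdmsrl() is a statement-like macro that
assigns into its second argument, whereas native_read_msr() returns the value,
which is why the replacement line becomes an explicit assignment. A simplified
sketch of what the native read helper does (the real helper in
arch/x86/include/asm/msr.h of this era also carries tracing and fault
handling; the _sketch name is ours):

	/*
	 * Simplified: read an MSR with the rdmsr instruction directly,
	 * with no paravirt indirection on the way to the hardware.
	 */
	static inline unsigned long long native_read_msr_sketch(unsigned int msr)
	{
		unsigned int low, high;

		asm volatile("rdmsr" : "=a" (low), "=d" (high) : "c" (msr));
		return ((unsigned long long)high << 32) | low;
	}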