author:    Alexander Graf, 2012-02-16 00:24:28 +0100
committer: Avi Kivity, 2012-04-08 11:54:56 +0200
commit:    1d628af78a28c77143bcdd4ed09e93bb235d4198
tree:      68c62eaade6f5e183fc043a3b423bd7cf434b6de /arch/powerpc/kvm/bookehv_interrupts.S
parent:    KVM: PPC: e500mc: Move r1/r2 restoration very early
KVM: PPC: e500mc: add load inst fixup
There's always a chance we're unable to read a guest instruction: the guest could have its TLB entry mapped executable but not readable, or something odd could happen and the TLB entry could get flushed behind our back. So it's a good idea to be prepared for that case and have a fallback that lets us fix things up when it happens.

Add fixup code that keeps guest code from potentially crashing our host kernel.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
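Once the fixup path has stored the sentinel, the common emulation code has to treat it as "no instruction available" rather than decoding garbage. A minimal sketch of such a consumer follows; the re-entry policy shown here is an illustrative assumption, not code from this patch:

	/* Sketch of a consumer of the sentinel stored by the fixup path;
	 * the resume-the-guest strategy is an assumption for illustration. */
	u32 inst = vcpu->arch.last_inst;

	if (inst == KVM_INST_FETCH_FAILED) {
		/*
		 * The guest mapping vanished between the exit and our
		 * read; go back into the guest so it can fault the page
		 * in again, after which the next exit sees a valid
		 * instruction.
		 */
		return RESUME_GUEST;
	}
	/* otherwise decode and emulate inst as usual */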
Diffstat (limited to 'arch/powerpc/kvm/bookehv_interrupts.S')
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S | 30
1 file changed, 29 insertions(+), 1 deletion(-)
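The patch below leans on the kernel's generic exception-table mechanism: a kernel-mode access that is allowed to fault gets an entry in the __ex_table section pairing its address with a fixup address, and the trap handler resumes at the fixup instead of oopsing. Conceptually, the lookup on the fault path looks like this (a sketch of the generic pattern, not code from this patch):

	/* Sketch of the generic fixup lookup done on the fault path;
	 * the real code lives in the architecture's trap handling. */
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->nip);
	if (entry) {
		regs->nip = entry->fixup;  /* resume at label 3: below */
		return;                    /* fault handled, no oops */
	}
	/* no fixup registered: a real kernel fault -> oops */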
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 63023ae14da4..e9e735057939 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -28,6 +28,7 @@
#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
+#include <asm/thread_info.h>
#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
@@ -171,9 +172,36 @@
PPC_STL r30, VCPU_GPR(r30)(r4)
PPC_STL r31, VCPU_GPR(r31)(r4)
mtspr SPRN_EPLC, r8
+
+ /* disable preemption, so we are sure we hit the fixup handler */
+#ifdef CONFIG_PPC64
+ clrrdi r8,r1,THREAD_SHIFT
+#else
+ rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */
+#endif
+ li r7, 1
+ stw r7, TI_PREEMPT(r8)
+
isync
- lwepx r9, 0, r5
+
+ /*
+ * In case the read goes wrong, we catch it and write an invalid value
+ * in LAST_INST instead.
+ */
+1: lwepx r9, 0, r5
+2:
+.section .fixup, "ax"
+3: li r9, KVM_INST_FETCH_FAILED
+ b 2b
+.previous
+.section __ex_table,"a"
+ PPC_LONG_ALIGN
+ PPC_LONG 1b,3b
+.previous
+
mtspr SPRN_EPLC, r3
+ li r7, 0
+ stw r7, TI_PREEMPT(r8)
stw r9, VCPU_LAST_INST(r4)
.endif
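Putting the pieces together, the sequence added above behaves roughly like the following C rendering. The preempt_disable()/preempt_enable() pair stands in for the stores to TI_PREEMPT: raising the preempt count before the potentially faulting lwepx makes sure the fault path cannot schedule away before the fixup is reached, exactly as the in-line comment in the patch says. This is a sketch under those assumptions; probe_guest_read() is a made-up stand-in for the lwepx-plus-__ex_table construct, which has no direct C equivalent:

	/* Hedged C rendering of the assembly flow added by this patch;
	 * probe_guest_read() is illustrative, not a real kernel API. */
	static u32 load_guest_inst(struct kvm_vcpu *vcpu, ulong addr)
	{
		u32 inst;

		preempt_disable();              /* stw 1 -> TI_PREEMPT */
		if (probe_guest_read(addr, &inst))
			/* fixup path taken: the guest mapping was gone */
			inst = KVM_INST_FETCH_FAILED;
		preempt_enable();               /* stw 0 -> TI_PREEMPT */

		vcpu->arch.last_inst = inst;    /* stw r9 -> LAST_INST */
		return inst;
	}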