path: root/arch/powerpc/kvm
author    Scott Wood    2011-11-09 01:23:30 +0100
committer Avi Kivity    2012-03-05 13:52:26 +0100
commit    b59049720dd95021dfe0d9f4e1fa9458a67cfe29 (patch)
tree      3b54577e12ba4a84bec409518c6c8f399ebba6e0 /arch/powerpc/kvm
parent    KVM: PPC: booke: Paravirtualize wrtee (diff)
KVM: PPC: Paravirtualize SPRG4-7, ESR, PIR, MASn
This allows additional registers to be accessed by the guest in PR-mode KVM without trapping.

SPRG4-7 are readable from userspace. On booke, KVM will sync these registers when it enters the guest, so that accesses from guest userspace will work. The guest kernel, OTOH, must consistently use either the real registers or the shared area between exits. This also applies to the already-paravirted SPRG3.

On non-booke, it's not clear to what extent SPRG4-7 are supported (they're not architected for book3s, but exist on at least some classic chips). They are copied in the get/set regs ioctls, but I do not see any non-booke emulation. I also do not see any syncing with real registers (in PR-mode), including the user-readable SPRG3. This patch should not make that situation any worse.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
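[Editor's note, not part of the patch] The guest-side effect can be sketched as follows: once a register such as SPRG4 lives in the shared (magic) page, the guest can read it with a plain load instead of an mfspr that would trap in PR-mode. The magic_page pointer and pv_read_sprg4() helper below are hypothetical illustrations; the in-tree guest support (arch/powerpc/kernel/kvm.c) instead patches the mfspr/mtspr sites at boot.

	/*
	 * Illustrative sketch only: reading a paravirtualized SPRG through the
	 * shared area.  "magic_page" is a hypothetical pointer to the guest
	 * mapping of struct kvm_vcpu_arch_shared.
	 */
	#include <asm/kvm_para.h>
	#include <asm/reg.h>

	static struct kvm_vcpu_arch_shared *magic_page;	/* assumed set up by PV init */

	static inline unsigned long pv_read_sprg4(void)
	{
		if (magic_page)
			return magic_page->sprg4;	/* plain load, no exit to the host */
		return mfspr(SPRN_SPRG4);		/* fall back to the real SPR */
	}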
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s.c            16
-rw-r--r--  arch/powerpc/kvm/booke.c             23
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c     12
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S  18
-rw-r--r--  arch/powerpc/kvm/e500.c              24
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c      38
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c          83
-rw-r--r--  arch/powerpc/kvm/e500_tlb.h          25
-rw-r--r--  arch/powerpc/kvm/emulate.c            3
-rw-r--r--  arch/powerpc/kvm/powerpc.c            2
10 files changed, 128 insertions, 116 deletions
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 73fc9f046107..5398744cd773 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -423,10 +423,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->sprg1 = vcpu->arch.shared->sprg1;
regs->sprg2 = vcpu->arch.shared->sprg2;
regs->sprg3 = vcpu->arch.shared->sprg3;
- regs->sprg4 = vcpu->arch.sprg4;
- regs->sprg5 = vcpu->arch.sprg5;
- regs->sprg6 = vcpu->arch.sprg6;
- regs->sprg7 = vcpu->arch.sprg7;
+ regs->sprg4 = vcpu->arch.shared->sprg4;
+ regs->sprg5 = vcpu->arch.shared->sprg5;
+ regs->sprg6 = vcpu->arch.shared->sprg6;
+ regs->sprg7 = vcpu->arch.shared->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -450,10 +450,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.shared->sprg1 = regs->sprg1;
vcpu->arch.shared->sprg2 = regs->sprg2;
vcpu->arch.shared->sprg3 = regs->sprg3;
- vcpu->arch.sprg4 = regs->sprg4;
- vcpu->arch.sprg5 = regs->sprg5;
- vcpu->arch.sprg6 = regs->sprg6;
- vcpu->arch.sprg7 = regs->sprg7;
+ vcpu->arch.shared->sprg4 = regs->sprg4;
+ vcpu->arch.shared->sprg5 = regs->sprg5;
+ vcpu->arch.shared->sprg6 = regs->sprg6;
+ vcpu->arch.shared->sprg7 = regs->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8dfc59a8a715..50803dd0b8f2 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -270,7 +270,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
if (update_esr == true)
- vcpu->arch.esr = vcpu->arch.queued_esr;
+ vcpu->arch.shared->esr = vcpu->arch.queued_esr;
if (update_dear == true)
vcpu->arch.shared->dar = vcpu->arch.queued_dear;
kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
@@ -644,6 +644,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.pc = 0;
vcpu->arch.shared->msr = 0;
vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
+ vcpu->arch.shared->pir = vcpu->vcpu_id;
kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
vcpu->arch.shadow_pid = 1;
@@ -678,10 +679,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->sprg1 = vcpu->arch.shared->sprg1;
regs->sprg2 = vcpu->arch.shared->sprg2;
regs->sprg3 = vcpu->arch.shared->sprg3;
- regs->sprg4 = vcpu->arch.sprg4;
- regs->sprg5 = vcpu->arch.sprg5;
- regs->sprg6 = vcpu->arch.sprg6;
- regs->sprg7 = vcpu->arch.sprg7;
+ regs->sprg4 = vcpu->arch.shared->sprg4;
+ regs->sprg5 = vcpu->arch.shared->sprg5;
+ regs->sprg6 = vcpu->arch.shared->sprg6;
+ regs->sprg7 = vcpu->arch.shared->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -706,10 +707,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.shared->sprg1 = regs->sprg1;
vcpu->arch.shared->sprg2 = regs->sprg2;
vcpu->arch.shared->sprg3 = regs->sprg3;
- vcpu->arch.sprg4 = regs->sprg4;
- vcpu->arch.sprg5 = regs->sprg5;
- vcpu->arch.sprg6 = regs->sprg6;
- vcpu->arch.sprg7 = regs->sprg7;
+ vcpu->arch.shared->sprg4 = regs->sprg4;
+ vcpu->arch.shared->sprg5 = regs->sprg5;
+ vcpu->arch.shared->sprg6 = regs->sprg6;
+ vcpu->arch.shared->sprg7 = regs->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -727,7 +728,7 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
sregs->u.e.csrr0 = vcpu->arch.csrr0;
sregs->u.e.csrr1 = vcpu->arch.csrr1;
sregs->u.e.mcsr = vcpu->arch.mcsr;
- sregs->u.e.esr = vcpu->arch.esr;
+ sregs->u.e.esr = vcpu->arch.shared->esr;
sregs->u.e.dear = vcpu->arch.shared->dar;
sregs->u.e.tsr = vcpu->arch.tsr;
sregs->u.e.tcr = vcpu->arch.tcr;
@@ -745,7 +746,7 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
vcpu->arch.csrr0 = sregs->u.e.csrr0;
vcpu->arch.csrr1 = sregs->u.e.csrr1;
vcpu->arch.mcsr = sregs->u.e.mcsr;
- vcpu->arch.esr = sregs->u.e.esr;
+ vcpu->arch.shared->esr = sregs->u.e.esr;
vcpu->arch.shared->dar = sregs->u.e.dear;
vcpu->arch.vrsave = sregs->u.e.vrsave;
vcpu->arch.tcr = sregs->u.e.tcr;
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 1260f5f24c0c..bae9288ac1e1 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -107,7 +107,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
case SPRN_DEAR:
vcpu->arch.shared->dar = spr_val; break;
case SPRN_ESR:
- vcpu->arch.esr = spr_val; break;
+ vcpu->arch.shared->esr = spr_val; break;
case SPRN_DBCR0:
vcpu->arch.dbcr0 = spr_val; break;
case SPRN_DBCR1:
@@ -125,13 +125,13 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
* loaded into the real SPRGs when resuming the
* guest. */
case SPRN_SPRG4:
- vcpu->arch.sprg4 = spr_val; break;
+ vcpu->arch.shared->sprg4 = spr_val; break;
case SPRN_SPRG5:
- vcpu->arch.sprg5 = spr_val; break;
+ vcpu->arch.shared->sprg5 = spr_val; break;
case SPRN_SPRG6:
- vcpu->arch.sprg6 = spr_val; break;
+ vcpu->arch.shared->sprg6 = spr_val; break;
case SPRN_SPRG7:
- vcpu->arch.sprg7 = spr_val; break;
+ vcpu->arch.shared->sprg7 = spr_val; break;
case SPRN_IVPR:
vcpu->arch.ivpr = spr_val;
@@ -202,7 +202,7 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_DEAR:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
case SPRN_ESR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
case SPRN_DBCR0:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
case SPRN_DBCR1:
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 42f2fb1f66e9..10d8ef602e5c 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -402,19 +402,25 @@ lightweight_exit:
/* Save vcpu pointer for the exception handlers. */
mtspr SPRN_SPRG_WVCPU, r4
+ lwz r5, VCPU_SHARED(r4)
+
/* Can't switch the stack pointer until after IVPR is switched,
* because host interrupt handlers would get confused. */
lwz r1, VCPU_GPR(r1)(r4)
- /* Host interrupt handlers may have clobbered these guest-readable
- * SPRGs, so we need to reload them here with the guest's values. */
- lwz r3, VCPU_SPRG4(r4)
+ /*
+ * Host interrupt handlers may have clobbered these
+ * guest-readable SPRGs, or the guest kernel may have
+ * written directly to the shared area, so we
+ * need to reload them here with the guest's values.
+ */
+ lwz r3, VCPU_SHARED_SPRG4(r5)
mtspr SPRN_SPRG4W, r3
- lwz r3, VCPU_SPRG5(r4)
+ lwz r3, VCPU_SHARED_SPRG5(r5)
mtspr SPRN_SPRG5W, r3
- lwz r3, VCPU_SPRG6(r4)
+ lwz r3, VCPU_SHARED_SPRG6(r5)
mtspr SPRN_SPRG6W, r3
- lwz r3, VCPU_SPRG7(r4)
+ lwz r3, VCPU_SHARED_SPRG7(r5)
mtspr SPRN_SPRG7W, r3
#ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index ac3c4bf21677..709d82f956e3 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -115,12 +115,12 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
- sregs->u.e.mas0 = vcpu_e500->mas0;
- sregs->u.e.mas1 = vcpu_e500->mas1;
- sregs->u.e.mas2 = vcpu_e500->mas2;
- sregs->u.e.mas7_3 = vcpu_e500->mas7_3;
- sregs->u.e.mas4 = vcpu_e500->mas4;
- sregs->u.e.mas6 = vcpu_e500->mas6;
+ sregs->u.e.mas0 = vcpu->arch.shared->mas0;
+ sregs->u.e.mas1 = vcpu->arch.shared->mas1;
+ sregs->u.e.mas2 = vcpu->arch.shared->mas2;
+ sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
+ sregs->u.e.mas4 = vcpu->arch.shared->mas4;
+ sregs->u.e.mas6 = vcpu->arch.shared->mas6;
sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG);
sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg;
@@ -148,12 +148,12 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
}
if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
- vcpu_e500->mas0 = sregs->u.e.mas0;
- vcpu_e500->mas1 = sregs->u.e.mas1;
- vcpu_e500->mas2 = sregs->u.e.mas2;
- vcpu_e500->mas7_3 = sregs->u.e.mas7_3;
- vcpu_e500->mas4 = sregs->u.e.mas4;
- vcpu_e500->mas6 = sregs->u.e.mas6;
+ vcpu->arch.shared->mas0 = sregs->u.e.mas0;
+ vcpu->arch.shared->mas1 = sregs->u.e.mas1;
+ vcpu->arch.shared->mas2 = sregs->u.e.mas2;
+ vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
+ vcpu->arch.shared->mas4 = sregs->u.e.mas4;
+ vcpu->arch.shared->mas6 = sregs->u.e.mas6;
}
if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index e0d36099c756..6d0b2bd54fb0 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -89,22 +89,22 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
return EMULATE_FAIL;
vcpu_e500->pid[2] = spr_val; break;
case SPRN_MAS0:
- vcpu_e500->mas0 = spr_val; break;
+ vcpu->arch.shared->mas0 = spr_val; break;
case SPRN_MAS1:
- vcpu_e500->mas1 = spr_val; break;
+ vcpu->arch.shared->mas1 = spr_val; break;
case SPRN_MAS2:
- vcpu_e500->mas2 = spr_val; break;
+ vcpu->arch.shared->mas2 = spr_val; break;
case SPRN_MAS3:
- vcpu_e500->mas7_3 &= ~(u64)0xffffffff;
- vcpu_e500->mas7_3 |= spr_val;
+ vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
+ vcpu->arch.shared->mas7_3 |= spr_val;
break;
case SPRN_MAS4:
- vcpu_e500->mas4 = spr_val; break;
+ vcpu->arch.shared->mas4 = spr_val; break;
case SPRN_MAS6:
- vcpu_e500->mas6 = spr_val; break;
+ vcpu->arch.shared->mas6 = spr_val; break;
case SPRN_MAS7:
- vcpu_e500->mas7_3 &= (u64)0xffffffff;
- vcpu_e500->mas7_3 |= (u64)spr_val << 32;
+ vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
+ vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
break;
case SPRN_L1CSR0:
vcpu_e500->l1csr0 = spr_val;
@@ -147,6 +147,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
+ unsigned long val;
switch (sprn) {
case SPRN_PID:
@@ -156,20 +157,23 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
case SPRN_PID2:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
case SPRN_MAS0:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
case SPRN_MAS1:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
case SPRN_MAS2:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
case SPRN_MAS3:
- kvmppc_set_gpr(vcpu, rt, (u32)vcpu_e500->mas7_3); break;
+ val = (u32)vcpu->arch.shared->mas7_3;
+ kvmppc_set_gpr(vcpu, rt, val);
+ break;
case SPRN_MAS4:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
case SPRN_MAS6:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
case SPRN_MAS7:
- kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7_3 >> 32); break;
-
+ val = vcpu->arch.shared->mas7_3 >> 32;
+ kvmppc_set_gpr(vcpu, rt, val);
+ break;
case SPRN_TLB0CFG:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
case SPRN_TLB1CFG:
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 6fefb9144f23..9cd124a11acd 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -428,13 +428,14 @@ static int htlb0_set_base(gva_t addr)
host_tlb_params[0].ways);
}
-static unsigned int get_tlb_esel(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel)
+static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
- unsigned int esel = get_tlb_esel_bit(vcpu_e500);
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int esel = get_tlb_esel_bit(vcpu);
if (tlbsel == 0) {
esel &= vcpu_e500->gtlb_params[0].ways - 1;
- esel += gtlb0_set_base(vcpu_e500, vcpu_e500->mas2);
+ esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
} else {
esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
}
@@ -545,20 +546,20 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
int tlbsel;
/* since we only have two TLBs, only lower bit is used. */
- tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
+ tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
- pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
- tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
+ pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
+ tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
- vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
+ vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
- vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
+ vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
| MAS1_TID(vcpu_e500->pid[pidsel])
| MAS1_TSIZE(tsized);
- vcpu_e500->mas2 = (eaddr & MAS2_EPN)
- | (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
- vcpu_e500->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
- vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
+ vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
+ | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
+ vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
+ vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
| (get_cur_pid(vcpu) << 16)
| (as ? MAS6_SAS : 0);
}
@@ -844,15 +845,15 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
int tlbsel, esel;
struct kvm_book3e_206_tlb_entry *gtlbe;
- tlbsel = get_tlb_tlbsel(vcpu_e500);
- esel = get_tlb_esel(vcpu_e500, tlbsel);
+ tlbsel = get_tlb_tlbsel(vcpu);
+ esel = get_tlb_esel(vcpu, tlbsel);
gtlbe = get_entry(vcpu_e500, tlbsel, esel);
- vcpu_e500->mas0 &= ~MAS0_NV(~0);
- vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
- vcpu_e500->mas1 = gtlbe->mas1;
- vcpu_e500->mas2 = gtlbe->mas2;
- vcpu_e500->mas7_3 = gtlbe->mas7_3;
+ vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
+ vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
+ vcpu->arch.shared->mas1 = gtlbe->mas1;
+ vcpu->arch.shared->mas2 = gtlbe->mas2;
+ vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
return EMULATE_DONE;
}
@@ -860,8 +861,8 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- int as = !!get_cur_sas(vcpu_e500);
- unsigned int pid = get_cur_spid(vcpu_e500);
+ int as = !!get_cur_sas(vcpu);
+ unsigned int pid = get_cur_spid(vcpu);
int esel, tlbsel;
struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
gva_t ea;
@@ -879,26 +880,30 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
if (gtlbe) {
esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;
- vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
+ vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
- vcpu_e500->mas1 = gtlbe->mas1;
- vcpu_e500->mas2 = gtlbe->mas2;
- vcpu_e500->mas7_3 = gtlbe->mas7_3;
+ vcpu->arch.shared->mas1 = gtlbe->mas1;
+ vcpu->arch.shared->mas2 = gtlbe->mas2;
+ vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
} else {
int victim;
/* since we only have two TLBs, only lower bit is used. */
- tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
+ tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
- vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
+ vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
+ | MAS0_ESEL(victim)
| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
- vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
- | (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
- | (vcpu_e500->mas4 & MAS4_TSIZED(~0));
- vcpu_e500->mas2 &= MAS2_EPN;
- vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
- vcpu_e500->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
+ vcpu->arch.shared->mas1 =
+ (vcpu->arch.shared->mas6 & MAS6_SPID0)
+ | (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
+ | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
+ vcpu->arch.shared->mas2 &= MAS2_EPN;
+ vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
+ MAS2_ATTRIB_MASK;
+ vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
+ MAS3_U2 | MAS3_U3;
}
kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
@@ -929,19 +934,19 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
struct kvm_book3e_206_tlb_entry *gtlbe;
int tlbsel, esel;
- tlbsel = get_tlb_tlbsel(vcpu_e500);
- esel = get_tlb_esel(vcpu_e500, tlbsel);
+ tlbsel = get_tlb_tlbsel(vcpu);
+ esel = get_tlb_esel(vcpu, tlbsel);
gtlbe = get_entry(vcpu_e500, tlbsel, esel);
if (get_tlb_v(gtlbe))
inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
- gtlbe->mas1 = vcpu_e500->mas1;
- gtlbe->mas2 = vcpu_e500->mas2;
- gtlbe->mas7_3 = vcpu_e500->mas7_3;
+ gtlbe->mas1 = vcpu->arch.shared->mas1;
+ gtlbe->mas2 = vcpu->arch.shared->mas2;
+ gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
- trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
+ trace_kvm_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, gtlbe->mas2,
(u32)gtlbe->mas7_3, (u32)(gtlbe->mas7_3 >> 32));
/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 2c296407e759..5c6d2d7bf058 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -121,38 +121,33 @@ static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
return !!(vcpu->arch.shared->msr & MSR_PR);
}
-static inline unsigned int get_cur_spid(
- const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
{
- return (vcpu_e500->mas6 >> 16) & 0xff;
+ return (vcpu->arch.shared->mas6 >> 16) & 0xff;
}
-static inline unsigned int get_cur_sas(
- const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
{
- return vcpu_e500->mas6 & 0x1;
+ return vcpu->arch.shared->mas6 & 0x1;
}
-static inline unsigned int get_tlb_tlbsel(
- const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
{
/*
* Manual says that tlbsel has 2 bits wide.
* Since we only have two TLBs, only lower bit is used.
*/
- return (vcpu_e500->mas0 >> 28) & 0x1;
+ return (vcpu->arch.shared->mas0 >> 28) & 0x1;
}
-static inline unsigned int get_tlb_nv_bit(
- const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
{
- return vcpu_e500->mas0 & 0xfff;
+ return vcpu->arch.shared->mas0 & 0xfff;
}
-static inline unsigned int get_tlb_esel_bit(
- const struct kvmppc_vcpu_e500 *vcpu_e500)
+static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
{
- return (vcpu_e500->mas0 >> 16) & 0xfff;
+ return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
}
static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index b6df56dd93ba..bda052e2264b 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -162,7 +162,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case OP_TRAP_64:
kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
- kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
+ kvmppc_core_queue_program(vcpu,
+ vcpu->arch.shared->esr | ESR_PTR);
#endif
advance = 0;
break;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 7411bdd8ff6f..d02e4c84e213 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -66,7 +66,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
vcpu->arch.magic_page_pa = param1;
vcpu->arch.magic_page_ea = param2;
- r2 = KVM_MAGIC_FEAT_SR;
+ r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
r = HC_EV_SUCCESS;
break;
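
[Editor's note, not part of the patch] The last hunk advertises the new KVM_MAGIC_FEAT_MAS0_TO_SPRG7 bit in the feature bitmap returned from the KVM_HC_FEATURES hypercall, so a guest can detect whether the host keeps MAS0-7, ESR, PIR and SPRG4-7 in the shared area. A minimal, hedged guest-side check (assuming the existing powerpc kvm_arch_para_features() helper) might look like:

	/* Hedged sketch: gate use of the new shared-area fields on the feature bit. */
	#include <asm/kvm_para.h>

	static bool shared_sprgs_available(void)
	{
		/* kvm_arch_para_features() returns the bitmap handed back in r2 above */
		return kvm_arch_para_features() & KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
	}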