author	Michael Neuling	2014-01-08 11:25:32 +0100
committer	Alexander Graf	2014-01-27 16:01:20 +0100
commit	7b490411c37f7ab7965cbdfe5e3ec28eadb6db5b (patch)
tree	04f0462eeb7c6d07b158726fbfa323d9dc68227d /arch
parent	powerpc/Kconfig: Make TM select VSX and VMX (diff)
KVM: PPC: Book3S HV: Add new state for transactional memory
Add new state for transactional memory (TM) to kvm_vcpu_arch. Also add the asm-offset bits that are going to be required.

This also moves the existing TFHAR, TFIAR and TEXASR SPRs into a CONFIG_PPC_TRANSACTIONAL_MEM section. This requires some code changes to ensure we still compile with CONFIG_PPC_TRANSACTIONAL_MEM=N. Many of the #ifdefs added here are removed in a later patch when the bulk of the TM code is added.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
[agraf: fix merge conflict]
Signed-off-by: Alexander Graf <agraf@suse.de>
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	24
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c	19
-rw-r--r--	arch/powerpc/kvm/book3s_hv.c	4
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rmhandlers.S	75
4 files changed, 114 insertions, 8 deletions
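For readers unfamiliar with the asm-offsets mechanism this patch extends, the stand-alone C sketch below illustrates the idea: struct field offsets are computed with offsetof() and emitted as named constants so hand-written assembly (book3s_hv_rmhandlers.S) can address vcpu->arch fields as, e.g., VCPU_TFHAR(r4). The struct, the printf-based DEFINE and the locally defined CONFIG macro are simplified stand-ins for illustration only, not the kernel's actual asm-offsets machinery (which emits the constants via inline-asm markers during the build).

```c
/*
 * Stand-alone sketch (not kernel code) of the asm-offsets pattern used in
 * this patch. Field names mirror kvm_vcpu_arch, but the struct, DEFINE()
 * and CONFIG macro below are illustrative stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

#define CONFIG_PPC_TRANSACTIONAL_MEM 1	/* comment out to mimic the =N build */

struct vcpu_arch_sketch {
	unsigned long fscr;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	unsigned long long tfhar;
	unsigned long long texasr;
	unsigned long long tfiar;
	unsigned long gpr_tm[32];
#endif
	unsigned long ebbhr;
};

/*
 * asm-offsets.c effectively emits "#define VCPU_TFHAR <offset>" lines that
 * the real rmhandlers.S then uses as "ld r5, VCPU_TFHAR(r4)".
 */
#define DEFINE(sym, val) printf("#define %s %zu\n", #sym, (size_t)(val))

int main(void)
{
	DEFINE(VCPU_FSCR, offsetof(struct vcpu_arch_sketch, fscr));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	DEFINE(VCPU_TFHAR, offsetof(struct vcpu_arch_sketch, tfhar));
	DEFINE(VCPU_GPR_TM, offsetof(struct vcpu_arch_sketch, gpr_tm));
#endif
	DEFINE(VCPU_EBBHR, offsetof(struct vcpu_arch_sketch, ebbhr));
	return 0;
}
```

Toggling the CONFIG macro off reproduces the build constraint the commit message mentions: every DEFINE() and every assembly reference to a TM field must sit under the same #ifdef, or the CONFIG_PPC_TRANSACTIONAL_MEM=N build breaks.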
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 7726a3bc8ff0..1eaea2dea174 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -475,9 +475,6 @@ struct kvm_vcpu_arch {
ulong ppr;
ulong pspb;
ulong fscr;
- ulong tfhar;
- ulong tfiar;
- ulong texasr;
ulong ebbhr;
ulong ebbrr;
ulong bescr;
@@ -526,6 +523,27 @@ struct kvm_vcpu_arch {
u64 siar;
u64 sdar;
u64 sier;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u64 tfhar;
+ u64 texasr;
+ u64 tfiar;
+
+ u32 cr_tm;
+ u64 lr_tm;
+ u64 ctr_tm;
+ u64 amr_tm;
+ u64 ppr_tm;
+ u64 dscr_tm;
+ u64 tar_tm;
+
+ ulong gpr_tm[32];
+
+ struct thread_fp_state fp_tm;
+
+ struct thread_vr_state vr_tm;
+ u32 vrsave_tm; /* also USPRG0 */
+
+#endif
#ifdef CONFIG_KVM_EXIT_TIMING
struct mutex exit_timing_lock;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index a60a2fdae5bd..687f2ebf0cce 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -521,9 +521,6 @@ int main(void)
DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
- DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
- DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
- DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
@@ -545,6 +542,22 @@ int main(void)
DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
+ DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
+ DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
+ DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm));
+ DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr));
+ DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
+ DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
+ DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+ DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
+ DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
+ DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
+ DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm));
+ DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm));
+ DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm));
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3195e4f8a2ed..f4a4c5c82fb2 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -875,6 +875,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_IAMR:
*val = get_reg_val(id, vcpu->arch.iamr);
break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case KVM_REG_PPC_TFHAR:
*val = get_reg_val(id, vcpu->arch.tfhar);
break;
@@ -884,6 +885,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_TEXASR:
*val = get_reg_val(id, vcpu->arch.texasr);
break;
+#endif
case KVM_REG_PPC_FSCR:
*val = get_reg_val(id, vcpu->arch.fscr);
break;
@@ -1033,6 +1035,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_IAMR:
vcpu->arch.iamr = set_reg_val(id, *val);
break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case KVM_REG_PPC_TFHAR:
vcpu->arch.tfhar = set_reg_val(id, *val);
break;
@@ -1042,6 +1045,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_TEXASR:
vcpu->arch.texasr = set_reg_val(id, *val);
break;
+#endif
case KVM_REG_PPC_FSCR:
vcpu->arch.fscr = set_reg_val(id, *val);
break;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index ecb76357c9d0..dfa144cb199b 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -701,13 +701,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
ld r6, VCPU_VTB(r4)
mtspr SPRN_IC, r5
mtspr SPRN_VTB, r6
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
ld r5, VCPU_TFHAR(r4)
ld r6, VCPU_TFIAR(r4)
ld r7, VCPU_TEXASR(r4)
- ld r8, VCPU_EBBHR(r4)
mtspr SPRN_TFHAR, r5
mtspr SPRN_TFIAR, r6
mtspr SPRN_TEXASR, r7
+#endif
+ ld r8, VCPU_EBBHR(r4)
mtspr SPRN_EBBHR, r8
ld r5, VCPU_EBBRR(r4)
ld r6, VCPU_BESCR(r4)
@@ -1118,13 +1120,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
std r5, VCPU_IC(r9)
std r6, VCPU_VTB(r9)
std r7, VCPU_TAR(r9)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
mfspr r5, SPRN_TFHAR
mfspr r6, SPRN_TFIAR
mfspr r7, SPRN_TEXASR
- mfspr r8, SPRN_EBBHR
std r5, VCPU_TFHAR(r9)
std r6, VCPU_TFIAR(r9)
std r7, VCPU_TEXASR(r9)
+#endif
+ mfspr r8, SPRN_EBBHR
std r8, VCPU_EBBHR(r9)
mfspr r5, SPRN_EBBRR
mfspr r6, SPRN_BESCR
@@ -1497,6 +1501,73 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1: addi r8,r8,16
.endr
+ /* Save DEC */
+ mfspr r5,SPRN_DEC
+ mftb r6
+ extsw r5,r5
+ add r5,r5,r6
+ std r5,VCPU_DEC_EXPIRES(r9)
+
+BEGIN_FTR_SECTION
+ b 8f
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+ mfmsr r8
+ li r0, 1
+ rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r8
+
+ /* Save POWER8-specific registers */
+ mfspr r5, SPRN_IAMR
+ mfspr r6, SPRN_PSPB
+ mfspr r7, SPRN_FSCR
+ std r5, VCPU_IAMR(r9)
+ stw r6, VCPU_PSPB(r9)
+ std r7, VCPU_FSCR(r9)
+ mfspr r5, SPRN_IC
+ mfspr r6, SPRN_VTB
+ mfspr r7, SPRN_TAR
+ std r5, VCPU_IC(r9)
+ std r6, VCPU_VTB(r9)
+ std r7, VCPU_TAR(r9)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ mfspr r5, SPRN_TFHAR
+ mfspr r6, SPRN_TFIAR
+ mfspr r7, SPRN_TEXASR
+ std r5, VCPU_TFHAR(r9)
+ std r6, VCPU_TFIAR(r9)
+ std r7, VCPU_TEXASR(r9)
+#endif
+ mfspr r8, SPRN_EBBHR
+ std r8, VCPU_EBBHR(r9)
+ mfspr r5, SPRN_EBBRR
+ mfspr r6, SPRN_BESCR
+ mfspr r7, SPRN_CSIGR
+ mfspr r8, SPRN_TACR
+ std r5, VCPU_EBBRR(r9)
+ std r6, VCPU_BESCR(r9)
+ std r7, VCPU_CSIGR(r9)
+ std r8, VCPU_TACR(r9)
+ mfspr r5, SPRN_TCSCR
+ mfspr r6, SPRN_ACOP
+ mfspr r7, SPRN_PID
+ mfspr r8, SPRN_WORT
+ std r5, VCPU_TCSCR(r9)
+ std r6, VCPU_ACOP(r9)
+ stw r7, VCPU_GUEST_PID(r9)
+ std r8, VCPU_WORT(r9)
+8:
+
+ /* Save and reset AMR and UAMOR before turning on the MMU */
+BEGIN_FTR_SECTION
+ mfspr r5,SPRN_AMR
+ mfspr r6,SPRN_UAMOR
+ std r5,VCPU_AMR(r9)
+ std r6,VCPU_UAMOR(r9)
+ li r6,0
+ mtspr SPRN_AMR,r6
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+
/* Unset guest mode */
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
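
As a usage note on the new ONE_REG guards in book3s_hv.c: the sketch below shows how userspace might fetch the TM SPRs through KVM_GET_ONE_REG. It is a hypothetical fragment, not part of this patch; it assumes a POWER8 powerpc host where linux/kvm.h provides the KVM_REG_PPC_TFHAR/TFIAR/TEXASR ids, and a vcpu file descriptor already created via the usual /dev/kvm sequence (not shown).

```c
/*
 * Hypothetical userspace sketch: reading the TM SPRs exposed by the
 * ONE_REG cases above. vcpu_fd is assumed to be a KVM vcpu fd obtained
 * elsewhere. On a kernel built with CONFIG_PPC_TRANSACTIONAL_MEM=N the
 * ioctls are expected to fail, since those cases are compiled out.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_tm_spr(int vcpu_fd, uint64_t id, uint64_t *out)
{
	struct kvm_one_reg reg = {
		.id   = id,
		.addr = (uintptr_t)out,
	};

	/* For HV guests this lands in kvmppc_get_one_reg_hv() shown above */
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) {
		fprintf(stderr, "GET_ONE_REG 0x%llx: %s\n",
			(unsigned long long)id, strerror(errno));
		return -1;
	}
	return 0;
}

int read_tm_state(int vcpu_fd)
{
	uint64_t tfhar, tfiar, texasr;

	if (get_tm_spr(vcpu_fd, KVM_REG_PPC_TFHAR, &tfhar) ||
	    get_tm_spr(vcpu_fd, KVM_REG_PPC_TFIAR, &tfiar) ||
	    get_tm_spr(vcpu_fd, KVM_REG_PPC_TEXASR, &texasr))
		return -1;

	printf("TFHAR=0x%016llx TFIAR=0x%016llx TEXASR=0x%016llx\n",
	       (unsigned long long)tfhar, (unsigned long long)tfiar,
	       (unsigned long long)texasr);
	return 0;
}
```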