author     Michael Neuling           2012-06-25 15:33:10 +0200
committer  Benjamin Herrenschmidt    2012-07-10 11:17:55 +0200
commit     c75df6f96c59beed8632e3aced5fb4faabaa6c5b (patch)
tree       b21ce9394028ec4520a71d87391dad8ab29edd67 /arch/powerpc/kernel
parent     powerpc: Modify macro ready for %r0 register change (diff)
powerpc: Fix usage of register macros getting ready for %r0 change
Anything that uses a constructed instruction (i.e. from ppc-opcode.h) needs to use the new R0 macro, as %r0 is not going to work.

Also convert usages of macros where we are just determining an offset (usually for a load/store), like:

    std r14,STK_REG(r14)(r1)

Here STK_REG(r14) can't be used, as %r14 doesn't work inside the STK_REG macro, which is just calculating an offset.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
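As a rough illustration of what the conversion relies on: the new uppercase RN macros are bare numbers, so they survive preprocessor and assembler arithmetic, while the lowercase rN names are being repointed at the assembler-level %rN form. A minimal sketch of the assumed shape of these defines (the real ones are added by the earlier patches in this series, in ppc_asm.h/ppc-opcode.h, and should be checked there):

    /* Sketch only -- assumed shape of the defines this series introduces. */
    #define R0          0                      /* bare number: OK inside constructed instructions */
    #define R4          4
    #define R14         14
    #define r14         %r14                   /* assembler register name: no good for arithmetic */
    #define STK_REG(i)  (112 + ((i) - 14)*8)   /* stack-save offset: needs a bare number */

With defines of that shape, PPC_ERATWE(R4,R4,3) can OR 4 into the instruction's register fields and STK_REG(R14) still folds to a constant offset, while plain mnemonics such as std keep taking r14, i.e. %r14.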
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/cpu_setup_a2.S  | 6
-rw-r--r--  arch/powerpc/kernel/fpu.S           | 4
-rw-r--r--  arch/powerpc/kernel/kvm.c           | 2
-rw-r--r--  arch/powerpc/kernel/misc_64.S       | 4
4 files changed, 8 insertions, 8 deletions
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
index ebc62f42a237..95675a7181dc 100644
--- a/arch/powerpc/kernel/cpu_setup_a2.S
+++ b/arch/powerpc/kernel/cpu_setup_a2.S
@@ -100,19 +100,19 @@ _icswx_skip_guest:
lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
mtspr SPRN_MMUCR0, r4
li r4,A2_IERAT_SIZE-1
- PPC_ERATWE(r4,r4,3)
+ PPC_ERATWE(R4,R4,3)
/* Now set the D-ERAT watermark to 31 */
lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
mtspr SPRN_MMUCR0, r4
li r4,A2_DERAT_SIZE-1
- PPC_ERATWE(r4,r4,3)
+ PPC_ERATWE(R4,R4,3)
/* And invalidate the beast just in case. That won't get rid of
* a bolted entry though it will be in LRU and so will go away eventually
* but let's not bother for now
*/
- PPC_ERATILX(0,0,0)
+ PPC_ERATILX(0,R0,R0)
1:
blr
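PPC_ERATWE and PPC_ERATILX above are the constructed-instruction case from the commit message: ppc-opcode.h emits the instruction as a raw .long and shifts the register argument into its field, so only a bare number works. A hedged sketch of the shape of those macros (PPC_INST_ERATWE and the __PPC_RS/__PPC_RA/__PPC_WS field helpers are spelled from memory and should be verified against ppc-opcode.h):

    /* Sketch, following the usual ppc-opcode.h pattern for constructed instructions. */
    #define __PPC_RS(s)            (((s) & 0x1f) << 21)   /* RS register field */
    #define __PPC_RA(a)            (((a) & 0x1f) << 16)   /* RA register field */
    #define PPC_ERATWE(s, a, w)    stringify_in_c(.long PPC_INST_ERATWE | \
                                           __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))

Once the lowercase names expand to %r4, an expression like ((%r4) & 0x1f) can no longer be evaluated, which is why these call sites switch to R4 (and to R0 for the eratilx register operands).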
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index de369558bf0a..71c1c73bc65f 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -106,7 +106,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
lfd fr0,THREAD_FPSCR(r5)
MTFSF_L(fr0)
- REST_32FPVSRS(0, r4, r5)
+ REST_32FPVSRS(0, R4, R5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
fromreal(r4)
@@ -140,7 +140,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
addi r3,r3,THREAD /* want THREAD of task */
PPC_LL r5,PT_REGS(r3)
PPC_LCMPI 0,r5,0
- SAVE_32FPVSRS(0, r4 ,r3)
+ SAVE_32FPVSRS(0, R4 ,R3)
mffs fr0
stfd fr0,THREAD_FPSCR(r3)
beq 1f
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 62bdf2389669..02c167db6ba0 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -302,7 +302,7 @@ static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
if (imm_one) {
p[kvm_emulate_wrtee_reg_offs] =
- KVM_INST_LI | __PPC_RT(30) | MSR_EE;
+ KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
} else {
/* Make clobbered registers work too */
switch (get_rt(rt)) {
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 616921ef1439..6ba08bc91b21 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -314,7 +314,7 @@ _GLOBAL(real_205_readb)
mtmsrd r0
sync
isync
- LBZCIX(r3,0,r3)
+ LBZCIX(R3,0,R3)
isync
mtmsrd r7
sync
@@ -329,7 +329,7 @@ _GLOBAL(real_205_writeb)
mtmsrd r0
sync
isync
- STBCIX(r3,0,r4)
+ STBCIX(R3,0,R4)
isync
mtmsrd r7
sync
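LBZCIX and STBCIX in misc_64.S follow the same pattern: cache-inhibited load/store forms that older assemblers don't know, hand-encoded as a .long with the register numbers shifted into place, roughly as below (a sketch; OP_LBZCIX and its value are assumed here and should be checked against the ISA and the kernel's actual definition):

    /* Sketch: hand-encoded lbzcix rt,ra,rb; opcode word assumed, register args must be bare numbers. */
    #define OP_LBZCIX           0x7c0006aa
    #define LBZCIX(rt, ra, rb)  .long (OP_LBZCIX | ((rt) << 21) | ((ra) << 16) | ((rb) << 11))

Again, the shifts only work once R3/R4 expand to plain integers, hence the conversion at these call sites.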