author | Paul Mackerras | 2017-08-30 06:12:39 +0200
---|---|---
committer | Michael Ellerman | 2017-09-01 08:42:41 +0200
commit | a53d5182e24c22986ad0e99e52f8fe343ee7d7ac (patch)
tree | 83ef99241cf94c354d014bc343c3a8e3b493cf57 /arch/powerpc/lib/sstep.c
parent | powerpc: Handle opposite-endian processes in emulation code (diff)
powerpc: Separate out load/store emulation into its own function
This moves the parts of emulate_step() that deal with emulating
load and store instructions into a new function called
emulate_loadstore(). This is to make it possible to reuse this
code in the alignment handler.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
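The split gives callers a two-step interface: analyse_instr() decodes the instruction into a struct instruction_op, and emulate_loadstore() performs the access. Below is a minimal sketch of how an alignment handler might chain the two, assuming both functions are declared in <asm/sstep.h>; the name fix_alignment_sketch() and the NIP advance are illustrative assumptions, not taken from this commit.

```c
#include <linux/errno.h>
#include <asm/sstep.h>

/* Hypothetical alignment-handler path reusing emulate_loadstore(). */
static int fix_alignment_sketch(struct pt_regs *regs, unsigned int instr)
{
	struct instruction_op op;
	int r;

	r = analyse_instr(&op, regs, instr);
	if (r != 0)
		return -EINVAL;	/* decode failed, or nothing left to emulate */

	if (!OP_IS_LOAD_STORE(op.type & INSTR_TYPE_MASK))
		return -EINVAL;	/* only loads/stores take alignment interrupts */

	/* 0 on success; -EFAULT, -EACCES or -EINVAL otherwise (see the diff). */
	r = emulate_loadstore(regs, &op);
	if (r == 0)
		regs->nip += 4;	/* step past the emulated instruction */
	return r;
}
```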
Diffstat (limited to 'arch/powerpc/lib/sstep.c')
-rw-r--r-- | arch/powerpc/lib/sstep.c | 258
1 file changed, 145 insertions, 113 deletions
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 5d8284938898..423815599063 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -2667,76 +2667,35 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
 }
 
 /*
- * Emulate instructions that cause a transfer of control,
- * loads and stores, and a few other instructions.
- * Returns 1 if the step was emulated, 0 if not,
- * or -1 if the instruction is one that should not be stepped,
- * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ * Emulate a previously-analysed load or store instruction.
+ * Return values are:
+ * 0 = instruction emulated successfully
+ * -EFAULT = address out of range or access faulted (regs->dar
+ *	     contains the faulting address)
+ * -EACCES = misaligned access, instruction requires alignment
+ * -EINVAL = unknown operation in *op
  */
-int emulate_step(struct pt_regs *regs, unsigned int instr)
+int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
 {
-	struct instruction_op op;
-	int r, err, size, type;
-	unsigned long val;
-	unsigned int cr;
+	int err, size, type;
 	int i, rd, nb;
+	unsigned int cr;
+	unsigned long val;
 	unsigned long ea;
 	bool cross_endian;
 
-	r = analyse_instr(&op, regs, instr);
-	if (r < 0)
-		return r;
-	if (r > 0) {
-		emulate_update_regs(regs, &op);
-		return 1;
-	}
-
 	err = 0;
-	size = GETSIZE(op.type);
-	type = op.type & INSTR_TYPE_MASK;
+	size = GETSIZE(op->type);
+	type = op->type & INSTR_TYPE_MASK;
 	cross_endian = (regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
-
-	ea = op.ea;
-	if (OP_IS_LOAD_STORE(type) || type == CACHEOP)
-		ea = truncate_if_32bit(regs->msr, op.ea);
+	ea = truncate_if_32bit(regs->msr, op->ea);
 
 	switch (type) {
-	case CACHEOP:
-		if (!address_ok(regs, ea, 8))
-			return 0;
-		switch (op.type & CACHEOP_MASK) {
-		case DCBST:
-			__cacheop_user_asmx(ea, err, "dcbst");
-			break;
-		case DCBF:
-			__cacheop_user_asmx(ea, err, "dcbf");
-			break;
-		case DCBTST:
-			if (op.reg == 0)
-				prefetchw((void *) ea);
-			break;
-		case DCBT:
-			if (op.reg == 0)
-				prefetch((void *) ea);
-			break;
-		case ICBI:
-			__cacheop_user_asmx(ea, err, "icbi");
-			break;
-		case DCBZ:
-			err = emulate_dcbz(ea, regs);
-			break;
-		}
-		if (err) {
-			regs->dar = ea;
-			return 0;
-		}
-		goto instr_done;
-
 	case LARX:
 		if (ea & (size - 1))
-			break;		/* can't handle misaligned */
+			return -EACCES;		/* can't handle misaligned */
 		if (!address_ok(regs, ea, size))
-			return 0;
+			return -EFAULT;
 		err = 0;
 		switch (size) {
 #ifdef __powerpc64__
@@ -2755,49 +2714,49 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 			__get_user_asmx(val, ea, err, "ldarx");
 			break;
 		case 16:
-			err = do_lqarx(ea, &regs->gpr[op.reg]);
+			err = do_lqarx(ea, &regs->gpr[op->reg]);
 			break;
 #endif
 		default:
-			return 0;
+			return -EINVAL;
 		}
 		if (err) {
 			regs->dar = ea;
-			return 0;
+			break;
 		}
 		if (size < 16)
-			regs->gpr[op.reg] = val;
-		goto ldst_done;
+			regs->gpr[op->reg] = val;
+		break;
 
 	case STCX:
 		if (ea & (size - 1))
-			break;		/* can't handle misaligned */
+			return -EACCES;		/* can't handle misaligned */
 		if (!address_ok(regs, ea, size))
-			return 0;
+			return -EFAULT;
 		err = 0;
 		switch (size) {
 #ifdef __powerpc64__
 		case 1:
-			__put_user_asmx(op.val, ea, err, "stbcx.", cr);
+			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
 			break;
 		case 2:
-			__put_user_asmx(op.val, ea, err, "stbcx.", cr);
+			__put_user_asmx(op->val, ea, err, "stbcx.", cr);
 			break;
 #endif
 		case 4:
-			__put_user_asmx(op.val, ea, err, "stwcx.", cr);
+			__put_user_asmx(op->val, ea, err, "stwcx.", cr);
 			break;
 #ifdef __powerpc64__
 		case 8:
-			__put_user_asmx(op.val, ea, err, "stdcx.", cr);
+			__put_user_asmx(op->val, ea, err, "stdcx.", cr);
 			break;
 		case 16:
-			err = do_stqcx(ea, regs->gpr[op.reg],
-				       regs->gpr[op.reg + 1], &cr);
+			err = do_stqcx(ea, regs->gpr[op->reg],
+				       regs->gpr[op->reg + 1], &cr);
 			break;
 #endif
 		default:
-			return 0;
+			return -EINVAL;
 		}
 		if (!err)
 			regs->ccr = (regs->ccr & 0x0fffffff) |
@@ -2805,23 +2764,23 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 				((regs->xer >> 3) & 0x10000000);
 		else
 			regs->dar = ea;
-		goto ldst_done;
+		break;
 
 	case LOAD:
 #ifdef __powerpc64__
 		if (size == 16) {
-			err = emulate_lq(regs, ea, op.reg, cross_endian);
-			goto ldst_done;
+			err = emulate_lq(regs, ea, op->reg, cross_endian);
+			break;
 		}
 #endif
-		err = read_mem(&regs->gpr[op.reg], ea, size, regs);
+		err = read_mem(&regs->gpr[op->reg], ea, size, regs);
 		if (!err) {
-			if (op.type & SIGNEXT)
-				do_signext(&regs->gpr[op.reg], size);
-			if ((op.type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
-				do_byterev(&regs->gpr[op.reg], size);
+			if (op->type & SIGNEXT)
+				do_signext(&regs->gpr[op->reg], size);
+			if ((op->type & BYTEREV) == (cross_endian ? 0 : BYTEREV))
+				do_byterev(&regs->gpr[op->reg], size);
 		}
-		goto ldst_done;
+		break;
 
 #ifdef CONFIG_PPC_FPU
 	case LOAD_FP:
@@ -2833,15 +2792,15 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 		 */
 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
 			return 0;
-		err = do_fp_load(op.reg, ea, size, regs, cross_endian);
-		goto ldst_done;
+		err = do_fp_load(op->reg, ea, size, regs, cross_endian);
+		break;
 #endif
 #ifdef CONFIG_ALTIVEC
 	case LOAD_VMX:
 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
 			return 0;
-		err = do_vec_load(op.reg, ea, size, regs, cross_endian);
-		goto ldst_done;
+		err = do_vec_load(op->reg, ea, size, regs, cross_endian);
+		break;
 #endif
#ifdef CONFIG_VSX
 	case LOAD_VSX: {
@@ -2851,18 +2810,18 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
 		 * when the target of the instruction is a vector register.
 		 */
-		if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
+		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
 			msrbit = MSR_VEC;
 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
 			return 0;
-		err = do_vsx_load(&op, ea, regs, cross_endian);
-		goto ldst_done;
+		err = do_vsx_load(op, ea, regs, cross_endian);
+		break;
 	}
 #endif
 	case LOAD_MULTI:
 		if (!address_ok(regs, ea, size))
 			return -EFAULT;
-		rd = op.reg;
+		rd = op->reg;
 		for (i = 0; i < size; i += 4) {
 			unsigned int v32 = 0;
 
@@ -2871,47 +2830,47 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 				nb = 4;
 			err = copy_mem_in((u8 *) &v32, ea, nb, regs);
 			if (err)
-				return 0;
+				break;
 			if (unlikely(cross_endian))
 				v32 = byterev_4(v32);
 			regs->gpr[rd] = v32;
 			ea += 4;
 			++rd;
 		}
-		goto instr_done;
+		break;
 
 	case STORE:
 #ifdef __powerpc64__
 		if (size == 16) {
-			err = emulate_stq(regs, ea, op.reg, cross_endian);
-			goto ldst_done;
+			err = emulate_stq(regs, ea, op->reg, cross_endian);
+			break;
 		}
 #endif
-		if ((op.type & UPDATE) && size == sizeof(long) &&
-		    op.reg == 1 && op.update_reg == 1 &&
+		if ((op->type & UPDATE) && size == sizeof(long) &&
+		    op->reg == 1 && op->update_reg == 1 &&
 		    !(regs->msr & MSR_PR) &&
 		    ea >= regs->gpr[1] - STACK_INT_FRAME_SIZE) {
 			err = handle_stack_update(ea, regs);
-			goto ldst_done;
+			break;
 		}
 		if (unlikely(cross_endian))
-			do_byterev(&op.val, size);
-		err = write_mem(op.val, ea, size, regs);
-		goto ldst_done;
+			do_byterev(&op->val, size);
+		err = write_mem(op->val, ea, size, regs);
+		break;
 
 #ifdef CONFIG_PPC_FPU
 	case STORE_FP:
 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_FP))
 			return 0;
-		err = do_fp_store(op.reg, ea, size, regs, cross_endian);
-		goto ldst_done;
+		err = do_fp_store(op->reg, ea, size, regs, cross_endian);
+		break;
 #endif
 #ifdef CONFIG_ALTIVEC
 	case STORE_VMX:
 		if (!(regs->msr & MSR_PR) && !(regs->msr & MSR_VEC))
 			return 0;
-		err = do_vec_store(op.reg, ea, size, regs, cross_endian);
-		goto ldst_done;
+		err = do_vec_store(op->reg, ea, size, regs, cross_endian);
+		break;
 #endif
 #ifdef CONFIG_VSX
 	case STORE_VSX: {
@@ -2921,18 +2880,18 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 		 * Some VSX instructions check the MSR_VEC bit rather than MSR_VSX
 		 * when the target of the instruction is a vector register.
 		 */
-		if (op.reg >= 32 && (op.vsx_flags & VSX_CHECK_VEC))
+		if (op->reg >= 32 && (op->vsx_flags & VSX_CHECK_VEC))
 			msrbit = MSR_VEC;
 		if (!(regs->msr & MSR_PR) && !(regs->msr & msrbit))
 			return 0;
-		err = do_vsx_store(&op, ea, regs, cross_endian);
-		goto ldst_done;
+		err = do_vsx_store(op, ea, regs, cross_endian);
+		break;
 	}
 #endif
 	case STORE_MULTI:
 		if (!address_ok(regs, ea, size))
 			return -EFAULT;
-		rd = op.reg;
+		rd = op->reg;
 		for (i = 0; i < size; i += 4) {
 			unsigned int v32 = regs->gpr[rd];
 
@@ -2943,10 +2902,89 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 				v32 = byterev_4(v32);
 			err = copy_mem_out((u8 *) &v32, ea, nb, regs);
 			if (err)
-				return 0;
+				break;
 			ea += 4;
 			++rd;
 		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (err)
+		return err;
+
+	if (op->type & UPDATE)
+		regs->gpr[op->update_reg] = op->ea;
+
+	return 0;
+}
+NOKPROBE_SYMBOL(emulate_loadstore);
+
+/*
+ * Emulate instructions that cause a transfer of control,
+ * loads and stores, and a few other instructions.
+ * Returns 1 if the step was emulated, 0 if not,
+ * or -1 if the instruction is one that should not be stepped,
+ * such as an rfid, or a mtmsrd that would clear MSR_RI.
+ */
+int emulate_step(struct pt_regs *regs, unsigned int instr)
+{
+	struct instruction_op op;
+	int r, err, type;
+	unsigned long val;
+	unsigned long ea;
+
+	r = analyse_instr(&op, regs, instr);
+	if (r < 0)
+		return r;
+	if (r > 0) {
+		emulate_update_regs(regs, &op);
+		return 1;
+	}
+
+	err = 0;
+	type = op.type & INSTR_TYPE_MASK;
+
+	if (OP_IS_LOAD_STORE(type)) {
+		err = emulate_loadstore(regs, &op);
+		if (err)
+			return 0;
+		goto instr_done;
+	}
+
+	switch (type) {
+	case CACHEOP:
+		ea = truncate_if_32bit(regs->msr, op.ea);
+		if (!address_ok(regs, ea, 8))
+			return 0;
+		switch (op.type & CACHEOP_MASK) {
+		case DCBST:
+			__cacheop_user_asmx(ea, err, "dcbst");
+			break;
+		case DCBF:
+			__cacheop_user_asmx(ea, err, "dcbf");
+			break;
+		case DCBTST:
+			if (op.reg == 0)
+				prefetchw((void *) ea);
+			break;
+		case DCBT:
+			if (op.reg == 0)
+				prefetch((void *) ea);
+			break;
+		case ICBI:
+			__cacheop_user_asmx(ea, err, "icbi");
+			break;
+		case DCBZ:
+			err = emulate_dcbz(ea, regs);
+			break;
+		}
+		if (err) {
+			regs->dar = ea;
+			return 0;
+		}
 		goto instr_done;
 
 	case MFMSR:
@@ -2989,12 +3027,6 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
 	}
 	return 0;
 
- ldst_done:
-	if (err)
-		return 0;
-	if (op.type & UPDATE)
-		regs->gpr[op.update_reg] = op.ea;
-
  instr_done:
 	regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
 	return 1;
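The new return codes let a caller distinguish a bad address from a genuine alignment problem. A sketch of the mapping a handler might apply is below; the signal choices are illustrative assumptions, not something this commit mandates.

```c
#include <linux/errno.h>
#include <linux/signal.h>

/* Illustrative mapping of emulate_loadstore() results for a caller. */
static int loadstore_disposition(int err)
{
	switch (err) {
	case 0:
		return 0;	/* emulated; caller advances regs->nip */
	case -EFAULT:
		return SIGSEGV;	/* bad address; regs->dar holds the EA */
	case -EACCES:
		return SIGBUS;	/* misaligned and not emulatable */
	default:
		return SIGILL;	/* -EINVAL: operation not recognised */
	}
}
```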