-rw-r--r--   arch/i386/kernel/kprobes.c      3
-rw-r--r--   arch/ia64/kernel/kprobes.c     22
-rw-r--r--   arch/ppc64/kernel/kprobes.c    11
-rw-r--r--   arch/x86_64/kernel/kprobes.c    3
-rw-r--r--   include/asm-ia64/kprobes.h      1
-rw-r--r--   include/asm-ppc64/kprobes.h     3
-rw-r--r--   kernel/kprobes.c               22
7 files changed, 55 insertions(+), 10 deletions(-)
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index e5cec32018a5..6345b430b105 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -177,7 +177,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
Disarm the probe we just hit, and ignore it. */
p = get_kprobe(addr);
if (p) {
- if (kprobe_status == KPROBE_HIT_SS) {
+ if (kprobe_status == KPROBE_HIT_SS &&
+ *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
regs->eflags &= ~TF_MASK;
regs->eflags |= kprobe_saved_eflags;
unlock_kprobes();
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 4b1bd539ec47..471086b808a4 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -95,6 +95,17 @@ static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
p->ainsn.inst_flag = 0;
p->ainsn.target_br_reg = 0;
+ /* Check for a Break instruction:
+ * bits 37:40 (major opcode), bits 27:32 (X6) and
+ * bits 32:35 (X3) must all be zero
+ */
+ if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF)) ) {
+ /* is a break instruction */
+ p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
+ return;
+ }
+
if (bundle_encoding[template][slot] == B) {
switch (major_opcode) {
case INDIRECT_CALL_OPCODE:
@@ -542,8 +553,11 @@ static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
unsigned long slot = (unsigned long)p->addr & 0xf;
- /* Update instruction pointer (IIP) and slot number (IPSR.ri) */
- regs->cr_iip = bundle_addr & ~0xFULL;
+ /* single step inline if break instruction */
+ if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
+ regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
+ else
+ regs->cr_iip = bundle_addr & ~0xFULL;
if (slot > 2)
slot = 0;
@@ -599,7 +613,9 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
- if (kprobe_status == KPROBE_HIT_SS) {
+ if ( (kprobe_status == KPROBE_HIT_SS) &&
+ (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
+ ia64_psr(regs)->ss = 0;
unlock_kprobes();
goto no_kprobe;
}
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index 591e4b67b5a5..7e80d49c589a 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -102,7 +102,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
regs->msr |= MSR_SE;
/* single step inline if it is a trap variant */
- if (IS_TW(insn) || IS_TD(insn) || IS_TWI(insn) || IS_TDI(insn))
+ if (is_trap(insn))
regs->nip = (unsigned long)p->addr;
else
regs->nip = (unsigned long)p->ainsn.insn;
@@ -152,7 +152,9 @@ static inline int kprobe_handler(struct pt_regs *regs)
Disarm the probe we just hit, and ignore it. */
p = get_kprobe(addr);
if (p) {
- if (kprobe_status == KPROBE_HIT_SS) {
+ kprobe_opcode_t insn = *p->ainsn.insn;
+ if (kprobe_status == KPROBE_HIT_SS &&
+ is_trap(insn)) {
regs->msr &= ~MSR_SE;
regs->msr |= kprobe_saved_msr;
unlock_kprobes();
@@ -192,8 +194,7 @@ static inline int kprobe_handler(struct pt_regs *regs)
* trap variant, it could belong to someone else
*/
kprobe_opcode_t cur_insn = *addr;
- if (IS_TW(cur_insn) || IS_TD(cur_insn) ||
- IS_TWI(cur_insn) || IS_TDI(cur_insn))
+ if (is_trap(cur_insn))
goto no_kprobe;
/*
* The breakpoint instruction was removed right
@@ -403,7 +404,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
default:
break;
}
- preempt_enable();
+ preempt_enable_no_resched();
return ret;
}
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 2d7658fbbb28..df08c43276a0 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -311,7 +311,8 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
Disarm the probe we just hit, and ignore it. */
p = get_kprobe(addr);
if (p) {
- if (kprobe_status == KPROBE_HIT_SS) {
+ if (kprobe_status == KPROBE_HIT_SS &&
+ *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
regs->eflags &= ~TF_MASK;
regs->eflags |= kprobe_saved_rflags;
unlock_kprobes();
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index bf36a32e37e4..573a3574a24f 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -92,6 +92,7 @@ struct arch_specific_insn {
kprobe_opcode_t insn;
#define INST_FLAG_FIX_RELATIVE_IP_ADDR 1
#define INST_FLAG_FIX_BRANCH_REG 2
+ #define INST_FLAG_BREAK_INST 4
unsigned long inst_flag;
unsigned short target_br_reg;
};
diff --git a/include/asm-ppc64/kprobes.h b/include/asm-ppc64/kprobes.h
index 0802919c3235..d9129d2b038e 100644
--- a/include/asm-ppc64/kprobes.h
+++ b/include/asm-ppc64/kprobes.h
@@ -42,6 +42,9 @@ typedef unsigned int kprobe_opcode_t;
#define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)((func_descr_t *)pentry)
+#define is_trap(instr) (IS_TW(instr) || IS_TD(instr) || \
+ IS_TWI(instr) || IS_TDI(instr))
+
#define ARCH_SUPPORTS_KRETPROBES
void kretprobe_trampoline(void);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3b7653f2e7ae..f3ea492ab44d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -155,14 +155,36 @@ void __kprobes free_insn_slot(kprobe_opcode_t *slot)
/* Locks kprobe: irqs must be disabled */
void __kprobes lock_kprobes(void)
{
+ unsigned long flags = 0;
+
+ /* Prevent a local interrupt from arriving after we take kprobe_lock
+ * but before we update kprobe_cpu; a kprobe hit in that window (a
+ * kprobe in an ISR while another is active in task context) would
+ * deadlock on kprobe_lock.
+ */
+ local_irq_save(flags);
+
spin_lock(&kprobe_lock);
kprobe_cpu = smp_processor_id();
+
+ local_irq_restore(flags);
}
void __kprobes unlock_kprobes(void)
{
+ unsigned long flags = 0;
+
+ /* Prevent a local interrupt from arriving after we reset kprobe_cpu
+ * but before we release kprobe_lock; a kprobe hit in that window (a
+ * kprobe in an ISR while another is active in task context) would
+ * deadlock on kprobe_lock.
+ */
+ local_irq_save(flags);
+
kprobe_cpu = NR_CPUS;
spin_unlock(&kprobe_lock);
+
+ local_irq_restore(flags);
}
/* You have to be holding the kprobe_lock */