From fe6ba88b251aa76a94be2cb441d2e6b7c623b989 Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Tue, 16 Jul 2019 16:27:01 -0700
Subject: arch: replace _BITUL() in kernel-space headers with BIT()

Now that BIT() can be used from assembly code, we can safely replace
_BITUL() with the equivalent BIT().  UAPI headers are still required to
use _BITUL(), but there is no longer any reason to use it in kernel
headers.  BIT() is shorter.

Link: http://lkml.kernel.org/r/20190609153941.17249-2-yamada.masahiro@socionext.com
Signed-off-by: Masahiro Yamada
Cc: Heiko Carstens
Cc: Vasily Gorbik
Cc: Christian Borntraeger
Cc: Vineet Gupta
Cc: Catalin Marinas
Cc: Will Deacon
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/s390/include/asm/ctl_reg.h | 42 ++++++++++++++++++-------------------
 arch/s390/include/asm/nmi.h | 20 +++++++++---------
 arch/s390/include/asm/processor.h | 20 +++++++++---------
 arch/s390/include/asm/ptrace.h | 10 ++++-----
 arch/s390/include/asm/setup.h | 40 +++++++++++++++++------------------
 arch/s390/include/asm/thread_info.h | 34 +++++++++++++++---------------
 6 files changed, 83 insertions(+), 83 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index 0cf6b53587db..60f907516335 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -8,27 +8,27 @@
 #ifndef __ASM_CTL_REG_H
 #define __ASM_CTL_REG_H
 
-#include
-
-#define CR0_CLOCK_COMPARATOR_SIGN _BITUL(63 - 10)
-#define CR0_EMERGENCY_SIGNAL_SUBMASK _BITUL(63 - 49)
-#define CR0_EXTERNAL_CALL_SUBMASK _BITUL(63 - 50)
-#define CR0_CLOCK_COMPARATOR_SUBMASK _BITUL(63 - 52)
-#define CR0_CPU_TIMER_SUBMASK _BITUL(63 - 53)
-#define CR0_SERVICE_SIGNAL_SUBMASK _BITUL(63 - 54)
-#define CR0_UNUSED_56 _BITUL(63 - 56)
-#define CR0_INTERRUPT_KEY_SUBMASK _BITUL(63 - 57)
-#define CR0_MEASUREMENT_ALERT_SUBMASK _BITUL(63 - 58)
-
-#define CR2_GUARDED_STORAGE _BITUL(63 - 59)
-
-#define CR14_UNUSED_32 _BITUL(63 - 32)
-#define CR14_UNUSED_33 _BITUL(63 - 33)
-#define CR14_CHANNEL_REPORT_SUBMASK _BITUL(63 - 35)
-#define CR14_RECOVERY_SUBMASK _BITUL(63 - 36)
-#define CR14_DEGRADATION_SUBMASK _BITUL(63 - 37)
-#define CR14_EXTERNAL_DAMAGE_SUBMASK _BITUL(63 - 38)
-#define CR14_WARNING_SUBMASK _BITUL(63 - 39)
+#include
+
+#define CR0_CLOCK_COMPARATOR_SIGN BIT(63 - 10)
+#define CR0_EMERGENCY_SIGNAL_SUBMASK BIT(63 - 49)
+#define CR0_EXTERNAL_CALL_SUBMASK BIT(63 - 50)
+#define CR0_CLOCK_COMPARATOR_SUBMASK BIT(63 - 52)
+#define CR0_CPU_TIMER_SUBMASK BIT(63 - 53)
+#define CR0_SERVICE_SIGNAL_SUBMASK BIT(63 - 54)
+#define CR0_UNUSED_56 BIT(63 - 56)
+#define CR0_INTERRUPT_KEY_SUBMASK BIT(63 - 57)
+#define CR0_MEASUREMENT_ALERT_SUBMASK BIT(63 - 58)
+
+#define CR2_GUARDED_STORAGE BIT(63 - 59)
+
+#define CR14_UNUSED_32 BIT(63 - 32)
+#define CR14_UNUSED_33 BIT(63 - 33)
+#define CR14_CHANNEL_REPORT_SUBMASK BIT(63 - 35)
+#define CR14_RECOVERY_SUBMASK BIT(63 - 36)
+#define CR14_DEGRADATION_SUBMASK BIT(63 - 37)
+#define CR14_EXTERNAL_DAMAGE_SUBMASK BIT(63 - 38)
+#define CR14_WARNING_SUBMASK BIT(63 - 39)
 
 #ifndef __ASSEMBLY__
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index 1e5dc4537bf2..b160da8fa14b 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -12,7 +12,7 @@
 #ifndef _ASM_S390_NMI_H
 #define _ASM_S390_NMI_H
 
-#include
+#include
 #include
 
 #define MCIC_SUBCLASS_MASK (1ULL<<63 | 1ULL<<62 | 1ULL<<61 | \
@@ -20,15 +20,15 @@
                            1ULL<<55 | 1ULL<<54 | 1ULL<<53 | \
                            1ULL<<52 | 1ULL<<47 | 1ULL<<46 | \
                            1ULL<<45 | 1ULL<<44)
-#define MCCK_CODE_SYSTEM_DAMAGE _BITUL(63)
-#define MCCK_CODE_EXT_DAMAGE _BITUL(63 - 5)
-#define MCCK_CODE_CP _BITUL(63 - 9)
-#define MCCK_CODE_CPU_TIMER_VALID _BITUL(63 - 46)
-#define MCCK_CODE_PSW_MWP_VALID _BITUL(63 - 20)
-#define MCCK_CODE_PSW_IA_VALID _BITUL(63 - 23)
-#define MCCK_CODE_CR_VALID _BITUL(63 - 29)
-#define MCCK_CODE_GS_VALID _BITUL(63 - 36)
-#define MCCK_CODE_FC_VALID _BITUL(63 - 43)
+#define MCCK_CODE_SYSTEM_DAMAGE BIT(63)
+#define MCCK_CODE_EXT_DAMAGE BIT(63 - 5)
+#define MCCK_CODE_CP BIT(63 - 9)
+#define MCCK_CODE_CPU_TIMER_VALID BIT(63 - 46)
+#define MCCK_CODE_PSW_MWP_VALID BIT(63 - 20)
+#define MCCK_CODE_PSW_IA_VALID BIT(63 - 23)
+#define MCCK_CODE_CR_VALID BIT(63 - 29)
+#define MCCK_CODE_GS_VALID BIT(63 - 36)
+#define MCCK_CODE_FC_VALID BIT(63 - 43)
 
 #ifndef __ASSEMBLY__
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 14883b1562e0..d56c519bc696 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -12,7 +12,7 @@
 #ifndef __ASM_S390_PROCESSOR_H
 #define __ASM_S390_PROCESSOR_H
 
-#include
+#include
 
 #define CIF_MCCK_PENDING 0 /* machine check handling is pending */
 #define CIF_ASCE_PRIMARY 1 /* primary asce needs fixup / uaccess */
@@ -24,15 +24,15 @@
 #define CIF_MCCK_GUEST 7 /* machine check happening in guest */
 #define CIF_DEDICATED_CPU 8 /* this CPU is dedicated */
 
-#define _CIF_MCCK_PENDING _BITUL(CIF_MCCK_PENDING)
-#define _CIF_ASCE_PRIMARY _BITUL(CIF_ASCE_PRIMARY)
-#define _CIF_ASCE_SECONDARY _BITUL(CIF_ASCE_SECONDARY)
-#define _CIF_NOHZ_DELAY _BITUL(CIF_NOHZ_DELAY)
-#define _CIF_FPU _BITUL(CIF_FPU)
-#define _CIF_IGNORE_IRQ _BITUL(CIF_IGNORE_IRQ)
-#define _CIF_ENABLED_WAIT _BITUL(CIF_ENABLED_WAIT)
-#define _CIF_MCCK_GUEST _BITUL(CIF_MCCK_GUEST)
-#define _CIF_DEDICATED_CPU _BITUL(CIF_DEDICATED_CPU)
+#define _CIF_MCCK_PENDING BIT(CIF_MCCK_PENDING)
+#define _CIF_ASCE_PRIMARY BIT(CIF_ASCE_PRIMARY)
+#define _CIF_ASCE_SECONDARY BIT(CIF_ASCE_SECONDARY)
+#define _CIF_NOHZ_DELAY BIT(CIF_NOHZ_DELAY)
+#define _CIF_FPU BIT(CIF_FPU)
+#define _CIF_IGNORE_IRQ BIT(CIF_IGNORE_IRQ)
+#define _CIF_ENABLED_WAIT BIT(CIF_ENABLED_WAIT)
+#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
+#define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
 
 #ifndef __ASSEMBLY__
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 6f70d81c40f2..f009a13afe71 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -7,7 +7,7 @@
 #ifndef _S390_PTRACE_H
 #define _S390_PTRACE_H
 
-#include
+#include
 #include
 
 #define PIF_SYSCALL 0 /* inside a system call */
@@ -15,10 +15,10 @@
 #define PIF_SYSCALL_RESTART 2 /* restart the current system call */
 #define PIF_GUEST_FAULT 3 /* indicates program check in sie64a */
 
-#define _PIF_SYSCALL _BITUL(PIF_SYSCALL)
-#define _PIF_PER_TRAP _BITUL(PIF_PER_TRAP)
-#define _PIF_SYSCALL_RESTART _BITUL(PIF_SYSCALL_RESTART)
-#define _PIF_GUEST_FAULT _BITUL(PIF_GUEST_FAULT)
+#define _PIF_SYSCALL BIT(PIF_SYSCALL)
+#define _PIF_PER_TRAP BIT(PIF_PER_TRAP)
+#define _PIF_SYSCALL_RESTART BIT(PIF_SYSCALL_RESTART)
+#define _PIF_GUEST_FAULT BIT(PIF_GUEST_FAULT)
 
 #ifndef __ASSEMBLY__
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 925889d360c1..82deb8fc8319 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -6,7 +6,7 @@
 #ifndef _ASM_S390_SETUP_H
 #define _ASM_S390_SETUP_H
 
-#include
+#include
 #include
 
 #define EP_OFFSET 0x10008
@@ -21,25 +21,25 @@
  * Machine features detected in early.c
  */
 
-#define MACHINE_FLAG_VM _BITUL(0)
-#define MACHINE_FLAG_KVM _BITUL(1)
-#define MACHINE_FLAG_LPAR _BITUL(2)
-#define MACHINE_FLAG_DIAG9C _BITUL(3)
-#define MACHINE_FLAG_ESOP _BITUL(4)
-#define MACHINE_FLAG_IDTE _BITUL(5)
-#define MACHINE_FLAG_DIAG44 _BITUL(6)
-#define MACHINE_FLAG_EDAT1 _BITUL(7)
-#define MACHINE_FLAG_EDAT2 _BITUL(8)
-#define MACHINE_FLAG_TOPOLOGY _BITUL(10)
-#define MACHINE_FLAG_TE _BITUL(11)
-#define MACHINE_FLAG_TLB_LC _BITUL(12)
-#define MACHINE_FLAG_VX _BITUL(13)
-#define MACHINE_FLAG_TLB_GUEST _BITUL(14)
-#define MACHINE_FLAG_NX _BITUL(15)
-#define MACHINE_FLAG_GS _BITUL(16)
-#define MACHINE_FLAG_SCC _BITUL(17)
-
-#define LPP_MAGIC _BITUL(31)
+#define MACHINE_FLAG_VM BIT(0)
+#define MACHINE_FLAG_KVM BIT(1)
+#define MACHINE_FLAG_LPAR BIT(2)
+#define MACHINE_FLAG_DIAG9C BIT(3)
+#define MACHINE_FLAG_ESOP BIT(4)
+#define MACHINE_FLAG_IDTE BIT(5)
+#define MACHINE_FLAG_DIAG44 BIT(6)
+#define MACHINE_FLAG_EDAT1 BIT(7)
+#define MACHINE_FLAG_EDAT2 BIT(8)
+#define MACHINE_FLAG_TOPOLOGY BIT(10)
+#define MACHINE_FLAG_TE BIT(11)
+#define MACHINE_FLAG_TLB_LC BIT(12)
+#define MACHINE_FLAG_VX BIT(13)
+#define MACHINE_FLAG_TLB_GUEST BIT(14)
+#define MACHINE_FLAG_NX BIT(15)
+#define MACHINE_FLAG_GS BIT(16)
+#define MACHINE_FLAG_SCC BIT(17)
+
+#define LPP_MAGIC BIT(31)
 #define LPP_PID_MASK _AC(0xffffffff, UL)
 
 /* Offsets to entry points in kernel/head.S */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index ce4e17c9aad6..e582fbe59e20 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -8,7 +8,7 @@
 #ifndef _ASM_THREAD_INFO_H
 #define _ASM_THREAD_INFO_H
 
-#include
+#include
 
 /*
  * General size of kernel stacks
@@ -82,21 +82,21 @@ void arch_setup_new_exec(void);
 #define TIF_SECCOMP 26 /* secure computing */
 #define TIF_SYSCALL_TRACEPOINT 27 /* syscall tracepoint instrumentation */
 
-#define _TIF_NOTIFY_RESUME _BITUL(TIF_NOTIFY_RESUME)
-#define _TIF_SIGPENDING _BITUL(TIF_SIGPENDING)
-#define _TIF_NEED_RESCHED _BITUL(TIF_NEED_RESCHED)
-#define _TIF_UPROBE _BITUL(TIF_UPROBE)
-#define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE)
-#define _TIF_PATCH_PENDING _BITUL(TIF_PATCH_PENDING)
-#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
-#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
-
-#define _TIF_31BIT _BITUL(TIF_31BIT)
-#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
-
-#define _TIF_SYSCALL_TRACE _BITUL(TIF_SYSCALL_TRACE)
-#define _TIF_SYSCALL_AUDIT _BITUL(TIF_SYSCALL_AUDIT)
-#define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
-#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
+#define _TIF_NOTIFY_RESUME BIT(TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING BIT(TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED BIT(TIF_NEED_RESCHED)
+#define _TIF_UPROBE BIT(TIF_UPROBE)
+#define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE)
+#define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING)
+#define _TIF_ISOLATE_BP BIT(TIF_ISOLATE_BP)
+#define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST)
+
+#define _TIF_31BIT BIT(TIF_31BIT)
+#define _TIF_SINGLE_STEP BIT(TIF_SINGLE_STEP)
+
+#define _TIF_SYSCALL_TRACE BIT(TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT BIT(TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP BIT(TIF_SECCOMP)
+#define _TIF_SYSCALL_TRACEPOINT BIT(TIF_SYSCALL_TRACEPOINT)
 
 #endif /* _ASM_THREAD_INFO_H */
-- cgit v1.2.3-55-g7522
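As a quick sanity check on the conversion above: for any bit number n, _BITUL(n) and BIT(n) expand to the same unsigned long mask, so swapping one for the other cannot change any of the resulting constants. The small standalone program below illustrates this for the (63 - n) numbering used by the s390 control-register definitions. The two macro definitions in it are simplified stand-ins written for this example (and it assumes a 64-bit unsigned long, as on s390); they are not the kernel's own definitions from uapi/linux/const.h and linux/bits.h.

/* bitul_vs_bit.c - hypothetical standalone demo; build with: cc -Wall -o bitul_vs_bit bitul_vs_bit.c */
#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel macros, for illustration only. */
#define _BITUL(x) (1UL << (x))  /* roughly what uapi/linux/const.h provides */
#define BIT(nr)   (1UL << (nr)) /* roughly what linux/bits.h provides */

int main(void)
{
        /* Both forms yield the same mask, so the header conversion is a no-op. */
        assert(_BITUL(63 - 10) == BIT(63 - 10)); /* CR0_CLOCK_COMPARATOR_SIGN */
        assert(_BITUL(63 - 54) == BIT(63 - 54)); /* CR0_SERVICE_SIGNAL_SUBMASK */
        printf("BIT(63 - 10) = 0x%016lx\n", BIT(63 - 10));
        return 0;
}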
From b98cca444d287a63dd96df04af7fb9793567599e Mon Sep 17 00:00:00 2001
From: Anshuman Khandual
Date: Tue, 16 Jul 2019 16:28:00 -0700
Subject: mm, kprobes: generalize and rename notify_page_fault() as kprobe_page_fault()

Architectures which support kprobes have very similar boilerplate around
calling kprobe_fault_handler().  Use a helper function in kprobes.h to
unify them, based on the x86 code.

This changes the behaviour for other architectures when preemption is
enabled.  Previously, they would have disabled preemption while calling
the kprobe handler.  However, kprobe handlers run with preemption
disabled, so if the fault had been due to a kprobe, preemption would
already be off; finding ourselves preemptible therefore means the fault
was not due to a kprobe handler and we can simply return failure.

This behaviour was introduced in commit a980c0ef9f6d ("x86/kprobes:
Refactor kprobes_fault() like kprobe_exceptions_notify()")

[anshuman.khandual@arm.com: export kprobe_fault_handler()]
Link: http://lkml.kernel.org/r/1561133358-8876-1-git-send-email-anshuman.khandual@arm.com
Link: http://lkml.kernel.org/r/1560420444-25737-1-git-send-email-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual
Reviewed-by: Dave Hansen
Cc: Michal Hocko
Cc: Matthew Wilcox
Cc: Mark Rutland
Cc: Christophe Leroy
Cc: Stephen Rothwell
Cc: Andrey Konovalov
Cc: Michael Ellerman
Cc: Paul Mackerras
Cc: Russell King
Cc: Catalin Marinas
Cc: Will Deacon
Cc: Tony Luck
Cc: Fenghua Yu
Cc: Martin Schwidefsky
Cc: Heiko Carstens
Cc: Yoshinori Sato
Cc: "David S. Miller"
Cc: Thomas Gleixner
Cc: Peter Zijlstra
Cc: Ingo Molnar
Cc: Andy Lutomirski
Cc: Vineet Gupta
Cc: James Hogan
Cc: Paul Burton
Cc: Ralf Baechle
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 arch/arm/mm/fault.c | 24 +-----------------------
 arch/arm64/mm/fault.c | 24 +-----------------------
 arch/ia64/mm/fault.c | 24 +-----------------------
 arch/mips/include/asm/kprobes.h | 1 +
 arch/mips/kernel/kprobes.c | 2 +-
 arch/powerpc/mm/fault.c | 23 ++---------------------
 arch/s390/mm/fault.c | 16 +---------------
 arch/sh/mm/fault.c | 18 ++----------------
 arch/sparc/mm/fault_64.c | 16 +---------------
 arch/x86/mm/fault.c | 21 ++-------------------
 include/linux/kprobes.h | 19 +++++++++++++++++++
 11 files changed, 32 insertions(+), 156 deletions(-)

(limited to 'arch/s390')

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0e417233dad7..890eeaac3cbb 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -27,28 +27,6 @@
 
 #ifdef CONFIG_MMU
 
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
-{
-        int ret = 0;
-
-        if (!user_mode(regs)) {
-                /* kprobe_running() needs smp_processor_id() */
-                preempt_disable();
-                if (kprobe_running() && kprobe_fault_handler(regs, fsr))
-                        ret = 1;
-                preempt_enable();
-        }
-
-        return ret;
-}
-#else
-static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
-{
-        return 0;
-}
-#endif
-
 /*
  * This is useful to dump out the page tables associated with
  * 'addr' in mm 'mm'.
@@ -265,7 +243,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         vm_fault_t fault;
         unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-        if (notify_page_fault(regs, fsr))
+        if (kprobe_page_fault(regs, fsr))
                 return 0;
 
         tsk = current;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index c8c61b1eb479..9568c116ac7f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -59,28 +59,6 @@ static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
         return debug_fault_info + DBG_ESR_EVT(esr);
 }
 
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
-{
-        int ret = 0;
-
-        /* kprobe_running() needs smp_processor_id() */
-        if (!user_mode(regs)) {
-                preempt_disable();
-                if (kprobe_running() && kprobe_fault_handler(regs, esr))
-                        ret = 1;
-                preempt_enable();
-        }
-
-        return ret;
-}
-#else
-static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
-{
-        return 0;
-}
-#endif
-
 static void data_abort_decode(unsigned int esr)
 {
         pr_alert("Data abort info:\n");
@@ -434,7 +412,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
         unsigned long vm_flags = VM_READ | VM_WRITE;
         unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-        if (notify_page_fault(regs, esr))
+        if (kprobe_page_fault(regs, esr))
                 return 0;
 
         /*
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 3c3a283d3172..c2f299fe9e04 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -21,28 +21,6 @@
 
 extern int die(char *, struct pt_regs *, long);
 
-#ifdef CONFIG_KPROBES
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
-        int ret = 0;
-
-        if (!user_mode(regs)) {
-                /* kprobe_running() needs smp_processor_id() */
-                preempt_disable();
-                if (kprobe_running() && kprobe_fault_handler(regs, trap))
-                        ret = 1;
-                preempt_enable();
-        }
-
-        return ret;
-}
-#else
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
-        return 0;
-}
-#endif
-
 /*
  * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
  * (inside region 5, on ia64) and that page is present.
@@ -116,7 +94,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
         /*
          * This is to handle the kprobes on user space access instructions
          */
-        if (notify_page_fault(regs, TRAP_BRKPT))
+        if (kprobe_page_fault(regs, TRAP_BRKPT))
                 return;
 
         if (user_mode(regs))
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index 3cf8e4d5fa28..68b1e5d458cf 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -41,6 +41,7 @@ do { \
 #define kretprobe_blacklist_size 0
 
 void arch_remove_kprobe(struct kprobe *p);
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 81ba1d3c367c..6cfae2411c04 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -398,7 +398,7 @@ out:
         return 1;
 }
 
-static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
         struct kprobe *cur = kprobe_running();
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d989592b6fc8..8432c281de92 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -42,26 +42,6 @@
 #include
 #include
 
-static inline bool notify_page_fault(struct pt_regs *regs)
-{
-        bool ret = false;
-
-#ifdef CONFIG_KPROBES
-        /* kprobe_running() needs smp_processor_id() */
-        if (!user_mode(regs)) {
-                preempt_disable();
-                if (kprobe_running() && kprobe_fault_handler(regs, 11))
-                        ret = true;
-                preempt_enable();
-        }
-#endif /* CONFIG_KPROBES */
-
-        if (unlikely(debugger_fault_handler(regs)))
-                ret = true;
-
-        return ret;
-}
-
 /*
  * Check whether the instruction inst is a store using
  * an update addressing form which will update r1.
@@ -461,8 +441,9 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
         int is_write = page_fault_is_write(error_code);
         vm_fault_t fault, major = 0;
         bool must_retry = false;
+        bool kprobe_fault = kprobe_page_fault(regs, 11);
 
-        if (notify_page_fault(regs))
+        if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
                 return 0;
 
         if (unlikely(page_fault_is_bad(error_code))) {
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 0ba174f779da..63507662828f 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -67,20 +67,6 @@
 }
 early_initcall(fault_init);
 
-static inline int notify_page_fault(struct pt_regs *regs)
-{
-        int ret = 0;
-
-        /* kprobe_running() needs smp_processor_id() */
-        if (kprobes_built_in() && !user_mode(regs)) {
-                preempt_disable();
-                if (kprobe_running() && kprobe_fault_handler(regs, 14))
-                        ret = 1;
-                preempt_enable();
-        }
-        return ret;
-}
-
 /*
  * Find out which address space caused the exception.
 */
@@ -412,7 +398,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
          */
         clear_pt_regs_flag(regs, PIF_PER_TRAP);
 
-        if (notify_page_fault(regs))
+        if (kprobe_page_fault(regs, 14))
                 return 0;
 
         mm = tsk->mm;
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 3093bc372138..5f51456f4fc7 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -24,20 +24,6 @@
 #include
 #include
 
-static inline int notify_page_fault(struct pt_regs *regs, int trap)
-{
-        int ret = 0;
-
-        if (kprobes_built_in() && !user_mode(regs)) {
-                preempt_disable();
-                if (kprobe_running() && kprobe_fault_handler(regs, trap))
-                        ret = 1;
-                preempt_enable();
-        }
-
-        return ret;
-}
-
 static void force_sig_info_fault(int si_signo, int si_code, unsigned long address)
 {
@@ -412,14 +398,14 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
         if (unlikely(fault_in_kernel_space(address))) {
                 if (vmalloc_fault(address) >= 0)
                         return;
-                if (notify_page_fault(regs, vec))
+                if (kprobe_page_fault(regs, vec))
                         return;
 
                 bad_area_nosemaphore(regs, error_code, address);
                 return;
         }
 
-        if (unlikely(notify_page_fault(regs, vec)))
+        if (unlikely(kprobe_page_fault(regs, vec)))
                 return;
 
         /* Only enable interrupts if they were on before the fault */
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 83fda4d9c3b2..2371fb6b97e4 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -38,20 +38,6 @@
 
 int show_unhandled_signals = 1;
 
-static inline __kprobes int notify_page_fault(struct pt_regs *regs)
-{
-        int ret = 0;
-
-        /* kprobe_running() needs smp_processor_id() */
-        if (kprobes_built_in() && !user_mode(regs)) {
-                preempt_disable();
-                if (kprobe_running() && kprobe_fault_handler(regs, 0))
-                        ret = 1;
-                preempt_enable();
-        }
-        return ret;
-}
-
 static void __kprobes unhandled_fault(unsigned long address, struct task_struct *tsk, struct pt_regs *regs)
@@ -285,7 +271,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 
         fault_code = get_thread_fault_code();
 
-        if (notify_page_fault(regs))
+        if (kprobe_page_fault(regs, 0))
                 goto exit_exception;
 
         si_code = SEGV_MAPERR;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 794f364cb882..d1634c59ed56 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -46,23 +46,6 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr)
         return 0;
 }
 
-static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
-{
-        if (!kprobes_built_in())
-                return 0;
-        if (user_mode(regs))
-                return 0;
-        /*
-         * To be potentially processing a kprobe fault and to be allowed to call
-         * kprobe_running(), we have to be non-preemptible.
-         */
-        if (preemptible())
-                return 0;
-        if (!kprobe_running())
-                return 0;
-        return kprobe_fault_handler(regs, X86_TRAP_PF);
-}
-
 /*
  * Prefetch quirks:
  *
@@ -1282,7 +1265,7 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
                 return;
 
         /* kprobes don't want to hook the spurious faults: */
-        if (kprobes_fault(regs))
+        if (kprobe_page_fault(regs, X86_TRAP_PF))
                 return;
 
         /*
@@ -1313,7 +1296,7 @@ void do_user_addr_fault(struct pt_regs *regs,
         mm = tsk->mm;
 
         /* kprobes don't want to hook the spurious faults: */
-        if (unlikely(kprobes_fault(regs)))
+        if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
                 return;
 
         /*
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 443d9800ca3f..04bdaf01112c 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -458,4 +458,23 @@ static inline bool is_kprobe_optinsn_slot(unsigned long addr)
 }
 #endif
 
+/* Returns true if kprobes handled the fault */
+static nokprobe_inline bool kprobe_page_fault(struct pt_regs *regs,
+                                              unsigned int trap)
+{
+        if (!kprobes_built_in())
+                return false;
+        if (user_mode(regs))
+                return false;
+        /*
+         * To be potentially processing a kprobe fault and to be allowed
+         * to call kprobe_running(), we have to be non-preemptible.
+         */
+        if (preemptible())
+                return false;
+        if (!kprobe_running())
+                return false;
+        return kprobe_fault_handler(regs, trap);
+}
+
 #endif /* _LINUX_KPROBES_H */
-- cgit v1.2.3-55-g7522
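All of the per-architecture hunks above converge on the same calling pattern: each fault handler calls kprobe_page_fault() once, early, and bails out if kprobes claimed the fault. The sketch below models that pattern as an ordinary userspace C program so it can be compiled and run on its own. Everything in it is a stand-in: the stub predicates and the struct pt_regs are invented for the example, and only the control flow of kprobe_page_fault() mirrors the helper added to include/linux/kprobes.h in the hunk above.

/* kprobe_page_fault_demo.c - hypothetical sketch; build with: cc -Wall -o demo kprobe_page_fault_demo.c */
#include <stdbool.h>
#include <stdio.h>

struct pt_regs { bool user; };          /* stand-in, not the kernel structure */

/* Stub predicates standing in for the kernel ones used by the helper. */
static bool kprobes_built_in(void) { return true; }
static bool user_mode(struct pt_regs *regs) { return regs->user; }
static bool preemptible(void) { return false; }  /* kprobe handlers run non-preemptible */
static bool kprobe_running(void) { return true; }

static bool kprobe_fault_handler(struct pt_regs *regs, unsigned int trap)
{
        (void)regs;
        printf("kprobe fixup for trap %u\n", trap);
        return true;
}

/* Same control flow as the kprobe_page_fault() added to include/linux/kprobes.h. */
static bool kprobe_page_fault(struct pt_regs *regs, unsigned int trap)
{
        if (!kprobes_built_in())
                return false;
        if (user_mode(regs))
                return false;
        /* A fault taken while preemptible cannot have come from a kprobe handler. */
        if (preemptible())
                return false;
        if (!kprobe_running())
                return false;
        return kprobe_fault_handler(regs, trap);
}

/* What each converted do_page_fault() now does near its entry point. */
static int do_page_fault(struct pt_regs *regs, unsigned int trap)
{
        if (kprobe_page_fault(regs, trap))
                return 0;               /* fault was consumed by a kprobe handler */
        printf("normal page fault handling for trap %u\n", trap);
        return 0;
}

int main(void)
{
        struct pt_regs regs = { .user = false };

        do_page_fault(&regs, 14);       /* the trap number s390 passes in the hunk above */
        return 0;
}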