From 28e192d6e5711477c177918d0661f31836ef72a5 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 3 Aug 2010 17:08:09 +0100 Subject: ARM: 6287/1: ftrace: clean up mcount assembly indentation The mcount implementation currently uses a different indentation style from the rest of the file (and the rest of the ARM assembly in the kernel). Clean it up. Acked-by: Uwe Kleine-König Acked-by: Frederic Weisbecker Signed-off-by: Rabin Vincent Signed-off-by: Russell King --- arch/arm/kernel/entry-common.S | 88 +++++++++++++++++++++--------------------- 1 file changed, 44 insertions(+), 44 deletions(-) (limited to 'arch/arm/kernel/entry-common.S') diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 2c1db77d7848..0b042bdb11bf 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -94,73 +94,73 @@ ENDPROC(ret_from_fork) #ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(mcount) - stmdb sp!, {r0-r3, lr} - mov r0, lr - sub r0, r0, #MCOUNT_INSN_SIZE + stmdb sp!, {r0-r3, lr} + mov r0, lr + sub r0, r0, #MCOUNT_INSN_SIZE .globl mcount_call mcount_call: - bl ftrace_stub - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} + bl ftrace_stub + ldr lr, [fp, #-4] @ restore lr + ldmia sp!, {r0-r3, pc} ENTRY(ftrace_caller) - stmdb sp!, {r0-r3, lr} - ldr r1, [fp, #-4] - mov r0, lr - sub r0, r0, #MCOUNT_INSN_SIZE + stmdb sp!, {r0-r3, lr} + ldr r1, [fp, #-4] + mov r0, lr + sub r0, r0, #MCOUNT_INSN_SIZE .globl ftrace_call ftrace_call: - bl ftrace_stub - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} + bl ftrace_stub + ldr lr, [fp, #-4] @ restore lr + ldmia sp!, {r0-r3, pc} #else ENTRY(__gnu_mcount_nc) - stmdb sp!, {r0-r3, lr} - ldr r0, =ftrace_trace_function - ldr r2, [r0] - adr r0, ftrace_stub - cmp r0, r2 - bne gnu_trace - ldmia sp!, {r0-r3, ip, lr} - mov pc, ip + stmdb sp!, {r0-r3, lr} + ldr r0, =ftrace_trace_function + ldr r2, [r0] + adr r0, ftrace_stub + cmp r0, r2 + bne gnu_trace + ldmia sp!, {r0-r3, ip, lr} + mov pc, ip gnu_trace: - ldr r1, [sp, #20] @ lr of instrumented routine - mov r0, lr - sub r0, r0, #MCOUNT_INSN_SIZE - mov lr, pc - mov pc, r2 - ldmia sp!, {r0-r3, ip, lr} - mov pc, ip + ldr r1, [sp, #20] @ lr of instrumented routine + mov r0, lr + sub r0, r0, #MCOUNT_INSN_SIZE + mov lr, pc + mov pc, r2 + ldmia sp!, {r0-r3, ip, lr} + mov pc, ip ENTRY(mcount) - stmdb sp!, {r0-r3, lr} - ldr r0, =ftrace_trace_function - ldr r2, [r0] - adr r0, ftrace_stub - cmp r0, r2 - bne trace - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} + stmdb sp!, {r0-r3, lr} + ldr r0, =ftrace_trace_function + ldr r2, [r0] + adr r0, ftrace_stub + cmp r0, r2 + bne trace + ldr lr, [fp, #-4] @ restore lr + ldmia sp!, {r0-r3, pc} trace: - ldr r1, [fp, #-4] @ lr of instrumented routine - mov r0, lr - sub r0, r0, #MCOUNT_INSN_SIZE - mov lr, pc - mov pc, r2 - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} + ldr r1, [fp, #-4] @ lr of instrumented routine + mov r0, lr + sub r0, r0, #MCOUNT_INSN_SIZE + mov lr, pc + mov pc, r2 + ldr lr, [fp, #-4] @ restore lr + ldmia sp!, {r0-r3, pc} #endif /* CONFIG_DYNAMIC_FTRACE */ .globl ftrace_stub ftrace_stub: - mov pc, lr + mov pc, lr #endif /* CONFIG_FUNCTION_TRACER */ -- cgit v1.2.3-55-g7522 From 686ff22812d908eaa537edd62fb1eb3d64336301 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 3 Aug 2010 17:09:40 +0100 Subject: ARM: 6288/1: ftrace: document mcount formats Add a comment describing the mcount variants and how the callsites look like. 
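Whichever call-site variant is in use, the job of the entry code in this file is the same: hand the registered tracer two addresses, the call site inside the instrumented function (what ends up in r0) and the return address into that function's caller (r1). For orientation only, a consumer of those two values might look like the sketch below. It assumes the generic two-argument register_ftrace_function() interface of this kernel generation; the my_* names are invented for the example and nothing here is part of the series.

/*
 * Illustrative sketch only: a minimal function tracer registered through
 * the generic ftrace core.  The callback arguments are exactly what the
 * ARM stubs compute: ip = lr - MCOUNT_INSN_SIZE (the call site in the
 * traced function), parent_ip = the traced function's own return address.
 */
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>

static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	pr_debug("traced %pS, called from %pS\n",
		 (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");

Under CONFIG_DYNAMIC_FTRACE, registering such a callback is also what turns the recorded call sites from nops back into calls.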
Acked-by: Frederic Weisbecker Signed-off-by: Rabin Vincent Signed-off-by: Russell King --- arch/arm/kernel/entry-common.S | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) (limited to 'arch/arm/kernel/entry-common.S') diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 0b042bdb11bf..f05a35a59694 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -92,6 +92,42 @@ ENDPROC(ret_from_fork) #define CALL(x) .long x #ifdef CONFIG_FUNCTION_TRACER +/* + * When compiling with -pg, gcc inserts a call to the mcount routine at the + * start of every function. In mcount, apart from the function's address (in + * lr), we need to get hold of the function's caller's address. + * + * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this: + * + * bl mcount + * + * These versions have the limitation that in order for the mcount routine to + * be able to determine the function's caller's address, an APCS-style frame + * pointer (which is set up with something like the code below) is required. + * + * mov ip, sp + * push {fp, ip, lr, pc} + * sub fp, ip, #4 + * + * With EABI, these frame pointers are not available unless -mapcs-frame is + * specified, and if building as Thumb-2, not even then. + * + * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount, + * with call sites like: + * + * push {lr} + * bl __gnu_mcount_nc + * + * With these compilers, frame pointers are not necessary. + * + * mcount can be thought of as a function called in the middle of a subroutine + * call. As such, it needs to be transparent for both the caller and the + * callee: the original lr needs to be restored when leaving mcount, and no + * registers should be clobbered. (In the __gnu_mcount_nc implementation, we + * clobber the ip register. This is OK because the ARM calling convention + * allows it to be clobbered in subroutines and doesn't use it to hold + * parameters.) + */ #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(mcount) stmdb sp!, {r0-r3, lr} -- cgit v1.2.3-55-g7522 From 09bfafac3e237415cc4b6adde49f9f28b3a42659 Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 10 Aug 2010 19:32:37 +0100 Subject: ARM: 6314/1: ftrace: allow build without frame pointers on ARM With a new enough GCC, ARM function tracing can be supported without the need for frame pointers. This is essential for Thumb-2 support, since frame pointers aren't available then. Acked-by: Catalin Marinas Acked-by: Steven Rostedt Signed-off-by: Rabin Vincent Signed-off-by: Russell King --- arch/arm/Kconfig.debug | 5 +++++ arch/arm/kernel/armksyms.c | 2 ++ arch/arm/kernel/entry-common.S | 14 ++++++++++++++ kernel/trace/Kconfig | 2 +- 4 files changed, 22 insertions(+), 1 deletion(-) (limited to 'arch/arm/kernel/entry-common.S') diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 91344af75f39..4dbce538fec4 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -27,6 +27,11 @@ config ARM_UNWIND the performance is not affected. Currently, this feature only works with EABI compilers. If unsure say Y. 
+config OLD_MCOUNT + bool + depends on FUNCTION_TRACER && FRAME_POINTER + default y + config DEBUG_USER bool "Verbose user fault messages" help diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c index 8214bfebfaca..e5e1e5387678 100644 --- a/arch/arm/kernel/armksyms.c +++ b/arch/arm/kernel/armksyms.c @@ -165,6 +165,8 @@ EXPORT_SYMBOL(_find_next_bit_be); #endif #ifdef CONFIG_FUNCTION_TRACER +#ifdef CONFIG_OLD_MCOUNT EXPORT_SYMBOL(mcount); +#endif EXPORT_SYMBOL(__gnu_mcount_nc); #endif diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f05a35a59694..6805a7216bf8 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -128,6 +128,13 @@ ENDPROC(ret_from_fork) * allows it to be clobbered in subroutines and doesn't use it to hold * parameters.) */ + +#ifndef CONFIG_OLD_MCOUNT +#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4)) +#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0. +#endif +#endif + #ifdef CONFIG_DYNAMIC_FTRACE ENTRY(mcount) stmdb sp!, {r0-r3, lr} @@ -173,6 +180,12 @@ gnu_trace: ldmia sp!, {r0-r3, ip, lr} mov pc, ip +#ifdef CONFIG_OLD_MCOUNT +/* + * This is under an ifdef in order to force link-time errors for people trying + * to build with !FRAME_POINTER with a GCC which doesn't use the new-style + * mcount. + */ ENTRY(mcount) stmdb sp!, {r0-r3, lr} ldr r0, =ftrace_trace_function @@ -191,6 +204,7 @@ trace: mov pc, r2 ldr lr, [fp, #-4] @ restore lr ldmia sp!, {r0-r3, pc} +#endif #endif /* CONFIG_DYNAMIC_FTRACE */ diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 538501c6ea50..6329d063b5e4 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -121,7 +121,7 @@ if FTRACE config FUNCTION_TRACER bool "Kernel Function Tracer" depends on HAVE_FUNCTION_TRACER - select FRAME_POINTER + select FRAME_POINTER if (!ARM_UNWIND) select KALLSYMS select GENERIC_TRACER select CONTEXT_SWITCH_TRACER -- cgit v1.2.3-55-g7522 From 72fa62fa5dff0e2e06491dd99c429adb137f299b Mon Sep 17 00:00:00 2001 From: Rabin Vincent Date: Tue, 10 Aug 2010 19:33:52 +0100 Subject: ARM: 6315/1: ftrace: add ENDPROC annotations When building as Thumb-2, the ".type foo, %function" annotation in ENDPROC seems to be required in order for the assembly routines to be recognized as Thumb-2 code. If the ENDPROC annotations are not present, calls to these routines are generated as BLX instead of BL. 
Acked-by: Catalin Marinas
Signed-off-by: Rabin Vincent
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-common.S | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel/entry-common.S')

diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 6805a7216bf8..c7a8c208a45c 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -146,6 +146,7 @@ mcount_call:
 	bl ftrace_stub
 	ldr lr, [fp, #-4]	@ restore lr
 	ldmia sp!, {r0-r3, pc}
+ENDPROC(mcount)

 ENTRY(ftrace_caller)
 	stmdb sp!, {r0-r3, lr}
@@ -158,6 +159,7 @@ ftrace_call:
 	bl ftrace_stub
 	ldr lr, [fp, #-4]	@ restore lr
 	ldmia sp!, {r0-r3, pc}
+ENDPROC(ftrace_caller)

 #else

@@ -179,6 +181,7 @@ gnu_trace:
 	mov pc, r2
 	ldmia sp!, {r0-r3, ip, lr}
 	mov pc, ip
+ENDPROC(__gnu_mcount_nc)

 #ifdef CONFIG_OLD_MCOUNT
 /*
@@ -204,13 +207,14 @@ trace:
 	mov pc, r2
 	ldr lr, [fp, #-4]	@ restore lr
 	ldmia sp!, {r0-r3, pc}
+ENDPROC(mcount)
 #endif

 #endif /* CONFIG_DYNAMIC_FTRACE */

-	.globl ftrace_stub
-ftrace_stub:
+ENTRY(ftrace_stub)
 	mov pc, lr
+ENDPROC(ftrace_stub)

 #endif /* CONFIG_FUNCTION_TRACER */

-- cgit v1.2.3-55-g7522

From a3ba87a614992500cf2c47e6f788e74a971ce91f Mon Sep 17 00:00:00 2001
From: Rabin Vincent
Date: Tue, 10 Aug 2010 19:37:21 +0100
Subject: ARM: 6316/1: ftrace: add Thumb-2 support

Fix the mcount routines to build and run on a kernel built with the
Thumb-2 instruction set by correcting the following errors using the
fixes suggested by Catalin Marinas:

- Problem: The following assembler errors appear at the
  "adr r0, ftrace_stub" instruction:

	entry-common.S: Assembler messages:
	entry-common.S:179: Error: invalid immediate for address calculation (value = 0x00000004)

  Fix: The errors don't occur with a non-global symbol, so use one.

- Problem: The "mov lr, pc" does not set the lsb when storing the pc
  in lr. The called function returns with "bx lr", and the mode
  changes to ARM.

  Fix: Add a label on the return address and use "adr lr, BSYM(label)".

We don't modify the old mcount because it won't be built when using
Thumb-2.

Acked-by: Catalin Marinas
Signed-off-by: Rabin Vincent
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-common.S | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'arch/arm/kernel/entry-common.S')

diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index c7a8c208a45c..f5e75de0203e 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -167,7 +167,7 @@ ENTRY(__gnu_mcount_nc)
 	stmdb sp!, {r0-r3, lr}
 	ldr r0, =ftrace_trace_function
 	ldr r2, [r0]
-	adr r0, ftrace_stub
+	adr r0, .Lftrace_stub
 	cmp r0, r2
 	bne gnu_trace
 	ldmia sp!, {r0-r3, ip, lr}
@@ -177,8 +177,9 @@ gnu_trace:
 	ldr r1, [sp, #20]	@ lr of instrumented routine
 	mov r0, lr
 	sub r0, r0, #MCOUNT_INSN_SIZE
-	mov lr, pc
+	adr lr, BSYM(1f)
 	mov pc, r2
+1:
 	ldmia sp!, {r0-r3, ip, lr}
 	mov pc, ip
 ENDPROC(__gnu_mcount_nc)
@@ -213,6 +214,7 @@ ENDPROC(mcount)
 #endif /* CONFIG_DYNAMIC_FTRACE */

 ENTRY(ftrace_stub)
+.Lftrace_stub:
 	mov pc, lr
 ENDPROC(ftrace_stub)

-- cgit v1.2.3-55-g7522

From 3b6c223b1b97ad60bbb0f4efda57d649414ac2a2 Mon Sep 17 00:00:00 2001
From: Rabin Vincent
Date: Tue, 10 Aug 2010 19:43:28 +0100
Subject: ARM: 6318/1: ftrace: fix and update dynamic ftrace

This adds mcount recording and updates dynamic ftrace for ARM to work
with the new ftrace dynamic tracing implementation. It also adds
support for the mcount format used by newer ARM compilers.

With dynamic tracing, mcount() is implemented as a nop.
Callsites are patched on startup with nops, and dynamically patched to call to the ftrace_caller() routine as needed. Acked-by: Steven Rostedt [recordmcount.pl change] Signed-off-by: Rabin Vincent Signed-off-by: Russell King --- arch/arm/include/asm/ftrace.h | 19 ++++- arch/arm/kernel/entry-common.S | 37 +++++++--- arch/arm/kernel/ftrace.c | 155 ++++++++++++++++++++++++++++------------- scripts/recordmcount.pl | 2 + 4 files changed, 155 insertions(+), 58 deletions(-) (limited to 'arch/arm/kernel/entry-common.S') diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h index 103f7ee97313..4a56a2ee067c 100644 --- a/arch/arm/include/asm/ftrace.h +++ b/arch/arm/include/asm/ftrace.h @@ -2,12 +2,29 @@ #define _ASM_ARM_FTRACE #ifdef CONFIG_FUNCTION_TRACER -#define MCOUNT_ADDR ((long)(mcount)) +#define MCOUNT_ADDR ((unsigned long)(__gnu_mcount_nc)) #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */ #ifndef __ASSEMBLY__ extern void mcount(void); extern void __gnu_mcount_nc(void); + +#ifdef CONFIG_DYNAMIC_FTRACE +struct dyn_arch_ftrace { +#ifdef CONFIG_OLD_MCOUNT + bool old_mcount; +#endif +}; + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} + +extern void ftrace_caller_old(void); +extern void ftrace_call_old(void); +#endif + #endif #endif diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f5e75de0203e..e02790f28879 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -127,6 +127,10 @@ ENDPROC(ret_from_fork) * clobber the ip register. This is OK because the ARM calling convention * allows it to be clobbered in subroutines and doesn't use it to hold * parameters.) + * + * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0" + * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see + * arch/arm/kernel/ftrace.c). */ #ifndef CONFIG_OLD_MCOUNT @@ -136,30 +140,45 @@ ENDPROC(ret_from_fork) #endif #ifdef CONFIG_DYNAMIC_FTRACE -ENTRY(mcount) +ENTRY(__gnu_mcount_nc) + mov ip, lr + ldmia sp!, {lr} + mov pc, ip +ENDPROC(__gnu_mcount_nc) + +ENTRY(ftrace_caller) stmdb sp!, {r0-r3, lr} mov r0, lr sub r0, r0, #MCOUNT_INSN_SIZE + ldr r1, [sp, #20] - .globl mcount_call -mcount_call: + .global ftrace_call +ftrace_call: bl ftrace_stub - ldr lr, [fp, #-4] @ restore lr - ldmia sp!, {r0-r3, pc} + ldmia sp!, {r0-r3, ip, lr} + mov pc, ip +ENDPROC(ftrace_caller) + +#ifdef CONFIG_OLD_MCOUNT +ENTRY(mcount) + stmdb sp!, {lr} + ldr lr, [fp, #-4] + ldmia sp!, {pc} ENDPROC(mcount) -ENTRY(ftrace_caller) +ENTRY(ftrace_caller_old) stmdb sp!, {r0-r3, lr} ldr r1, [fp, #-4] mov r0, lr sub r0, r0, #MCOUNT_INSN_SIZE - .globl ftrace_call -ftrace_call: + .globl ftrace_call_old +ftrace_call_old: bl ftrace_stub ldr lr, [fp, #-4] @ restore lr ldmia sp!, {r0-r3, pc} -ENDPROC(ftrace_caller) +ENDPROC(ftrace_caller_old) +#endif #else diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 0298286ad4ad..f09014cfbf2c 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -2,102 +2,161 @@ * Dynamic function tracing support. * * Copyright (C) 2008 Abhishek Sagar + * Copyright (C) 2010 Rabin Vincent * * For licencing details, see COPYING. * * Defines low-level handling of mcount calls when the kernel * is compiled with the -pg flag. When using dynamic ftrace, the - * mcount call-sites get patched lazily with NOP till they are - * enabled. All code mutation routines here take effect atomically. + * mcount call-sites get patched with NOP till they are enabled. 
+ * All code mutation routines here are called under stop_machine(). */ #include +#include #include #include -#define PC_OFFSET 8 -#define BL_OPCODE 0xeb000000 -#define BL_OFFSET_MASK 0x00ffffff +#define NOP 0xe8bd4000 /* pop {lr} */ -static unsigned long bl_insn; -static const unsigned long NOP = 0xe1a00000; /* mov r0, r0 */ +#ifdef CONFIG_OLD_MCOUNT +#define OLD_MCOUNT_ADDR ((unsigned long) mcount) +#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old) -unsigned char *ftrace_nop_replace(void) +#define OLD_NOP 0xe1a00000 /* mov r0, r0 */ + +static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) +{ + return rec->arch.old_mcount ? OLD_NOP : NOP; +} + +static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) +{ + if (!rec->arch.old_mcount) + return addr; + + if (addr == MCOUNT_ADDR) + addr = OLD_MCOUNT_ADDR; + else if (addr == FTRACE_ADDR) + addr = OLD_FTRACE_ADDR; + + return addr; +} +#else +static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) +{ + return NOP; +} + +static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) { - return (char *)&NOP; + return addr; } +#endif /* construct a branch (BL) instruction to addr */ -unsigned char *ftrace_call_replace(unsigned long pc, unsigned long addr) +static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) { long offset; - offset = (long)addr - (long)(pc + PC_OFFSET); + offset = (long)addr - (long)(pc + 8); if (unlikely(offset < -33554432 || offset > 33554428)) { /* Can't generate branches that far (from ARM ARM). Ftrace * doesn't generate branches outside of kernel text. */ WARN_ON_ONCE(1); - return NULL; + return 0; } - offset = (offset >> 2) & BL_OFFSET_MASK; - bl_insn = BL_OPCODE | offset; - return (unsigned char *)&bl_insn; -} -int ftrace_modify_code(unsigned long pc, unsigned char *old_code, - unsigned char *new_code) -{ - unsigned long err = 0, replaced = 0, old, new; + offset = (offset >> 2) & 0x00ffffff; - old = *(unsigned long *)old_code; - new = *(unsigned long *)new_code; + return 0xeb000000 | offset; +} - __asm__ __volatile__ ( - "1: ldr %1, [%2] \n" - " cmp %1, %4 \n" - "2: streq %3, [%2] \n" - " cmpne %1, %3 \n" - " movne %0, #2 \n" - "3:\n" +static int ftrace_modify_code(unsigned long pc, unsigned long old, + unsigned long new) +{ + unsigned long replaced; - ".pushsection .fixup, \"ax\"\n" - "4: mov %0, #1 \n" - " b 3b \n" - ".popsection\n" + if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE)) + return -EFAULT; - ".pushsection __ex_table, \"a\"\n" - " .long 1b, 4b \n" - " .long 2b, 4b \n" - ".popsection\n" + if (replaced != old) + return -EINVAL; - : "=r"(err), "=r"(replaced) - : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced) - : "memory"); + if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE)) + return -EPERM; - if (!err && (replaced == old)) - flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); + flush_icache_range(pc, pc + MCOUNT_INSN_SIZE); - return err; + return 0; } int ftrace_update_ftrace_func(ftrace_func_t func) { - int ret; unsigned long pc, old; - unsigned char *new; + unsigned long new; + int ret; pc = (unsigned long)&ftrace_call; memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE); new = ftrace_call_replace(pc, (unsigned long)func); - ret = ftrace_modify_code(pc, (unsigned char *)&old, new); + + ret = ftrace_modify_code(pc, old, new); + +#ifdef CONFIG_OLD_MCOUNT + if (!ret) { + pc = (unsigned long)&ftrace_call_old; + memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE); + new = ftrace_call_replace(pc, (unsigned 
long)func); + + ret = ftrace_modify_code(pc, old, new); + } +#endif + + return ret; +} + +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long new, old; + unsigned long ip = rec->ip; + + old = ftrace_nop_replace(rec); + new = ftrace_call_replace(ip, adjust_address(rec, addr)); + + return ftrace_modify_code(rec->ip, old, new); +} + +int ftrace_make_nop(struct module *mod, + struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned long ip = rec->ip; + unsigned long old; + unsigned long new; + int ret; + + old = ftrace_call_replace(ip, adjust_address(rec, addr)); + new = ftrace_nop_replace(rec); + ret = ftrace_modify_code(ip, old, new); + +#ifdef CONFIG_OLD_MCOUNT + if (ret == -EINVAL && addr == MCOUNT_ADDR) { + rec->arch.old_mcount = true; + + old = ftrace_call_replace(ip, adjust_address(rec, addr)); + new = ftrace_nop_replace(rec); + ret = ftrace_modify_code(ip, old, new); + } +#endif + return ret; } -/* run from ftrace_init with irqs disabled */ int __init ftrace_dyn_arch_init(void *data) { - ftrace_mcount_set(data); + *(unsigned long *)data = 0; + return 0; } diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index e67f05486087..022d4679b1b3 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -270,6 +270,8 @@ if ($arch eq "x86_64") { } elsif ($arch eq "arm") { $alignment = 2; $section_type = '%progbits'; + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_ARM_(CALL|PC24)" . + "\\s+(__gnu_mcount_nc|mcount)\$"; } elsif ($arch eq "ia64") { $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s_mcount\$"; -- cgit v1.2.3-55-g7522 From b2b163bb82b12bae2504a5b31399c37d099ad3cc Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 17 Sep 2010 14:56:16 +0100 Subject: ARM: prevent multiple syscall restarts Al Viro reports that calling "sys_sigsuspend(-ERESTARTNOHAND, 0, 0)" with two signals coming and being handled in kernel space results in the syscall restart being done twice. Avoid this by clearing the 'why' flag when we call the signal handling code to prevent further syscall restarts after the first. Acked-by: Al Viro Signed-off-by: Russell King --- arch/arm/kernel/entry-common.S | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/kernel/entry-common.S') diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f05a35a59694..4a560d30793d 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -48,6 +48,8 @@ work_pending: beq no_work_pending mov r0, sp @ 'regs' mov r2, why @ 'syscall' + tst r1, #_TIF_SIGPENDING @ delivering a signal? + movne why, #0 @ prevent further restarts bl do_notify_resume b ret_slow_syscall @ Check work again -- cgit v1.2.3-55-g7522 From 653d48b22166db2d8b1515ebe6f9f0f7c95dfc86 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 17 Sep 2010 14:34:39 +0100 Subject: arm: fix really nasty sigreturn bug If a signal hits us outside of a syscall and another gets delivered when we are in sigreturn (e.g. because it had been in sa_mask for the first one and got sent to us while we'd been in the first handler), we have a chance of returning from the second handler to location one insn prior to where we ought to return. If r0 happens to contain -513 (-ERESTARTNOINTR), sigreturn will get confused into doing restart syscall song and dance. Incredible joy to debug, since it manifests as random, infrequent and very hard to reproduce double execution of instructions in userland code... The fix is simple - mark it "don't bother with restarts" in wrapper, i.e. 
set r8 to 0 in sys_sigreturn and sys_rt_sigreturn wrappers, suppressing the syscall restart handling on return from these guys. They can't legitimately return a restart-worthy error anyway. Testcase: #include #include #include #include #include void f(int n) { __asm__ __volatile__( "ldr r0, [%0]\n" "b 1f\n" "b 2f\n" "1:b .\n" "2:\n" : : "r"(&n)); } void handler1(int sig) { } void handler2(int sig) { raise(1); } void handler3(int sig) { exit(0); } main() { struct sigaction s = {.sa_handler = handler2}; struct itimerval t1 = { .it_value = {1} }; struct itimerval t2 = { .it_value = {2} }; signal(1, handler1); sigemptyset(&s.sa_mask); sigaddset(&s.sa_mask, 1); sigaction(SIGALRM, &s, NULL); signal(SIGVTALRM, handler3); setitimer(ITIMER_REAL, &t1, NULL); setitimer(ITIMER_VIRTUAL, &t2, NULL); f(-513); /* -ERESTARTNOINTR */ write(1, "buggered\n", 9); return 1; } Signed-off-by: Al Viro Acked-by: Russell King Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- arch/arm/kernel/entry-common.S | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/arm/kernel/entry-common.S') diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index f05a35a59694..1b560825e1cf 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -418,11 +418,13 @@ ENDPROC(sys_clone_wrapper) sys_sigreturn_wrapper: add r0, sp, #S_OFF + mov why, #0 @ prevent syscall restart handling b sys_sigreturn ENDPROC(sys_sigreturn_wrapper) sys_rt_sigreturn_wrapper: add r0, sp, #S_OFF + mov why, #0 @ prevent syscall restart handling b sys_rt_sigreturn ENDPROC(sys_rt_sigreturn_wrapper) -- cgit v1.2.3-55-g7522 From 70c70d97809c3cdb8ff04f38ee3718c5385a2a4d Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 26 Aug 2010 15:08:35 -0700 Subject: ARM: SECCOMP support Signed-off-by: Nicolas Pitre --- arch/arm/Kconfig | 14 ++++++++++++++ arch/arm/include/asm/seccomp.h | 11 +++++++++++ arch/arm/include/asm/thread_info.h | 2 ++ arch/arm/kernel/entry-common.S | 15 +++++++++++++-- 4 files changed, 40 insertions(+), 2 deletions(-) create mode 100644 arch/arm/include/asm/seccomp.h (limited to 'arch/arm/kernel/entry-common.S') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 88c97bc7a6f5..1273ee8756be 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1463,6 +1463,20 @@ config UACCESS_WITH_MEMCPY However, if the CPU data cache is using a write-allocate mode, this option is unlikely to provide any performance gain. +config SECCOMP + bool + prompt "Enable seccomp to safely compute untrusted bytecode" + ---help--- + This kernel feature is useful for number crunching applications + that may need to compute untrusted bytecode during their + execution. By using pipes or other transports made available to + the process as file descriptors supporting the read/write + syscalls, it's possible to isolate those applications in + their own address space using seccomp. Once seccomp is + enabled via prctl(PR_SET_SECCOMP), it cannot be disabled + and the task is only allowed to execute a few safe syscalls + defined by each seccomp mode. 
+ config CC_STACKPROTECTOR bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" help diff --git a/arch/arm/include/asm/seccomp.h b/arch/arm/include/asm/seccomp.h new file mode 100644 index 000000000000..52b156b341f5 --- /dev/null +++ b/arch/arm/include/asm/seccomp.h @@ -0,0 +1,11 @@ +#ifndef _ASM_ARM_SECCOMP_H +#define _ASM_ARM_SECCOMP_H + +#include + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#define __NR_seccomp_sigreturn __NR_rt_sigreturn + +#endif /* _ASM_ARM_SECCOMP_H */ diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 763e29fa8530..7b5cc8dae06e 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -144,6 +144,7 @@ extern void vfp_flush_hwstate(struct thread_info *); #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_FREEZE 19 #define TIF_RESTORE_SIGMASK 20 +#define TIF_SECCOMP 21 #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) @@ -153,6 +154,7 @@ extern void vfp_flush_hwstate(struct thread_info *); #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) #define _TIF_FREEZE (1 << TIF_FREEZE) #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) /* * Change these and you break ASM code in entry-common.S diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 7885722bdf4e..0385a8207b67 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -295,7 +295,6 @@ ENTRY(vector_swi) get_thread_info tsk adr tbl, sys_call_table @ load syscall table pointer - ldr ip, [tsk, #TI_FLAGS] @ check for syscall tracing #if defined(CONFIG_OABI_COMPAT) /* @@ -312,8 +311,20 @@ ENTRY(vector_swi) eor scno, scno, #__NR_SYSCALL_BASE @ check OS number #endif + ldr r10, [tsk, #TI_FLAGS] @ check for syscall tracing stmdb sp!, {r4, r5} @ push fifth and sixth args - tst ip, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? + +#ifdef CONFIG_SECCOMP + tst r10, #_TIF_SECCOMP + beq 1f + mov r0, scno + bl __secure_computing + add r0, sp, #S_R0 + S_OFF @ pointer to regs + ldmia r0, {r0 - r3} @ have to reload r0 - r3 +1: +#endif + + tst r10, #_TIF_SYSCALL_TRACE @ are we tracing syscalls? bne __sys_trace cmp scno, #NR_syscalls @ check upper syscall limit -- cgit v1.2.3-55-g7522
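The hunk above wires __secure_computing() into the ARM syscall entry path whenever TIF_SECCOMP is set. A minimal userspace check of the resulting behaviour, the strict mode described in the Kconfig help text, might look like the sketch below; it assumes the mainline prctl() interface of this period, and none of these names come from the patch itself.

/*
 * Hypothetical userspace check, not part of the patch: enter seccomp
 * strict mode and show that only read/write/exit/sigreturn survive.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>

#ifndef PR_SET_SECCOMP
#define PR_SET_SECCOMP 22
#endif

int main(void)
{
	/* mode 1 is strict mode, the only seccomp mode that exists here */
	if (prctl(PR_SET_SECCOMP, 1, 0, 0, 0) < 0) {
		perror("prctl(PR_SET_SECCOMP)");   /* e.g. CONFIG_SECCOMP=n */
		return 1;
	}

	write(1, "inside seccomp strict mode\n", 27);   /* write() is allowed */

	/* any other syscall is fatal: the kernel kills the task right here */
	open("/dev/null", O_RDONLY);

	write(1, "never reached\n", 14);
	return 0;
}

The allowed set matches the __NR_seccomp_* mapping introduced in asm/seccomp.h above (read, write, exit and rt_sigreturn), so the open() is expected to end the process rather than return.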