/*
* Stack trace management functions
*
* Copyright (C) 2009 Helge Deller <deller@gmx.de>
* based on arch/x86/kernel/stacktrace.c by Ingo Molnar <mingo@redhat.com>
* and parisc unwind functions by Randolph Chung <tausq@debian.org>
*
* TODO: Userspace stacktrace (CONFIG_USER_STACKTRACE_SUPPORT)
*/
#include <linux/module.h>
#include <linux/stacktrace.h>

#include <asm/unwind.h>
static void dump_trace(struct task_struct *task, struct stack_trace *trace)
{
	struct unwind_frame_info info;

	/* initialize unwind info */
	if (task == current) {
		unsigned long sp;
		struct pt_regs r;
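		/*
		 * Synthesize a pt_regs for the running task: on PA-RISC,
		 * %r30 is the stack pointer and %r2 holds the return
		 * pointer (rp).  The GCC label-address extension (&&HERE)
		 * supplies the current instruction address for iaoq[0],
		 * the front of the instruction address queue.
		 */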
HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		memset(&r, 0, sizeof(struct pt_regs));
		r.iaoq[0] = (unsigned long)&&HERE;
		r.gr[2] = (unsigned long)__builtin_return_address(0);
		r.gr[30] = sp;
		unwind_frame_init(&info, task, &r);
	} else {
		unwind_frame_init_from_blocked_task(&info, task);
	}

	/* unwind stack and save entries in stack_trace struct */
	trace->nr_entries = 0;
	while (trace->nr_entries < trace->max_entries) {
		if (unwind_once(&info) < 0 || info.ip == 0)
			break;
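
		/* keep only addresses that resolve to kernel text */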
		if (__kernel_text_address(info.ip))
			trace->entries[trace->nr_entries++] = info.ip;
	}
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, trace);
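	/* terminate the trace with ULONG_MAX, the legacy end-of-trace marker */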
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
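
/*
 * Minimal usage sketch (illustration only, not part of the original file):
 * a caller provides the entries buffer and its capacity, then
 * save_stack_trace() fills it.  print_stack_trace() is the generic helper
 * declared in <linux/stacktrace.h> in kernels of this era.
 */
#if 0
static void example_dump_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 0,	/* innermost frames to skip */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);	/* 0 = no extra indentation */
}
#endif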