xref: /linux/arch/x86/kernel/stacktrace.c (revision 86a8280a7fe007d61b05fa8a352edc0595283dad)
1 /*
2  * Stack trace management functions
3  *
4  *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5  */
6 #include <linux/sched.h>
7 #include <linux/stacktrace.h>
8 #include <linux/module.h>
9 #include <linux/uaccess.h>
10 #include <asm/stacktrace.h>
11 
/*
 * stacktrace_ops .stack callback (see the ops tables below).  We only
 * collect return addresses, so stack-boundary notifications are
 * ignored; returning 0 lets the walk continue.
 */
static int save_stack_stack(void *data, char *name)
{
	return 0;
}
16 
17 static int
18 __save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
19 {
20 	struct stack_trace *trace = data;
21 #ifdef CONFIG_FRAME_POINTER
22 	if (!reliable)
23 		return 0;
24 #endif
25 	if (nosched && in_sched_functions(addr))
26 		return 0;
27 	if (trace->skip > 0) {
28 		trace->skip--;
29 		return 0;
30 	}
31 	if (trace->nr_entries < trace->max_entries) {
32 		trace->entries[trace->nr_entries++] = addr;
33 		return 0;
34 	} else {
35 		return -1; /* no more room, stop walking the stack */
36 	}
37 }
38 
/* .address callback for full traces: no scheduler filtering. */
static int save_stack_address(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, false);
}
43 
/* .address callback that drops addresses inside scheduler functions. */
static int
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, true);
}
49 
/* Walker callbacks for tracing without scheduler filtering. */
static const struct stacktrace_ops save_stack_ops = {
	.stack		= save_stack_stack,
	.address	= save_stack_address,
	.walk_stack	= print_context_stack,
};
55 
/* Like save_stack_ops, but skips entries inside scheduler functions. */
static const struct stacktrace_ops save_stack_ops_nosched = {
	.stack		= save_stack_stack,
	.address	= save_stack_address_nosched,
	.walk_stack	= print_context_stack,
};
61 
62 /*
63  * Save stack-backtrace addresses into a stack_trace buffer.
64  */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
	/* Terminate the trace with ULONG_MAX if there is room left. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);
72 
/*
 * Like save_stack_trace(), but start the walk from the given register
 * snapshot instead of the current frame.
 */
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
	/* Terminate the trace with ULONG_MAX if there is room left. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
79 
/*
 * Save a backtrace of @tsk.  Uses the nosched ops, so addresses inside
 * scheduler functions are filtered out of the resulting trace.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
	/* Terminate the trace with ULONG_MAX if there is room left. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
87 
88 /* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
89 
/* User-space stack frame layout as read by the frame-pointer walker. */
struct stack_frame_user {
	const void __user	*next_fp;	/* saved frame pointer of the caller */
	unsigned long		ret_addr;	/* return address into the caller */
};
94 
95 static int
96 copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
97 {
98 	int ret;
99 
100 	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
101 		return 0;
102 
103 	ret = 1;
104 	pagefault_disable();
105 	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
106 		ret = 0;
107 	pagefault_enable();
108 
109 	return ret;
110 }
111 
112 static inline void __save_stack_trace_user(struct stack_trace *trace)
113 {
114 	const struct pt_regs *regs = task_pt_regs(current);
115 	const void __user *fp = (const void __user *)regs->bp;
116 
117 	if (trace->nr_entries < trace->max_entries)
118 		trace->entries[trace->nr_entries++] = regs->ip;
119 
120 	while (trace->nr_entries < trace->max_entries) {
121 		struct stack_frame_user frame;
122 
123 		frame.next_fp = NULL;
124 		frame.ret_addr = 0;
125 		if (!copy_stack_frame(fp, &frame))
126 			break;
127 		if ((unsigned long)fp < regs->sp)
128 			break;
129 		if (frame.ret_addr) {
130 			trace->entries[trace->nr_entries++] =
131 				frame.ret_addr;
132 		}
133 		if (fp == frame.next_fp)
134 			break;
135 		fp = frame.next_fp;
136 	}
137 }
138 
/*
 * Save a trace of the current task's user-space stack.  Kernel threads
 * have no mm and hence no user stack; for them only the ULONG_MAX
 * terminator is stored.
 */
void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	/* Terminate the trace with ULONG_MAX if there is room left. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
150 
151