xref: /linux/kernel/trace/trace_stack.c (revision 4949009eb8d40a441dcddcd96e101e77d31cf1b2)
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

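/*
 * With -mfentry the compiler calls the tracer before the traced
 * function sets up its own stack frame, which changes which ip is
 * useful to record (see stack_trace_call() below).
 */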
#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

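/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far, terminated by ULONG_MAX; stack_dump_index[] holds, for
 * each entry, how many bytes of stack were in use at that point.
 */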
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

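/*
 * Dump the current max stack trace to the console; called just before
 * BUG() when check_stack() detects stack-end corruption.
 */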
static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
				stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}

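/*
 * Compute how much stack the current call chain is using. If it is a
 * new maximum, record the new size, save a fresh stack trace, and work
 * out how deep each entry sits on the stack so per-entry sizes can be
 * reported.
 */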
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;

	if (using_ftrace_ops_list_func())
		max_stack_trace.skip = 4;
	else
		max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. Some entries may not be
	 * found on the stack, in which case we have to account
	 * for them. If all of them are found, this loop will only
	 * run once. This code only runs on a new max, so it is
	 * far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

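/*
 * The ftrace callback: runs on (nearly) every traced function entry
 * while the stack tracer is registered. Recursion is fenced off with
 * the per-cpu trace_active counter rather than with atomics.
 */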
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, this variable is only modified on this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

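/*
 * debugfs read for stack_max_size: report the recorded maximum back
 * to user space.
 */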
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

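/*
 * debugfs write for stack_max_size: lets user space set (typically
 * reset) the watermark under max_stack_lock, with the tracer fenced
 * off so it cannot recurse into the lock.
 */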
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If we get traced inside arch_spin_lock() or after it (NMI),
	 * we would deadlock on max_stack_lock, so we also need to
	 * increase the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

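/*
 * seq_file iterator over the saved trace: position 0 is the header,
 * position n maps to stack_dump_trace[n - 1].
 */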
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

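/*
 * Hold max_stack_lock (with the tracer fenced off via trace_active)
 * across the whole traversal so the snapshot cannot change while it
 * is being printed; t_stop() drops it again.
 */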
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

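/* Resolve entry i to a symbol and print it via the %pS format. */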
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

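/*
 * Print one line of output: the header (plus a hint if the tracer is
 * disabled) for SEQ_START_TOKEN, otherwise the depth, size and symbol
 * of entry i, with the size derived from neighboring depth values just
 * like in print_max_stack().
 */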
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

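/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: registers or
 * unregisters the ftrace callback when the value actually changes.
 */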
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

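/*
 * "stacktrace" on the kernel command line enables the tracer at boot;
 * "stacktrace_filter=<funcs>" additionally restricts it to the given
 * functions (seen here as the "_filter=" suffix of the parameter).
 */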
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

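/*
 * Create the debugfs control files and, if requested on the command
 * line, apply the early filter and start tracing.
 */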
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);