xref: /linux/kernel/trace/trace_stack.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

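/*
 * stack_dump_trace[] holds the return addresses of the deepest stack seen
 * so far, terminated by ULONG_MAX; stack_dump_index[] holds, for each of
 * those entries, the stack depth (in bytes) at which that return address
 * was found.
 */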
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

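/*
 * max_stack_size is the deepest stack usage recorded so far, protected by
 * max_stack_lock. A raw arch spinlock is used so the lock can be taken
 * from within the function tracer itself without going through the
 * instrumented spinlock code, which could recurse into the tracer.
 */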
static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

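/*
 * Dump the recorded maximum stack trace to the console. Called from
 * check_stack() right before BUG()ing when the end of the task's stack
 * is found to be corrupted.
 */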
static inline void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries);

	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == max_stack_trace.nr_entries ||
				stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_dump_index[i];
		else
			size = stack_dump_index[i] - stack_dump_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}

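/*
 * Measure the current stack usage and, if it exceeds the recorded maximum,
 * take a new snapshot of the stack trace together with the per-entry
 * depths. The usage is derived from the address of the caller's local
 * variable relative to THREAD_SIZE; the expensive work (taking
 * max_stack_lock, saving and walking the trace) only happens on a new max.
 */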
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */
	rcu_irq_enter();

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries = 0;
	max_stack_trace.skip = 3;

	save_stack_trace(&max_stack_trace);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < max_stack_trace.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[x] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			if (*p == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_dump_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	max_stack_trace.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	rcu_irq_exit();
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

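/*
 * ftrace callback invoked on every traced function entry. The per-cpu
 * trace_active counter guards against recursion: if it is already
 * non-zero we are being re-entered from within the tracer on this CPU
 * and simply back out.
 */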
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

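/* debugfs read handler for stack_max_size: report the recorded maximum. */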
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

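/*
 * debugfs write handler for stack_max_size: set (typically reset to 0)
 * the recorded maximum under max_stack_lock. The per-cpu trace_active
 * counter is bumped first so that the tracer cannot recurse into the
 * lock we are holding.
 */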
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

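/*
 * seq_file iteration for the stack_trace file. The current entry index
 * is stashed in m->private; iteration stops at the first unused slot.
 */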
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

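/*
 * t_start()/t_stop() bracket the whole seq_file walk: interrupts are
 * disabled, the per-cpu trace_active counter is raised and
 * max_stack_lock is held for the duration, so the recorded snapshot
 * cannot change while it is being printed.
 */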
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

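/*
 * Print one line of the stack_trace file: the column header for the
 * start token, otherwise depth, size and symbol for entry i. The size
 * of an entry is the difference between its depth and the next entry's
 * depth.
 */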
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

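/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled. When the value
 * actually changes, register or unregister the ftrace callback
 * accordingly.
 */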
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

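/*
 * Boot-time setup. __setup("stacktrace", ...) matches both "stacktrace"
 * and "stacktrace_filter=<list>" on the kernel command line; in the
 * latter case str starts with "_filter=" and the filter list is saved
 * here for stack_trace_init() to apply.
 */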
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

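/*
 * Create the debugfs control files in the tracing directory, apply any
 * boot-time filter, and register the ftrace callback if the tracer was
 * enabled on the command line.
 */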
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
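
/*
 * Example usage from userspace (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   # cat /sys/kernel/debug/tracing/stack_trace
 *   # echo 0 > /sys/kernel/debug/tracing/stack_max_size   (reset the max)
 */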