xref: /linux/kernel/trace/trace_stack.c (revision b889fcf63cb62e7fdb7816565e28f44dbe4a76a5)
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

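/*
 * stack_dump_trace[] holds the text addresses of the recorded stack
 * frames (terminated by a ULONG_MAX sentinel) and stack_dump_index[]
 * holds the depth, in bytes from the top of the stack, at which each
 * of those frames was found.
 */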
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

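/*
 * Measure how much of the current thread stack is in use, based on the
 * address of a local variable.  On a new maximum, record a stack trace
 * and work out the stack depth of each entry in it.
 */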
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries.  An entry may for some
	 * reason be missing from the stack, so we have to account
	 * for that.  If every entry is found, this loop runs only
	 * once.  This code only executes on a new max, so it is
	 * far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

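/*
 * The ftrace callback, invoked on entry to every traced function.
 * The per-cpu trace_active counter keeps a CPU from recursing into
 * check_stack() from within check_stack() itself.
 */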
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed; this variable is only modified by this CPU */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

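/* The callback handles its own recursion, so mark it recursion safe. */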
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

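/*
 * debugfs handlers for the stack_max_size file, which reports the
 * deepest stack usage seen so far and can be written (e.g. with 0)
 * to reset it.
 */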
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If we trace inside arch_spin_lock() or in an NMI after
	 * taking it, we would deadlock on max_stack_lock, so the
	 * per-cpu trace_active count must be raised here as well.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

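/*
 * max_stack_lock is held from t_start() to t_stop() so the saved trace
 * cannot change while it is being printed.  trace_active is raised so
 * the stack tracer does not try to take the same lock on this CPU.
 */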
static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

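/* Resolve a saved text address to symbol+offset via the %pS format. */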
static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};

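/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled: register or
 * unregister the ftrace callback only when the value actually changes.
 */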
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

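/*
 * __setup("stacktrace", ...) also matches "stacktrace_filter=", in
 * which case str points at the "_filter=" suffix.  Example kernel
 * command lines:
 *
 *   stacktrace
 *   stacktrace_filter=kmem_*
 */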
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

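/*
 * Typical use once booted (paths assume debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   # cat /sys/kernel/debug/tracing/stack_trace
 */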
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0644, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);