xref: /linux/kernel/trace/trace_stack.c (revision b8bb76713ec50df2f11efee386e16f93d51e1076)
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};
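
/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far, terminated by ULONG_MAX, and stack_dump_index[i] records
 * how many bytes of stack were still in use when entry i executed (its
 * depth from the top of the stack).  The difference between two
 * neighboring index values is therefore the stack footprint of the
 * function at the shallower entry, which is what t_show() prints in
 * its Size column.
 */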

static unsigned long max_stack_size;
static raw_spinlock_t max_stack_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where on the stack these entries live.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. An entry may, for some
	 * reason, not appear on the stack, so we have to account
	 * for that. If all entries are found, this loop runs only
	 * once. This code runs only on a new max, so it is far
	 * from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	__raw_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}
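
/*
 * A worked example of the depth arithmetic above (the numbers are
 * assumed for illustration, not taken from a real trace): with
 * THREAD_SIZE == 8192 and a local variable living at stack address
 * 0x...e9c0,
 *
 *	this_size = 8192 - (0x...e9c0 & 8191) = 8192 - 2496 = 5696
 *
 * bytes of stack are in use.  Each address returned by
 * save_stack_trace() is then matched against the words between that
 * local and the top of the stack, and (top - p) * sizeof(unsigned long)
 * becomes the recorded depth of that entry in stack_dump_index[].
 */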

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu, resched;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	/* no atomic needed, this variable is only modified by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	ftrace_preempt_enable(resched);
}
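
/*
 * trace_active is effectively a per-cpu recursion counter: any function
 * traced from inside check_stack() re-enters stack_trace_call() on the
 * same cpu, sees a non-zero count and bails out immediately.  The
 * ftrace_preempt_disable()/ftrace_preempt_enable() pair likewise keeps
 * the callback from recursing through the scheduler on preemption.
 */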

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	local_irq_save(flags);
	__raw_spin_lock(&max_stack_lock);
	*ptr = val;
	__raw_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
};
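
/*
 * Example use from userspace, assuming debugfs is mounted at
 * /sys/kernel/debug (the value shown is illustrative):
 *
 *	# cat /sys/kernel/debug/tracing/stack_max_size
 *	4096
 *	# echo 0 > /sys/kernel/debug/tracing/stack_max_size
 *
 * Writing 0 resets the watermark, so the next traced call that checks
 * its stack records a fresh maximum.
 */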

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	long i;

	(*pos)++;

	if (v == SEQ_START_TOKEN)
		i = 0;
	else {
		i = *(long *)v;
		i++;
	}

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return NULL;

	m->private = (void *)i;

	return &m->private;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	void *t = SEQ_START_TOKEN;
	loff_t l = 0;

	local_irq_disable();
	__raw_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	__raw_spin_unlock(&max_stack_lock);
	local_irq_enable();
}
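
/*
 * Note that t_start() takes max_stack_lock with interrupts off and
 * t_stop() releases it, so the snapshot in stack_dump_trace[] cannot be
 * replaced by a new maximum while the seq_file iterator is walking it.
 */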

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];

	sprint_symbol(str, addr);

	return seq_printf(m, "%s\n", str);
#else
	return seq_printf(m, "%p\n", (void *)addr);
#endif
}
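
/*
 * With CONFIG_KALLSYMS the address resolves to a "symbol+offset/size"
 * string; without it, only the raw pointer value can be printed.
 */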

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth   Size      Location"
			   "    (%d entries)\n"
			   "        -----   ----      --------\n",
			   max_stack_trace.nr_entries);
		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
};
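
/*
 * Reading the "stack_trace" file yields output along these lines (the
 * depths, sizes and symbols below are illustrative only):
 *
 *	# cat /sys/kernel/debug/tracing/stack_trace
 *	        Depth   Size      Location    (10 entries)
 *	        -----   ----      --------
 *	  0)     2076     212   do_sys_open+0x2b/0x1c0
 *	  1)     1864      48   sys_open+0x1b/0x20
 *	  ...
 */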

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   struct file *file, void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
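
/*
 * This handler backs the kernel.stack_tracer_enabled sysctl, so the
 * tracer can also be toggled at run time, e.g.:
 *
 *	# echo 1 > /proc/sys/kernel/stack_tracer_enabled
 */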

static __init int enable_stacktrace(char *str)
{
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
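
/*
 * Booting with "stacktrace" on the kernel command line sets both flags,
 * so stack_trace_init() below registers the tracer as soon as it runs.
 */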

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
				    &max_stack_size, &stack_max_size_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_max_size' entry\n");

	entry = debugfs_create_file("stack_trace", 0444, d_tracer,
				    NULL, &stack_trace_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'stack_trace' entry\n");

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);