xref: /linux/kernel/stacktrace.c (revision 63307d015b91e626c97bb82e88054af3d0b74643)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched/task_stack.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>

/**
 * stack_trace_print - Print the entries in the stack trace
 * @entries:	Pointer to storage array
 * @nr_entries:	Number of entries in the storage array
 * @spaces:	Number of leading spaces to print
 */
void stack_trace_print(unsigned long *entries, unsigned int nr_entries,
		       int spaces)
{
	unsigned int i;

	if (WARN_ON(!entries))
		return;

	for (i = 0; i < nr_entries; i++)
		printk("%*c%pS\n", 1 + spaces, ' ', (void *)entries[i]);
}
EXPORT_SYMBOL_GPL(stack_trace_print);
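
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pairs stack_trace_save() (defined below, declared in <linux/stacktrace.h>)
 * with stack_trace_print(). The helper name and the array size are
 * hypothetical.
 */
static void example_dump_current_stack(void)
{
	unsigned long entries[16];
	unsigned int nr_entries;

	/* Capture the current call chain without skipping extra frames. */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	/* Resolve and print each entry, indented by four spaces. */
	stack_trace_print(entries, nr_entries, 4);
}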

/**
 * stack_trace_snprint - Print the entries in the stack trace into a buffer
 * @buf:	Pointer to the print buffer
 * @size:	Size of the print buffer
 * @entries:	Pointer to storage array
 * @nr_entries:	Number of entries in the storage array
 * @spaces:	Number of leading spaces to print
 *
 * Return: Number of bytes printed.
 */
int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
			unsigned int nr_entries, int spaces)
{
	unsigned int generated, i, total = 0;

	if (WARN_ON(!entries))
		return 0;

	for (i = 0; i < nr_entries && size; i++) {
		generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
				     (void *)entries[i]);

		total += generated;
		if (generated >= size) {
			buf += size;
			size = 0;
		} else {
			buf += generated;
			size -= generated;
		}
	}

	return total;
}
EXPORT_SYMBOL_GPL(stack_trace_snprint);
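
/*
 * Illustrative sketch, not part of the original file: formatting a
 * previously saved trace into a caller-provided buffer, e.g. for a
 * debugfs or sysfs read handler. The helper name is hypothetical.
 */
static int example_format_trace(char *buf, size_t size,
				unsigned long *entries,
				unsigned int nr_entries)
{
	/* No leading indentation; returns the number of bytes printed. */
	return stack_trace_snprint(buf, size, entries, nr_entries, 0);
}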

#ifdef CONFIG_ARCH_STACKWALK

struct stacktrace_cookie {
	unsigned long	*store;
	unsigned int	size;
	unsigned int	skip;
	unsigned int	len;
};

static bool stack_trace_consume_entry(void *cookie, unsigned long addr,
				      bool reliable)
{
	struct stacktrace_cookie *c = cookie;

	if (c->len >= c->size)
		return false;

	if (c->skip > 0) {
		c->skip--;
		return true;
	}
	c->store[c->len++] = addr;
	return c->len < c->size;
}

static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr,
					      bool reliable)
{
	if (in_sched_functions(addr))
		return true;
	return stack_trace_consume_entry(cookie, addr, reliable);
}
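
/*
 * Illustrative sketch of the contract between arch_stack_walk() and the
 * consumer callbacks above: the architecture's unwinder invokes the
 * callback once per return address, from the most recent frame outwards,
 * and stops early as soon as the callback returns false. The unwind
 * helpers below are hypothetical; real implementations live in the
 * architecture code.
 */
#if 0	/* example only */
void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct example_unwind_state state;

	for (example_unwind_start(&state, task, regs);
	     !example_unwind_done(&state);
	     example_unwind_next(&state)) {
		/* Reliability reporting is architecture specific. */
		if (!consume_entry(cookie, example_unwind_pc(&state), false))
			break;
	}
}
#endif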

/**
 * stack_trace_save - Save a stack trace into a storage array
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
			      unsigned int skipnr)
{
	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
	struct stacktrace_cookie c = {
		.store	= store,
		.size	= size,
		.skip	= skipnr + 1,
	};

	arch_stack_walk(consume_entry, &c, current, NULL);
	return c.len;
}
EXPORT_SYMBOL_GPL(stack_trace_save);

/**
 * stack_trace_save_tsk - Save a task stack trace into a storage array
 * @tsk:	The task to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
				  unsigned int size, unsigned int skipnr)
{
	stack_trace_consume_fn consume_entry = stack_trace_consume_entry_nosched;
	struct stacktrace_cookie c = {
		.store	= store,
		.size	= size,
		.skip	= skipnr + 1,
	};

	if (!try_get_task_stack(tsk))
		return 0;

	arch_stack_walk(consume_entry, &c, tsk, NULL);
	put_task_stack(tsk);
	return c.len;
}
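
/*
 * Illustrative sketch, not part of the original file: dumping the stack
 * of another (ideally blocked) task, hung-task style. Scheduler internals
 * are filtered out by stack_trace_consume_entry_nosched() above. The
 * helper name and array size are hypothetical.
 */
static void example_dump_task_stack(struct task_struct *tsk)
{
	unsigned long entries[32];
	unsigned int nr_entries;

	nr_entries = stack_trace_save_tsk(tsk, entries, ARRAY_SIZE(entries), 0);
	stack_trace_print(entries, nr_entries, 0);
}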

/**
 * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
 * @regs:	Pointer to pt_regs to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
				   unsigned int size, unsigned int skipnr)
{
	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
	struct stacktrace_cookie c = {
		.store	= store,
		.size	= size,
		.skip	= skipnr,
	};

	arch_stack_walk(consume_entry, &c, current, regs);
	return c.len;
}

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/**
 * stack_trace_save_tsk_reliable - Save task stack with verification
 * @tsk:	Pointer to the task to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 *
 * Return:	An error if it detects any unreliable features of the
 *		stack. Otherwise it guarantees that the stack trace is
 *		reliable and returns the number of entries stored.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
				  unsigned int size)
{
	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
	struct stacktrace_cookie c = {
		.store	= store,
		.size	= size,
	};
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = arch_stack_walk_reliable(consume_entry, &c, tsk);
	put_task_stack(tsk);
	return ret ? ret : c.len;
}
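
/*
 * Illustrative sketch, not part of the original file: a livepatch-style
 * caller only trusts the trace when no error is returned, treating any
 * negative value as "this task could not be unwound reliably". The helper
 * name and array size are hypothetical.
 */
static int example_check_task_reliable(struct task_struct *tsk)
{
	unsigned long entries[64];
	int nr_entries;

	nr_entries = stack_trace_save_tsk_reliable(tsk, entries,
						   ARRAY_SIZE(entries));
	if (nr_entries < 0)
		return nr_entries;

	/* ... inspect the nr_entries reliable return addresses ... */
	return 0;
}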
#endif

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
/**
 * stack_trace_save_user - Save a user space stack trace into a storage array
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 *
 * Return: Number of trace entries stored.
 */
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
{
	stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
	struct stacktrace_cookie c = {
		.store	= store,
		.size	= size,
	};

	/* Trace user stack if not a kernel thread */
	if (!current->mm)
		return 0;

	arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));
	return c.len;
}
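
/*
 * Illustrative sketch, not part of the original file: sampling the
 * user-space call chain of the current task, e.g. from a tracing or
 * profiling hook. The recorded values are user virtual addresses, so they
 * are normally handed to user space for symbolization rather than printed
 * with %pS. The helper name and array size are hypothetical.
 */
static void example_sample_user_stack(void)
{
	unsigned long entries[32];
	unsigned int nr_entries;

	nr_entries = stack_trace_save_user(entries, ARRAY_SIZE(entries));
	pr_debug("sampled %u user frames\n", nr_entries);
}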
#endif

#else /* CONFIG_ARCH_STACKWALK */

/*
 * Architectures that do not implement save_stack_trace_*()
 * get these weak aliases and once-per-bootup warnings
 * (whenever this facility is utilized - for example by procfs):
 */
__weak void
save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
}

__weak void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
}

__weak int
save_stack_trace_tsk_reliable(struct task_struct *tsk,
			      struct stack_trace *trace)
{
	WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk_reliable() not implemented yet.\n");
	return -ENOSYS;
}

/**
 * stack_trace_save - Save a stack trace into a storage array
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored
 */
unsigned int stack_trace_save(unsigned long *store, unsigned int size,
			      unsigned int skipnr)
{
	struct stack_trace trace = {
		.entries	= store,
		.max_entries	= size,
		.skip		= skipnr + 1,
	};

	save_stack_trace(&trace);
	return trace.nr_entries;
}
EXPORT_SYMBOL_GPL(stack_trace_save);

/**
 * stack_trace_save_tsk - Save a task stack trace into a storage array
 * @task:	The task to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored
 */
unsigned int stack_trace_save_tsk(struct task_struct *task,
				  unsigned long *store, unsigned int size,
				  unsigned int skipnr)
{
	struct stack_trace trace = {
		.entries	= store,
		.max_entries	= size,
		.skip		= skipnr + 1,
	};

	save_stack_trace_tsk(task, &trace);
	return trace.nr_entries;
}

/**
 * stack_trace_save_regs - Save a stack trace based on pt_regs into a storage array
 * @regs:	Pointer to pt_regs to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 * @skipnr:	Number of entries to skip at the start of the stack trace
 *
 * Return: Number of trace entries stored
 */
unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
				   unsigned int size, unsigned int skipnr)
{
	struct stack_trace trace = {
		.entries	= store,
		.max_entries	= size,
		.skip		= skipnr,
	};

	save_stack_trace_regs(regs, &trace);
	return trace.nr_entries;
}

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/**
 * stack_trace_save_tsk_reliable - Save task stack with verification
 * @tsk:	Pointer to the task to examine
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 *
 * Return:	An error if it detects any unreliable features of the
 *		stack. Otherwise it guarantees that the stack trace is
 *		reliable and returns the number of entries stored.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
				  unsigned int size)
{
	struct stack_trace trace = {
		.entries	= store,
		.max_entries	= size,
	};
	int ret = save_stack_trace_tsk_reliable(tsk, &trace);

	return ret ? ret : trace.nr_entries;
}
#endif

#ifdef CONFIG_USER_STACKTRACE_SUPPORT
/**
 * stack_trace_save_user - Save a user space stack trace into a storage array
 * @store:	Pointer to storage array
 * @size:	Size of the storage array
 *
 * Return: Number of trace entries stored
 */
unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
{
	struct stack_trace trace = {
		.entries	= store,
		.max_entries	= size,
	};

	save_stack_trace_user(&trace);
	return trace.nr_entries;
}
#endif /* CONFIG_USER_STACKTRACE_SUPPORT */

#endif /* !CONFIG_ARCH_STACKWALK */