xref: /linux/kernel/unwind/deferred.c (revision 5e32d0f15cc5c843a4115c4644d984d42524c794)
// SPDX-License-Identifier: GPL-2.0
/*
 * Deferred user space unwinding
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/unwind_deferred.h>

#define UNWIND_MAX_ENTRIES 512

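/*
 * Sizing note (editorial, an assumption rather than anything stated in
 * this file): with 8-byte longs, the lazily allocated per-task buffer is
 * UNWIND_MAX_ENTRIES * sizeof(long) = 512 * 8 = 4096 bytes, i.e. one
 * 4K page on a typical 64-bit kernel.
 */
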
/**
 * unwind_user_faultable - Produce a user stacktrace in faultable context
 * @trace: The descriptor that will store the user stacktrace
 *
 * This must be called in a known faultable context (usually when entering
 * or exiting user space). Depending on the available implementations,
 * @trace will be loaded with the addresses of the user space stacktrace
 * if one can be found.
 *
 * Return: 0 on success, negative on error.
 *         On success @trace will contain the user space stacktrace.
 */
int unwind_user_faultable(struct unwind_stacktrace *trace)
{
	struct unwind_task_info *info = &current->unwind_info;

	/* Should always be called from faultable context */
	might_fault();

	/* An exiting task may no longer have a valid user stack to walk */
	if (current->flags & PF_EXITING)
		return -EINVAL;

	/* Lazily allocate the per-task entries buffer on first use */
	if (!info->entries) {
		info->entries = kmalloc_array(UNWIND_MAX_ENTRIES, sizeof(long),
					      GFP_KERNEL);
		if (!info->entries)
			return -ENOMEM;
	}

	trace->nr = 0;
	trace->entries = info->entries;
	unwind_user(trace, UNWIND_MAX_ENTRIES);

	return 0;
}
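
/*
 * Usage sketch (editorial addition, not code from this file): a tracer
 * that deferred a user stack walk would typically call
 * unwind_user_faultable() from a faultable context, e.g. task_work run
 * on the way back to user space. The callback name and the
 * consume_stacktrace() helper are illustrative assumptions only.
 *
 *	static void unwind_work_func(struct callback_head *head)
 *	{
 *		struct unwind_stacktrace trace;
 *
 *		if (unwind_user_faultable(&trace) < 0)
 *			return;
 *
 *		// trace.entries[0] .. trace.entries[trace.nr - 1] now
 *		// hold the user space return addresses
 *		consume_stacktrace(trace.entries, trace.nr);
 *	}
 *
 * Note that trace->entries points into the per-task buffer, so the
 * contents are only stable until the next call on the same task.
 */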

void unwind_task_init(struct task_struct *task)
{
	struct unwind_task_info *info = &task->unwind_info;

	memset(info, 0, sizeof(*info));
}

void unwind_task_free(struct task_struct *task)
{
	struct unwind_task_info *info = &task->unwind_info;

	kfree(info->entries);
}
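
/*
 * Lifecycle sketch (editorial addition): going by the names, these two
 * helpers are meant to be wired into task creation and teardown. The
 * exact call sites are an assumption and are not shown in this file:
 *
 *	// on task creation, so unwind_info starts out zeroed:
 *	unwind_task_init(p);
 *
 *	// on task teardown, to release the lazily allocated buffer:
 *	unwind_task_free(tsk);
 *
 * unwind_task_init() only clears the structure; the entries buffer is
 * allocated on first use by unwind_user_faultable() and freed here.
 * Since kfree(NULL) is a no-op, the free is safe for tasks that never
 * requested an unwind.
 */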