// SPDX-License-Identifier: GPL-2.0
/*
 * Deferred user space unwinding
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/unwind_deferred.h>

/* Make the cache fit in a 4K page */
#define UNWIND_MAX_ENTRIES					\
	((SZ_4K - sizeof(struct unwind_cache)) / sizeof(long))
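/*
 * Worked example (layout-dependent, for illustration only): on a 64-bit
 * kernel with 8-byte longs, if struct unwind_cache occupies 8 bytes this
 * evaluates to (4096 - 8) / 8 = 511 entries.
 */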

/**
 * unwind_user_faultable - Produce a user stacktrace in faultable context
 * @trace: The descriptor that will store the user stacktrace
 *
 * This must be called in a known faultable context (usually when entering
 * or exiting user space). Depending on the available implementations,
 * @trace will be loaded with the addresses of the user space stacktrace
 * if one can be found.
 *
 * Return: 0 on success and negative on error.
 *         On success, @trace will contain the user space stacktrace.
 */
int unwind_user_faultable(struct unwind_stacktrace *trace)
{
	struct unwind_task_info *info = &current->unwind_info;
	struct unwind_cache *cache;

	/* Should always be called from faultable context */
	might_fault();

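	/*
	 * An exiting task has no use for a stacktrace and its unwind
	 * state is about to be torn down, so refuse the request.
	 */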
	if (current->flags & PF_EXITING)
		return -EINVAL;

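	/* The per-task cache is allocated lazily, on the first request. */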
	if (!info->cache) {
		info->cache = kzalloc(struct_size(cache, entries, UNWIND_MAX_ENTRIES),
				      GFP_KERNEL);
		if (!info->cache)
			return -ENOMEM;
	}

	cache = info->cache;
	trace->entries = cache->entries;

	if (cache->nr_entries) {
		/*
		 * The user stack has already been unwound in this entry
		 * context.  Skip the unwind and use the cache.
		 */
		trace->nr = cache->nr_entries;
		return 0;
	}

	trace->nr = 0;
	unwind_user(trace, UNWIND_MAX_ENTRIES);

	cache->nr_entries = trace->nr;

	return 0;
}
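
/*
 * Illustrative usage sketch (not part of this file's API): a tracer running
 * in a faultable context, e.g. a task_work callback on return to user space,
 * could consume the interface as below. The function name and the pr_debug()
 * output format are assumptions for illustration only.
 */
#if 0
static void example_dump_user_stack(void)
{
	struct unwind_stacktrace trace;
	unsigned int i;

	/* Fails with -EINVAL for an exiting task, -ENOMEM on allocation failure */
	if (unwind_user_faultable(&trace))
		return;

	for (i = 0; i < trace.nr; i++)
		pr_debug("user frame %u: 0x%lx\n", i, trace.entries[i]);
}
#endif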

void unwind_task_init(struct task_struct *task)
{
	struct unwind_task_info *info = &task->unwind_info;

	memset(info, 0, sizeof(*info));
}

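/*
 * kfree(NULL) is a no-op, so this is safe even if unwind_user_faultable()
 * never allocated the cache for this task.
 */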
void unwind_task_free(struct task_struct *task)
{
	struct unwind_task_info *info = &task->unwind_info;

	kfree(info->cache);
}