xref: /linux/arch/arm/kernel/stacktrace.c (revision ed5c2f5fd10dda07263f79f338a512c0f49f76f5)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>

#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>

#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/*
 * Unwind the current stack frame and store the new register values in the
 * structure passed as argument. Unwinding is equivalent to a function return,
 * hence the new PC value rather than LR should be used for backtrace.
 *
 * With frame pointers enabled, a simple function prologue looks like this:
 *	mov	ip, sp
 *	stmdb	sp!, {fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * A simple function epilogue looks like this:
 *	ldm	sp, {fp, sp, pc}
 *
 * When compiled with clang, pc and sp are not pushed. A simple function
 * prologue looks like this when built with clang:
 *
 *	stmdb	{..., fp, lr}
 *	add	fp, sp, #x
 *	sub	sp, sp, #y
 *
 * A simple function epilogue looks like this when built with clang:
 *
 *	sub	sp, fp, #x
 *	ldm	{..., fp, pc}
 *
 * Note that with frame pointers enabled, even leaf functions have the same
 * prologue and epilogue, therefore we can ignore the LR value in this case.
 */
int notrace unwind_frame(struct stackframe *frame)
{
	unsigned long high, low;
	unsigned long fp = frame->fp;

	/* only go to a higher address on the stack */
	low = frame->sp;
	high = ALIGN(low, THREAD_SIZE);

#ifdef CONFIG_CC_IS_CLANG
	/* check current frame pointer is within bounds */
	if (fp < low + 4 || fp > high - 4)
		return -EINVAL;

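	/*
	 * With clang's layout (see above), fp points at the saved {fp, lr}
	 * pair: [fp] holds the caller's fp and [fp + 4] the return address.
	 * The caller's sp is not saved, so the current fp is reused as the
	 * lower bound when unwinding the next frame.
	 */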
	frame->sp = frame->fp;
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 4));
#else
	/* check current frame pointer is within bounds */
	if (fp < low + 12 || fp > high - 4)
		return -EINVAL;

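	/*
	 * With the APCS layout shown above, fp points at the saved pc;
	 * fp - 4, fp - 8 and fp - 12 hold the return address (lr), the
	 * caller's sp (saved via ip) and the caller's fp respectively.
	 */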
	/* restore the registers from the stack frame */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 12));
	frame->sp = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 8));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp - 4));
#endif
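	/*
	 * If the unwound return address is the kretprobe trampoline, look
	 * up the original return address so the trace shows the real caller.
	 */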
#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(frame->pc))
		frame->pc = kretprobe_find_ret_addr(frame->tsk,
					(void *)frame->fp, &frame->kr_cur);
#endif

	return 0;
}
#endif

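/*
 * Walk the stack, calling fn for every frame. The walk stops when fn
 * returns non-zero or when the next frame can no longer be unwound.
 */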
void notrace walk_stackframe(struct stackframe *frame,
		     int (*fn)(struct stackframe *, void *), void *data)
{
	while (1) {
		int ret;

		if (fn(frame, data))
			break;
		ret = unwind_frame(frame);
		if (ret < 0)
			break;
	}
}
EXPORT_SYMBOL(walk_stackframe);

#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int no_sched_functions;
	unsigned int skip;
};

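/*
 * walk_stackframe() callback: record one return address per frame,
 * honouring the skip count and the optional scheduler-function filter.
 * Returns non-zero once the entry buffer is full, stopping the walk.
 */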
static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	struct pt_regs *regs;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
		return 0;
	if (data->skip) {
		data->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = addr;

	if (trace->nr_entries >= trace->max_entries)
		return 1;

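	/*
	 * If this frame is in the exception entry code, a struct pt_regs
	 * was saved on the stack at frame->sp; record the interrupted pc
	 * as an extra entry, provided the pt_regs fits within the stack.
	 */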
	if (!in_entry_text(frame->pc))
		return 0;

	regs = (struct pt_regs *)frame->sp;
	if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
		return 0;

	trace->entries[trace->nr_entries++] = regs->ARM_pc;

	return trace->nr_entries >= trace->max_entries;
}

/* This must be noinline so that our skip calculation works correctly */
static noinline void __save_stack_trace(struct task_struct *tsk,
	struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = nosched;

	if (tsk != current) {
#ifdef CONFIG_SMP
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		return;
#else
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		/* We don't want this function nor the caller */
		data.skip += 2;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.lr = (unsigned long)__builtin_return_address(0);
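		/*
		 * Seed pc with the address of the local label below so the
		 * walk starts from a known point inside this function; the
		 * two skipped entries above hide it and our caller.
		 */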
here:
		frame.pc = (unsigned long)&&here;
	}
#ifdef CONFIG_KRETPROBES
	frame.kr_cur = NULL;
	frame.tsk = tsk;
#endif

	walk_stackframe(&frame, save_trace, &data);
}

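/*
 * Save a stack trace starting from a saved register state (pt_regs)
 * rather than from the current call frame.
 */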
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = 0;

	frame.fp = regs->ARM_fp;
	frame.sp = regs->ARM_sp;
	frame.lr = regs->ARM_lr;
	frame.pc = regs->ARM_pc;
#ifdef CONFIG_KRETPROBES
	frame.kr_cur = NULL;
	frame.tsk = current;
#endif

	walk_stackframe(&frame, save_trace, &data);
}

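/*
 * Save the stack trace of the given task, filtering out scheduler-internal
 * functions; save_stack_trace() below traces the current task and keeps them.
 */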
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif