xref: /linux/arch/arm/kernel/stacktrace.c (revision 00389c58ffe993782a8ba4bb5a34a102b1f6fe24)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/rethook.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>

#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>

#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/*
 * Unwind the current stack frame and store the new register values in the
 * structure passed as an argument. Unwinding is equivalent to a function
 * return, hence the new PC value, rather than the LR, should be used for the
 * backtrace.
 *
 * With the frame pointer enabled, a simple function prologue looks like this:
 *	mov	ip, sp
 *	stmdb	sp!, {fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * A simple function epilogue looks like this:
 *	ldm	sp, {fp, sp, pc}
 *
 * When compiled with clang, pc and sp are not pushed. A simple function
 * prologue looks like this when built with clang:
 *
 *	stmdb	sp!, {..., fp, lr}
 *	add	fp, sp, #x
 *	sub	sp, sp, #y
 *
 * A simple function epilogue looks like this when built with clang:
 *
 *	sub	sp, fp, #x
 *	ldm	sp!, {..., fp, pc}
 *
 * Note that with the frame pointer enabled, even leaf functions have the same
 * prologue and epilogue; therefore we can ignore the LR value in this case.
 */
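
/*
 * Editor's sketch (not part of the original file): the stack layout implied
 * by the prologues above, as read back by unwind_frame() below.
 *
 * GCC (APCS frame), where fp points at the saved pc slot:
 *	fp		saved pc
 *	fp - 4		saved lr	(used as the new pc when unwinding)
 *	fp - 8		saved ip	(sp at function entry)
 *	fp - 12		caller's fp
 *
 * Clang, where fp points at the saved fp slot:
 *	fp		caller's fp
 *	fp + 4		saved lr	(used as the new pc when unwinding)
 */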
int notrace unwind_frame(struct stackframe *frame)
{
	unsigned long high, low;
	unsigned long fp = frame->fp;

	/* only go to a higher address on the stack */
	low = frame->sp;
	high = ALIGN(low, THREAD_SIZE);
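	/*
	 * Editor's note: ALIGN() gives the top of the current kernel stack
	 * here because kernel stacks are THREAD_SIZE aligned.
	 */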

#ifdef CONFIG_CC_IS_CLANG
	/* check that the current frame pointer is within bounds */
	if (fp < low + 4 || fp > high - 4)
		return -EINVAL;

	frame->sp = frame->fp;
	frame->fp = *(unsigned long *)(fp);
	frame->pc = *(unsigned long *)(fp + 4);
#else
	/* check that the current frame pointer is within bounds */
	if (fp < low + 12 || fp > high - 4)
		return -EINVAL;

	/* restore the registers from the stack frame */
	frame->fp = *(unsigned long *)(fp - 12);
	frame->sp = *(unsigned long *)(fp - 8);
	frame->pc = *(unsigned long *)(fp - 4);
#endif
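	/*
	 * Editor's note: rethook and kretprobes divert a function's return
	 * through a trampoline by rewriting the saved return address. If the
	 * unwound pc is such a trampoline, look up the original return
	 * address so the trace shows the real caller.
	 */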
#ifdef CONFIG_RETHOOK
	if (is_rethook_trampoline(frame->pc))
		frame->pc = rethook_find_ret_addr(frame->tsk, frame->fp,
						  &frame->kr_cur);
#endif
#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(frame->pc))
		frame->pc = kretprobe_find_ret_addr(frame->tsk,
					(void *)frame->fp, &frame->kr_cur);
#endif

	return 0;
}
#endif

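/*
 * Editor's note: walk the stack starting at *frame, calling fn(frame, data)
 * for each frame. A non-zero return from fn, or a frame that cannot be
 * unwound any further, ends the walk.
 */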
void notrace walk_stackframe(struct stackframe *frame,
		     int (*fn)(struct stackframe *, void *), void *data)
{
	while (1) {
		int ret;

		if (fn(frame, data))
			break;
		ret = unwind_frame(frame);
		if (ret < 0)
			break;
	}
}
EXPORT_SYMBOL(walk_stackframe);
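
/*
 * Editor's illustration (not part of the original file): a minimal sketch of
 * how walk_stackframe() can be driven with a custom callback. print_frame()
 * and dump_current_stack() are hypothetical names. With CONFIG_KRETPROBES
 * enabled, kr_cur and tsk would also need initialising, as
 * __save_stack_trace() below does.
 *
 *	static int print_frame(struct stackframe *frame, void *data)
 *	{
 *		pr_info("  %pS\n", (void *)frame->pc);
 *		return 0;
 *	}
 *
 *	static void dump_current_stack(void)
 *	{
 *		struct stackframe frame;
 *
 *		frame.fp = (unsigned long)__builtin_frame_address(0);
 *		frame.sp = current_stack_pointer;
 *		frame.lr = (unsigned long)__builtin_return_address(0);
 *		frame.pc = (unsigned long)dump_current_stack;
 *
 *		walk_stackframe(&frame, print_frame, NULL);
 *	}
 */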

#ifdef CONFIG_STACKTRACE
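/*
 * Editor's note: per-walk state for save_trace(). skip is the number of
 * leading entries to discard; no_sched_functions, when set, drops entries
 * that lie within the scheduler (see in_sched_functions()).
 */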
struct stack_trace_data {
	struct stack_trace *trace;
	unsigned int no_sched_functions;
	unsigned int skip;
};

static int save_trace(struct stackframe *frame, void *d)
{
	struct stack_trace_data *data = d;
	struct stack_trace *trace = data->trace;
	struct pt_regs *regs;
	unsigned long addr = frame->pc;

	if (data->no_sched_functions && in_sched_functions(addr))
		return 0;
	if (data->skip) {
		data->skip--;
		return 0;
	}

	trace->entries[trace->nr_entries++] = addr;

	if (trace->nr_entries >= trace->max_entries)
		return 1;

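	/*
	 * Editor's note: when the walk reaches exception entry code,
	 * frame->sp is assumed to point at the struct pt_regs saved on
	 * entry; the interrupted pc is recorded from there as well, provided
	 * the regs fit within the current stack.
	 */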
	if (!in_entry_text(frame->pc))
		return 0;

	regs = (struct pt_regs *)frame->sp;
	if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
		return 0;

	trace->entries[trace->nr_entries++] = regs->ARM_pc;

	return trace->nr_entries >= trace->max_entries;
}

/* This must be noinline so that our skip calculation works correctly */
static noinline void __save_stack_trace(struct task_struct *tsk,
	struct stack_trace *trace, unsigned int nosched)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = nosched;

	if (tsk != current) {
#ifdef CONFIG_SMP
		/*
		 * What guarantees do we have here that 'tsk' is not
		 * running on another CPU?  For now, ignore it as we
		 * can't guarantee we won't explode.
		 */
		return;
#else
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.lr = 0;		/* recovered from the stack */
		frame.pc = thread_saved_pc(tsk);
#endif
	} else {
		/* We don't want this function or its caller */
		data.skip += 2;
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)__save_stack_trace;
	}
#ifdef CONFIG_KRETPROBES
	frame.kr_cur = NULL;
	frame.tsk = tsk;
#endif

	walk_stackframe(&frame, save_trace, &data);
}

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	struct stack_trace_data data;
	struct stackframe frame;

	data.trace = trace;
	data.skip = trace->skip;
	data.no_sched_functions = 0;

	frame.fp = regs->ARM_fp;
	frame.sp = regs->ARM_sp;
	frame.lr = regs->ARM_lr;
	frame.pc = regs->ARM_pc;
#ifdef CONFIG_KRETPROBES
	frame.kr_cur = NULL;
	frame.tsk = current;
#endif

	walk_stackframe(&frame, save_trace, &data);
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	__save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif