xref: /linux/arch/arm64/kernel/stacktrace.c (revision bc75dffadc063eb46200611cc41d1e2373219e11)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
}
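/*
 * Illustrative sketch (hypothetical caller, not from this file): the
 * function that kunwind_init_from_caller() is inlined into must be a
 * distinct, non-inlined frame, because __builtin_frame_address(1) and
 * __builtin_return_address(0) are resolved relative to it:
 *
 *	noinline void my_unwind_entry(void)
 *	{
 *		struct kunwind_state state;
 *
 *		kunwind_init_from_caller(&state);
 *		// state.common.pc is now the return address into the caller
 *		// of my_unwind_entry(), and state.common.fp is that caller's
 *		// frame record.
 *	}
 *
 * Were my_unwind_entry() itself inlined, frame level 1 would no longer be
 * the intended caller and the unwind would start at the wrong frame. See
 * arch_stack_walk() below for the real in-tree noinline entry point.
 */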

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
}

static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, NULL,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}
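/*
 * Illustrative example (hypothetical call chain): with the function graph
 * tracer active, a traced function's saved lr is replaced, so a raw walk of
 * the frame records would report:
 *
 *	bar -> return_to_handler
 *
 * instead of the real caller. ftrace_graph_ret_addr() uses the fp value as
 * a key to find the matching ret_stack entry and recover the original
 * return address, so the trace reads:
 *
 *	bar -> foo
 *
 * The kretprobe trampoline is seen through in the same way via
 * kretprobe_find_ret_addr().
 */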

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	int err;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	err = unwind_next_frame_record(&state->common);
	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}
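/*
 * For reference (a sketch; this layout comes from the AAPCS64 frame chain,
 * not from a definition in this file): an AArch64 frame record as laid down
 * by compiled kernel code is a pair of 64-bit values at the address held in
 * fp/x29:
 *
 *	struct frame_record {
 *		u64 fp;		// x29: link to the previous frame record
 *		u64 lr;		// x30: return address for this frame
 *	};
 *
 * unwind_next_frame_record() loads this pair to step from record A to
 * record B once the validity checks described above have passed.
 */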

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})
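/*
 * For example, STACKINFO_CPU(irq) in the stacks[] array below expands to:
 *
 *	({
 *		((task == current) && !preemptible())
 *			? stackinfo_get_irq()
 *			: stackinfo_get_unknown();
 *	})
 *
 * so a per-cpu IRQ stack that may not belong to this task is reported as
 * unknown, and the unwinder will not accept addresses on it.
 */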

static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}
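/*
 * Example usage (a sketch of a hypothetical caller; the generic code in
 * kernel/stacktrace.c drives arch_stack_walk() in a similar way): record
 * each return address of the current task until a buffer fills:
 *
 *	struct trace_buf {
 *		unsigned long *entries;
 *		unsigned int nr, max;
 *	};
 *
 *	static bool save_entry(void *cookie, unsigned long pc)
 *	{
 *		struct trace_buf *buf = cookie;
 *
 *		buf->entries[buf->nr++] = pc;
 *		return buf->nr < buf->max;	// false stops the walk
 *	}
 *
 *	...
 *	arch_stack_walk(save_entry, &buf, current, NULL);
 */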

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}
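/*
 * Example (hypothetical caller): print the current task's kernel backtrace
 * at KERN_ERR severity:
 *
 *	dump_backtrace(NULL, NULL, KERN_ERR);
 */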

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}
301