// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

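/*
 * Each unwind step records how the current fp/pc pair was derived: from a
 * regular frame record, from the caller of the function starting the unwind,
 * from a blocked task's saved context, or from the PC/LR of a pt_regs found
 * at an exception boundary. kunwind_next() uses this to pick the next step,
 * and dump_backtrace() uses it to annotate entries.
 */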
enum kunwind_source {
	KUNWIND_SOURCE_UNKNOWN,
	KUNWIND_SOURCE_FRAME,
	KUNWIND_SOURCE_CALLER,
	KUNWIND_SOURCE_TASK,
	KUNWIND_SOURCE_REGS_PC,
	KUNWIND_SOURCE_REGS_LR,
};

union unwind_flags {
	unsigned long	all;
	struct {
		unsigned long	fgraph : 1,
				kretprobe : 1;
	};
};

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @graph_idx:   Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 * @source:      How the current fp/pc pair was most recently derived (see
 *               enum kunwind_source).
 * @flags:       Notes how the return address for the current entry was
 *               recovered; cleared at the start of each unwind step.
 * @regs:        The pt_regs being unwound through, valid while @source is
 *               KUNWIND_SOURCE_REGS_PC, and NULL otherwise.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	enum kunwind_source source;
	union unwind_flags flags;
	struct pt_regs *regs;
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
	state->source = KUNWIND_SOURCE_UNKNOWN;
	state->flags.all = 0;
	state->regs = NULL;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->regs = regs;
	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
	state->source = KUNWIND_SOURCE_REGS_PC;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
	state->source = KUNWIND_SOURCE_CALLER;
}
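
/*
 * Note: arch_stack_walk() and arch_bpf_stack_walk() below are marked noinline
 * and reach this via the __always_inline kunwind_stack_walk(), so an unwind
 * started without regs for the current task begins at their caller.
 */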

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
	state->source = KUNWIND_SOURCE_TASK;
}

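/*
 * The function_graph tracer and kretprobes both work by replacing a saved
 * return address with the address of a trampoline (return_to_handler or the
 * kretprobe trampoline respectively). When such an address is encountered,
 * look up the original return address so the trace reports the real caller.
 */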
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						state->common.pc,
						(void *)state->common.fp);
		if (WARN_ON_ONCE(state->common.pc == orig_pc))
			return -EINVAL;
		state->common.pc = orig_pc;
		state->flags.fgraph = 1;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
		state->flags.kretprobe = 1;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

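/*
 * A frame record flagged as FRAME_META_TYPE_PT_REGS is embedded within a
 * struct pt_regs saved at an exception boundary. Recover the pt_regs from the
 * embedded record and continue the unwind in two steps: first report
 * regs->pc (KUNWIND_SOURCE_REGS_PC), then regs->regs[30]
 * (KUNWIND_SOURCE_REGS_LR), before resuming the frame record walk from
 * regs->regs[29].
 */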
static __always_inline
int kunwind_next_regs_pc(struct kunwind_state *state)
{
	struct stack_info *info;
	unsigned long fp = state->common.fp;
	struct pt_regs *regs;

	regs = container_of((u64 *)fp, struct pt_regs, stackframe.record.fp);

	info = unwind_find_stack(&state->common, (unsigned long)regs, sizeof(*regs));
	if (!info)
		return -EINVAL;

	unwind_consume_stack(&state->common, info, (unsigned long)regs,
			     sizeof(*regs));

	state->regs = regs;
	state->common.pc = regs->pc;
	state->common.fp = regs->regs[29];
	state->source = KUNWIND_SOURCE_REGS_PC;
	return 0;
}

static __always_inline int
kunwind_next_regs_lr(struct kunwind_state *state)
{
	/*
	 * The stack for the regs was consumed by kunwind_next_regs_pc(), so we
	 * cannot consume that again here, but we know the regs are safe to
	 * access.
	 */
	state->common.pc = state->regs->regs[30];
	state->common.fp = state->regs->regs[29];
	state->regs = NULL;
	state->source = KUNWIND_SOURCE_REGS_LR;

	return 0;
}

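/*
 * A frame record whose fp and lr are both zero marks a struct
 * frame_record_meta rather than a normal frame. FRAME_META_TYPE_FINAL marks
 * the task's final frame (the normal end of the trace), while
 * FRAME_META_TYPE_PT_REGS marks an exception boundary with a full pt_regs to
 * unwind through.
 */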
static __always_inline int
kunwind_next_frame_record_meta(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	struct frame_record_meta *meta;
	struct stack_info *info;

	info = unwind_find_stack(&state->common, fp, sizeof(*meta));
	if (!info)
		return -EINVAL;

	meta = (struct frame_record_meta *)fp;
	switch (READ_ONCE(meta->type)) {
	case FRAME_META_TYPE_FINAL:
		if (meta == &task_pt_regs(tsk)->stackframe)
			return -ENOENT;
		WARN_ON_ONCE(1);
		return -EINVAL;
	case FRAME_META_TYPE_PT_REGS:
		return kunwind_next_regs_pc(state);
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}

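/*
 * A standard AAPCS64 frame record is a pair of pointers stored at the address
 * in x29: the caller's frame record address (fp) followed by the return
 * address (lr). The unwinder requires records to be at least 8-byte aligned,
 * and treats an all-zero record as metadata rather than a real frame (see
 * above).
 */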
static __always_inline int
kunwind_next_frame_record(struct kunwind_state *state)
{
	unsigned long fp = state->common.fp;
	struct frame_record *record;
	struct stack_info *info;
	unsigned long new_fp, new_pc;

	if (fp & 0x7)
		return -EINVAL;

	info = unwind_find_stack(&state->common, fp, sizeof(*record));
	if (!info)
		return -EINVAL;

	record = (struct frame_record *)fp;
	new_fp = READ_ONCE(record->fp);
	new_pc = READ_ONCE(record->lr);

	if (!new_fp && !new_pc)
		return kunwind_next_frame_record_meta(state);

	unwind_consume_stack(&state->common, info, fp, sizeof(*record));

	state->common.fp = new_fp;
	state->common.pc = new_pc;
	state->source = KUNWIND_SOURCE_FRAME;

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	int err;

	state->flags.all = 0;

	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:
	case KUNWIND_SOURCE_CALLER:
	case KUNWIND_SOURCE_TASK:
	case KUNWIND_SOURCE_REGS_LR:
		err = kunwind_next_frame_record(state);
		break;
	case KUNWIND_SOURCE_REGS_PC:
		err = kunwind_next_regs_lr(state);
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}

typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

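/*
 * The starting PC may itself be a tracing trampoline address, so recover the
 * real return address before the first entry is reported.
 */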
static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

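/*
 * EFI runtime-services stacks are only accessible when unwinding the current
 * task while it is executing EFI runtime code.
 */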
#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})

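/*
 * Walk the stacks of @task, calling @consume_state for each entry. The stacks
 * array below lists every stack the unwinder is allowed to walk; the common
 * unwind code tracks which portions have already been consumed so that a
 * malformed frame chain cannot cause the walk to loop forever.
 */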
static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}

struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

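/*
 * The frame-pointer unwinder does not recover a stack pointer for each entry,
 * so 0 is passed for @sp below.
 */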
static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}

noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp), void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}

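/*
 * dump_backtrace() annotates each entry with a suffix describing how it was
 * derived: C (caller), T (blocked task), P (regs PC), L (regs LR),
 * U (unknown), plus F when the address was recovered from the function_graph
 * return trampoline and K when recovered from a kretprobe trampoline. An
 * ordinary frame record entry gets no suffix, e.g. (illustrative):
 *
 *	foo+0x10/0x20 (CK)
 */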
static const char *state_source_string(const struct kunwind_state *state)
{
	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:	return NULL;
	case KUNWIND_SOURCE_CALLER:	return "C";
	case KUNWIND_SOURCE_TASK:	return "T";
	case KUNWIND_SOURCE_REGS_PC:	return "P";
	case KUNWIND_SOURCE_REGS_LR:	return "L";
	default:			return "U";
	}
}

static bool dump_backtrace_entry(const struct kunwind_state *state, void *arg)
{
	const char *source = state_source_string(state);
	union unwind_flags flags = state->flags;
	bool has_info = source || flags.all;
	char *loglvl = arg;

	printk("%s %pSb%s%s%s%s%s\n", loglvl,
		(void *)state->common.pc,
		has_info ? " (" : "",
		source ? source : "",
		flags.fgraph ? "F" : "",
		flags.kretprobe ? "K" : "",
		has_info ? ")" : "");

	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	kunwind_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

/*
 * Layout of a frame record on a userspace stack in AArch64 mode.
 */
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
	       stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
				stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct compat_frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */


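/*
 * Walk a userspace frame-pointer chain, reporting the interrupted PC first.
 * This is best-effort: the records are copied with page faults disabled, so
 * an inaccessible or paged-out frame simply ends the walk, as does a
 * misaligned frame pointer or one that does not strictly increase.
 */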
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
					const struct pt_regs *regs)
{
	if (!consume_entry(cookie, regs->pc))
		return;

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];
		while (tail && !((unsigned long)tail & 0x7))
			tail = unwind_user_frame(tail, cookie, consume_entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
		while (tail && !((unsigned long)tail & 0x3))
			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
	}
}