xref: /linux/arch/arm64/kernel/stacktrace.c (revision 8cbd01ba9c38eb16f3a572300da486ac544519b7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

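/*
 * Where the current unwind state's PC and FP came from. kunwind_next()
 * refuses to take a step from an unknown source, and dump_backtrace() uses
 * the source to annotate entries (see state_source_string()).
 */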
enum kunwind_source {
	KUNWIND_SOURCE_UNKNOWN,
	KUNWIND_SOURCE_FRAME,
	KUNWIND_SOURCE_CALLER,
	KUNWIND_SOURCE_TASK,
	KUNWIND_SOURCE_REGS_PC,
};

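/*
 * Per-step flags, reset at the start of each kunwind_next() step. A flag is
 * set when the corresponding mechanism (the function graph tracer or
 * kretprobes) had replaced the return address and the original value was
 * recovered.
 */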
union unwind_flags {
	unsigned long	all;
	struct {
		unsigned long	fgraph : 1,
				kretprobe : 1;
	};
};

/*
 * Kernel unwind state
 *
 * @common:      Common unwind state.
 * @task:        The task being unwound.
 * @graph_idx:   Used by ftrace_graph_ret_addr() for optimized stack unwinding.
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 * @source:      Where the current PC and FP came from (see enum
 *               kunwind_source).
 * @flags:       Flags noting how the return address for the current entry was
 *               recovered.
 * @regs:        The pt_regs the current PC was taken from, or NULL when the
 *               PC came from a regular frame record.
 */
struct kunwind_state {
	struct unwind_state common;
	struct task_struct *task;
	int graph_idx;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	enum kunwind_source source;
	union unwind_flags flags;
	struct pt_regs *regs;
};

static __always_inline void
kunwind_init(struct kunwind_state *state,
	     struct task_struct *task)
{
	unwind_init_common(&state->common);
	state->task = task;
	state->source = KUNWIND_SOURCE_UNKNOWN;
	state->flags.all = 0;
	state->regs = NULL;
}

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
kunwind_init_from_regs(struct kunwind_state *state,
		       struct pt_regs *regs)
{
	kunwind_init(state, current);

	state->regs = regs;
	state->common.fp = regs->regs[29];
	state->common.pc = regs->pc;
	state->source = KUNWIND_SOURCE_REGS_PC;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void
kunwind_init_from_caller(struct kunwind_state *state)
{
	kunwind_init(state, current);

	state->common.fp = (unsigned long)__builtin_frame_address(1);
	state->common.pc = (unsigned long)__builtin_return_address(0);
	state->source = KUNWIND_SOURCE_CALLER;
}

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
kunwind_init_from_task(struct kunwind_state *state,
		       struct task_struct *task)
{
	kunwind_init(state, task);

	state->common.fp = thread_saved_fp(task);
	state->common.pc = thread_saved_pc(task);
	state->source = KUNWIND_SOURCE_TASK;
}

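/*
 * The function graph tracer and kretprobes both work by replacing the return
 * address in the frame record with a trampoline. Where such a trampoline is
 * found, recover the original return address so that the reported trace
 * reflects the real call chain.
 */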
static __always_inline int
kunwind_recover_return_address(struct kunwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (state->task->ret_stack &&
	    (state->common.pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		orig_pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
						state->common.pc,
						(void *)state->common.fp);
		if (state->common.pc == orig_pc) {
			WARN_ON_ONCE(state->task == current);
			return -EINVAL;
		}
		state->common.pc = orig_pc;
		state->flags.fgraph = 1;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->common.pc)) {
		unsigned long orig_pc;
		orig_pc = kretprobe_find_ret_addr(state->task,
						  (void *)state->common.fp,
						  &state->kr_cur);
		state->common.pc = orig_pc;
		state->flags.kretprobe = 1;
	}
#endif /* CONFIG_KRETPROBES */

	return 0;
}

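/*
 * Unwind across an exception boundary. Here the FP points at a frame record
 * embedded within a pt_regs, so the pt_regs can be recovered with
 * container_of(), and the unwind continues from the exception's PC and FP.
 */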
static __always_inline
int kunwind_next_regs_pc(struct kunwind_state *state)
{
	struct stack_info *info;
	unsigned long fp = state->common.fp;
	struct pt_regs *regs;

	regs = container_of((u64 *)fp, struct pt_regs, stackframe.record.fp);

	info = unwind_find_stack(&state->common, (unsigned long)regs, sizeof(*regs));
	if (!info)
		return -EINVAL;

	unwind_consume_stack(&state->common, info, (unsigned long)regs,
			     sizeof(*regs));

	state->regs = regs;
	state->common.pc = regs->pc;
	state->common.fp = regs->regs[29];
	state->source = KUNWIND_SOURCE_REGS_PC;
	return 0;
}

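/*
 * An all-zero frame record (fp == 0, lr == 0) marks a metadata record rather
 * than a real frame. The metadata's type distinguishes the final record at
 * the base of the task stack from a record embedded within a pt_regs at an
 * exception boundary.
 */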
static __always_inline int
kunwind_next_frame_record_meta(struct kunwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->common.fp;
	struct frame_record_meta *meta;
	struct stack_info *info;

	info = unwind_find_stack(&state->common, fp, sizeof(*meta));
	if (!info)
		return -EINVAL;

	meta = (struct frame_record_meta *)fp;
	switch (READ_ONCE(meta->type)) {
	case FRAME_META_TYPE_FINAL:
		if (meta == &task_pt_regs(tsk)->stackframe)
			return -ENOENT;
		WARN_ON_ONCE(tsk == current);
		return -EINVAL;
	case FRAME_META_TYPE_PT_REGS:
		return kunwind_next_regs_pc(state);
	default:
		WARN_ON_ONCE(tsk == current);
		return -EINVAL;
	}
}

static __always_inline int
kunwind_next_frame_record(struct kunwind_state *state)
{
	unsigned long fp = state->common.fp;
	struct frame_record *record;
	struct stack_info *info;
	unsigned long new_fp, new_pc;

	if (fp & 0x7)
		return -EINVAL;

	info = unwind_find_stack(&state->common, fp, sizeof(*record));
	if (!info)
		return -EINVAL;

	record = (struct frame_record *)fp;
	new_fp = READ_ONCE(record->fp);
	new_pc = READ_ONCE(record->lr);

	if (!new_fp && !new_pc)
		return kunwind_next_frame_record_meta(state);

	unwind_consume_stack(&state->common, info, fp, sizeof(*record));

	state->regs = NULL;
	state->common.fp = new_fp;
	state->common.pc = new_pc;
	state->source = KUNWIND_SOURCE_FRAME;

	return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
kunwind_next(struct kunwind_state *state)
{
	int err;

	state->flags.all = 0;

	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:
	case KUNWIND_SOURCE_CALLER:
	case KUNWIND_SOURCE_TASK:
	case KUNWIND_SOURCE_REGS_PC:
		err = kunwind_next_frame_record(state);
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		return err;

	state->common.pc = ptrauth_strip_kernel_insn_pac(state->common.pc);

	return kunwind_recover_return_address(state);
}

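/*
 * A consume_state callback is invoked once per unwound entry, and may return
 * false to stop the unwind early.
 */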
typedef bool (*kunwind_consume_fn)(const struct kunwind_state *state, void *cookie);

static __always_inline void
do_kunwind(struct kunwind_state *state, kunwind_consume_fn consume_state,
	   void *cookie)
{
	if (kunwind_recover_return_address(state))
		return;

	while (1) {
		int ret;

		if (!consume_state(state, cookie))
			break;
		ret = kunwind_next(state);
		if (ret < 0)
			break;
	}
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)					\
	({							\
		((task == current) && !preemptible())		\
			? stackinfo_get_##name()		\
			: stackinfo_get_unknown();		\
	})

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)					\
	({							\
		((task == current) && in_nmi())			\
			? stackinfo_get_sdei_##name()		\
			: stackinfo_get_unknown();		\
	})

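/*
 * The EFI runtime stack is only accessible when unwinding the current task
 * while it is executing EFI runtime services.
 */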
#define STACKINFO_EFI						\
	({							\
		((task == current) && current_in_efi())		\
			? stackinfo_get_efi()			\
			: stackinfo_get_unknown();		\
	})

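/*
 * The common entry point for kernel unwinds: build the table of stacks the
 * unwind may traverse, initialise the unwind state from the regs, the
 * caller, or a blocked task, then walk the stack.
 */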
static __always_inline void
kunwind_stack_walk(kunwind_consume_fn consume_state,
		   void *cookie, struct task_struct *task,
		   struct pt_regs *regs)
{
	struct stack_info stacks[] = {
		stackinfo_get_task(task),
		STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
		STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
		STACKINFO_SDEI(normal),
		STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
		STACKINFO_EFI,
#endif
	};
	struct kunwind_state state = {
		.common = {
			.stacks = stacks,
			.nr_stacks = ARRAY_SIZE(stacks),
		},
	};

	if (regs) {
		if (task != current)
			return;
		kunwind_init_from_regs(&state, regs);
	} else if (task == current) {
		kunwind_init_from_caller(&state);
	} else {
		kunwind_init_from_task(&state, task);
	}

	do_kunwind(&state, consume_state, cookie);
}

struct kunwind_consume_entry_data {
	stack_trace_consume_fn consume_entry;
	void *cookie;
};

static __always_inline bool
arch_kunwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct kunwind_consume_entry_data *data = cookie;
	return data->consume_entry(data->cookie, state->common.pc);
}

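/*
 * The arch hook backing the generic stacktrace API (stack_trace_save() and
 * friends). This must be noinline so that kunwind_init_from_caller(), which
 * is inlined into it via kunwind_stack_walk(), sees a real caller frame.
 */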
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct kunwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_kunwind_consume_entry, &data, task, regs);
}

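/*
 * Unwind on behalf of BPF (e.g. the bpf_throw() exception machinery), which
 * also wants the FP for each entry. The unwinder does not track the SP, so
 * zero is reported in its place.
 */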
struct bpf_unwind_consume_entry_data {
	bool (*consume_entry)(void *cookie, u64 ip, u64 sp, u64 fp);
	void *cookie;
};

static bool
arch_bpf_unwind_consume_entry(const struct kunwind_state *state, void *cookie)
{
	struct bpf_unwind_consume_entry_data *data = cookie;

	/* The unwinder does not track the SP; report 0 for it. */
	return data->consume_entry(data->cookie, state->common.pc, 0,
				   state->common.fp);
}

noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u64 ip, u64 sp,
								u64 fp), void *cookie)
{
	struct bpf_unwind_consume_entry_data data = {
		.consume_entry = consume_entry,
		.cookie = cookie,
	};

	kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}

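/*
 * Map an unwind source to the single-character annotation used when dumping
 * a backtrace: "C" (caller), "T" (blocked task), "P" (pt_regs PC), or "U"
 * (unknown). Ordinary frame records are not annotated.
 */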
static const char *state_source_string(const struct kunwind_state *state)
{
	switch (state->source) {
	case KUNWIND_SOURCE_FRAME:	return NULL;
	case KUNWIND_SOURCE_CALLER:	return "C";
	case KUNWIND_SOURCE_TASK:	return "T";
	case KUNWIND_SOURCE_REGS_PC:	return "P";
	default:			return "U";
	}
}

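/*
 * Print a single trace entry, annotating the unwind source and any recovered
 * return address. For example (illustrative only; the symbol and offsets are
 * placeholders), an entry unwound from the caller whose return address was
 * recovered from the function graph tracer would print as:
 *
 *	 example_function+0x20/0x48 (CF)
 */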
static bool dump_backtrace_entry(const struct kunwind_state *state, void *arg)
{
	const char *source = state_source_string(state);
	union unwind_flags flags = state->flags;
	bool has_info = source || flags.all;
	char *loglvl = arg;

	printk("%s %pSb%s%s%s%s%s\n", loglvl,
		(void *)state->common.pc,
		has_info ? " (" : "",
		source ? source : "",
		flags.fgraph ? "F" : "",
		flags.kretprobe ? "K" : "",
		has_info ? ")" : "");

	return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	kunwind_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	/*
	 * The barrier() prevents the compiler from turning the call above
	 * into a tail call, presumably so that show_stack() itself remains
	 * visible in any nested trace.
	 */
	barrier();
}

/*
 * The layout of a userspace frame record in AArch64 mode.
 */
struct frame_tail {
	struct frame_tail	__user *fp;
	unsigned long		lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
unwind_user_frame(struct frame_tail __user *tail, void *cookie,
	       stack_trace_consume_fn consume_entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	if (!consume_entry(cookie, lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
unwind_compat_user_frame(struct compat_frame_tail __user *tail, void *cookie,
				stack_trace_consume_fn consume_entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct compat_frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	if (!consume_entry(cookie, buftail.lr))
		return NULL;

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

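/*
 * The arch hook for capturing a userspace callchain: report the exception
 * PC, then walk the user frame records, using the AArch32 compat layout when
 * the task was in compat mode.
 */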
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
					const struct pt_regs *regs)
{
	if (!consume_entry(cookie, regs->pc))
		return;

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];
		while (tail && !((unsigned long)tail & 0x7))
			tail = unwind_user_frame(tail, cookie, consume_entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
		while (tail && !((unsigned long)tail & 0x3))
			tail = unwind_compat_user_frame(tail, cookie, consume_entry);
#endif
	}
}
589