xref: /linux/arch/arm64/kernel/stacktrace.c (revision cfeafd94668910334a77c9437a18212baf9f5610)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/pointer_auth.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and to which it is
 *               no longer valid to unwind.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
 *               of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 *
 * @task:        The task being unwound.
 */
struct unwind_state {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	struct task_struct *task;
};
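
/*
 * Background note (not from the original source): with the AAPCS frame
 * pointer ABI used by arm64, each frame record this unwinder follows is a
 * pair of 64-bit words on the stack, pointed to by x29 (fp):
 *
 *	fp[0]: the previous frame record's fp (the next link in the chain)
 *	fp[8]: the saved lr, i.e. the return address for this frame
 *
 * This is why unwind_next() below checks that 16 bytes at fp are on an
 * accessible stack and then loads the words at fp and fp + 8.
 */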

static void unwind_init_common(struct unwind_state *state,
			       struct task_struct *task)
{
	state->task = task;
#ifdef CONFIG_KRETPROBES
	state->kr_cur = NULL;
#endif

	/*
	 * Prime the first unwind.
	 *
	 * In unwind_next() we'll check that the FP points to a valid stack,
	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
	 * treated as a transition to whichever stack that happens to be. The
	 * prev_fp value won't be used, but we set it to 0 such that it is
	 * definitely not an accessible stack address.
	 */
	bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
	state->prev_fp = 0;
	state->prev_type = STACK_TYPE_UNKNOWN;
}
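
/*
 * Note (not from the original source): unwind_init_common() only primes the
 * bookkeeping above; state->fp and state->pc are deliberately left to the
 * unwind_init_from_*() helpers below, since the correct starting values
 * depend on where the unwind begins (a pt_regs, the caller, or a blocked
 * task).
 */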

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static inline void unwind_init_from_regs(struct unwind_state *state,
					 struct pt_regs *regs)
{
	unwind_init_common(state, current);

	state->fp = regs->regs[29];
	state->pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline.
 */
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
{
	unwind_init_common(state, current);

	state->fp = (unsigned long)__builtin_frame_address(1);
	state->pc = (unsigned long)__builtin_return_address(0);
}
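
/*
 * Illustrative note (an assumption, not from the original source): the
 * noinline requirement exists because __builtin_frame_address(1) and
 * __builtin_return_address(0) are evaluated relative to the function this
 * helper is inlined into. For example, arch_stack_walk() below is marked
 * noinline so that, when it uses unwind_init_from_caller(), the unwind
 * reliably starts at arch_stack_walk()'s caller rather than at a frame
 * created (or elided) by inlining.
 */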

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static inline void unwind_init_from_task(struct unwind_state *state,
					 struct task_struct *task)
{
	unwind_init_common(state, task);

	state->fp = thread_saved_fp(task);
	state->pc = thread_saved_pc(task);
}
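
/*
 * Note (assumption based on <asm/processor.h>, not stated in this file):
 * thread_saved_fp() and thread_saved_pc() return the fp/lr values that
 * cpu_switch_to() stored in the task's thread.cpu_context when it was
 * scheduled out, which is why the resulting trace begins at the caller of
 * cpu_switch_to().
 */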

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static int notrace unwind_next(struct unwind_state *state)
{
	struct task_struct *tsk = state->task;
	unsigned long fp = state->fp;
	struct stack_info info;

	/* Final frame; nothing to unwind */
	if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
		return -ENOENT;

	if (fp & 0x7)
		return -EINVAL;

	if (!on_accessible_stack(tsk, fp, 16, &info))
		return -EINVAL;

	if (test_bit(info.type, state->stacks_done))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == state->prev_type) {
		if (fp <= state->prev_fp)
			return -EINVAL;
	} else {
		__set_bit(state->prev_type, state->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_next() invocation.
	 */
	state->fp = READ_ONCE(*(unsigned long *)(fp));
	state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
	state->prev_fp = fp;
	state->prev_type = info.type;

	state->pc = ptrauth_strip_insn_pac(state->pc);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
		(state->pc == (unsigned long)return_to_handler)) {
		unsigned long orig_pc;
		/*
		 * This is a case where the function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it with the original value.
		 */
		orig_pc = ftrace_graph_ret_addr(tsk, NULL, state->pc,
						(void *)state->fp);
		if (WARN_ON_ONCE(state->pc == orig_pc))
			return -EINVAL;
		state->pc = orig_pc;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
	if (is_kretprobe_trampoline(state->pc))
		state->pc = kretprobe_find_ret_addr(tsk, (void *)state->fp, &state->kr_cur);
#endif

	return 0;
}
NOKPROBE_SYMBOL(unwind_next);
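
/*
 * Worked example (not from the original source): suppose an interrupt
 * arrives while a task runs on its task stack, so the handler executes on
 * the IRQ stack. An unwind started from the handler walks records on the
 * IRQ stack (each at a strictly higher fp than the last), then crosses to
 * the task stack. At that transition the IRQ stack's bit is set in
 * stacks_done, so a corrupted record that later points back into the IRQ
 * stack makes unwind_next() fail with -EINVAL rather than loop forever.
 */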

static void notrace unwind(struct unwind_state *state,
			   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(unwind);

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
	char *loglvl = arg;
	printk("%s %pSb\n", loglvl, (void *)where);
	return true;
}
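
/*
 * Note (assumption about the printk format, not stated here): the "b"
 * suffix in %pSb asks printk to append the module build ID after the
 * symbol for addresses within modules, so post-mortem tooling can match
 * the trace against the exact module binary.
 */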

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs && user_mode(regs))
		return;

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	printk("%sCall trace:\n", loglvl);
	arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

	put_task_stack(tsk);
}
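
/*
 * Note (not from the original source): the try_get_task_stack() /
 * put_task_stack() pair above pins the target task's stack so it cannot be
 * freed while arch_stack_walk() is reading frame records from it; if the
 * stack is already gone, the backtrace is skipped.
 */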

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct unwind_state state;

	if (regs) {
		if (task != current)
			return;
		unwind_init_from_regs(&state, regs);
	} else if (task == current) {
		unwind_init_from_caller(&state);
	} else {
		unwind_init_from_task(&state, task);
	}

	unwind(&state, consume_entry, cookie);
}
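
/*
 * Illustrative sketch (not part of the original file): how a generic caller
 * might drive arch_stack_walk() with a custom consumer. The struct and
 * function names below are hypothetical; the in-tree equivalent lives in
 * kernel/stacktrace.c behind CONFIG_ARCH_STACKWALK.
 *
 *	struct trace_buf {
 *		unsigned long *entries;
 *		unsigned int len, max;
 *	};
 *
 *	static bool save_entry(void *cookie, unsigned long pc)
 *	{
 *		struct trace_buf *buf = cookie;
 *
 *		if (buf->len >= buf->max)
 *			return false;		// stop the unwind early
 *		buf->entries[buf->len++] = pc;
 *		return true;			// keep unwinding
 *	}
 *
 * A trace of the current task would then be:
 *
 *	arch_stack_walk(save_entry, &buf, current, NULL);
 *
 * and a trace starting from an exception's register state:
 *
 *	arch_stack_walk(save_entry, &buf, current, regs);
 */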
278