xref: /linux/arch/riscv/kernel/stacktrace.c (revision 52a5a22d8afe3bd195f7b470c7535c63717f5ff7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 ARM Limited
 * Copyright (C) 2014 Regents of the University of California
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>

#include <asm/stacktrace.h>

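/*
 * Two flavours of the kernel unwinder are provided below: with
 * CONFIG_FRAME_POINTER the chain of saved (fp, ra) pairs is followed
 * frame by frame; without it the stack is scanned conservatively for
 * values that look like kernel text addresses.
 */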
#ifdef CONFIG_FRAME_POINTER

extern asmlinkage void handle_exception(void);
extern unsigned long ret_from_exception_end;

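/*
 * A candidate frame pointer is plausible only if it is 8-byte aligned and
 * lies within the current stack: above the frame record being unwound and
 * no higher than the top of the stack.
 */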
static inline int fp_is_valid(unsigned long fp, unsigned long sp)
{
	unsigned long low, high;

	low = sp + sizeof(struct stackframe);
	high = ALIGN(sp, THREAD_SIZE);

	return !(fp < low || fp > high || fp & 0x07);
}

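/*
 * Walk the frame-pointer chain and call fn() for every return address
 * found, until fn() returns false, the frame pointer stops looking valid,
 * or a non-text address is reached.  The walk starts from regs when
 * supplied, from the current context when task is NULL or current, and
 * otherwise from the callee-saved state of a task blocked in __switch_to.
 */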
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long fp, sp, pc;
	int graph_idx = 0;
	int level = 0;

	if (regs) {
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_stack_pointer;
		pc = (unsigned long)walk_stackframe;
		level = -1;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	for (;;) {
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
			break;

		if (unlikely(!fp_is_valid(fp, sp)))
			break;

		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
			/* We hit a function where ra is not saved on the stack */
			fp = frame->ra;
			pc = regs->ra;
		} else {
			fp = frame->fp;
			pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
						   &frame->ra);
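			/*
			 * If the return address lands in the exception
			 * entry/return code, a struct pt_regs is expected on
			 * the stack here; report the exception address and
			 * then continue from the epc/s0 saved in it.
			 */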
			if (pc >= (unsigned long)handle_exception &&
			    pc < (unsigned long)&ret_from_exception_end) {
				if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
					break;

				pc = ((struct pt_regs *)sp)->epc;
				fp = ((struct pt_regs *)sp)->s0;
			}
		}
	}
}

#else /* !CONFIG_FRAME_POINTER */

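/*
 * Without frame pointers there is no reliable frame chain to follow, so
 * scan the stack word by word and report every value that looks like a
 * kernel text address.  The result is best effort and may include stale
 * return addresses.
 */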
void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		sp = current_stack_pointer;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	if (unlikely(sp & 0x7))
		return;

	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
			break;
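		/*
		 * Pull the next word off the stack; back up by 4 so the
		 * reported address points into the calling instruction
		 * rather than at the return target.
		 */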
		pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
	}
}

#endif /* CONFIG_FRAME_POINTER */

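/* Callback for dump_backtrace(): print one symbolized trace entry. */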
static bool print_trace_address(void *arg, unsigned long pc)
{
	const char *loglvl = arg;

	print_ip_sym(loglvl, pc);
	return true;
}

noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
		    const char *loglvl)
{
	walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	pr_cont("%sCall Trace:\n", loglvl);
	dump_backtrace(NULL, task, loglvl);
}

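/*
 * Callback for __get_wchan(): record the first return address that is not
 * part of the scheduler and stop the walk.
 */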
static bool save_wchan(void *arg, unsigned long pc)
{
	if (!in_sched_functions(pc)) {
		unsigned long *p = arg;
		*p = pc;
		return false;
	}
	return true;
}

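/*
 * Return the address a blocked task is sleeping at, i.e. the first frame
 * on its stack outside the scheduler, or 0 if the stack is not available.
 */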
unsigned long __get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;

	if (!try_get_task_stack(task))
		return 0;
	walk_stackframe(task, NULL, save_wchan, &pc);
	put_task_stack(task);
	return pc;
}

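/*
 * Arch back end for the generic stacktrace code (CONFIG_ARCH_STACKWALK):
 * hand every kernel return address to consume_entry().
 */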
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	walk_stackframe(task, regs, consume_entry, cookie);
}

/*
 * Get the return address for a single stackframe and return the frame
 * pointer of the next frame, or 0 to stop the unwind.
 */
static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
				       void *cookie, unsigned long fp,
				       unsigned long reg_ra)
{
	struct stackframe buftail;
	unsigned long ra = 0;
	unsigned long __user *user_frame_tail =
		(unsigned long __user *)(fp - sizeof(struct stackframe));

	/* Check accessibility of the struct stackframe just below fp */
	if (!access_ok(user_frame_tail, sizeof(buftail)))
		return 0;
	if (__copy_from_user_inatomic(&buftail, user_frame_tail,
				      sizeof(buftail)))
		return 0;

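	/* Prefer the live ra for the topmost frame over the saved one. */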
	ra = reg_ra ? : buftail.ra;

	fp = buftail.fp;
	if (!ra || !consume_entry(cookie, ra))
		return 0;

	return fp;
}

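/*
 * Unwind a user stack: report epc first, then follow the frame-pointer
 * chain in user memory.  The topmost frame takes its return address from
 * regs->ra since the interrupted function may not have saved ra yet.
 */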
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	unsigned long fp = regs->s0;

	if (!consume_entry(cookie, regs->epc))
		return;

	fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
	while (fp && !(fp & 0x7))
		fp = unwind_user_frame(consume_entry, cookie, fp, 0);
}