xref: /linux/arch/riscv/kernel/stacktrace.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2008 ARM Limited
4  * Copyright (C) 2014 Regents of the University of California
5  */
6 
7 #include <linux/export.h>
8 #include <linux/kallsyms.h>
9 #include <linux/sched.h>
10 #include <linux/sched/debug.h>
11 #include <linux/sched/task_stack.h>
12 #include <linux/stacktrace.h>
13 #include <linux/ftrace.h>
14 
15 #include <asm/stacktrace.h>
16 
17 #ifdef CONFIG_FRAME_POINTER
18 
19 extern asmlinkage void handle_exception(void);
20 
21 static inline int fp_is_valid(unsigned long fp, unsigned long sp)
22 {
23 	unsigned long low, high;
24 
25 	low = sp + sizeof(struct stackframe);
26 	high = ALIGN(sp, THREAD_SIZE);
27 
28 	return !(fp < low || fp > high || fp & 0x07);
29 }
30 
/*
 * Walk a kernel stack by following the frame-pointer chain, invoking @fn
 * once per return address until @fn returns false, the chain ends, or a
 * frame pointer fails validation.
 *
 * @task: task whose stack to walk; NULL means the current task
 * @regs: trap register state to start from, or NULL to derive a starting
 *        point from @task
 * @fn:   per-PC callback; returning false terminates the walk
 * @arg:  opaque cookie forwarded to @fn
 */
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long fp, sp, pc;
	int graph_idx = 0;
	int level = 0;

	if (regs) {
		/* Start from the captured trap frame */
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		/* Start from our own frame ... */
		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_stack_pointer;
		pc = (unsigned long)walk_stackframe;
		/* ... but level = -1 suppresses reporting this frame itself */
		level = -1;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	for (;;) {
		struct stackframe *frame;

		/* Stop on a non-text PC; skip the callback while level < 0 */
		if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
			break;

		if (unlikely(!fp_is_valid(fp, sp)))
			break;

		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
			/* We hit function where ra is not saved on the stack */
			fp = frame->ra;
			pc = regs->ra;
		} else {
			fp = frame->fp;
			/* Undo any ftrace return-address substitution */
			pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
						   &frame->ra);
			if (pc == (unsigned long)handle_exception) {
				/* Report the exception entry itself, too */
				if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
					break;

				/*
				 * Cross the exception boundary: the pt_regs
				 * saved at the top of this frame hold the
				 * interrupted context's pc and frame pointer.
				 */
				pc = ((struct pt_regs *)sp)->epc;
				fp = ((struct pt_regs *)sp)->s0;
			}
		}

	}
}
85 
86 #else /* !CONFIG_FRAME_POINTER */
87 
88 void notrace walk_stackframe(struct task_struct *task,
89 	struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
90 {
91 	unsigned long sp, pc;
92 	unsigned long *ksp;
93 
94 	if (regs) {
95 		sp = user_stack_pointer(regs);
96 		pc = instruction_pointer(regs);
97 	} else if (task == NULL || task == current) {
98 		sp = current_stack_pointer;
99 		pc = (unsigned long)walk_stackframe;
100 	} else {
101 		/* task blocked in __switch_to */
102 		sp = task->thread.sp;
103 		pc = task->thread.ra;
104 	}
105 
106 	if (unlikely(sp & 0x7))
107 		return;
108 
109 	ksp = (unsigned long *)sp;
110 	while (!kstack_end(ksp)) {
111 		if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
112 			break;
113 		pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
114 	}
115 }
116 
117 #endif /* CONFIG_FRAME_POINTER */
118 
119 static bool print_trace_address(void *arg, unsigned long pc)
120 {
121 	const char *loglvl = arg;
122 
123 	print_ip_sym(loglvl, pc);
124 	return true;
125 }
126 
/*
 * Print a backtrace for @task (or for the trap context in @regs, if
 * non-NULL) at printk log level @loglvl.  noinline keeps this function
 * out of the reported trace's inlined callers.
 */
noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
		    const char *loglvl)
{
	walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}
132 
/*
 * Arch hook for the generic show_stack(): print a "Call Trace:" header
 * and dump @task's backtrace at log level @loglvl.
 *
 * NOTE(review): @sp is accepted for interface compatibility but unused
 * here — the walk always derives its own starting stack pointer.
 */
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	pr_cont("%sCall Trace:\n", loglvl);
	dump_backtrace(NULL, task, loglvl);
}
138 
139 static bool save_wchan(void *arg, unsigned long pc)
140 {
141 	if (!in_sched_functions(pc)) {
142 		unsigned long *p = arg;
143 		*p = pc;
144 		return false;
145 	}
146 	return true;
147 }
148 
149 unsigned long __get_wchan(struct task_struct *task)
150 {
151 	unsigned long pc = 0;
152 
153 	if (!try_get_task_stack(task))
154 		return 0;
155 	walk_stackframe(task, NULL, save_wchan, &pc);
156 	put_task_stack(task);
157 	return pc;
158 }
159 
/*
 * Arch backend for the generic stacktrace API: feed each return address
 * of @task's (or @regs') stack to @consume_entry with @cookie.  noinstr
 * keeps instrumentation out of this path so it is safe to call from
 * low-level entry code.
 */
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	walk_stackframe(task, regs, consume_entry, cookie);
}
165 
166 /*
167  * Get the return address for a single stackframe and return a pointer to the
168  * next frame tail.
169  */
170 static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
171 				       void *cookie, unsigned long fp,
172 				       unsigned long reg_ra)
173 {
174 	struct stackframe buftail;
175 	unsigned long ra = 0;
176 	unsigned long __user *user_frame_tail =
177 		(unsigned long __user *)(fp - sizeof(struct stackframe));
178 
179 	/* Check accessibility of one struct frame_tail beyond */
180 	if (!access_ok(user_frame_tail, sizeof(buftail)))
181 		return 0;
182 	if (__copy_from_user_inatomic(&buftail, user_frame_tail,
183 				      sizeof(buftail)))
184 		return 0;
185 
186 	ra = reg_ra ? : buftail.ra;
187 
188 	fp = buftail.fp;
189 	if (!ra || !consume_entry(cookie, ra))
190 		return 0;
191 
192 	return fp;
193 }
194 
195 void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
196 			  const struct pt_regs *regs)
197 {
198 	unsigned long fp = 0;
199 
200 	fp = regs->s0;
201 	if (!consume_entry(cookie, regs->epc))
202 		return;
203 
204 	fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
205 	while (fp && !(fp & 0x7))
206 		fp = unwind_user_frame(consume_entry, cookie, fp, 0);
207 }
208