xref: /linux/arch/riscv/kernel/stacktrace.c (revision 225a97d6d45456a7627633da09cb842a43ef1b85)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 ARM Limited
 * Copyright (C) 2014 Regents of the University of California
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>

#include <asm/stacktrace.h>

#ifdef CONFIG_FRAME_POINTER

/*
 * This disables KASAN checking when reading a value from another task's stack,
 * since the other task could be running on another CPU and could have poisoned
 * the stack in the meantime.
 */
#define READ_ONCE_TASK_STACK(task, x)			\
({							\
	unsigned long val;				\
	unsigned long addr = x;				\
	if ((task) == current)				\
		val = READ_ONCE(addr);			\
	else						\
		val = READ_ONCE_NOCHECK(addr);		\
	val;						\
})
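
/*
 * Illustrative use (as in the unwinder below):
 *
 *	fp = READ_ONCE_TASK_STACK(task, frame->fp);
 *
 * Reads from a possibly-running remote task's stack skip the KASAN
 * check, while reads from the current task's stack remain fully checked.
 */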
extern asmlinkage void handle_exception(void);
extern unsigned long ret_from_exception_end;

static inline int fp_is_valid(unsigned long fp, unsigned long sp)
{
	unsigned long low, high;

	low = sp + sizeof(struct stackframe);
	high = ALIGN(sp, THREAD_SIZE);

	return !(fp < low || fp > high || fp & 0x07);
}
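
/*
 * Note: struct stackframe (see asm/stacktrace.h) is the {fp, ra} pair
 * that the function prologue stores immediately below the frame pointer,
 * which is why a valid fp must lie at least sizeof(struct stackframe)
 * above sp, within the current stack, and be 8-byte aligned.
 */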

void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long fp, sp, pc;
	int graph_idx = 0;
	int level = 0;

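	/*
	 * Pick the starting state: an exception frame (regs), the current
	 * task (unwind from this very call), or a task sleeping in
	 * __switch_to(), whose callee-saved fp/sp/ra live in its thread
	 * struct.
	 */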
	if (regs) {
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_stack_pointer;
		pc = (unsigned long)walk_stackframe;
		level = -1;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	for (;;) {
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
			break;

		if (unlikely(!fp_is_valid(fp, sp)))
			break;

		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
			/* We hit a function where ra is not yet saved on the stack */
			fp = frame->ra;
			pc = regs->ra;
		} else {
			fp = READ_ONCE_TASK_STACK(task, frame->fp);
			pc = READ_ONCE_TASK_STACK(task, frame->ra);
			pc = ftrace_graph_ret_addr(current, &graph_idx, pc,
						   &frame->ra);
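			/*
			 * If the return address points into the exception
			 * entry text, a struct pt_regs was saved at sp:
			 * report the entry address, then continue unwinding
			 * from the interrupted context's epc/s0.
			 */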
			if (pc >= (unsigned long)handle_exception &&
			    pc < (unsigned long)&ret_from_exception_end) {
				if (unlikely(!fn(arg, pc)))
					break;

				pc = ((struct pt_regs *)sp)->epc;
				fp = ((struct pt_regs *)sp)->s0;
			}
		}
	}
}

#else /* !CONFIG_FRAME_POINTER */

void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		sp = current_stack_pointer;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	if (unlikely(sp & 0x7))
		return;

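	/*
	 * Without frame pointers we can only scan the raw stack: treat
	 * every word that looks like a kernel text address as a possible
	 * return address. Subtracting 0x4 moves pc back into the call
	 * site rather than the instruction after it; the result is
	 * best-effort and may include stale addresses.
	 */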
	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
			break;
		pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
	}
}

#endif /* CONFIG_FRAME_POINTER */

static bool print_trace_address(void *arg, unsigned long pc)
{
	const char *loglvl = arg;

	print_ip_sym(loglvl, pc);
	return true;
}

noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
		    const char *loglvl)
{
	walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	pr_cont("%sCall Trace:\n", loglvl);
	dump_backtrace(NULL, task, loglvl);
}

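/*
 * "wchan" (wait channel) support: report the first pc outside the
 * scheduler's own text, i.e. the function the task is blocked in,
 * as consumed by /proc/<pid>/wchan.
 */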
static bool save_wchan(void *arg, unsigned long pc)
{
	if (!in_sched_functions(pc)) {
		unsigned long *p = arg;
		*p = pc;
		return false;
	}
	return true;
}

unsigned long __get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;

	if (!try_get_task_stack(task))
		return 0;
	walk_stackframe(task, NULL, save_wchan, &pc);
	put_task_stack(task);
	return pc;
}

noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	walk_stackframe(task, regs, consume_entry, cookie);
}
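
/*
 * arch_stack_walk() is the architecture backend for the generic
 * stacktrace API; a typical (illustrative) caller looks like:
 *
 *	unsigned long entries[16];
 *	unsigned int nr;
 *
 *	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
 *	stack_trace_print(entries, nr, 0);
 *
 * where stack_trace_save() and stack_trace_print() come from
 * <linux/stacktrace.h>, not from this file.
 */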

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static unsigned long unwind_user_frame(stack_trace_consume_fn consume_entry,
				       void *cookie, unsigned long fp,
				       unsigned long reg_ra)
{
	struct stackframe buftail;
	unsigned long ra = 0;
	unsigned long __user *user_frame_tail =
		(unsigned long __user *)(fp - sizeof(struct stackframe));

	/* Check that the struct stackframe below fp is accessible */
	if (!access_ok(user_frame_tail, sizeof(buftail)))
		return 0;
	if (__copy_from_user_inatomic(&buftail, user_frame_tail,
				      sizeof(buftail)))
		return 0;

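	/*
	 * For the innermost frame (reg_ra != 0) the return address is
	 * still live in the ra register, not yet spilled to the stack,
	 * so prefer it; "x ?: y" is the GNU shorthand for x ? x : y.
	 */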
	ra = reg_ra ? : buftail.ra;

	fp = buftail.fp;
	if (!ra || !consume_entry(cookie, ra))
		return 0;

	return fp;
}

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	unsigned long fp = 0;

	fp = regs->s0;
	if (!consume_entry(cookie, regs->epc))
		return;

	fp = unwind_user_frame(consume_entry, cookie, fp, regs->ra);
	while (fp && !(fp & 0x7))
		fp = unwind_user_frame(consume_entry, cookie, fp, 0);
}
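
/*
 * Note (assumption, not enforced here): user-space unwinding relies on
 * the traced binary having been built with frame pointers (e.g.
 * -fno-omit-frame-pointer); without them the fp chain read by
 * unwind_user_frame() is garbage and the walk stops at the first
 * invalid frame.
 */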