xref: /linux/arch/parisc/kernel/unwind.c (revision 38a2c275c3d3f7d7180d012386cd6fcf87854400)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Kernel unwinding support
4  *
5  * (c) 2002-2004 Randolph Chung <tausq@debian.org>
6  *
7  * Derived partially from the IA64 implementation. The PA-RISC
8  * Runtime Architecture Document is also a useful reference to
9  * understand what is happening here
10  */
11 
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
16 #include <linux/sort.h>
17 #include <linux/sched/task_stack.h>
18 
19 #include <linux/uaccess.h>
20 #include <asm/assembly.h>
21 #include <asm/asm-offsets.h>
22 #include <asm/ptrace.h>
23 
24 #include <asm/unwind.h>
25 #include <asm/switch_to.h>
26 #include <asm/sections.h>
27 #include <asm/ftrace.h>
28 
29 /* #define DEBUG 1 */
30 #ifdef DEBUG
31 #define dbg(x...) pr_debug(x)
32 #else
33 #define dbg(x...) do { } while (0)
34 #endif
35 
36 #define KERNEL_START (KERNEL_BINARY_TEXT_START)
37 
38 #define ALIGNMENT_OK(ptr, type) (((ptr) & (sizeof(type) - 1)) == 0)
39 
40 extern struct unwind_table_entry __start___unwind[];
41 extern struct unwind_table_entry __stop___unwind[];
42 
43 static DEFINE_SPINLOCK(unwind_lock);
44 /*
45  * the kernel unwind block is not dynamically allocated so that
46  * we can call unwind_init as early in the bootup process as
47  * possible (before the slab allocator is initialized)
48  */
49 static struct unwind_table kernel_unwind_table __ro_after_init;
50 static LIST_HEAD(unwind_tables);
51 
52 static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table * table,unsigned long addr)53 find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
54 {
55 	const struct unwind_table_entry *e = NULL;
56 	unsigned long lo, hi, mid;
57 
58 	lo = 0;
59 	hi = table->length - 1;
60 
61 	while (lo <= hi) {
62 		mid = (hi - lo) / 2 + lo;
63 		e = &table->table[mid];
64 		if (addr < e->region_start)
65 			hi = mid - 1;
66 		else if (addr > e->region_end)
67 			lo = mid + 1;
68 		else
69 			return e;
70 	}
71 
72 	return NULL;
73 }
74 
75 static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)76 find_unwind_entry(unsigned long addr)
77 {
78 	struct unwind_table *table;
79 	const struct unwind_table_entry *e = NULL;
80 
81 	if (addr >= kernel_unwind_table.start &&
82 	    addr <= kernel_unwind_table.end)
83 		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
84 	else {
85 		unsigned long flags;
86 
87 		spin_lock_irqsave(&unwind_lock, flags);
88 		list_for_each_entry(table, &unwind_tables, list) {
89 			if (addr >= table->start &&
90 			    addr <= table->end)
91 				e = find_unwind_entry_in_table(table, addr);
92 			if (e) {
93 				/* Move-to-front to exploit common traces */
94 				list_move(&table->list, &unwind_tables);
95 				break;
96 			}
97 		}
98 		spin_unlock_irqrestore(&unwind_lock, flags);
99 	}
100 
101 	return e;
102 }
103 
104 static void
unwind_table_init(struct unwind_table * table,const char * name,unsigned long base_addr,unsigned long gp,void * table_start,void * table_end)105 unwind_table_init(struct unwind_table *table, const char *name,
106 		  unsigned long base_addr, unsigned long gp,
107 		  void *table_start, void *table_end)
108 {
109 	struct unwind_table_entry *start = table_start;
110 	struct unwind_table_entry *end =
111 		(struct unwind_table_entry *)table_end - 1;
112 
113 	table->name = name;
114 	table->base_addr = base_addr;
115 	table->gp = gp;
116 	table->start = base_addr + start->region_start;
117 	table->end = base_addr + end->region_end;
118 	table->table = (struct unwind_table_entry *)table_start;
119 	table->length = end - start + 1;
120 	INIT_LIST_HEAD(&table->list);
121 
122 	for (; start <= end; start++) {
123 		if (start < end &&
124 		    start->region_end > (start+1)->region_start) {
125 			pr_warn("Out of order unwind entry! %px and %px\n",
126 				start, start+1);
127 		}
128 
129 		start->region_start += base_addr;
130 		start->region_end += base_addr;
131 	}
132 }
133 
cmp_unwind_table_entry(const void * a,const void * b)134 static int cmp_unwind_table_entry(const void *a, const void *b)
135 {
136 	return ((const struct unwind_table_entry *)a)->region_start
137 	     - ((const struct unwind_table_entry *)b)->region_start;
138 }
139 
140 static void
unwind_table_sort(struct unwind_table_entry * start,struct unwind_table_entry * finish)141 unwind_table_sort(struct unwind_table_entry *start,
142 		  struct unwind_table_entry *finish)
143 {
144 	sort(start, finish - start, sizeof(struct unwind_table_entry),
145 	     cmp_unwind_table_entry, NULL);
146 }
147 
148 struct unwind_table *
unwind_table_add(const char * name,unsigned long base_addr,unsigned long gp,void * start,void * end)149 unwind_table_add(const char *name, unsigned long base_addr,
150 		 unsigned long gp,
151                  void *start, void *end)
152 {
153 	struct unwind_table *table;
154 	unsigned long flags;
155 	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
156 	struct unwind_table_entry *e = (struct unwind_table_entry *)end;
157 
158 	unwind_table_sort(s, e);
159 
160 	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
161 	if (table == NULL)
162 		return NULL;
163 	unwind_table_init(table, name, base_addr, gp, start, end);
164 	spin_lock_irqsave(&unwind_lock, flags);
165 	list_add_tail(&table->list, &unwind_tables);
166 	spin_unlock_irqrestore(&unwind_lock, flags);
167 
168 	return table;
169 }
170 
unwind_table_remove(struct unwind_table * table)171 void unwind_table_remove(struct unwind_table *table)
172 {
173 	unsigned long flags;
174 
175 	spin_lock_irqsave(&unwind_lock, flags);
176 	list_del(&table->list);
177 	spin_unlock_irqrestore(&unwind_lock, flags);
178 
179 	kfree(table);
180 }
181 
/* Called from setup_arch to import the kernel unwind info */
int __init unwind_init(void)
{
	long start __maybe_unused, stop __maybe_unused;
	/* r27 is the global data pointer register on PA-RISC; capture the
	   kernel's current gp value for the unwind table. */
	register unsigned long gp __asm__ ("r27");

	/* Linker-provided bounds of the kernel's .PARISC.unwind section. */
	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	    start, stop,
	    (stop - start) / sizeof(struct unwind_table_entry));

	/* kernel_unwind_table is statically allocated (see comment at its
	   definition) so this can run before the slab allocator is up.
	   The section is emitted sorted by the linker, so no sort here. */
	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
				__start___unwind[i].region_start,
				__start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}
211 
pc_is_kernel_fn(unsigned long pc,void * fn)212 static bool pc_is_kernel_fn(unsigned long pc, void *fn)
213 {
214 	return (unsigned long)dereference_kernel_function_descriptor(fn) == pc;
215 }
216 
/*
 * Handle frames the table-driven unwinder cannot walk: interruption
 * handlers, syscall/interrupt return paths, the context switch and the
 * IRQ-stack trampoline.  Returns 1 if the frame was handled (prev_sp
 * and prev_ip set in @info), 0 to fall back to the generic unwind.
 */
static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	/*
	 * We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text
	 * Note: We could use dereference_kernel_function_descriptor()
	 * instead but we want to keep it simple here.
	 */
	extern void * const ret_from_kernel_thread;
	extern void * const syscall_exit;
	extern void * const intr_return;
	extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
	extern void * const _call_on_stack;
#endif /* CONFIG_IRQSTACKS */

	if (pc_is_kernel_fn(pc, handle_interruption)) {
		/* A pt_regs was stored below this frame; resume the walk
		   from the interrupted context's sp (gr[30]) and pc. */
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		return 1;
	}

	if (pc == (unsigned long)&ret_from_kernel_thread ||
	    pc == (unsigned long)&syscall_exit) {
		/* Bottom of the kernel stack: terminate the walk. */
		info->prev_sp = info->prev_ip = 0;
		return 1;
	}

	if (pc == (unsigned long)&intr_return) {
		struct pt_regs *regs;

		dbg("Found intr_return()\n");
		/* pt_regs sits immediately below sp on this path. */
		regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		info->rp = regs->gr[2];
		return 1;
	}

	if (pc_is_kernel_fn(pc, _switch_to) ||
	    pc == (unsigned long)&_switch_to_ret) {
		info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
		/* Only dereference the saved-rp slot if the computed sp is
		   properly aligned for a long access. */
		if (ALIGNMENT_OK(info->prev_sp, long))
			info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
		else
			info->prev_ip = info->prev_sp = 0;
		return 1;
	}

#ifdef CONFIG_IRQSTACKS
	if (pc == (unsigned long)&_call_on_stack && ALIGNMENT_OK(info->sp, long)) {
		/* Hop from the IRQ stack back to the interrupted stack:
		   the trampoline saved the old sp and rp in its frame. */
		info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
		info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
		return 1;
	}
#endif
	return 0;
}
278 
/*
 * Core of the unwinder: given the current frame in @info (sp/ip),
 * compute the previous frame's sp and ip into info->prev_sp/prev_ip.
 * With unwind data, this decodes the function prologue to learn the
 * frame size and where rp was spilled; without it, it falls back to a
 * heuristic scan of the kernel stack.
 */
static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for %pS; forced unwinding\n",
			(void *) info->ip);

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section, if not we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		/* Walk down the stack 64 bytes at a time (the minimum
		   frame alignment used here) looking for a plausible rp. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;

			/* Check if stack is inside kernel stack area */
			if ((info->prev_sp - (unsigned long) task_stack_page(info->t))
					>= THREAD_SIZE) {
				info->prev_sp = 0;
				break;
			}

			/* Fault-safe read: the slot may not be mapped. */
			if (copy_from_kernel_nofault(&tmp,
			    (void *)info->prev_sp - RP_OFFSET, sizeof(tmp)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		/* Scan the prologue from the function entry up to (not
		   including) the current ip, accumulating stack growth and
		   locating the rp spill, until the advertised total frame
		   size (Total_frame_size is in 8-byte units) is reached
		   and rp (if saved) has been found. */
		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		/* Never trust a prologue scan beyond the table's size. */
		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				/* Millicode routines keep rp in r31. */
				info->rp = info->r31;
			else if (rpoffset && ALIGNMENT_OK(info->prev_sp, long))
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			else
				info->rp = 0;
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}
391 
unwind_frame_init(struct unwind_frame_info * info,struct task_struct * t,struct pt_regs * regs)392 void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
393 		       struct pt_regs *regs)
394 {
395 	memset(info, 0, sizeof(struct unwind_frame_info));
396 	info->t = t;
397 	info->sp = regs->gr[30];
398 	info->ip = regs->iaoq[0];
399 	info->rp = regs->gr[2];
400 	info->r31 = regs->gr[31];
401 
402 	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
403 	    t ? (int)t->pid : -1, info->sp, info->ip);
404 }
405 
unwind_frame_init_from_blocked_task(struct unwind_frame_info * info,struct task_struct * t)406 void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
407 {
408 	struct pt_regs *r = &t->thread.regs;
409 	struct pt_regs *r2;
410 
411 	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
412 	if (!r2)
413 		return;
414 	*r2 = *r;
415 	r2->gr[30] = r->ksp;
416 	r2->iaoq[0] = r->kpc;
417 	unwind_frame_init(info, t, r2);
418 	kfree(r2);
419 }
420 
/* Read the current stack pointer (%r30 on PA-RISC). */
#define get_parisc_stackpointer() ({ \
	unsigned long sp; \
	__asm__("copy %%r30, %0" : "=r"(sp)); \
	(sp); \
})
426 
unwind_frame_init_task(struct unwind_frame_info * info,struct task_struct * task,struct pt_regs * regs)427 void unwind_frame_init_task(struct unwind_frame_info *info,
428 	struct task_struct *task, struct pt_regs *regs)
429 {
430 	task = task ? task : current;
431 
432 	if (task == current) {
433 		struct pt_regs r;
434 
435 		if (!regs) {
436 			memset(&r, 0, sizeof(r));
437 			r.iaoq[0] =  _THIS_IP_;
438 			r.gr[2] = _RET_IP_;
439 			r.gr[30] = get_parisc_stackpointer();
440 			regs = &r;
441 		}
442 		unwind_frame_init(info, task, regs);
443 	} else {
444 		unwind_frame_init_from_blocked_task(info, task);
445 	}
446 }
447 
unwind_once(struct unwind_frame_info * next_frame)448 int unwind_once(struct unwind_frame_info *next_frame)
449 {
450 	unwind_frame_regs(next_frame);
451 
452 	if (next_frame->prev_sp == 0 ||
453 	    next_frame->prev_ip == 0)
454 		return -1;
455 
456 	next_frame->sp = next_frame->prev_sp;
457 	next_frame->ip = next_frame->prev_ip;
458 	next_frame->prev_sp = 0;
459 	next_frame->prev_ip = 0;
460 
461 	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
462 	    next_frame->t ? (int)next_frame->t->pid : -1,
463 	    next_frame->sp, next_frame->ip);
464 
465 	return 0;
466 }
467 
unwind_to_user(struct unwind_frame_info * info)468 int unwind_to_user(struct unwind_frame_info *info)
469 {
470 	int ret;
471 
472 	do {
473 		ret = unwind_once(info);
474 	} while (!ret && !(info->ip & 3));
475 
476 	return ret;
477 }
478 
return_address(unsigned int level)479 unsigned long return_address(unsigned int level)
480 {
481 	struct unwind_frame_info info;
482 
483 	/* initialize unwind info */
484 	unwind_frame_init_task(&info, current, NULL);
485 
486 	/* unwind stack */
487 	level += 2;
488 	do {
489 		if (unwind_once(&info) < 0 || info.ip == 0)
490 			return 0;
491 		if (!kernel_text_address(info.ip))
492 			return 0;
493 	} while (info.ip && level--);
494 
495 	return info.ip;
496 }
497