xref: /linux/arch/parisc/kernel/unwind.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862)
/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kallsyms.h>

#include <asm/uaccess.h>
#include <asm/assembly.h>

#include <asm/unwind.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...) printk(x)
#else
#define dbg(x...)
#endif

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

static spinlock_t unwind_lock;
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);

static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}
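
/*
 * find_unwind_entry_in_table() does a binary search, so it relies on the
 * entries being sorted by region_start: unwind_table_add() sorts module
 * tables before registering them, and unwind_table_init() warns if it
 * sees out-of-order entries.
 */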

static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e)
				break;
		}

	return e;
}

static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			printk("WARNING: Out of order unwind entry! %p and %p\n", start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}

static void
unwind_table_sort(struct unwind_table_entry *start,
		  struct unwind_table_entry *finish)
{
	struct unwind_table_entry el, *p, *q;

	for (p = start + 1; p < finish; ++p) {
		if (p[0].region_start < p[-1].region_start) {
			el = *p;
			q = p;
			do {
				q[0] = q[-1];
				--q;
			} while (q > start &&
				 el.region_start < q[-1].region_start);
			*q = el;
		}
	}
}
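
/*
 * unwind_table_sort() is a plain insertion sort keyed on region_start.
 * That is adequate here: the tables handed to unwind_table_add() are
 * typically small and close to sorted already, where insertion sort is
 * nearly linear.
 */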

struct unwind_table *
unwind_table_add(const char *name, unsigned long base_addr,
		 unsigned long gp,
		 void *start, void *end)
{
	struct unwind_table *table;
	unsigned long flags;
	struct unwind_table_entry *s = (struct unwind_table_entry *)start;
	struct unwind_table_entry *e = (struct unwind_table_entry *)end;

	unwind_table_sort(s, e);

	table = kmalloc(sizeof(struct unwind_table), GFP_USER);
	if (table == NULL)
		return NULL;
	unwind_table_init(table, name, base_addr, gp, start, end);
	spin_lock_irqsave(&unwind_lock, flags);
	list_add_tail(&table->list, &unwind_tables);
	spin_unlock_irqrestore(&unwind_lock, flags);

	return table;
}
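
/*
 * Illustrative use only: code that loads an object with its own unwind
 * section (for instance the module loader) would register and later drop
 * a table roughly like this.  The variable names here are hypothetical;
 * only unwind_table_add()/unwind_table_remove() are the real interface
 * defined in this file:
 *
 *	struct unwind_table *tbl;
 *
 *	tbl = unwind_table_add(mod->name, load_base, gp_value,
 *			       unwind_sec_start, unwind_sec_end);
 *	...
 *	if (tbl)
 *		unwind_table_remove(tbl);
 */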

void unwind_table_remove(struct unwind_table *table)
{
	unsigned long flags;

	spin_lock_irqsave(&unwind_lock, flags);
	list_del(&table->list);
	spin_unlock_irqrestore(&unwind_lock, flags);

	kfree(table);
}

/* Called at boot time (via module_init) to import the kernel unwind info */
static int unwind_init(void)
{
	long start, stop;
	register unsigned long gp __asm__ ("r27");	/* r27 holds the kernel global data pointer */

	start = (long)&__start___unwind[0];
	stop = (long)&__stop___unwind[0];

	spin_lock_init(&unwind_lock);

	printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
	    start, stop,
	    (stop - start) / sizeof(struct unwind_table_entry));

	unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START,
			  gp,
			  &__start___unwind[0], &__stop___unwind[0]);
#if 0
	{
		int i;
		for (i = 0; i < 10; i++)
		{
			printk("region 0x%x-0x%x\n",
				__start___unwind[i].region_start,
				__start___unwind[i].region_end);
		}
	}
#endif
	return 0;
}

static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;
		extern char _stext[], _etext[];

		dbg("Cannot find unwind entry for 0x%lx; forced unwinding\n", info->ip);

#ifdef CONFIG_KALLSYMS
		/* Handle some frequent special cases.... */
		{
			char symname[KSYM_NAME_LEN+1];
			char *modname;
			unsigned long symsize, offset;

			kallsyms_lookup(info->ip, &symsize, &offset,
					&modname, symname);

			dbg("info->ip = 0x%lx, name = %s\n", info->ip, symname);

			if (strcmp(symname, "_switch_to_ret") == 0) {
				info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
				info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
				dbg("_switch_to_ret @ %lx - setting "
				    "prev_sp=%lx prev_ip=%lx\n",
				    info->ip, info->prev_sp,
				    info->prev_ip);
				return;
			} else if (strcmp(symname, "ret_from_kernel_thread") == 0 ||
				   strcmp(symname, "syscall_exit") == 0) {
				info->prev_ip = info->prev_sp = 0;
				return;
			}
		}
#endif

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly.  The rp is checked to see if it belongs to the
		   kernel text section; if not, we assume we don't have a
		   correct stack frame and continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;
			if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (info->prev_ip < (unsigned long)_stext ||
			 info->prev_ip > (unsigned long)_etext);

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc000) == 0x37de0000 ||
			    (insn & 0xffe00000) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x1 ? -(1 << 13) : 0) |
					((insn & 0x3fff) >> 1);
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00008) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += (insn & 0x1 ? -(1 << 13) : 0) |
					(((insn >> 4) & 0x3ff) << 3);
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}
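		/*
		 * Illustrative decode of the low-sign-extended im14 field
		 * handled above: bit 0 carries the sign and bits 1-13 the
		 * remaining value bits, so e.g. "ldo 64(sp),sp"
		 * (0x37de0080) gives (0x0080 & 1) == 0 and
		 * (0x0080 & 0x3fff) >> 1 == 64, i.e. a frame grown by
		 * 64 bytes.
		 */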

		info->prev_sp = info->sp - frame_size;
		if (e->Millicode)
			info->rp = info->r31;
		else if (rpoffset)
			info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
		info->prev_ip = info->rp;
		info->rp = 0;

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}

void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_KERNEL);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
	unwind_frame_init(info, current, regs);
}

int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	/*
	 * The low two bits of the IAOQ hold the privilege level; they are
	 * zero for kernel addresses, so keep unwinding until we reach an
	 * ip with those bits set (a user-space return address).
	 */
	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}
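
/*
 * Illustrative only: a caller that wants a kernel backtrace of a sleeping
 * task would typically drive this interface roughly as follows (the printk
 * format is just an example, the rest is the API defined above):
 *
 *	struct unwind_frame_info info;
 *
 *	unwind_frame_init_from_blocked_task(&info, task);
 *	while (unwind_once(&info) == 0)
 *		printk(" [<%08lx>]\n", info.ip);
 */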

module_init(unwind_init);