// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel unwinding support
 *
 * (c) 2002-2004 Randolph Chung <tausq@debian.org>
 *
 * Derived partially from the IA64 implementation. The PA-RISC
 * Runtime Architecture Document is also a useful reference to
 * understand what is happening here
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>

#include <linux/uaccess.h>
#include <asm/assembly.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>

#include <asm/unwind.h>

/* #define DEBUG 1 */
#ifdef DEBUG
#define dbg(x...)	pr_debug(x)
#else
#define dbg(x...)
#endif

#define KERNEL_START (KERNEL_BINARY_TEXT_START)

extern struct unwind_table_entry __start___unwind[];
extern struct unwind_table_entry __stop___unwind[];

static DEFINE_SPINLOCK(unwind_lock);
/*
 * the kernel unwind block is not dynamically allocated so that
 * we can call unwind_init as early in the bootup process as
 * possible (before the slab allocator is initialized)
 */
static struct unwind_table kernel_unwind_table __read_mostly;
static LIST_HEAD(unwind_tables);

static inline const struct unwind_table_entry *
find_unwind_entry_in_table(const struct unwind_table *table, unsigned long addr)
{
	const struct unwind_table_entry *e = NULL;
	unsigned long lo, hi, mid;

	lo = 0;
	hi = table->length - 1;

	while (lo <= hi) {
		mid = (hi - lo) / 2 + lo;
		e = &table->table[mid];
		if (addr < e->region_start)
			hi = mid - 1;
		else if (addr > e->region_end)
			lo = mid + 1;
		else
			return e;
	}

	return NULL;
}

static const struct unwind_table_entry *
find_unwind_entry(unsigned long addr)
{
	struct unwind_table *table;
	const struct unwind_table_entry *e = NULL;

	if (addr >= kernel_unwind_table.start &&
	    addr <= kernel_unwind_table.end)
		e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	else {
		unsigned long flags;

		spin_lock_irqsave(&unwind_lock, flags);
		list_for_each_entry(table, &unwind_tables, list) {
			if (addr >= table->start &&
			    addr <= table->end)
				e = find_unwind_entry_in_table(table, addr);
			if (e) {
				/* Move-to-front to exploit common traces */
				list_move(&table->list, &unwind_tables);
				break;
			}
		}
		spin_unlock_irqrestore(&unwind_lock, flags);
	}

	return e;
}

static void
unwind_table_init(struct unwind_table *table, const char *name,
		  unsigned long base_addr, unsigned long gp,
		  void *table_start, void *table_end)
{
	struct unwind_table_entry *start = table_start;
	struct unwind_table_entry *end =
		(struct unwind_table_entry *)table_end - 1;

	table->name = name;
	table->base_addr = base_addr;
	table->gp = gp;
	table->start = base_addr + start->region_start;
	table->end = base_addr + end->region_end;
	table->table = (struct unwind_table_entry *)table_start;
	table->length = end - start + 1;
	INIT_LIST_HEAD(&table->list);

	for (; start <= end; start++) {
		if (start < end &&
		    start->region_end > (start+1)->region_start) {
			pr_warn("Out of order unwind entry! %px and %px\n",
				start, start+1);
		}

		start->region_start += base_addr;
		start->region_end += base_addr;
	}
}
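
#if 0
/*
 * Illustrative sketch only (not built): a sanity check of the binary
 * search above.  "example_check_lookup" is a made-up helper name; it
 * just verifies that an address found in the kernel unwind table is
 * really covered by the returned entry's region.
 */
static bool example_check_lookup(unsigned long addr)
{
	const struct unwind_table_entry *e;

	e = find_unwind_entry_in_table(&kernel_unwind_table, addr);
	return e && addr >= e->region_start && addr <= e->region_end;
}
#endif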
%px and %px\n", 120 start, start+1); 121 } 122 123 start->region_start += base_addr; 124 start->region_end += base_addr; 125 } 126 } 127 128 static int cmp_unwind_table_entry(const void *a, const void *b) 129 { 130 return ((const struct unwind_table_entry *)a)->region_start 131 - ((const struct unwind_table_entry *)b)->region_start; 132 } 133 134 static void 135 unwind_table_sort(struct unwind_table_entry *start, 136 struct unwind_table_entry *finish) 137 { 138 sort(start, finish - start, sizeof(struct unwind_table_entry), 139 cmp_unwind_table_entry, NULL); 140 } 141 142 struct unwind_table * 143 unwind_table_add(const char *name, unsigned long base_addr, 144 unsigned long gp, 145 void *start, void *end) 146 { 147 struct unwind_table *table; 148 unsigned long flags; 149 struct unwind_table_entry *s = (struct unwind_table_entry *)start; 150 struct unwind_table_entry *e = (struct unwind_table_entry *)end; 151 152 unwind_table_sort(s, e); 153 154 table = kmalloc(sizeof(struct unwind_table), GFP_USER); 155 if (table == NULL) 156 return NULL; 157 unwind_table_init(table, name, base_addr, gp, start, end); 158 spin_lock_irqsave(&unwind_lock, flags); 159 list_add_tail(&table->list, &unwind_tables); 160 spin_unlock_irqrestore(&unwind_lock, flags); 161 162 return table; 163 } 164 165 void unwind_table_remove(struct unwind_table *table) 166 { 167 unsigned long flags; 168 169 spin_lock_irqsave(&unwind_lock, flags); 170 list_del(&table->list); 171 spin_unlock_irqrestore(&unwind_lock, flags); 172 173 kfree(table); 174 } 175 176 /* Called from setup_arch to import the kernel unwind info */ 177 int __init unwind_init(void) 178 { 179 long start, stop; 180 register unsigned long gp __asm__ ("r27"); 181 182 start = (long)&__start___unwind[0]; 183 stop = (long)&__stop___unwind[0]; 184 185 dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", 186 start, stop, 187 (stop - start) / sizeof(struct unwind_table_entry)); 188 189 unwind_table_init(&kernel_unwind_table, "kernel", KERNEL_START, 190 gp, 191 &__start___unwind[0], &__stop___unwind[0]); 192 #if 0 193 { 194 int i; 195 for (i = 0; i < 10; i++) 196 { 197 printk("region 0x%x-0x%x\n", 198 __start___unwind[i].region_start, 199 __start___unwind[i].region_end); 200 } 201 } 202 #endif 203 return 0; 204 } 205 206 static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size) 207 { 208 /* 209 * We have to use void * instead of a function pointer, because 210 * function pointers aren't a pointer to the function on 64-bit. 

static int unwind_special(struct unwind_frame_info *info, unsigned long pc, int frame_size)
{
	/*
	 * We have to use void * instead of a function pointer, because
	 * function pointers aren't a pointer to the function on 64-bit.
	 * Make them const so the compiler knows they live in .text
	 */
	extern void * const handle_interruption;
	extern void * const ret_from_kernel_thread;
	extern void * const syscall_exit;
	extern void * const intr_return;
	extern void * const _switch_to_ret;
#ifdef CONFIG_IRQSTACKS
	extern void * const call_on_stack;
#endif /* CONFIG_IRQSTACKS */

	if (pc == (unsigned long) &handle_interruption) {
		struct pt_regs *regs = (struct pt_regs *)(info->sp - frame_size - PT_SZ_ALGN);
		dbg("Unwinding through handle_interruption()\n");
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		return 1;
	}

	if (pc == (unsigned long) &ret_from_kernel_thread ||
	    pc == (unsigned long) &syscall_exit) {
		info->prev_sp = info->prev_ip = 0;
		return 1;
	}

	if (pc == (unsigned long) &intr_return) {
		struct pt_regs *regs;

		dbg("Found intr_return()\n");
		regs = (struct pt_regs *)(info->sp - PT_SZ_ALGN);
		info->prev_sp = regs->gr[30];
		info->prev_ip = regs->iaoq[0];
		info->rp = regs->gr[2];
		return 1;
	}

	if (pc == (unsigned long) &_switch_to_ret) {
		info->prev_sp = info->sp - CALLEE_SAVE_FRAME_SIZE;
		info->prev_ip = *(unsigned long *)(info->prev_sp - RP_OFFSET);
		return 1;
	}

#ifdef CONFIG_IRQSTACKS
	if (pc == (unsigned long) &call_on_stack) {
		info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ);
		info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET);
		return 1;
	}
#endif

	return 0;
}
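
/*
 * unwind_frame_regs() below recovers the previous frame either by
 * scanning the function prologue (when unwind info is available) or by
 * blindly stepping through 64-byte aligned frames.  Note that
 * Total_frame_size in the unwind entry is counted in 8-byte units, hence
 * the "<< 3" conversions when comparing it against the byte-sized
 * frame_size accumulated from the prologue instructions.
 */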

static void unwind_frame_regs(struct unwind_frame_info *info)
{
	const struct unwind_table_entry *e;
	unsigned long npc;
	unsigned int insn;
	long frame_size = 0;
	int looking_for_rp, rpoffset = 0;

	e = find_unwind_entry(info->ip);
	if (e == NULL) {
		unsigned long sp;

		dbg("Cannot find unwind entry for %pS; forced unwinding\n",
			(void *) info->ip);

		/* Since we are doing the unwinding blind, we don't know if
		   we are adjusting the stack correctly or extracting the rp
		   correctly. The rp is checked to see if it belongs to the
		   kernel text section, if not we assume we don't have a
		   correct stack frame and we continue to unwind the stack.
		   This is not quite correct, and will fail for loadable
		   modules. */
		sp = info->sp & ~63;
		do {
			unsigned long tmp;

			info->prev_sp = sp - 64;
			info->prev_ip = 0;

			/* The stack is at the end inside the thread_union
			 * struct. If we reach data, we have reached the
			 * beginning of the stack and should stop unwinding. */
			if (info->prev_sp >= (unsigned long) task_thread_info(info->t) &&
			    info->prev_sp < ((unsigned long) task_thread_info(info->t)
						+ THREAD_SZ_ALGN)) {
				info->prev_sp = 0;
				break;
			}

			if (get_user(tmp, (unsigned long *)(info->prev_sp - RP_OFFSET)))
				break;
			info->prev_ip = tmp;
			sp = info->prev_sp;
		} while (!kernel_text_address(info->prev_ip));

		info->rp = 0;

		dbg("analyzing func @ %lx with no unwind info, setting "
		    "prev_sp=%lx prev_ip=%lx\n", info->ip,
		    info->prev_sp, info->prev_ip);
	} else {
		dbg("e->start = 0x%x, e->end = 0x%x, Save_SP = %d, "
		    "Save_RP = %d, Millicode = %d size = %u\n",
		    e->region_start, e->region_end, e->Save_SP, e->Save_RP,
		    e->Millicode, e->Total_frame_size);

		looking_for_rp = e->Save_RP;

		for (npc = e->region_start;
		     (frame_size < (e->Total_frame_size << 3) ||
		      looking_for_rp) &&
		     npc < info->ip;
		     npc += 4) {

			insn = *(unsigned int *)npc;

			if ((insn & 0xffffc001) == 0x37de0000 ||
			    (insn & 0xffe00001) == 0x6fc00000) {
				/* ldo X(sp), sp, or stwm X,D(sp) */
				frame_size += (insn & 0x3fff) >> 1;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if ((insn & 0xffe00009) == 0x73c00008) {
				/* std,ma X,D(sp) */
				frame_size += ((insn >> 4) & 0x3ff) << 3;
				dbg("analyzing func @ %lx, insn=%08x @ "
				    "%lx, frame_size = %ld\n", info->ip,
				    insn, npc, frame_size);
			} else if (insn == 0x6bc23fd9) {
				/* stw rp,-20(sp) */
				rpoffset = 20;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=stw rp,"
				    "-20(sp) @ %lx\n", info->ip, npc);
			} else if (insn == 0x0fc212c1) {
				/* std rp,-16(sr0,sp) */
				rpoffset = 16;
				looking_for_rp = 0;
				dbg("analyzing func @ %lx, insn=std rp,"
				    "-16(sp) @ %lx\n", info->ip, npc);
			}
		}

		if (frame_size > e->Total_frame_size << 3)
			frame_size = e->Total_frame_size << 3;

		if (!unwind_special(info, e->region_start, frame_size)) {
			info->prev_sp = info->sp - frame_size;
			if (e->Millicode)
				info->rp = info->r31;
			else if (rpoffset)
				info->rp = *(unsigned long *)(info->prev_sp - rpoffset);
			info->prev_ip = info->rp;
			info->rp = 0;
		}

		dbg("analyzing func @ %lx, setting prev_sp=%lx "
		    "prev_ip=%lx npc=%lx\n", info->ip, info->prev_sp,
		    info->prev_ip, npc);
	}
}

void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t,
		       struct pt_regs *regs)
{
	memset(info, 0, sizeof(struct unwind_frame_info));
	info->t = t;
	info->sp = regs->gr[30];
	info->ip = regs->iaoq[0];
	info->rp = regs->gr[2];
	info->r31 = regs->gr[31];

	dbg("(%d) Start unwind from sp=%08lx ip=%08lx\n",
	    t ? (int)t->pid : -1, info->sp, info->ip);
}

void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t)
{
	struct pt_regs *r = &t->thread.regs;
	struct pt_regs *r2;

	r2 = kmalloc(sizeof(struct pt_regs), GFP_ATOMIC);
	if (!r2)
		return;
	*r2 = *r;
	r2->gr[30] = r->ksp;
	r2->iaoq[0] = r->kpc;
	unwind_frame_init(info, t, r2);
	kfree(r2);
}

void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs)
{
	unwind_frame_init(info, current, regs);
}

int unwind_once(struct unwind_frame_info *next_frame)
{
	unwind_frame_regs(next_frame);

	if (next_frame->prev_sp == 0 ||
	    next_frame->prev_ip == 0)
		return -1;

	next_frame->sp = next_frame->prev_sp;
	next_frame->ip = next_frame->prev_ip;
	next_frame->prev_sp = 0;
	next_frame->prev_ip = 0;

	dbg("(%d) Continue unwind to sp=%08lx ip=%08lx\n",
	    next_frame->t ? (int)next_frame->t->pid : -1,
	    next_frame->sp, next_frame->ip);

	return 0;
}

int unwind_to_user(struct unwind_frame_info *info)
{
	int ret;

	do {
		ret = unwind_once(info);
	} while (!ret && !(info->ip & 3));

	return ret;
}

unsigned long return_address(unsigned int level)
{
	struct unwind_frame_info info;
	struct pt_regs r;
	unsigned long sp;

	/* initialize unwind info */
	asm volatile ("copy %%r30, %0" : "=r"(sp));
	memset(&r, 0, sizeof(struct pt_regs));
	r.iaoq[0] = _THIS_IP_;
	r.gr[2] = _RET_IP_;
	r.gr[30] = sp;
	unwind_frame_init(&info, current, &r);

	/* unwind stack */
	++level;
	do {
		if (unwind_once(&info) < 0 || info.ip == 0)
			return 0;
		if (!kernel_text_address(info.ip))
			return 0;
	} while (info.ip && level--);

	return info.ip;
}
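
#if 0
/*
 * Illustrative sketch only (not built): walking a sleeping task's kernel
 * stack with the API above, roughly what a backtrace printer would do.
 * "example_walk_stack" is a made-up name for illustration.
 */
static void example_walk_stack(struct task_struct *task)
{
	struct unwind_frame_info info;

	unwind_frame_init_from_blocked_task(&info, task);
	while (1) {
		/* unwind_once() returns -1 once no previous frame is found */
		if (unwind_once(&info) < 0 || info.ip == 0)
			break;
		if (kernel_text_address(info.ip))
			printk(" [<%px>] %pS\n",
			       (void *)info.ip, (void *)info.ip);
	}
}
#endif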