/*
 * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This is an implementation of a DWARF unwinder. Its main purpose is
 * for generating stacktrace information. Based on the DWARF 3
 * specification from http://www.dwarfstd.org.
 *
 * TODO:
 *	- DWARF64 doesn't work.
 */

/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <asm/dwarf.h>
#include <asm/unwinder.h>
#include <asm/sections.h>
#include <asm/unaligned.h>
#include <asm/stacktrace.h>

static LIST_HEAD(dwarf_cie_list);
DEFINE_SPINLOCK(dwarf_cie_lock);

static LIST_HEAD(dwarf_fde_list);
DEFINE_SPINLOCK(dwarf_fde_lock);

static struct dwarf_cie *cached_cie;

/*
 * Figure out whether we need to allocate some dwarf registers. If dwarf
 * registers have already been allocated then we may need to realloc
 * them. "reg" is a register number that we need to be able to access
 * after this call.
 *
 * Register numbers start at zero, therefore we need to allocate space
 * for "reg" + 1 registers.
 */
static void dwarf_frame_alloc_regs(struct dwarf_frame *frame,
				   unsigned int reg)
{
	struct dwarf_reg *regs;
	unsigned int num_regs = reg + 1;
	size_t new_size;
	size_t old_size;

	new_size = num_regs * sizeof(*regs);
	old_size = frame->num_regs * sizeof(*regs);

	/* Fast path: don't allocate any regs if we've already got enough. */
	if (frame->num_regs >= num_regs)
		return;

	regs = kzalloc(new_size, GFP_ATOMIC);
	if (!regs) {
		printk(KERN_WARNING "Unable to allocate DWARF registers\n");
		/*
		 * Let's just bomb hard here, we have no way to
		 * gracefully recover.
		 */
		BUG();
	}

	if (frame->regs) {
		memcpy(regs, frame->regs, old_size);
		kfree(frame->regs);
	}

	frame->regs = regs;
	frame->num_regs = num_regs;
}

/**
 * dwarf_read_addr - read dwarf data
 * @src: source address of data
 * @dst: destination address to store the data to
 *
 * Read 'n' bytes from @src, where 'n' is the size of an address on
 * the native machine. We have to be careful when reading from @src
 * and writing to @dst because they can be arbitrarily aligned.
 * Return 'n', the number of bytes read.
 */
static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
{
	u32 val = get_unaligned(src);
	put_unaligned(val, dst);
	return sizeof(unsigned long *);
}

/**
 * dwarf_read_uleb128 - read unsigned LEB128 data
 * @addr: the address where the ULEB128 data is stored
 * @ret: address to store the result
 *
 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
 * from Appendix C of the DWARF 3 spec. For information on the
 * encodings refer to section "7.6 - Variable Length Data". Return
 * the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int result;
	unsigned char byte;
	int shift, count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		count++;

		result |= (byte & 0x7f) << shift;
		shift += 7;

		if (!(byte & 0x80))
			break;
	}

	*ret = result;

	return count;
}

/**
 * dwarf_read_leb128 - read signed LEB128 data
 * @addr: the address of the LEB128 encoded data
 * @ret: address to store the result
 *
 * Decode signed LEB128 data. The algorithm is taken from Appendix
 * C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	if ((shift < num_bits) && (byte & 0x40))
		result |= (-1 << shift);

	*ret = result;

	return count;
}
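
/*
 * Worked LEB128 examples (purely illustrative, not used by the code):
 *
 * Unsigned: the byte sequence 0xe5 0x8e 0x26 decodes as
 *
 *	0xe5 -> low 7 bits 0x65, continuation bit set
 *	0x8e -> low 7 bits 0x0e, continuation bit set
 *	0x26 -> low 7 bits 0x26, continuation bit clear
 *
 * giving 0x65 | (0x0e << 7) | (0x26 << 14) = 0x98765 = 624485, with
 * dwarf_read_uleb128() returning 3, the number of bytes consumed.
 *
 * Signed: the byte sequence 0x80 0x7f decodes to 0x7f << 7 = 0x3f80
 * with shift == 14. Bit 6 (0x40) of the final byte is set, so
 * dwarf_read_leb128() sign-extends the result to -128.
 */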

/**
 * dwarf_read_encoded_value - return the decoded value at @addr
 * @addr: the address of the encoded value
 * @val: where to write the decoded value
 * @encoding: the encoding with which we can decode @addr
 *
 * GCC emits encoded addresses in the .eh_frame FDE entries. Decode
 * the value at @addr using @encoding. The decoded value is written
 * to @val and the number of bytes read is returned.
 */
static int dwarf_read_encoded_value(char *addr, unsigned long *val,
				    char encoding)
{
	unsigned long decoded_addr = 0;
	int count = 0;

	switch (encoding & 0x70) {
	case DW_EH_PE_absptr:
		break;
	case DW_EH_PE_pcrel:
		decoded_addr = (unsigned long)addr;
		break;
	default:
		pr_debug("encoding=0x%x\n", (encoding & 0x70));
		BUG();
	}

	if ((encoding & 0x07) == 0x00)
		encoding |= DW_EH_PE_udata4;

	switch (encoding & 0x0f) {
	case DW_EH_PE_sdata4:
	case DW_EH_PE_udata4:
		count += 4;
		decoded_addr += get_unaligned((u32 *)addr);
		__raw_writel(decoded_addr, val);
		break;
	default:
		pr_debug("encoding=0x%x\n", encoding);
		BUG();
	}

	return count;
}

/**
 * dwarf_entry_len - return the length of an FDE or CIE
 * @addr: the address of the entry
 * @len: the length of the entry
 *
 * Read the initial_length field of the entry and store the size of
 * the entry in @len. We return the number of bytes read. Return a
 * count of 0 on error.
 */
static inline int dwarf_entry_len(char *addr, unsigned long *len)
{
	u32 initial_len;
	int count;

	initial_len = get_unaligned((u32 *)addr);
	count = 4;

	/*
	 * An initial length field value in the range DW_EXT_LO -
	 * DW_EXT_HI indicates an extension, and should not be
	 * interpreted as a length. The only extension that we currently
	 * understand is the use of DWARF64 addresses.
	 */
	if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
		/*
		 * The 64-bit length field immediately follows the
		 * compulsory 32-bit length field.
		 */
		if (initial_len == DW_EXT_DWARF64) {
			*len = get_unaligned((u64 *)(addr + 4));
			count = 12;
		} else {
			printk(KERN_WARNING "Unknown DWARF extension\n");
			count = 0;
		}
	} else
		*len = initial_len;

	return count;
}
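
/*
 * For reference, the initial length values come from section 7.4 of
 * the DWARF 3 spec (the DW_EXT_* constants in <asm/dwarf.h> are
 * assumed to mirror these):
 *
 *	0x00000000 - 0xffffffef	a plain 32-bit entry length
 *	0xfffffff0 - 0xfffffffe	reserved extension values
 *	0xffffffff		DWARF64: a 64-bit length follows
 *
 * A DWARF64 entry therefore consumes 12 bytes of initial length, which
 * is why dwarf_entry_len() returns 12 in that case.
 */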

/**
 * dwarf_lookup_cie - locate the cie
 * @cie_ptr: pointer to help with lookup
 */
static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
{
	struct dwarf_cie *cie, *n;
	unsigned long flags;

	spin_lock_irqsave(&dwarf_cie_lock, flags);

	/*
	 * We've cached the last CIE we looked up because chances are
	 * that the FDE wants this CIE.
	 */
	if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
		cie = cached_cie;
		goto out;
	}

	list_for_each_entry_safe(cie, n, &dwarf_cie_list, link) {
		if (cie->cie_pointer == cie_ptr) {
			cached_cie = cie;
			break;
		}
	}

	/* Couldn't find the entry in the list. */
	if (&cie->link == &dwarf_cie_list)
		cie = NULL;
out:
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);
	return cie;
}

/**
 * dwarf_lookup_fde - locate the FDE that covers pc
 * @pc: the program counter
 */
struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
{
	unsigned long flags;
	struct dwarf_fde *fde, *n;

	spin_lock_irqsave(&dwarf_fde_lock, flags);
	list_for_each_entry_safe(fde, n, &dwarf_fde_list, link) {
		unsigned long start, end;

		start = fde->initial_location;
		end = fde->initial_location + fde->address_range;

		if (pc >= start && pc < end)
			break;
	}

	/* Couldn't find the entry in the list. */
	if (&fde->link == &dwarf_fde_list)
		fde = NULL;

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return fde;
}
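
/*
 * A sketch of how the interpreter below consumes a typical CFI
 * program (the register numbers and offsets here are made up for
 * illustration only):
 *
 *	DW_CFA_def_cfa: r15 ofs 0	frame->cfa_register = 15,
 *					frame->cfa_offset = 0
 *	DW_CFA_advance_loc: 4		frame->pc += 4 * code_align
 *	DW_CFA_def_cfa_offset: 16	frame->cfa_offset = 16
 *	DW_CFA_offset: r17, 1		frame->regs[17].addr =
 *					    1 * data_align (e.g. -4),
 *					flagged DWARF_REG_OFFSET
 *
 * Execution stops once frame->pc has advanced past the pc we were
 * asked about, leaving the CFA and register rules for that location
 * in the frame.
 */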

/**
 * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
 * @insn_start: address of the first instruction
 * @insn_end: address immediately after the last instruction
 * @cie: the CIE for this function
 * @fde: the FDE for this function
 * @frame: the instructions calculate the CFA for this frame
 * @pc: the program counter of the address we're interested in
 *
 * Execute the Call Frame instruction sequence starting at
 * @insn_start and ending at @insn_end. The instructions describe
 * how to calculate the Canonical Frame Address of a stackframe.
 * Store the results in @frame.
 */
static int dwarf_cfa_execute_insns(unsigned char *insn_start,
				   unsigned char *insn_end,
				   struct dwarf_cie *cie,
				   struct dwarf_fde *fde,
				   struct dwarf_frame *frame,
				   unsigned long pc)
{
	unsigned char insn;
	unsigned char *current_insn;
	unsigned int count, delta, reg, expr_len, offset;

	current_insn = insn_start;

	while (current_insn < insn_end && frame->pc <= pc) {
		insn = __raw_readb(current_insn++);

		/*
		 * Firstly, handle the opcodes that embed their operands
		 * in the instructions.
		 */
		switch (DW_CFA_opcode(insn)) {
		case DW_CFA_advance_loc:
			delta = DW_CFA_operand(insn);
			delta *= cie->code_alignment_factor;
			frame->pc += delta;
			continue;
			/* NOTREACHED */
		case DW_CFA_offset:
			reg = DW_CFA_operand(insn);
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			dwarf_frame_alloc_regs(frame, reg);
			frame->regs[reg].addr = offset;
			frame->regs[reg].flags |= DWARF_REG_OFFSET;
			continue;
			/* NOTREACHED */
		case DW_CFA_restore:
			reg = DW_CFA_operand(insn);
			continue;
			/* NOTREACHED */
		}

		/*
		 * Secondly, handle the opcodes that don't embed their
		 * operands in the instruction.
		 */
		switch (insn) {
		case DW_CFA_nop:
			continue;
		case DW_CFA_advance_loc1:
			delta = *current_insn++;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc2:
			delta = get_unaligned((u16 *)current_insn);
			current_insn += 2;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_advance_loc4:
			delta = get_unaligned((u32 *)current_insn);
			current_insn += 4;
			frame->pc += delta * cie->code_alignment_factor;
			break;
		case DW_CFA_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			break;
		case DW_CFA_restore_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			break;
		case DW_CFA_undefined:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			break;
		case DW_CFA_def_cfa:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_offset);
			current_insn += count;

			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_register:
			count = dwarf_read_uleb128(current_insn,
						   &frame->cfa_register);
			current_insn += count;
			frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
			break;
		case DW_CFA_def_cfa_offset:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			frame->cfa_offset = offset;
			break;
		case DW_CFA_def_cfa_expression:
			count = dwarf_read_uleb128(current_insn, &expr_len);
			current_insn += count;

			frame->cfa_expr = current_insn;
			frame->cfa_expr_len = expr_len;
			current_insn += expr_len;

			frame->flags |= DWARF_FRAME_CFA_REG_EXP;
			break;
		case DW_CFA_offset_extended_sf:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			dwarf_frame_alloc_regs(frame, reg);
			frame->regs[reg].flags |= DWARF_REG_OFFSET;
			frame->regs[reg].addr = offset;
			break;
		case DW_CFA_val_offset:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_leb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			dwarf_frame_alloc_regs(frame, reg);
			frame->regs[reg].flags |= DWARF_REG_OFFSET;
			frame->regs[reg].addr = offset;
			break;
		case DW_CFA_GNU_args_size:
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			break;
		case DW_CFA_GNU_negative_offset_extended:
			count = dwarf_read_uleb128(current_insn, &reg);
			current_insn += count;
			count = dwarf_read_uleb128(current_insn, &offset);
			current_insn += count;
			offset *= cie->data_alignment_factor;
			dwarf_frame_alloc_regs(frame, reg);
			frame->regs[reg].flags |= DWARF_REG_OFFSET;
			frame->regs[reg].addr = -offset;
			break;
		default:
			pr_debug("unhandled DWARF instruction 0x%x\n", insn);
			break;
		}
	}

	return 0;
}

/**
 * dwarf_unwind_stack - recursively unwind the stack
 * @pc: address of the function to unwind
 * @prev: struct dwarf_frame of the previous stackframe on the callstack
 *
 * Return a struct dwarf_frame representing the most recent frame
 * on the callstack. Each of the lower (older) stack frames is
 * linked via the "prev" member.
 */
struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
				       struct dwarf_frame *prev)
{
	struct dwarf_frame *frame;
	struct dwarf_cie *cie;
	struct dwarf_fde *fde;
	unsigned long addr;
	int i, offset;

	/*
	 * If this is the first invocation of this recursive function we
	 * need to get the contents of a physical register to get the
	 * CFA in order to begin the virtual unwinding of the stack.
	 *
	 * NOTE: the return address is guaranteed to be setup by the
	 * time this function makes its first function call.
	 */
	if (!pc && !prev)
		pc = (unsigned long)current_text_addr();

	frame = kzalloc(sizeof(*frame), GFP_ATOMIC);
	if (!frame)
		return NULL;

	frame->prev = prev;

	fde = dwarf_lookup_fde(pc);
	if (!fde) {
		/*
		 * This is our normal exit path - the one that stops the
		 * recursion. There are two reasons why we might exit
		 * here,
		 *
		 *	a) pc has no associated DWARF frame info and so
		 *	we don't know how to unwind this frame. This is
		 *	usually the case when we're trying to unwind a
		 *	frame that was called from some assembly code
		 *	that has no DWARF info, e.g. syscalls.
		 *
		 *	b) the debug info for pc is bogus. There's
		 *	really no way to distinguish this case from the
		 *	case above, which sucks because we could print a
		 *	warning here.
		 */
		kfree(frame);
		return NULL;
	}

	cie = dwarf_lookup_cie(fde->cie_pointer);

	frame->pc = fde->initial_location;

	/* CIE initial instructions */
	dwarf_cfa_execute_insns(cie->initial_instructions,
				cie->instructions_end, cie, fde,
				frame, pc);

	/* FDE instructions */
	dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
				fde, frame, pc);

	/* Calculate the CFA */
	switch (frame->flags) {
	case DWARF_FRAME_CFA_REG_OFFSET:
		if (prev) {
			BUG_ON(!prev->regs[frame->cfa_register].flags);

			addr = prev->cfa;
			addr += prev->regs[frame->cfa_register].addr;
			frame->cfa = __raw_readl(addr);

		} else {
			/*
			 * Again, this is the first invocation of this
			 * recursive function. We need to physically
			 * read the contents of a register in order to
			 * get the Canonical Frame Address for this
			 * function.
			 */
			frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
		}

		frame->cfa += frame->cfa_offset;
		break;
	default:
		BUG();
	}

	/* If we haven't seen the return address reg, we're screwed. */
	BUG_ON(!frame->regs[DWARF_ARCH_RA_REG].flags);

	for (i = 0; i < frame->num_regs; i++) {
		struct dwarf_reg *reg = &frame->regs[i];

		if (!reg->flags)
			continue;

		offset = reg->addr;
		offset += frame->cfa;
	}

	addr = frame->cfa + frame->regs[DWARF_ARCH_RA_REG].addr;
	frame->return_addr = __raw_readl(addr);

	frame->next = dwarf_unwind_stack(frame->return_addr, frame);
	return frame;
}
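
/*
 * Example use of dwarf_unwind_stack() (this is essentially what
 * dwarf_unwinder_dump() below does): pass pc == 0 and prev == NULL to
 * start unwinding from the current location and then walk the chain
 * of frames built up by the recursion:
 *
 *	struct dwarf_frame *frame = dwarf_unwind_stack(0, NULL);
 *
 *	while (frame && frame->return_addr) {
 *		printk("%08lx\n", frame->return_addr);
 *		frame = frame->next;
 *	}
 *
 * The frames are kzalloc()ed with GFP_ATOMIC, so anyone who cares
 * about the memory has to free the chain themselves.
 */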

static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
			   unsigned char *end)
{
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;

	cie = kzalloc(sizeof(*cie), GFP_KERNEL);
	if (!cie)
		return -ENOMEM;

	cie->length = len;

	/*
	 * Record the offset into the .eh_frame section
	 * for this CIE. It allows this CIE to be
	 * quickly and easily looked up from the
	 * corresponding FDE.
	 */
	cie->cie_pointer = (unsigned long)entry;

	cie->version = *(char *)p++;
	BUG_ON(cie->version != 1);

	cie->augmentation = p;
	p += strlen(cie->augmentation) + 1;

	count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
	p += count;

	count = dwarf_read_leb128(p, &cie->data_alignment_factor);
	p += count;

	/*
	 * Which column in the rule table contains the
	 * return address?
	 */
	if (cie->version == 1) {
		cie->return_address_reg = __raw_readb(p);
		p++;
	} else {
		count = dwarf_read_uleb128(p, &cie->return_address_reg);
		p += count;
	}

	if (cie->augmentation[0] == 'z') {
		unsigned int length, count;
		cie->flags |= DWARF_CIE_Z_AUGMENTATION;

		count = dwarf_read_uleb128(p, &length);
		p += count;

		BUG_ON((unsigned char *)p > end);

		cie->initial_instructions = p + length;
		cie->augmentation++;
	}

	while (*cie->augmentation) {
		/*
		 * "L" indicates a byte showing how the
		 * LSDA pointer is encoded. Skip it.
		 */
		if (*cie->augmentation == 'L') {
			p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'R') {
			/*
			 * "R" indicates a byte showing
			 * how FDE addresses are
			 * encoded.
			 */
			cie->encoding = *(char *)p++;
			cie->augmentation++;
		} else if (*cie->augmentation == 'P') {
			/*
			 * "P" indicates a personality
			 * routine in the CIE
			 * augmentation.
			 */
			BUG();
		} else if (*cie->augmentation == 'S') {
			BUG();
		} else {
			/*
			 * Unknown augmentation. Fall back to
			 * the position that the 'z' augmentation
			 * data length gave us.
			 */
			p = cie->initial_instructions;
			BUG_ON(!p);
			break;
		}
	}

	cie->initial_instructions = p;
	cie->instructions_end = end;

	/* Add to list */
	spin_lock_irqsave(&dwarf_cie_lock, flags);
	list_add_tail(&cie->link, &dwarf_cie_list);
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	return 0;
}
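
/*
 * For illustration, a typical GCC-emitted .eh_frame CIE that the
 * parser above accepts looks roughly like this (the concrete values
 * are examples, not requirements):
 *
 *	length			4 bytes	(consumed by the caller)
 *	CIE id			4 bytes	0x00000000
 *	version			1 byte	0x01
 *	augmentation		string	"zR"
 *	code alignment factor	ULEB128	e.g. 1
 *	data alignment factor	SLEB128	e.g. -4
 *	return address column	1 byte	e.g. 17
 *	augmentation data len	ULEB128	e.g. 1
 *	'R' data		1 byte	FDE pointer encoding, commonly
 *					DW_EH_PE_pcrel | DW_EH_PE_sdata4
 *	initial instructions	whatever remains of the entry
 */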

static int dwarf_parse_fde(void *entry, u32 entry_type,
			   void *start, unsigned long len)
{
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;
	void *p = start;

	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
	if (!fde)
		return -ENOMEM;

	fde->length = len;

	/*
	 * In a .eh_frame section the CIE pointer is stored as the
	 * delta between the CIE-pointer field inside the FDE and the
	 * CIE itself. 'p' points just past that field, so working
	 * backwards from it gives us the address of the CIE.
	 */
	fde->cie_pointer = (unsigned long)(p - entry_type - 4);

	cie = dwarf_lookup_cie(fde->cie_pointer);
	fde->cie = cie;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->initial_location,
						 cie->encoding);
	else
		count = dwarf_read_addr(p, &fde->initial_location);

	p += count;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->address_range,
						 cie->encoding & 0x0f);
	else
		count = dwarf_read_addr(p, &fde->address_range);

	p += count;

	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
		unsigned int length;
		count = dwarf_read_uleb128(p, &length);
		p += count + length;
	}

	/* Call frame instructions. */
	fde->instructions = p;
	fde->end = start + len;

	/* Add to list. */
	spin_lock_irqsave(&dwarf_fde_lock, flags);
	list_add_tail(&fde->link, &dwarf_fde_list);
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return 0;
}

static void dwarf_unwinder_dump(struct task_struct *task, struct pt_regs *regs,
				unsigned long *sp,
				const struct stacktrace_ops *ops, void *data)
{
	struct dwarf_frame *frame;

	frame = dwarf_unwind_stack(0, NULL);

	while (frame && frame->return_addr) {
		ops->address(data, frame->return_addr, 1);
		frame = frame->next;
	}
}

static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};

static void dwarf_unwinder_cleanup(void)
{
	struct dwarf_cie *cie, *m;
	struct dwarf_fde *fde, *n;
	unsigned long flags;

	/*
	 * Deallocate all the memory allocated for the DWARF unwinder.
	 * Traverse all the FDE/CIE lists and remove and free all the
	 * memory associated with those data structures.
	 */
	spin_lock_irqsave(&dwarf_cie_lock, flags);
	list_for_each_entry_safe(cie, m, &dwarf_cie_list, link)
		kfree(cie);
	spin_unlock_irqrestore(&dwarf_cie_lock, flags);

	spin_lock_irqsave(&dwarf_fde_lock, flags);
	list_for_each_entry_safe(fde, n, &dwarf_fde_list, link)
		kfree(fde);
	spin_unlock_irqrestore(&dwarf_fde_lock, flags);
}
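
/*
 * The layout that dwarf_unwinder_init() below walks is a simple
 * back-to-back packing of CIE and FDE records (sizes shown are for
 * the 32-bit DWARF format, which is all we handle today):
 *
 *	+--------+----------------+-------------+--------+--- ...
 *	| length | CIE id/pointer | payload     | length |
 *	+--------+----------------+-------------+--------+--- ...
 *	 4 bytes      4 bytes      len - 4 bytes
 *
 * 'length' does not count itself, so the next record always starts
 * at entry + 4 + len, which is how the loop below steps through the
 * section.
 */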

/**
 * dwarf_unwinder_init - initialise the dwarf unwinder
 *
 * Build the data structures describing the .eh_frame section to
 * make it easier to look up CIE and FDE entries. Because the
 * .eh_frame section is packed as tightly as possible it is not
 * easy to look up the FDE for a given PC, so we build a list of FDE
 * and CIE entries that make it easier.
 */
void dwarf_unwinder_init(void)
{
	u32 entry_type;
	void *p, *entry;
	int count, err;
	unsigned long len;
	unsigned int c_entries, f_entries;
	unsigned char *end;

	INIT_LIST_HEAD(&dwarf_cie_list);
	INIT_LIST_HEAD(&dwarf_fde_list);

	c_entries = 0;
	f_entries = 0;
	entry = &__start_eh_frame;

	while ((char *)entry < __stop_eh_frame) {
		p = entry;

		count = dwarf_entry_len(p, &len);
		if (count == 0) {
			/*
			 * We read a bogus length field value. There is
			 * nothing we can do here apart from disabling
			 * the DWARF unwinder. We can't even skip this
			 * entry and move to the next one because 'len'
			 * tells us where our next entry is.
			 */
			err = -EINVAL;
			goto out;
		} else
			p += count;

		/* initial length does not include itself */
		end = p + len;

		entry_type = get_unaligned((u32 *)p);
		p += 4;

		if (entry_type == DW_EH_FRAME_CIE) {
			err = dwarf_parse_cie(entry, p, len, end);
			if (err < 0)
				goto out;
			else
				c_entries++;
		} else {
			err = dwarf_parse_fde(entry, entry_type, p, len);
			if (err < 0)
				goto out;
			else
				f_entries++;
		}

		entry = (char *)entry + len + 4;
	}

	printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
	       c_entries, f_entries);

	err = unwinder_register(&dwarf_unwinder);
	if (err)
		goto out;

	return;

out:
	printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
	dwarf_unwinder_cleanup();
}