/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-annotate.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "util.h"
#include "ui/ui.h"
#include "sort.h"
#include "build-id.h"
#include "color.h"
#include "cache.h"
#include "symbol.h"
#include "debug.h"
#include "annotate.h"
#include "evsel.h"
#include "block-range.h"
#include "arch/common.h"
#include <regex.h>
#include <pthread.h>
#include <linux/bitops.h>
#include <sys/utsname.h>

const char *disassembler_style;
const char *objdump_path;
static regex_t file_lineno;

static struct ins_ops *ins__find(struct arch *arch, const char *name);
static void ins__sort(struct arch *arch);
static int disasm_line__parse(char *line, const char **namep, char **rawp);

struct arch {
	const char *name;
	struct ins *instructions;
	size_t nr_instructions;
	size_t nr_instructions_allocated;
	struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name);
	bool sorted_instructions;
	bool initialized;
	void *priv;
	int (*init)(struct arch *arch);
	struct {
		char comment_char;
		char skip_functions_char;
	} objdump;
};

static struct ins_ops call_ops;
static struct ins_ops dec_ops;
static struct ins_ops jump_ops;
static struct ins_ops mov_ops;
static struct ins_ops nop_ops;
static struct ins_ops lock_ops;
static struct ins_ops ret_ops;

static int arch__grow_instructions(struct arch *arch)
{
	struct ins *new_instructions;
	size_t new_nr_allocated;

	if (arch->nr_instructions_allocated == 0 && arch->instructions)
		goto grow_from_non_allocated_table;

	new_nr_allocated = arch->nr_instructions_allocated + 128;
	new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
	if (new_instructions == NULL)
		return -1;

out_update_instructions:
	arch->instructions = new_instructions;
	arch->nr_instructions_allocated = new_nr_allocated;
	return 0;

grow_from_non_allocated_table:
	new_nr_allocated = arch->nr_instructions + 128;
	new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
	if (new_instructions == NULL)
		return -1;

	/* Copy nr_instructions entries, not bytes. */
	memcpy(new_instructions, arch->instructions, arch->nr_instructions * sizeof(struct ins));
	goto out_update_instructions;
}

static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
{
	struct ins *ins;

	if (arch->nr_instructions == arch->nr_instructions_allocated &&
	    arch__grow_instructions(arch))
		return -1;

	ins = &arch->instructions[arch->nr_instructions];
	ins->name = strdup(name);
	if (!ins->name)
		return -1;

	ins->ops = ops;
	arch->nr_instructions++;

	ins__sort(arch);
	return 0;
}
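
/*
 * Each file included below provides the static instruction table and/or the
 * <arch>__annotate_init() hook referenced by the architectures[] array that
 * follows; the init hook may also set associate_instruction_ops and the
 * objdump quirks (comment/skip characters) for that architecture.
 */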

#include "arch/arm/annotate/instructions.c"
#include "arch/arm64/annotate/instructions.c"
#include "arch/x86/annotate/instructions.c"
#include "arch/powerpc/annotate/instructions.c"

static struct arch architectures[] = {
	{
		.name = "arm",
		.init = arm__annotate_init,
	},
	{
		.name = "arm64",
		.init = arm64__annotate_init,
	},
	{
		.name = "x86",
		.instructions = x86__instructions,
		.nr_instructions = ARRAY_SIZE(x86__instructions),
		.objdump = {
			.comment_char = '#',
		},
	},
	{
		.name = "powerpc",
		.init = powerpc__annotate_init,
	},
};

static void ins__delete(struct ins_operands *ops)
{
	if (ops == NULL)
		return;
	zfree(&ops->source.raw);
	zfree(&ops->source.name);
	zfree(&ops->target.raw);
	zfree(&ops->target.name);
}

static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size,
			      struct ins_operands *ops)
{
	return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw);
}

int ins__scnprintf(struct ins *ins, char *bf, size_t size,
		   struct ins_operands *ops)
{
	if (ins->ops->scnprintf)
		return ins->ops->scnprintf(ins, bf, size, ops);

	return ins__raw_scnprintf(ins, bf, size, ops);
}

static int call__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
{
	char *endptr, *tok, *name;

	ops->target.addr = strtoull(ops->raw, &endptr, 16);

	name = strchr(endptr, '<');
	if (name == NULL)
		goto indirect_call;

	name++;

	if (arch->objdump.skip_functions_char &&
	    strchr(name, arch->objdump.skip_functions_char))
		return -1;

	tok = strchr(name, '>');
	if (tok == NULL)
		return -1;

	*tok = '\0';
	ops->target.name = strdup(name);
	*tok = '>';

	return ops->target.name == NULL ? -1 : 0;

indirect_call:
	tok = strchr(endptr, '*');
	if (tok == NULL) {
		struct symbol *sym = map__find_symbol(map, map->map_ip(map, ops->target.addr));
		if (sym != NULL)
			ops->target.name = strdup(sym->name);
		else
			ops->target.addr = 0;
		return 0;
	}

	ops->target.addr = strtoull(tok + 1, NULL, 16);
	return 0;
}

static int call__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	if (ops->target.name)
		return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name);

	if (ops->target.addr == 0)
		return ins__raw_scnprintf(ins, bf, size, ops);

	return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr);
}

static struct ins_ops call_ops = {
	.parse	   = call__parse,
	.scnprintf = call__scnprintf,
};

bool ins__is_call(const struct ins *ins)
{
	return ins->ops == &call_ops;
}

static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
{
	const char *s = strchr(ops->raw, '+');

	ops->target.addr = strtoull(ops->raw, NULL, 16);

	if (s++ != NULL)
		ops->target.offset = strtoull(s, NULL, 16);
	else
		ops->target.offset = UINT64_MAX;

	return 0;
}

static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	if (!ops->target.addr)
		return ins__raw_scnprintf(ins, bf, size, ops);

	return scnprintf(bf, size, "%-6.6s %" PRIx64, ins->name, ops->target.offset);
}

static struct ins_ops jump_ops = {
	.parse	   = jump__parse,
	.scnprintf = jump__scnprintf,
};

bool ins__is_jump(const struct ins *ins)
{
	return ins->ops == &jump_ops;
}

static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep)
{
	char *endptr, *name, *t;

	if (strstr(raw, "(%rip)") == NULL)
		return 0;

	*addrp = strtoull(comment, &endptr, 16);
	name = strchr(endptr, '<');
	if (name == NULL)
		return -1;

	name++;

	t = strchr(name, '>');
	if (t == NULL)
		return 0;

	*t = '\0';
	*namep = strdup(name);
	*t = '>';

	return 0;
}
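
/*
 * "lock" is an instruction prefix (e.g. "lock cmpxchg %rsi,(%rdi)"): parse
 * the instruction that follows the prefix and reuse its ins_ops, falling
 * back to raw printing when that fails.
 */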

static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map *map)
{
	ops->locked.ops = zalloc(sizeof(*ops->locked.ops));
	if (ops->locked.ops == NULL)
		return 0;

	if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0)
		goto out_free_ops;

	ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name);

	if (ops->locked.ins.ops == NULL)
		goto out_free_ops;

	if (ops->locked.ins.ops->parse &&
	    ops->locked.ins.ops->parse(arch, ops->locked.ops, map) < 0)
		goto out_free_ops;

	return 0;

out_free_ops:
	zfree(&ops->locked.ops);
	return 0;
}

static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
			   struct ins_operands *ops)
{
	int printed;

	if (ops->locked.ins.ops == NULL)
		return ins__raw_scnprintf(ins, bf, size, ops);

	printed = scnprintf(bf, size, "%-6.6s ", ins->name);
	return printed + ins__scnprintf(&ops->locked.ins, bf + printed,
					size - printed, ops->locked.ops);
}

static void lock__delete(struct ins_operands *ops)
{
	struct ins *ins = &ops->locked.ins;

	if (ins->ops && ins->ops->free)
		ins->ops->free(ops->locked.ops);
	else
		ins__delete(ops->locked.ops);

	zfree(&ops->locked.ops);
	zfree(&ops->target.raw);
	zfree(&ops->target.name);
}

static struct ins_ops lock_ops = {
	.free	   = lock__delete,
	.parse	   = lock__parse,
	.scnprintf = lock__scnprintf,
};

static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *map __maybe_unused)
{
	char *s = strchr(ops->raw, ','), *target, *comment, prev;

	if (s == NULL)
		return -1;

	*s = '\0';
	ops->source.raw = strdup(ops->raw);
	*s = ',';

	if (ops->source.raw == NULL)
		return -1;

	target = ++s;
	comment = strchr(s, arch->objdump.comment_char);

	if (comment != NULL)
		s = comment - 1;
	else
		s = strchr(s, '\0') - 1;

	while (s > target && isspace(s[0]))
		--s;
	s++;
	prev = *s;
	*s = '\0';

	ops->target.raw = strdup(target);
	*s = prev;

	if (ops->target.raw == NULL)
		goto out_free_source;

	if (comment == NULL)
		return 0;

	while (comment[0] != '\0' && isspace(comment[0]))
		++comment;

	comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name);
	comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);

	return 0;

out_free_source:
	zfree(&ops->source.raw);
	return -1;
}

static int mov__scnprintf(struct ins *ins, char *bf, size_t size,
			  struct ins_operands *ops)
{
	return scnprintf(bf, size, "%-6.6s %s,%s", ins->name,
			 ops->source.name ?: ops->source.raw,
			 ops->target.name ?: ops->target.raw);
}

static struct ins_ops mov_ops = {
	.parse	   = mov__parse,
	.scnprintf = mov__scnprintf,
};

static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused)
{
	char *target, *comment, *s, prev;

	target = s = ops->raw;

	while (s[0] != '\0' && !isspace(s[0]))
		++s;
	prev = *s;
	*s = '\0';

	ops->target.raw = strdup(target);
	*s = prev;

	if (ops->target.raw == NULL)
		return -1;

	comment = strchr(s, arch->objdump.comment_char);
	if (comment == NULL)
		return 0;

	while (comment[0] != '\0' && isspace(comment[0]))
		++comment;

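	/*
	 * Resolve an objdump comment such as "# 70afe0 <_DYNAMIC+0x2f8>" into
	 * target.addr/target.name (only done for %rip-relative operands).
	 */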
	comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name);

	return 0;
}

static int dec__scnprintf(struct ins *ins, char *bf, size_t size,
			  struct ins_operands *ops)
{
	return scnprintf(bf, size, "%-6.6s %s", ins->name,
			 ops->target.name ?: ops->target.raw);
}

static struct ins_ops dec_ops = {
	.parse	   = dec__parse,
	.scnprintf = dec__scnprintf,
};

static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size,
			  struct ins_operands *ops __maybe_unused)
{
	return scnprintf(bf, size, "%-6.6s", "nop");
}

static struct ins_ops nop_ops = {
	.scnprintf = nop__scnprintf,
};

static struct ins_ops ret_ops = {
	.scnprintf = ins__raw_scnprintf,
};

bool ins__is_ret(const struct ins *ins)
{
	return ins->ops == &ret_ops;
}

static int ins__key_cmp(const void *name, const void *insp)
{
	const struct ins *ins = insp;

	return strcmp(name, ins->name);
}

static int ins__cmp(const void *a, const void *b)
{
	const struct ins *ia = a;
	const struct ins *ib = b;

	return strcmp(ia->name, ib->name);
}

static void ins__sort(struct arch *arch)
{
	const int nmemb = arch->nr_instructions;

	qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp);
}

static struct ins_ops *__ins__find(struct arch *arch, const char *name)
{
	struct ins *ins;
	const int nmemb = arch->nr_instructions;

	if (!arch->sorted_instructions) {
		ins__sort(arch);
		arch->sorted_instructions = true;
	}

	ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp);
	return ins ? ins->ops : NULL;
}

static struct ins_ops *ins__find(struct arch *arch, const char *name)
{
	struct ins_ops *ops = __ins__find(arch, name);

	if (!ops && arch->associate_instruction_ops)
		ops = arch->associate_instruction_ops(arch, name);

	return ops;
}

static int arch__key_cmp(const void *name, const void *archp)
{
	const struct arch *arch = archp;

	return strcmp(name, arch->name);
}

static int arch__cmp(const void *a, const void *b)
{
	const struct arch *aa = a;
	const struct arch *ab = b;

	return strcmp(aa->name, ab->name);
}

static void arch__sort(void)
{
	const int nmemb = ARRAY_SIZE(architectures);

	qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
}

static struct arch *arch__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(architectures);
	static bool sorted;

	if (!sorted) {
		arch__sort();
		sorted = true;
	}

	return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
}

int symbol__alloc_hist(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	const size_t size = symbol__size(sym);
	size_t sizeof_sym_hist;

	/* Check for overflow when calculating sizeof_sym_hist */
	if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(u64))
		return -1;

	sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(u64));

	/* Check for overflow in zalloc argument */
	if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
	    / symbol_conf.nr_events)
		return -1;

	notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
	if (notes->src == NULL)
		return -1;
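	/*
	 * Layout of the block allocated above: the *notes->src header is
	 * followed by symbol_conf.nr_events histograms, each of them a
	 * struct sym_hist plus one u64 counter per byte of the symbol.
	 */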
	notes->src->sizeof_sym_hist = sizeof_sym_hist;
	notes->src->nr_histograms   = symbol_conf.nr_events;
	INIT_LIST_HEAD(&notes->src->source);
	return 0;
}

/* The cycles histogram is lazily allocated. */
static int symbol__alloc_hist_cycles(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	const size_t size = symbol__size(sym);

	notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
	if (notes->src->cycles_hist == NULL)
		return -1;
	return 0;
}

void symbol__annotate_zero_histograms(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);

	pthread_mutex_lock(&notes->lock);
	if (notes->src != NULL) {
		memset(notes->src->histograms, 0,
		       notes->src->nr_histograms * notes->src->sizeof_sym_hist);
		if (notes->src->cycles_hist)
			memset(notes->src->cycles_hist, 0,
			       symbol__size(sym) * sizeof(struct cyc_hist));
	}
	pthread_mutex_unlock(&notes->lock);
}

static int __symbol__account_cycles(struct annotation *notes,
				    u64 start,
				    unsigned offset, unsigned cycles,
				    unsigned have_start)
{
	struct cyc_hist *ch;

	ch = notes->src->cycles_hist;
	/*
	 * For now we can only account one basic block per
	 * final jump. But multiple could be overlapping.
	 * Always account the longest one. So when a shorter
	 * one has already been seen, throw it away.
	 *
	 * We separately always account the full cycles.
	 */
	ch[offset].num_aggr++;
	ch[offset].cycles_aggr += cycles;

	if (!have_start && ch[offset].have_start)
		return 0;
	if (ch[offset].num) {
		if (have_start && (!ch[offset].have_start ||
				   ch[offset].start > start)) {
			ch[offset].have_start = 0;
			ch[offset].cycles = 0;
			ch[offset].num = 0;
			if (ch[offset].reset < 0xffff)
				ch[offset].reset++;
		} else if (have_start &&
			   ch[offset].start < start)
			return 0;
	}
	ch[offset].have_start = have_start;
	ch[offset].start = start;
	ch[offset].cycles += cycles;
	ch[offset].num++;
	return 0;
}

static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
				      struct annotation *notes, int evidx, u64 addr)
{
	unsigned offset;
	struct sym_hist *h;

	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));

	if (addr < sym->start || addr >= sym->end) {
		pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n",
			 __func__, __LINE__, sym->name, sym->start, addr, sym->end);
		return -ERANGE;
	}

	offset = addr - sym->start;
	h = annotation__histogram(notes, evidx);
	h->sum++;
	h->addr[offset]++;

	pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64
		  ", evidx=%d] => %" PRIu64 "\n", sym->start, sym->name,
		  addr, addr - sym->start, evidx, h->addr[offset]);
	return 0;
}

static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles)
{
	struct annotation *notes = symbol__annotation(sym);

	if (notes->src == NULL) {
		if (symbol__alloc_hist(sym) < 0)
			return NULL;
	}
	if (!notes->src->cycles_hist && cycles) {
		if (symbol__alloc_hist_cycles(sym) < 0)
			return NULL;
	}
	return notes;
}

static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
				    int evidx, u64 addr)
{
	struct annotation *notes;

	if (sym == NULL)
		return 0;
	notes = symbol__get_annotation(sym, false);
	if (notes == NULL)
		return -ENOMEM;
	return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
}

static int symbol__account_cycles(u64 addr, u64 start,
				  struct symbol *sym, unsigned cycles)
{
	struct annotation *notes;
	unsigned offset;

	if (sym == NULL)
		return 0;
	notes = symbol__get_annotation(sym, true);
	if (notes == NULL)
		return -ENOMEM;
	if (addr < sym->start || addr >= sym->end)
		return -ERANGE;

	if (start) {
		if (start < sym->start || start >= sym->end)
			return -ERANGE;
		if (start >= addr)
			start = 0;
	}
	offset = addr - sym->start;
	return __symbol__account_cycles(notes,
					start ? start - sym->start : 0,
					offset, cycles,
					!!start);
}

int addr_map_symbol__account_cycles(struct addr_map_symbol *ams,
				    struct addr_map_symbol *start,
				    unsigned cycles)
{
	u64 saddr = 0;
	int err;

	if (!cycles)
		return 0;

	/*
	 * Only set start when IPC can be computed. We can only
	 * compute it when the basic block is completely in a single
	 * function.
	 * Special case the case when the jump is elsewhere, but
	 * it starts on the function start.
	 */
	if (start &&
	    (start->sym == ams->sym ||
	     (ams->sym &&
	      start->addr == ams->sym->start + ams->map->start)))
		saddr = start->al_addr;
	if (saddr == 0)
		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
			  ams->addr,
			  start ? start->addr : 0,
			  ams->sym ? ams->sym->start + ams->map->start : 0,
			  saddr);
	err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles);
	if (err)
		pr_debug2("account_cycles failed %d\n", err);
	return err;
}

int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx)
{
	return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr);
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map *map)
{
	dl->ins.ops = ins__find(arch, dl->ins.name);

	if (!dl->ins.ops)
		return;

	if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, map) < 0)
		dl->ins.ops = NULL;
}

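/*
 * Split one objdump instruction line into mnemonic and operands, e.g.
 * "  callq  416e70 <__gmon_start__@plt>" yields *namep = "callq" and
 * *rawp = "416e70 <__gmon_start__@plt>". *namep is a copy owned by the
 * caller; *rawp points into the original line.
 */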
static int disasm_line__parse(char *line, const char **namep, char **rawp)
{
	char *name = line, tmp;

	while (isspace(name[0]))
		++name;

	if (name[0] == '\0')
		return -1;

	*rawp = name + 1;

	while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
		++*rawp;

	tmp = (*rawp)[0];
	(*rawp)[0] = '\0';
	*namep = strdup(name);

	if (*namep == NULL)
		goto out_err;

	(*rawp)[0] = tmp;

	if ((*rawp)[0] != '\0') {
		(*rawp)++;
		while (isspace((*rawp)[0]))
			++(*rawp);
	}

	return 0;

out_err:
	/* strdup() failed; nothing has been allocated for *namep. */
	*namep = NULL;
	return -1;
}

static struct disasm_line *disasm_line__new(s64 offset, char *line,
					    size_t privsize, int line_nr,
					    struct arch *arch,
					    struct map *map)
{
	struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);

	if (dl != NULL) {
		dl->offset = offset;
		dl->line = strdup(line);
		dl->line_nr = line_nr;
		if (dl->line == NULL)
			goto out_delete;

		if (offset != -1) {
			if (disasm_line__parse(dl->line, &dl->ins.name, &dl->ops.raw) < 0)
				goto out_free_line;

			disasm_line__init_ins(dl, arch, map);
		}
	}

	return dl;

out_free_line:
	zfree(&dl->line);
out_delete:
	free(dl);
	return NULL;
}

void disasm_line__free(struct disasm_line *dl)
{
	zfree(&dl->line);
	if (dl->ins.ops && dl->ins.ops->free)
		dl->ins.ops->free(&dl->ops);
	else
		ins__delete(&dl->ops);
	free((void *)dl->ins.name);
	dl->ins.name = NULL;
	free(dl);
}

int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
{
	if (raw || !dl->ins.ops)
		return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw);

	return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
}

static void disasm__add(struct list_head *head, struct disasm_line *line)
{
	list_add_tail(&line->node, head);
}

struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}

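/*
 * Sum the samples for [offset, end) of the symbol and turn them into a
 * percentage: per source line when notes->src->lines is populated,
 * otherwise straight from the per-address histogram. *nr_samples and,
 * when available, *path are filled in as side effects.
 */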
double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
			    s64 end, const char **path, u64 *nr_samples)
{
	struct source_line *src_line = notes->src->lines;
	double percent = 0.0;
	*nr_samples = 0;

	if (src_line) {
		size_t sizeof_src_line = sizeof(*src_line) +
				sizeof(src_line->samples) * (src_line->nr_pcnt - 1);

		while (offset < end) {
			src_line = (void *)notes->src->lines +
					(sizeof_src_line * offset);

			if (*path == NULL)
				*path = src_line->path;

			percent += src_line->samples[evidx].percent;
			*nr_samples += src_line->samples[evidx].nr;
			offset++;
		}
	} else {
		struct sym_hist *h = annotation__histogram(notes, evidx);
		unsigned int hits = 0;

		while (offset < end)
			hits += h->addr[offset++];

		if (h->sum) {
			*nr_samples = hits;
			percent = 100.0 * hits / h->sum;
		}
	}

	return percent;
}

static const char *annotate__address_color(struct block_range *br)
{
	double cov = block_range__coverage(br);

	if (cov >= 0) {
		/* mark red for >75% coverage */
		if (cov > 0.75)
			return PERF_COLOR_RED;

		/* mark dull for <1% coverage */
		if (cov < 0.01)
			return PERF_COLOR_NORMAL;
	}

	return PERF_COLOR_MAGENTA;
}

static const char *annotate__asm_color(struct block_range *br)
{
	double cov = block_range__coverage(br);

	if (cov >= 0) {
		/* mark dull for <1% coverage */
		if (cov < 0.01)
			return PERF_COLOR_NORMAL;
	}

	return PERF_COLOR_BLUE;
}

static void annotate__branch_printf(struct block_range *br, u64 addr)
{
	bool emit_comment = true;

	if (!br)
		return;

#if 1
	if (br->is_target && br->start == addr) {
		struct block_range *branch = br;
		double p;

		/*
		 * Find matching branch to our target.
		 */
		while (!branch->is_branch)
			branch = block_range__next(branch);

		p = 100 * (double)br->entry / branch->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage joined at this target in relation
			 * to the next branch.
			 */
			printf(" +%.2f%%", p);
		}
	}
#endif
	if (br->is_branch && br->end == addr) {
		double p = 100 * (double)br->taken / br->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage leaving at this branch, and
			 * its prediction ratio.
			 */
			printf(" -%.2f%% (p:%.2f%%)", p, 100 * (double)br->pred / br->taken);
		}
	}
}


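/*
 * Returns 0 when the line was printed, 1 when it was filtered because
 * max_lines was reached, and -1 when it was filtered by min_pcnt (or is a
 * non-IP line while a context queue is in use); the callers below rely on
 * this convention.
 */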
static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
		      struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
		      int max_lines, struct disasm_line *queue)
{
	static const char *prev_line;
	static const char *prev_color;

	if (dl->offset != -1) {
		const char *path = NULL;
		u64 nr_samples;
		double percent, max_percent = 0.0;
		double *ppercents = &percent;
		u64 *psamples = &nr_samples;
		int i, nr_percent = 1;
		const char *color;
		struct annotation *notes = symbol__annotation(sym);
		s64 offset = dl->offset;
		const u64 addr = start + offset;
		struct disasm_line *next;
		struct block_range *br;

		next = disasm__get_next_ip_line(&notes->src->source, dl);

		if (perf_evsel__is_group_event(evsel)) {
			nr_percent = evsel->nr_members;
			ppercents = calloc(nr_percent, sizeof(double));
			psamples = calloc(nr_percent, sizeof(u64));
			if (ppercents == NULL || psamples == NULL) {
				return -1;
			}
		}

		for (i = 0; i < nr_percent; i++) {
			percent = disasm__calc_percent(notes,
					notes->src->lines ? i : evsel->idx + i,
					offset,
					next ? next->offset : (s64) len,
					&path, &nr_samples);

			ppercents[i] = percent;
			psamples[i] = nr_samples;
			if (percent > max_percent)
				max_percent = percent;
		}

		if (max_percent < min_pcnt)
			return -1;

		if (max_lines && printed >= max_lines)
			return 1;

		if (queue != NULL) {
			list_for_each_entry_from(queue, &notes->src->source, node) {
				if (queue == dl)
					break;
				disasm_line__print(queue, sym, start, evsel, len,
						   0, 0, 1, NULL);
			}
		}

		color = get_percent_color(max_percent);

		/*
		 * Also color the filename and line if needed, with
		 * the same color as the percentage. Don't print it
		 * twice for close colored addr with the same filename:line
		 */
		if (path) {
			if (!prev_line || strcmp(prev_line, path)
				       || color != prev_color) {
				color_fprintf(stdout, color, " %s", path);
				prev_line = path;
				prev_color = color;
			}
		}

		for (i = 0; i < nr_percent; i++) {
			percent = ppercents[i];
			nr_samples = psamples[i];
			color = get_percent_color(percent);

			if (symbol_conf.show_total_period)
				color_fprintf(stdout, color, " %7" PRIu64,
					      nr_samples);
			else
				color_fprintf(stdout, color, " %7.2f", percent);
		}

		printf(" : ");

		br = block_range__find(addr);
		color_fprintf(stdout, annotate__address_color(br), " %" PRIx64 ":", addr);
		color_fprintf(stdout, annotate__asm_color(br), "%s", dl->line);
		annotate__branch_printf(br, addr);
		printf("\n");

		if (ppercents != &percent)
			free(ppercents);

		if (psamples != &nr_samples)
			free(psamples);

	} else if (max_lines && printed >= max_lines)
		return 1;
	else {
		int width = 8;

		if (queue)
			return -1;

		if (perf_evsel__is_group_event(evsel))
			width *= evsel->nr_members;

		if (!*dl->line)
			printf(" %*s:\n", width, " ");
		else
			printf(" %*s: %s\n", width, " ", dl->line);
	}

	return 0;
}

/*
 * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
 * which looks like the following:
 *
 *  0000000000415500 <_init>:
 *    415500:       sub    $0x8,%rsp
 *    415504:       mov    0x2f5ad5(%rip),%rax        # 70afe0 <_DYNAMIC+0x2f8>
 *    41550b:       test   %rax,%rax
 *    41550e:       je     415515 <_init+0x15>
 *    415510:       callq  416e70 <__gmon_start__@plt>
 *    415515:       add    $0x8,%rsp
 *    415519:       retq
 *
 * it will be parsed and saved into struct disasm_line as
 *  <offset>       <name>  <ops.raw>
 *
 * The offset will be a relative offset from the start of the symbol and -1
 * means that it's not a disassembly line so should be treated differently.
 * The ops.raw part will be parsed further according to type of the instruction.
 */
static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
				      struct arch *arch,
				      FILE *file, size_t privsize,
				      int *line_nr)
{
	struct annotation *notes = symbol__annotation(sym);
	struct disasm_line *dl;
	char *line = NULL, *parsed_line, *tmp, *tmp2, *c;
	size_t line_len;
	s64 line_ip, offset = -1;
	regmatch_t match[2];

	if (getline(&line, &line_len, file) < 0)
		return -1;

	if (!line)
		return -1;

	while (line_len != 0 && isspace(line[line_len - 1]))
		line[--line_len] = '\0';

	c = strchr(line, '\n');
	if (c)
		*c = 0;

	line_ip = -1;
	parsed_line = line;

	/* /filename:linenr ? Save line number and ignore. */
	if (regexec(&file_lineno, line, 2, match, 0) == 0) {
		*line_nr = atoi(line + match[1].rm_so);
		free(line);
		return 0;
	}

	/*
	 * Strip leading spaces:
	 */
	tmp = line;
	while (*tmp) {
		if (*tmp != ' ')
			break;
		tmp++;
	}

	if (*tmp) {
		/*
		 * Parse hexa addresses followed by ':'
		 */
		line_ip = strtoull(tmp, &tmp2, 16);
		if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
			line_ip = -1;
	}

	if (line_ip != -1) {
		u64 start = map__rip_2objdump(map, sym->start),
		    end = map__rip_2objdump(map, sym->end);

		offset = line_ip - start;
		if ((u64)line_ip < start || (u64)line_ip >= end)
			offset = -1;
		else
			parsed_line = tmp2 + 1;
	}

	dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, arch, map);
	free(line);
	(*line_nr)++;

	if (dl == NULL)
		return -1;

	if (dl->ops.target.offset == UINT64_MAX)
		dl->ops.target.offset = dl->ops.target.addr -
					map__rip_2objdump(map, sym->start);

	/* kcore has no symbols, so add the call target name */
	if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.name) {
		struct addr_map_symbol target = {
			.map = map,
			.addr = dl->ops.target.addr,
		};

		if (!map_groups__find_ams(&target) &&
		    target.sym->start == target.al_addr)
			dl->ops.target.name = strdup(target.sym->name);
	}

	disasm__add(&notes->src->source, dl);

	return 0;
}
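
/*
 * objdump -l prefixes source locations with lines such as
 * "/home/user/foo.c:42" (an assumed example); capture group 1 of this
 * regex is the line number picked up in symbol__parse_objdump_line().
 */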

static __attribute__((constructor)) void symbol__init_regexpr(void)
{
	regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
}

static void delete_last_nop(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	struct list_head *list = &notes->src->source;
	struct disasm_line *dl;

	while (!list_empty(list)) {
		dl = list_entry(list->prev, struct disasm_line, node);

		if (dl->ins.ops) {
			if (dl->ins.ops != &nop_ops)
				return;
		} else {
			if (!strstr(dl->line, " nop ") &&
			    !strstr(dl->line, " nopl ") &&
			    !strstr(dl->line, " nopw "))
				return;
		}

		list_del(&dl->node);
		disasm_line__free(dl);
	}
}

int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map,
				 int errnum, char *buf, size_t buflen)
{
	struct dso *dso = map->dso;

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		str_error_r(errnum, buf, buflen);
		return 0;
	}

	switch (errnum) {
	case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
		char bf[SBUILD_ID_SIZE + 15] = " with build id ";
		char *build_id_msg = NULL;

		if (dso->has_build_id) {
			build_id__sprintf(dso->build_id,
					  sizeof(dso->build_id), bf + 15);
			build_id_msg = bf;
		}
		scnprintf(buf, buflen,
			  "No vmlinux file%s\nwas found in the path.\n\n"
			  "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
			  "Please use:\n\n"
			  "  perf buildid-cache -vu vmlinux\n\n"
			  "or:\n\n"
			  "  --vmlinux vmlinux\n", build_id_msg ?: "");
	}
		break;
	default:
		scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
		break;
	}

	return 0;
}

static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
{
	char linkname[PATH_MAX];
	char *build_id_filename;

	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(dso))
		return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;

	build_id_filename = dso__build_id_filename(dso, NULL, 0);
	if (build_id_filename) {
		__symbol__join_symfs(filename, filename_size, build_id_filename);
		free(build_id_filename);
	} else {
		if (dso->has_build_id)
			return ENOMEM;
		goto fallback;
	}

	if (dso__is_kcore(dso) ||
	    readlink(filename, linkname, sizeof(linkname)) < 0 ||
	    strstr(linkname, DSO__NAME_KALLSYMS) ||
	    access(filename, R_OK)) {
fallback:
		/*
		 * If we don't have build-ids or the build-id file isn't in the
		 * cache, or is just a kallsyms file, well, let's hope that this
		 * DSO is the same as when 'perf record' ran.
		 */
		__symbol__join_symfs(filename, filename_size, dso->long_name);
	}

	return 0;
}

static const char *annotate__norm_arch(const char *arch_name)
{
	struct utsname uts;

	if (!arch_name) { /* Assume we are annotating locally. */
		if (uname(&uts) < 0)
			return NULL;
		arch_name = uts.machine;
	}
	return normalize_arch((char *)arch_name);
}

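/*
 * Disassemble the symbol by running objdump over the DSO and parsing its
 * output line by line. The assembled command ends up looking roughly like
 * (an illustrative example, not literal output):
 *
 *   objdump --start-address=0x... --stop-address=0x... -l -d \
 *           --no-show-raw -S -C /path/to/dso 2>/dev/null | \
 *           grep -v /path/to/dso | expand
 */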
int symbol__disassemble(struct symbol *sym, struct map *map, const char *arch_name, size_t privsize)
{
	struct dso *dso = map->dso;
	char command[PATH_MAX * 2];
	struct arch *arch = NULL;
	FILE *file;
	char symfs_filename[PATH_MAX];
	struct kcore_extract kce;
	bool delete_extract = false;
	int stdout_fd[2];
	int lineno = 0;
	int nline;
	pid_t pid;
	int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename));

	if (err)
		return err;

	arch_name = annotate__norm_arch(arch_name);
	if (!arch_name)
		return -1;

	arch = arch__find(arch_name);
	if (arch == NULL)
		return -ENOTSUP;

	if (arch->init) {
		err = arch->init(arch);
		if (err) {
			pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name);
			return err;
		}
	}

	pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__,
		 symfs_filename, sym->name, map->unmap_ip(map, sym->start),
		 map->unmap_ip(map, sym->end));

	pr_debug("annotating [%p] %30s : [%p] %30s\n",
		 dso, dso->long_name, sym, sym->name);

	if (dso__is_kcore(dso)) {
		kce.kcore_filename = symfs_filename;
		kce.addr = map__rip_2objdump(map, sym->start);
		kce.offs = sym->start;
		kce.len = sym->end - sym->start;
		if (!kcore_extract__create(&kce)) {
			delete_extract = true;
			strlcpy(symfs_filename, kce.extract_filename,
				sizeof(symfs_filename));
		}
	} else if (dso__needs_decompress(dso)) {
		char tmp[PATH_MAX];
		struct kmod_path m;
		int fd;
		bool ret;

		/* From here on, bailing out must not be reported as success. */
		err = -1;

		if (kmod_path__parse_ext(&m, symfs_filename))
			goto out;

		snprintf(tmp, PATH_MAX, "/tmp/perf-kmod-XXXXXX");

		fd = mkstemp(tmp);
		if (fd < 0) {
			free(m.ext);
			goto out;
		}

		ret = decompress_to_file(m.ext, symfs_filename, fd);

		if (!ret)
			pr_err("Cannot decompress %s %s\n", m.ext, symfs_filename);

		free(m.ext);
		close(fd);

		if (!ret)
			goto out;

		strcpy(symfs_filename, tmp);
	}

	snprintf(command, sizeof(command),
		 "%s %s%s --start-address=0x%016" PRIx64
		 " --stop-address=0x%016" PRIx64
		 " -l -d %s %s -C %s 2>/dev/null|grep -v %s|expand",
		 objdump_path ? objdump_path : "objdump",
		 disassembler_style ? "-M " : "",
		 disassembler_style ? disassembler_style : "",
		 map__rip_2objdump(map, sym->start),
		 map__rip_2objdump(map, sym->end),
		 symbol_conf.annotate_asm_raw ? "" : "--no-show-raw",
		 symbol_conf.annotate_src ? "-S" : "",
		 symfs_filename, symfs_filename);

	pr_debug("Executing: %s\n", command);

	err = -1;
	if (pipe(stdout_fd) < 0) {
		pr_err("Failure creating the pipe to run %s\n", command);
		goto out_remove_tmp;
	}

	pid = fork();
	if (pid < 0) {
		pr_err("Failure forking to run %s\n", command);
		goto out_close_stdout;
	}

	if (pid == 0) {
		close(stdout_fd[0]);
		dup2(stdout_fd[1], 1);
		close(stdout_fd[1]);
		execl("/bin/sh", "sh", "-c", command, NULL);
		perror(command);
		exit(-1);
	}

	close(stdout_fd[1]);

	file = fdopen(stdout_fd[0], "r");
	if (!file) {
		pr_err("Failure creating FILE stream for %s\n", command);
		/*
		 * If we were using debug info we should retry with
		 * the original binary.
		 */
		goto out_remove_tmp;
	}

	nline = 0;
	while (!feof(file)) {
		if (symbol__parse_objdump_line(sym, map, arch, file, privsize,
			    &lineno) < 0)
			break;
		nline++;
	}

	if (nline == 0)
		pr_err("No output from %s\n", command);

	/*
	 * kallsyms does not have symbol sizes so there may be a nop at the end.
	 * Remove it.
	 */
	if (dso__is_kcore(dso))
		delete_last_nop(sym);

	fclose(file);
	err = 0;
out_remove_tmp:
	close(stdout_fd[0]);

	if (dso__needs_decompress(dso))
		unlink(symfs_filename);

	if (delete_extract)
		kcore_extract__delete(&kce);
out:
	return err;

out_close_stdout:
	close(stdout_fd[1]);
	goto out_remove_tmp;
}

static void insert_source_line(struct rb_root *root, struct source_line *src_line)
{
	struct source_line *iter;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	int i, ret;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct source_line, node);

		ret = strcmp(iter->path, src_line->path);
		if (ret == 0) {
			for (i = 0; i < src_line->nr_pcnt; i++)
				iter->samples[i].percent_sum += src_line->samples[i].percent;
			return;
		}

		if (ret < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	for (i = 0; i < src_line->nr_pcnt; i++)
		src_line->samples[i].percent_sum = src_line->samples[i].percent;

	rb_link_node(&src_line->node, parent, p);
	rb_insert_color(&src_line->node, root);
}

static int cmp_source_line(struct source_line *a, struct source_line *b)
{
	int i;

	for (i = 0; i < a->nr_pcnt; i++) {
		if (a->samples[i].percent_sum == b->samples[i].percent_sum)
			continue;
		return a->samples[i].percent_sum > b->samples[i].percent_sum;
	}

	return 0;
}

static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
{
	struct source_line *iter;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct source_line, node);

		if (cmp_source_line(src_line, iter))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&src_line->node, parent, p);
	rb_insert_color(&src_line->node, root);
}

static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
	struct source_line *src_line;
	struct rb_node *node;

	node = rb_first(src_root);
	while (node) {
		struct rb_node *next;

		src_line = rb_entry(node, struct source_line, node);
		next = rb_next(node);
		rb_erase(node, src_root);

		__resort_source_line(dest_root, src_line);
		node = next;
	}
}

static void symbol__free_source_line(struct symbol *sym, int len)
{
	struct annotation *notes = symbol__annotation(sym);
	struct source_line *src_line = notes->src->lines;
	size_t sizeof_src_line;
	int i;

	sizeof_src_line = sizeof(*src_line) +
			  (sizeof(src_line->samples) * (src_line->nr_pcnt - 1));

	for (i = 0; i < len; i++) {
		free_srcline(src_line->path);
		src_line = (void *)src_line + sizeof_src_line;
	}

	zfree(&notes->src->lines);
}

/* Get the filename:line for the colored entries */
static int symbol__get_source_line(struct symbol *sym, struct map *map,
				   struct perf_evsel *evsel,
				   struct rb_root *root, int len)
{
	u64 start;
	int i, k;
	int evidx = evsel->idx;
	struct source_line *src_line;
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);
	struct rb_root tmp_root = RB_ROOT;
	int nr_pcnt = 1;
	u64 h_sum = h->sum;
	size_t sizeof_src_line = sizeof(struct source_line);

	if (perf_evsel__is_group_event(evsel)) {
		for (i = 1; i < evsel->nr_members; i++) {
			h = annotation__histogram(notes, evidx + i);
			h_sum += h->sum;
		}
		nr_pcnt = evsel->nr_members;
		sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
	}

	if (!h_sum)
		return 0;

	src_line = notes->src->lines = calloc(len, sizeof_src_line);
	if (!notes->src->lines)
		return -1;

	start = map__rip_2objdump(map, sym->start);

	for (i = 0; i < len; i++) {
		u64 offset;
		double percent_max = 0.0;

		src_line->nr_pcnt = nr_pcnt;

		for (k = 0; k < nr_pcnt; k++) {
			h = annotation__histogram(notes, evidx + k);
			src_line->samples[k].percent = 100.0 * h->addr[i] / h->sum;

			if (src_line->samples[k].percent > percent_max)
				percent_max = src_line->samples[k].percent;
		}

		if (percent_max <= 0.5)
			goto next;

		offset = start + i;
		src_line->path = get_srcline(map->dso, offset, NULL, false);
		insert_source_line(&tmp_root, src_line);

	next:
		src_line = (void *)src_line + sizeof_src_line;
	}

	resort_source_line(root, &tmp_root);
	return 0;
}

static void print_summary(struct rb_root *root, const char *filename)
{
	struct source_line *src_line;
	struct rb_node *node;

	printf("\nSorted summary for file %s\n", filename);
	printf("----------------------------------------------\n\n");

	if (RB_EMPTY_ROOT(root)) {
		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
		return;
	}

	node = rb_first(root);
	while (node) {
		double percent, percent_max = 0.0;
		const char *color;
		char *path;
		int i;

		src_line = rb_entry(node, struct source_line, node);
		for (i = 0; i < src_line->nr_pcnt; i++) {
			percent = src_line->samples[i].percent_sum;
			color = get_percent_color(percent);
			color_fprintf(stdout, color, " %7.2f", percent);

			if (percent > percent_max)
				percent_max = percent;
		}

		path = src_line->path;
		color = get_percent_color(percent_max);
		color_fprintf(stdout, color, " %s\n", path);

		node = rb_next(node);
	}
}

static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
	u64 len = symbol__size(sym), offset;

	for (offset = 0; offset < len; ++offset)
		if (h->addr[offset] != 0)
			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
			       sym->start + offset, h->addr[offset]);
	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->sum", h->sum);
}

int symbol__annotate_printf(struct symbol *sym, struct map *map,
			    struct perf_evsel *evsel, bool full_paths,
			    int min_pcnt, int max_lines, int context)
{
	struct dso *dso = map->dso;
	char *filename;
	const char *d_filename;
	const char *evsel_name = perf_evsel__name(evsel);
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
	struct disasm_line *pos, *queue = NULL;
	u64 start = map__rip_2objdump(map, sym->start);
	int printed = 2, queue_len = 0;
	int more = 0;
	u64 len;
	int width = 8;
	int graph_dotted_len;

	filename = strdup(dso->long_name);
	if (!filename)
		return -ENOMEM;

	if (full_paths)
		d_filename = filename;
	else
		d_filename = basename(filename);

	len = symbol__size(sym);

	if (perf_evsel__is_group_event(evsel))
		width *= evsel->nr_members;

	graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
				  width, width, "Percent", d_filename, evsel_name, h->sum);

	printf("%-*.*s----\n",
	       graph_dotted_len, graph_dotted_len, graph_dotted_line);

	if (verbose)
		symbol__annotate_hits(sym, evsel);

	list_for_each_entry(pos, &notes->src->source, node) {
		if (context && queue == NULL) {
			queue = pos;
			queue_len = 0;
		}

		switch (disasm_line__print(pos, sym, start, evsel, len,
					   min_pcnt, printed, max_lines,
					   queue)) {
		case 0:
			++printed;
			if (context) {
				printed += queue_len;
				queue = NULL;
				queue_len = 0;
			}
			break;
		case 1:
			/* filtered by max_lines */
			++more;
			break;
		case -1:
		default:
			/*
			 * Filtered by min_pcnt or non IP lines when
			 * context != 0
			 */
			if (!context)
				break;
			if (queue_len == context)
				queue = list_entry(queue->node.next, typeof(*queue), node);
			else
				++queue_len;
			break;
		}
	}

	free(filename);

	return more;
}

void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);

	memset(h, 0, notes->src->sizeof_sym_hist);
}

void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);
	int len = symbol__size(sym), offset;

	h->sum = 0;
	for (offset = 0; offset < len; ++offset) {
		h->addr[offset] = h->addr[offset] * 7 / 8;
		h->sum += h->addr[offset];
	}
}

void disasm__purge(struct list_head *head)
{
	struct disasm_line *pos, *n;

	list_for_each_entry_safe(pos, n, head, node) {
		list_del(&pos->node);
		disasm_line__free(pos);
	}
}

static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
	size_t printed;

	if (dl->offset == -1)
		return fprintf(fp, "%s\n", dl->line);

	printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->ins.name);

	if (dl->ops.raw[0] != '\0') {
		printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
				   dl->ops.raw);
	}

	return printed + fprintf(fp, "\n");
}

size_t disasm__fprintf(struct list_head *head, FILE *fp)
{
	struct disasm_line *pos;
	size_t printed = 0;

	list_for_each_entry(pos, head, node)
		printed += disasm_line__fprintf(pos, fp);

	return printed;
}

int symbol__tty_annotate(struct symbol *sym, struct map *map,
			 struct perf_evsel *evsel, bool print_lines,
			 bool full_paths, int min_pcnt, int max_lines)
{
	struct dso *dso = map->dso;
	struct rb_root source_line = RB_ROOT;
	u64 len;

	if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), 0) < 0)
		return -1;

	len = symbol__size(sym);

	if (print_lines) {
		srcline_full_filename = full_paths;
		symbol__get_source_line(sym, map, evsel, &source_line, len);
		print_summary(&source_line, dso->long_name);
	}

	symbol__annotate_printf(sym, map, evsel, full_paths,
				min_pcnt, max_lines, 0);
	if (print_lines)
		symbol__free_source_line(sym, len);

	disasm__purge(&symbol__annotation(sym)->src->source);

	return 0;
}

bool ui__has_annotation(void)
{
	return use_browser == 1 && perf_hpp_list.sym;
}