/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-annotate.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <errno.h>
#include <inttypes.h>
#include "util.h"
#include "ui/ui.h"
#include "sort.h"
#include "build-id.h"
#include "color.h"
#include "cache.h"
#include "symbol.h"
#include "debug.h"
#include "annotate.h"
#include "evsel.h"
#include "block-range.h"
#include "string2.h"
#include "arch/common.h"
#include <regex.h>
#include <pthread.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <sys/utsname.h>

#include "sane_ctype.h"

const char *disassembler_style;
const char *objdump_path;
static regex_t file_lineno;

static struct ins_ops *ins__find(struct arch *arch, const char *name);
static void ins__sort(struct arch *arch);
static int disasm_line__parse(char *line, const char **namep, char **rawp);

struct arch {
	const char *name;
	struct ins *instructions;
	size_t nr_instructions;
	size_t nr_instructions_allocated;
	struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name);
	bool sorted_instructions;
	bool initialized;
	void *priv;
	unsigned int model;
	unsigned int family;
	int (*init)(struct arch *arch);
	bool (*ins_is_fused)(struct arch *arch, const char *ins1,
			     const char *ins2);
	int (*cpuid_parse)(struct arch *arch, char *cpuid);
	struct {
		char comment_char;
		char skip_functions_char;
	} objdump;
};

static struct ins_ops call_ops;
static struct ins_ops dec_ops;
static struct ins_ops jump_ops;
static struct ins_ops mov_ops;
static struct ins_ops nop_ops;
static struct ins_ops lock_ops;
static struct ins_ops ret_ops;

static int arch__grow_instructions(struct arch *arch)
{
	struct ins *new_instructions;
	size_t new_nr_allocated;

	if (arch->nr_instructions_allocated == 0 && arch->instructions)
		goto grow_from_non_allocated_table;

	new_nr_allocated = arch->nr_instructions_allocated + 128;
	new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins));
	if (new_instructions == NULL)
		return -1;

out_update_instructions:
	arch->instructions = new_instructions;
	arch->nr_instructions_allocated = new_nr_allocated;
	return 0;

grow_from_non_allocated_table:
	new_nr_allocated = arch->nr_instructions + 128;
	new_instructions = calloc(new_nr_allocated, sizeof(struct ins));
	if (new_instructions == NULL)
		return -1;

	memcpy(new_instructions, arch->instructions,
	       arch->nr_instructions * sizeof(struct ins));
	goto out_update_instructions;
}

static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops)
{
	struct ins *ins;

	if (arch->nr_instructions == arch->nr_instructions_allocated &&
	    arch__grow_instructions(arch))
		return -1;

	ins = &arch->instructions[arch->nr_instructions];
	ins->name = strdup(name);
	if (!ins->name)
		return -1;

	ins->ops = ops;
	arch->nr_instructions++;

	ins__sort(arch);
	return 0;
}

#include "arch/arm/annotate/instructions.c"
#include "arch/arm64/annotate/instructions.c"
#include "arch/x86/annotate/instructions.c"
#include "arch/powerpc/annotate/instructions.c"
#include "arch/s390/annotate/instructions.c"

static struct arch
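/*
 * Table of statically described architectures. arch__find() sorts it
 * lazily on first use, so entries can be added in any order.
 */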
architectures[] = { 125 { 126 .name = "arm", 127 .init = arm__annotate_init, 128 }, 129 { 130 .name = "arm64", 131 .init = arm64__annotate_init, 132 }, 133 { 134 .name = "x86", 135 .instructions = x86__instructions, 136 .nr_instructions = ARRAY_SIZE(x86__instructions), 137 .ins_is_fused = x86__ins_is_fused, 138 .cpuid_parse = x86__cpuid_parse, 139 .objdump = { 140 .comment_char = '#', 141 }, 142 }, 143 { 144 .name = "powerpc", 145 .init = powerpc__annotate_init, 146 }, 147 { 148 .name = "s390", 149 .init = s390__annotate_init, 150 .objdump = { 151 .comment_char = '#', 152 }, 153 }, 154 }; 155 156 static void ins__delete(struct ins_operands *ops) 157 { 158 if (ops == NULL) 159 return; 160 zfree(&ops->source.raw); 161 zfree(&ops->source.name); 162 zfree(&ops->target.raw); 163 zfree(&ops->target.name); 164 } 165 166 static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size, 167 struct ins_operands *ops) 168 { 169 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->raw); 170 } 171 172 int ins__scnprintf(struct ins *ins, char *bf, size_t size, 173 struct ins_operands *ops) 174 { 175 if (ins->ops->scnprintf) 176 return ins->ops->scnprintf(ins, bf, size, ops); 177 178 return ins__raw_scnprintf(ins, bf, size, ops); 179 } 180 181 bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2) 182 { 183 if (!arch || !arch->ins_is_fused) 184 return false; 185 186 return arch->ins_is_fused(arch, ins1, ins2); 187 } 188 189 static int call__parse(struct arch *arch, struct ins_operands *ops, struct map *map) 190 { 191 char *endptr, *tok, *name; 192 193 ops->target.addr = strtoull(ops->raw, &endptr, 16); 194 195 name = strchr(endptr, '<'); 196 if (name == NULL) 197 goto indirect_call; 198 199 name++; 200 201 if (arch->objdump.skip_functions_char && 202 strchr(name, arch->objdump.skip_functions_char)) 203 return -1; 204 205 tok = strchr(name, '>'); 206 if (tok == NULL) 207 return -1; 208 209 *tok = '\0'; 210 ops->target.name = strdup(name); 211 *tok = '>'; 212 213 return ops->target.name == NULL ? 
-1 : 0; 214 215 indirect_call: 216 tok = strchr(endptr, '*'); 217 if (tok == NULL) { 218 struct symbol *sym = map__find_symbol(map, map->map_ip(map, ops->target.addr)); 219 if (sym != NULL) 220 ops->target.name = strdup(sym->name); 221 else 222 ops->target.addr = 0; 223 return 0; 224 } 225 226 ops->target.addr = strtoull(tok + 1, NULL, 16); 227 return 0; 228 } 229 230 static int call__scnprintf(struct ins *ins, char *bf, size_t size, 231 struct ins_operands *ops) 232 { 233 if (ops->target.name) 234 return scnprintf(bf, size, "%-6.6s %s", ins->name, ops->target.name); 235 236 if (ops->target.addr == 0) 237 return ins__raw_scnprintf(ins, bf, size, ops); 238 239 return scnprintf(bf, size, "%-6.6s *%" PRIx64, ins->name, ops->target.addr); 240 } 241 242 static struct ins_ops call_ops = { 243 .parse = call__parse, 244 .scnprintf = call__scnprintf, 245 }; 246 247 bool ins__is_call(const struct ins *ins) 248 { 249 return ins->ops == &call_ops; 250 } 251 252 static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused) 253 { 254 const char *s = strchr(ops->raw, '+'); 255 const char *c = strchr(ops->raw, ','); 256 257 /* 258 * skip over possible up to 2 operands to get to address, e.g.: 259 * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0> 260 */ 261 if (c++ != NULL) { 262 ops->target.addr = strtoull(c, NULL, 16); 263 if (!ops->target.addr) { 264 c = strchr(c, ','); 265 if (c++ != NULL) 266 ops->target.addr = strtoull(c, NULL, 16); 267 } 268 } else { 269 ops->target.addr = strtoull(ops->raw, NULL, 16); 270 } 271 272 if (s++ != NULL) { 273 ops->target.offset = strtoull(s, NULL, 16); 274 ops->target.offset_avail = true; 275 } else { 276 ops->target.offset_avail = false; 277 } 278 279 return 0; 280 } 281 282 static int jump__scnprintf(struct ins *ins, char *bf, size_t size, 283 struct ins_operands *ops) 284 { 285 const char *c = strchr(ops->raw, ','); 286 287 if (!ops->target.addr || ops->target.offset < 0) 288 return ins__raw_scnprintf(ins, bf, size, ops); 289 290 if (c != NULL) { 291 const char *c2 = strchr(c + 1, ','); 292 293 /* check for 3-op insn */ 294 if (c2 != NULL) 295 c = c2; 296 c++; 297 298 /* mirror arch objdump's space-after-comma style */ 299 if (*c == ' ') 300 c++; 301 } 302 303 return scnprintf(bf, size, "%-6.6s %.*s%" PRIx64, 304 ins->name, c ? 
c - ops->raw : 0, ops->raw, 305 ops->target.offset); 306 } 307 308 static struct ins_ops jump_ops = { 309 .parse = jump__parse, 310 .scnprintf = jump__scnprintf, 311 }; 312 313 bool ins__is_jump(const struct ins *ins) 314 { 315 return ins->ops == &jump_ops; 316 } 317 318 static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep) 319 { 320 char *endptr, *name, *t; 321 322 if (strstr(raw, "(%rip)") == NULL) 323 return 0; 324 325 *addrp = strtoull(comment, &endptr, 16); 326 name = strchr(endptr, '<'); 327 if (name == NULL) 328 return -1; 329 330 name++; 331 332 t = strchr(name, '>'); 333 if (t == NULL) 334 return 0; 335 336 *t = '\0'; 337 *namep = strdup(name); 338 *t = '>'; 339 340 return 0; 341 } 342 343 static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map *map) 344 { 345 ops->locked.ops = zalloc(sizeof(*ops->locked.ops)); 346 if (ops->locked.ops == NULL) 347 return 0; 348 349 if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0) 350 goto out_free_ops; 351 352 ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name); 353 354 if (ops->locked.ins.ops == NULL) 355 goto out_free_ops; 356 357 if (ops->locked.ins.ops->parse && 358 ops->locked.ins.ops->parse(arch, ops->locked.ops, map) < 0) 359 goto out_free_ops; 360 361 return 0; 362 363 out_free_ops: 364 zfree(&ops->locked.ops); 365 return 0; 366 } 367 368 static int lock__scnprintf(struct ins *ins, char *bf, size_t size, 369 struct ins_operands *ops) 370 { 371 int printed; 372 373 if (ops->locked.ins.ops == NULL) 374 return ins__raw_scnprintf(ins, bf, size, ops); 375 376 printed = scnprintf(bf, size, "%-6.6s ", ins->name); 377 return printed + ins__scnprintf(&ops->locked.ins, bf + printed, 378 size - printed, ops->locked.ops); 379 } 380 381 static void lock__delete(struct ins_operands *ops) 382 { 383 struct ins *ins = &ops->locked.ins; 384 385 if (ins->ops && ins->ops->free) 386 ins->ops->free(ops->locked.ops); 387 else 388 ins__delete(ops->locked.ops); 389 390 zfree(&ops->locked.ops); 391 zfree(&ops->target.raw); 392 zfree(&ops->target.name); 393 } 394 395 static struct ins_ops lock_ops = { 396 .free = lock__delete, 397 .parse = lock__parse, 398 .scnprintf = lock__scnprintf, 399 }; 400 401 static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map *map __maybe_unused) 402 { 403 char *s = strchr(ops->raw, ','), *target, *comment, prev; 404 405 if (s == NULL) 406 return -1; 407 408 *s = '\0'; 409 ops->source.raw = strdup(ops->raw); 410 *s = ','; 411 412 if (ops->source.raw == NULL) 413 return -1; 414 415 target = ++s; 416 comment = strchr(s, arch->objdump.comment_char); 417 418 if (comment != NULL) 419 s = comment - 1; 420 else 421 s = strchr(s, '\0') - 1; 422 423 while (s > target && isspace(s[0])) 424 --s; 425 s++; 426 prev = *s; 427 *s = '\0'; 428 429 ops->target.raw = strdup(target); 430 *s = prev; 431 432 if (ops->target.raw == NULL) 433 goto out_free_source; 434 435 if (comment == NULL) 436 return 0; 437 438 comment = ltrim(comment); 439 comment__symbol(ops->source.raw, comment, &ops->source.addr, &ops->source.name); 440 comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name); 441 442 return 0; 443 444 out_free_source: 445 zfree(&ops->source.raw); 446 return -1; 447 } 448 449 static int mov__scnprintf(struct ins *ins, char *bf, size_t size, 450 struct ins_operands *ops) 451 { 452 return scnprintf(bf, size, "%-6.6s %s,%s", ins->name, 453 ops->source.name ?: ops->source.raw, 454 ops->target.name ?: ops->target.raw); 455 } 
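/*
 * mov-style operands come in as "source,target[<comment_char> comment]":
 * mov__parse() splits them and, when an objdump comment such as
 * "# 70afe0 <_DYNAMIC+0x2f8>" follows, comment__symbol() fills in the
 * symbolic name for %rip-relative operands so mov__scnprintf() can print
 * names instead of raw addresses.
 */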
456 457 static struct ins_ops mov_ops = { 458 .parse = mov__parse, 459 .scnprintf = mov__scnprintf, 460 }; 461 462 static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map *map __maybe_unused) 463 { 464 char *target, *comment, *s, prev; 465 466 target = s = ops->raw; 467 468 while (s[0] != '\0' && !isspace(s[0])) 469 ++s; 470 prev = *s; 471 *s = '\0'; 472 473 ops->target.raw = strdup(target); 474 *s = prev; 475 476 if (ops->target.raw == NULL) 477 return -1; 478 479 comment = strchr(s, arch->objdump.comment_char); 480 if (comment == NULL) 481 return 0; 482 483 comment = ltrim(comment); 484 comment__symbol(ops->target.raw, comment, &ops->target.addr, &ops->target.name); 485 486 return 0; 487 } 488 489 static int dec__scnprintf(struct ins *ins, char *bf, size_t size, 490 struct ins_operands *ops) 491 { 492 return scnprintf(bf, size, "%-6.6s %s", ins->name, 493 ops->target.name ?: ops->target.raw); 494 } 495 496 static struct ins_ops dec_ops = { 497 .parse = dec__parse, 498 .scnprintf = dec__scnprintf, 499 }; 500 501 static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, 502 struct ins_operands *ops __maybe_unused) 503 { 504 return scnprintf(bf, size, "%-6.6s", "nop"); 505 } 506 507 static struct ins_ops nop_ops = { 508 .scnprintf = nop__scnprintf, 509 }; 510 511 static struct ins_ops ret_ops = { 512 .scnprintf = ins__raw_scnprintf, 513 }; 514 515 bool ins__is_ret(const struct ins *ins) 516 { 517 return ins->ops == &ret_ops; 518 } 519 520 bool ins__is_lock(const struct ins *ins) 521 { 522 return ins->ops == &lock_ops; 523 } 524 525 static int ins__key_cmp(const void *name, const void *insp) 526 { 527 const struct ins *ins = insp; 528 529 return strcmp(name, ins->name); 530 } 531 532 static int ins__cmp(const void *a, const void *b) 533 { 534 const struct ins *ia = a; 535 const struct ins *ib = b; 536 537 return strcmp(ia->name, ib->name); 538 } 539 540 static void ins__sort(struct arch *arch) 541 { 542 const int nmemb = arch->nr_instructions; 543 544 qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp); 545 } 546 547 static struct ins_ops *__ins__find(struct arch *arch, const char *name) 548 { 549 struct ins *ins; 550 const int nmemb = arch->nr_instructions; 551 552 if (!arch->sorted_instructions) { 553 ins__sort(arch); 554 arch->sorted_instructions = true; 555 } 556 557 ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp); 558 return ins ? 
		ins->ops : NULL;
}

static struct ins_ops *ins__find(struct arch *arch, const char *name)
{
	struct ins_ops *ops = __ins__find(arch, name);

	if (!ops && arch->associate_instruction_ops)
		ops = arch->associate_instruction_ops(arch, name);

	return ops;
}

static int arch__key_cmp(const void *name, const void *archp)
{
	const struct arch *arch = archp;

	return strcmp(name, arch->name);
}

static int arch__cmp(const void *a, const void *b)
{
	const struct arch *aa = a;
	const struct arch *ab = b;

	return strcmp(aa->name, ab->name);
}

static void arch__sort(void)
{
	const int nmemb = ARRAY_SIZE(architectures);

	qsort(architectures, nmemb, sizeof(struct arch), arch__cmp);
}

static struct arch *arch__find(const char *name)
{
	const int nmemb = ARRAY_SIZE(architectures);
	static bool sorted;

	if (!sorted) {
		arch__sort();
		sorted = true;
	}

	return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp);
}

int symbol__alloc_hist(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	size_t size = symbol__size(sym);
	size_t sizeof_sym_hist;

	/*
	 * Add a buffer of one element for zero length symbols.
	 * When a sample is taken from the first instruction of a
	 * zero length symbol, perf still resolves it, shows the
	 * symbol name in perf report and allows annotating it.
	 */
	if (size == 0)
		size = 1;

	/* Check for overflow when calculating sizeof_sym_hist */
	if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(struct sym_hist_entry))
		return -1;

	sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry));

	/* Check for overflow in zalloc argument */
	if (sizeof_sym_hist > (SIZE_MAX - sizeof(*notes->src))
				/ symbol_conf.nr_events)
		return -1;

	notes->src = zalloc(sizeof(*notes->src) + symbol_conf.nr_events * sizeof_sym_hist);
	if (notes->src == NULL)
		return -1;
	notes->src->sizeof_sym_hist = sizeof_sym_hist;
	notes->src->nr_histograms = symbol_conf.nr_events;
	INIT_LIST_HEAD(&notes->src->source);
	return 0;
}

/* The cycles histogram is lazily allocated. */
static int symbol__alloc_hist_cycles(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	const size_t size = symbol__size(sym);

	notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist));
	if (notes->src->cycles_hist == NULL)
		return -1;
	return 0;
}

void symbol__annotate_zero_histograms(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);

	pthread_mutex_lock(&notes->lock);
	if (notes->src != NULL) {
		memset(notes->src->histograms, 0,
		       notes->src->nr_histograms * notes->src->sizeof_sym_hist);
		if (notes->src->cycles_hist)
			memset(notes->src->cycles_hist, 0,
			       symbol__size(sym) * sizeof(struct cyc_hist));
	}
	pthread_mutex_unlock(&notes->lock);
}

static int __symbol__account_cycles(struct annotation *notes,
				    u64 start,
				    unsigned offset, unsigned cycles,
				    unsigned have_start)
{
	struct cyc_hist *ch;

	ch = notes->src->cycles_hist;
	/*
	 * For now we can only account one basic block per
	 * final jump. But multiple could be overlapping.
	 * Always account the longest one. So when
	 * a shorter one has already been seen, throw it away.
682 * 683 * We separately always account the full cycles. 684 */ 685 ch[offset].num_aggr++; 686 ch[offset].cycles_aggr += cycles; 687 688 if (!have_start && ch[offset].have_start) 689 return 0; 690 if (ch[offset].num) { 691 if (have_start && (!ch[offset].have_start || 692 ch[offset].start > start)) { 693 ch[offset].have_start = 0; 694 ch[offset].cycles = 0; 695 ch[offset].num = 0; 696 if (ch[offset].reset < 0xffff) 697 ch[offset].reset++; 698 } else if (have_start && 699 ch[offset].start < start) 700 return 0; 701 } 702 ch[offset].have_start = have_start; 703 ch[offset].start = start; 704 ch[offset].cycles += cycles; 705 ch[offset].num++; 706 return 0; 707 } 708 709 static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map, 710 struct annotation *notes, int evidx, u64 addr, 711 struct perf_sample *sample) 712 { 713 unsigned offset; 714 struct sym_hist *h; 715 716 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr)); 717 718 if ((addr < sym->start || addr >= sym->end) && 719 (addr != sym->end || sym->start != sym->end)) { 720 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n", 721 __func__, __LINE__, sym->name, sym->start, addr, sym->end); 722 return -ERANGE; 723 } 724 725 offset = addr - sym->start; 726 h = annotation__histogram(notes, evidx); 727 h->nr_samples++; 728 h->addr[offset].nr_samples++; 729 h->period += sample->period; 730 h->addr[offset].period += sample->period; 731 732 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64 733 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n", 734 sym->start, sym->name, addr, addr - sym->start, evidx, 735 h->addr[offset].nr_samples, h->addr[offset].period); 736 return 0; 737 } 738 739 static struct annotation *symbol__get_annotation(struct symbol *sym, bool cycles) 740 { 741 struct annotation *notes = symbol__annotation(sym); 742 743 if (notes->src == NULL) { 744 if (symbol__alloc_hist(sym) < 0) 745 return NULL; 746 } 747 if (!notes->src->cycles_hist && cycles) { 748 if (symbol__alloc_hist_cycles(sym) < 0) 749 return NULL; 750 } 751 return notes; 752 } 753 754 static int symbol__inc_addr_samples(struct symbol *sym, struct map *map, 755 int evidx, u64 addr, 756 struct perf_sample *sample) 757 { 758 struct annotation *notes; 759 760 if (sym == NULL) 761 return 0; 762 notes = symbol__get_annotation(sym, false); 763 if (notes == NULL) 764 return -ENOMEM; 765 return __symbol__inc_addr_samples(sym, map, notes, evidx, addr, sample); 766 } 767 768 static int symbol__account_cycles(u64 addr, u64 start, 769 struct symbol *sym, unsigned cycles) 770 { 771 struct annotation *notes; 772 unsigned offset; 773 774 if (sym == NULL) 775 return 0; 776 notes = symbol__get_annotation(sym, true); 777 if (notes == NULL) 778 return -ENOMEM; 779 if (addr < sym->start || addr >= sym->end) 780 return -ERANGE; 781 782 if (start) { 783 if (start < sym->start || start >= sym->end) 784 return -ERANGE; 785 if (start >= addr) 786 start = 0; 787 } 788 offset = addr - sym->start; 789 return __symbol__account_cycles(notes, 790 start ? start - sym->start : 0, 791 offset, cycles, 792 !!start); 793 } 794 795 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams, 796 struct addr_map_symbol *start, 797 unsigned cycles) 798 { 799 u64 saddr = 0; 800 int err; 801 802 if (!cycles) 803 return 0; 804 805 /* 806 * Only set start when IPC can be computed. We can only 807 * compute it when the basic block is completely in a single 808 * function. 
	 * Special case the situation where the jump is elsewhere, but
	 * it starts at the function start.
	 */
	if (start &&
		(start->sym == ams->sym ||
		 (ams->sym &&
		   start->addr == ams->sym->start + ams->map->start)))
		saddr = start->al_addr;
	if (saddr == 0)
		pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n",
			ams->addr,
			start ? start->addr : 0,
			ams->sym ? ams->sym->start + ams->map->start : 0,
			saddr);
	err = symbol__account_cycles(ams->al_addr, saddr, ams->sym, cycles);
	if (err)
		pr_debug2("account_cycles failed %d\n", err);
	return err;
}

int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample,
				 int evidx)
{
	return symbol__inc_addr_samples(ams->sym, ams->map, evidx, ams->al_addr, sample);
}

int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample,
				 int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip, sample);
}

static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map *map)
{
	dl->ins.ops = ins__find(arch, dl->ins.name);

	if (!dl->ins.ops)
		return;

	if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, map) < 0)
		dl->ins.ops = NULL;
}

static int disasm_line__parse(char *line, const char **namep, char **rawp)
{
	char tmp, *name = ltrim(line);

	if (name[0] == '\0')
		return -1;

	*rawp = name + 1;

	while ((*rawp)[0] != '\0' && !isspace((*rawp)[0]))
		++*rawp;

	tmp = (*rawp)[0];
	(*rawp)[0] = '\0';
	*namep = strdup(name);

	if (*namep == NULL)
		return -1;

	(*rawp)[0] = tmp;
	*rawp = ltrim(*rawp);

	return 0;
}

static struct disasm_line *disasm_line__new(s64 offset, char *line,
					    size_t privsize, int line_nr,
					    struct arch *arch,
					    struct map *map)
{
	struct disasm_line *dl = zalloc(sizeof(*dl) + privsize);

	if (dl != NULL) {
		dl->offset = offset;
		dl->line = strdup(line);
		dl->line_nr = line_nr;
		if (dl->line == NULL)
			goto out_delete;

		if (offset != -1) {
			if (disasm_line__parse(dl->line, &dl->ins.name, &dl->ops.raw) < 0)
				goto out_free_line;

			disasm_line__init_ins(dl, arch, map);
		}
	}

	return dl;

out_free_line:
	zfree(&dl->line);
out_delete:
	free(dl);
	return NULL;
}

void disasm_line__free(struct disasm_line *dl)
{
	zfree(&dl->line);
	if (dl->ins.ops && dl->ins.ops->free)
		dl->ins.ops->free(&dl->ops);
	else
		ins__delete(&dl->ops);
	free((void *)dl->ins.name);
	dl->ins.name = NULL;
	free(dl);
}

int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw)
{
	if (raw || !dl->ins.ops)
		return scnprintf(bf, size, "%-6.6s %s", dl->ins.name, dl->ops.raw);

	return ins__scnprintf(&dl->ins, bf, size, &dl->ops);
}

static void disasm__add(struct list_head *head, struct disasm_line *line)
{
	list_add_tail(&line->node, head);
}

struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disasm_line *pos)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}

double disasm__calc_percent(struct annotation *notes, int
evidx, s64 offset, 948 s64 end, const char **path, struct sym_hist_entry *sample) 949 { 950 struct source_line *src_line = notes->src->lines; 951 double percent = 0.0; 952 953 sample->nr_samples = sample->period = 0; 954 955 if (src_line) { 956 size_t sizeof_src_line = sizeof(*src_line) + 957 sizeof(src_line->samples) * (src_line->nr_pcnt - 1); 958 959 while (offset < end) { 960 src_line = (void *)notes->src->lines + 961 (sizeof_src_line * offset); 962 963 if (*path == NULL) 964 *path = src_line->path; 965 966 percent += src_line->samples[evidx].percent; 967 sample->nr_samples += src_line->samples[evidx].nr; 968 offset++; 969 } 970 } else { 971 struct sym_hist *h = annotation__histogram(notes, evidx); 972 unsigned int hits = 0; 973 u64 period = 0; 974 975 while (offset < end) { 976 hits += h->addr[offset].nr_samples; 977 period += h->addr[offset].period; 978 ++offset; 979 } 980 981 if (h->nr_samples) { 982 sample->period = period; 983 sample->nr_samples = hits; 984 percent = 100.0 * hits / h->nr_samples; 985 } 986 } 987 988 return percent; 989 } 990 991 static const char *annotate__address_color(struct block_range *br) 992 { 993 double cov = block_range__coverage(br); 994 995 if (cov >= 0) { 996 /* mark red for >75% coverage */ 997 if (cov > 0.75) 998 return PERF_COLOR_RED; 999 1000 /* mark dull for <1% coverage */ 1001 if (cov < 0.01) 1002 return PERF_COLOR_NORMAL; 1003 } 1004 1005 return PERF_COLOR_MAGENTA; 1006 } 1007 1008 static const char *annotate__asm_color(struct block_range *br) 1009 { 1010 double cov = block_range__coverage(br); 1011 1012 if (cov >= 0) { 1013 /* mark dull for <1% coverage */ 1014 if (cov < 0.01) 1015 return PERF_COLOR_NORMAL; 1016 } 1017 1018 return PERF_COLOR_BLUE; 1019 } 1020 1021 static void annotate__branch_printf(struct block_range *br, u64 addr) 1022 { 1023 bool emit_comment = true; 1024 1025 if (!br) 1026 return; 1027 1028 #if 1 1029 if (br->is_target && br->start == addr) { 1030 struct block_range *branch = br; 1031 double p; 1032 1033 /* 1034 * Find matching branch to our target. 1035 */ 1036 while (!branch->is_branch) 1037 branch = block_range__next(branch); 1038 1039 p = 100 *(double)br->entry / branch->coverage; 1040 1041 if (p > 0.1) { 1042 if (emit_comment) { 1043 emit_comment = false; 1044 printf("\t#"); 1045 } 1046 1047 /* 1048 * The percentage of coverage joined at this target in relation 1049 * to the next branch. 1050 */ 1051 printf(" +%.2f%%", p); 1052 } 1053 } 1054 #endif 1055 if (br->is_branch && br->end == addr) { 1056 double p = 100*(double)br->taken / br->coverage; 1057 1058 if (p > 0.1) { 1059 if (emit_comment) { 1060 emit_comment = false; 1061 printf("\t#"); 1062 } 1063 1064 /* 1065 * The percentage of coverage leaving at this branch, and 1066 * its prediction ratio. 
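			 * (p: is br->pred / br->taken, i.e. the fraction of
			 * taken branches that were predicted.)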
			 */
			printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken);
		}
	}
}


static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 start,
		      struct perf_evsel *evsel, u64 len, int min_pcnt, int printed,
		      int max_lines, struct disasm_line *queue)
{
	static const char *prev_line;
	static const char *prev_color;

	if (dl->offset != -1) {
		const char *path = NULL;
		double percent, max_percent = 0.0;
		double *ppercents = &percent;
		struct sym_hist_entry sample;
		struct sym_hist_entry *psamples = &sample;
		int i, nr_percent = 1;
		const char *color;
		struct annotation *notes = symbol__annotation(sym);
		s64 offset = dl->offset;
		const u64 addr = start + offset;
		struct disasm_line *next;
		struct block_range *br;

		next = disasm__get_next_ip_line(&notes->src->source, dl);

		if (perf_evsel__is_group_event(evsel)) {
			nr_percent = evsel->nr_members;
			ppercents = calloc(nr_percent, sizeof(double));
			psamples = calloc(nr_percent, sizeof(struct sym_hist_entry));
			if (ppercents == NULL || psamples == NULL) {
				free(ppercents);
				free(psamples);
				return -1;
			}
		}

		for (i = 0; i < nr_percent; i++) {
			percent = disasm__calc_percent(notes,
					notes->src->lines ? i : evsel->idx + i,
					offset,
					next ? next->offset : (s64) len,
					&path, &sample);

			ppercents[i] = percent;
			psamples[i] = sample;
			if (percent > max_percent)
				max_percent = percent;
		}

		if (max_percent < min_pcnt)
			return -1;

		if (max_lines && printed >= max_lines)
			return 1;

		if (queue != NULL) {
			list_for_each_entry_from(queue, &notes->src->source, node) {
				if (queue == dl)
					break;
				disasm_line__print(queue, sym, start, evsel, len,
						   0, 0, 1, NULL);
			}
		}

		color = get_percent_color(max_percent);

		/*
		 * Also color the filename and line if needed, with
		 * the same color as the percentage. Don't print it
		 * twice for consecutive addresses with the same filename:line
		 */
		if (path) {
			if (!prev_line || strcmp(prev_line, path)
				       || color != prev_color) {
				color_fprintf(stdout, color, " %s", path);
				prev_line = path;
				prev_color = color;
			}
		}

		for (i = 0; i < nr_percent; i++) {
			percent = ppercents[i];
			sample = psamples[i];
			color = get_percent_color(percent);

			if (symbol_conf.show_total_period)
				color_fprintf(stdout, color, " %11" PRIu64,
					      sample.period);
			else if (symbol_conf.show_nr_samples)
				color_fprintf(stdout, color, " %7" PRIu64,
					      sample.nr_samples);
			else
				color_fprintf(stdout, color, " %7.2f", percent);
		}

		printf(" : ");

		br = block_range__find(addr);
		color_fprintf(stdout, annotate__address_color(br), " %" PRIx64 ":", addr);
		color_fprintf(stdout, annotate__asm_color(br), "%s", dl->line);
		annotate__branch_printf(br, addr);
		printf("\n");

		if (ppercents != &percent)
			free(ppercents);

		if (psamples != &sample)
			free(psamples);

	} else if (max_lines && printed >= max_lines)
		return 1;
	else {
		int width = symbol_conf.show_total_period ?
			    12 : 8;

		if (queue)
			return -1;

		if (perf_evsel__is_group_event(evsel))
			width *= evsel->nr_members;

		if (!*dl->line)
			printf(" %*s:\n", width, " ");
		else
			printf(" %*s: %s\n", width, " ", dl->line);
	}

	return 0;
}

/*
 * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw)
 * which looks like the following:
 *
 * 0000000000415500 <_init>:
 *   415500: sub $0x8,%rsp
 *   415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8>
 *   41550b: test %rax,%rax
 *   41550e: je 415515 <_init+0x15>
 *   415510: callq 416e70 <__gmon_start__@plt>
 *   415515: add $0x8,%rsp
 *   415519: retq
 *
 * it will be parsed and saved into struct disasm_line as
 *  <offset> <name> <ops.raw>
 *
 * The offset will be a relative offset from the start of the symbol and -1
 * means that it's not a disassembly line so should be treated differently.
 * The ops.raw part will be parsed further according to the type of the instruction.
 */
static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
				      struct arch *arch,
				      FILE *file, size_t privsize,
				      int *line_nr)
{
	struct annotation *notes = symbol__annotation(sym);
	struct disasm_line *dl;
	char *line = NULL, *parsed_line, *tmp, *tmp2;
	size_t line_len;
	s64 line_ip, offset = -1;
	regmatch_t match[2];

	if (getline(&line, &line_len, file) < 0)
		return -1;

	if (!line)
		return -1;

	line_ip = -1;
	parsed_line = rtrim(line);

	/* /filename:linenr ? Save line number and ignore. */
	if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
		*line_nr = atoi(parsed_line + match[1].rm_so);
		return 0;
	}

	tmp = ltrim(parsed_line);
	if (*tmp) {
		/*
		 * Parse hexadecimal addresses followed by ':'
		 */
		line_ip = strtoull(tmp, &tmp2, 16);
		if (*tmp2 != ':' || tmp == tmp2 || tmp2[1] == '\0')
			line_ip = -1;
	}

	if (line_ip != -1) {
		u64 start = map__rip_2objdump(map, sym->start),
		    end = map__rip_2objdump(map, sym->end);

		offset = line_ip - start;
		if ((u64)line_ip < start || (u64)line_ip >= end)
			offset = -1;
		else
			parsed_line = tmp2 + 1;
	}

	dl = disasm_line__new(offset, parsed_line, privsize, *line_nr, arch, map);
	free(line);
	(*line_nr)++;

	if (dl == NULL)
		return -1;

	if (!disasm_line__has_offset(dl)) {
		dl->ops.target.offset = dl->ops.target.addr -
					map__rip_2objdump(map, sym->start);
		dl->ops.target.offset_avail = true;
	}

	/* kcore has no symbols, so add the call target name */
	if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.name) {
		struct addr_map_symbol target = {
			.map = map,
			.addr = dl->ops.target.addr,
		};

		if (!map_groups__find_ams(&target) &&
		    target.sym->start == target.al_addr)
			dl->ops.target.name = strdup(target.sym->name);
	}

	disasm__add(&notes->src->source, dl);

	return 0;
}

static __attribute__((constructor)) void symbol__init_regexpr(void)
{
	regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED);
}

static void delete_last_nop(struct symbol *sym)
{
	struct annotation *notes = symbol__annotation(sym);
	struct list_head *list = &notes->src->source;
	struct disasm_line *dl;

	while (!list_empty(list)) {
		dl =
		     list_entry(list->prev, struct disasm_line, node);

		if (dl->ins.ops) {
			if (dl->ins.ops != &nop_ops)
				return;
		} else {
			if (!strstr(dl->line, " nop ") &&
			    !strstr(dl->line, " nopl ") &&
			    !strstr(dl->line, " nopw "))
				return;
		}

		list_del(&dl->node);
		disasm_line__free(dl);
	}
}

int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map,
			      int errnum, char *buf, size_t buflen)
{
	struct dso *dso = map->dso;

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		str_error_r(errnum, buf, buflen);
		return 0;
	}

	switch (errnum) {
	case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: {
		char bf[SBUILD_ID_SIZE + 15] = " with build id ";
		char *build_id_msg = NULL;

		if (dso->has_build_id) {
			build_id__sprintf(dso->build_id,
					  sizeof(dso->build_id), bf + 15);
			build_id_msg = bf;
		}
		scnprintf(buf, buflen,
			  "No vmlinux file%s\nwas found in the path.\n\n"
			  "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n"
			  "Please use:\n\n"
			  "  perf buildid-cache -vu vmlinux\n\n"
			  "or:\n\n"
			  "  --vmlinux vmlinux\n", build_id_msg ?: "");
	}
		break;
	default:
		scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
		break;
	}

	return 0;
}

static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size)
{
	char linkname[PATH_MAX];
	char *build_id_filename;
	char *build_id_path = NULL;
	char *pos;

	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(dso))
		return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX;

	build_id_filename = dso__build_id_filename(dso, NULL, 0, false);
	if (build_id_filename) {
		__symbol__join_symfs(filename, filename_size, build_id_filename);
		free(build_id_filename);
	} else {
		if (dso->has_build_id)
			return ENOMEM;
		goto fallback;
	}

	build_id_path = strdup(filename);
	if (!build_id_path)
		return -1;

	/*
	 * old style build-id cache has name of XX/XXXXXXX.. while
	 * new style has XX/XXXXXXX../{elf,kallsyms,vdso}.
	 * extract the build-id part of dirname in the new style only.
	 */
	pos = strrchr(build_id_path, '/');
	if (pos && strlen(pos) < SBUILD_ID_SIZE - 2)
		dirname(build_id_path);

	if (dso__is_kcore(dso) ||
	    readlink(build_id_path, linkname, sizeof(linkname)) < 0 ||
	    strstr(linkname, DSO__NAME_KALLSYMS) ||
	    access(filename, R_OK)) {
fallback:
		/*
		 * If we don't have build-ids or the build-id file isn't in the
		 * cache, or is just a kallsyms file, well, let's hope that this
		 * DSO is the same as when 'perf record' ran.
		 */
		__symbol__join_symfs(filename, filename_size, dso->long_name);
	}

	free(build_id_path);
	return 0;
}

static const char *annotate__norm_arch(const char *arch_name)
{
	struct utsname uts;

	if (!arch_name) { /* Assume we are annotating locally.
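			   * No arch name was passed in, so use the machine
			   * we are running on (from uname).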
*/ 1421 if (uname(&uts) < 0) 1422 return NULL; 1423 arch_name = uts.machine; 1424 } 1425 return normalize_arch((char *)arch_name); 1426 } 1427 1428 int symbol__disassemble(struct symbol *sym, struct map *map, 1429 const char *arch_name, size_t privsize, 1430 struct arch **parch, char *cpuid) 1431 { 1432 struct dso *dso = map->dso; 1433 char command[PATH_MAX * 2]; 1434 struct arch *arch = NULL; 1435 FILE *file; 1436 char symfs_filename[PATH_MAX]; 1437 struct kcore_extract kce; 1438 bool delete_extract = false; 1439 int stdout_fd[2]; 1440 int lineno = 0; 1441 int nline; 1442 pid_t pid; 1443 int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename)); 1444 1445 if (err) 1446 return err; 1447 1448 arch_name = annotate__norm_arch(arch_name); 1449 if (!arch_name) 1450 return -1; 1451 1452 arch = arch__find(arch_name); 1453 if (arch == NULL) 1454 return -ENOTSUP; 1455 1456 if (parch) 1457 *parch = arch; 1458 1459 if (arch->init) { 1460 err = arch->init(arch); 1461 if (err) { 1462 pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name); 1463 return err; 1464 } 1465 } 1466 1467 if (arch->cpuid_parse && cpuid) 1468 arch->cpuid_parse(arch, cpuid); 1469 1470 pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, 1471 symfs_filename, sym->name, map->unmap_ip(map, sym->start), 1472 map->unmap_ip(map, sym->end)); 1473 1474 pr_debug("annotating [%p] %30s : [%p] %30s\n", 1475 dso, dso->long_name, sym, sym->name); 1476 1477 if (dso__is_kcore(dso)) { 1478 kce.kcore_filename = symfs_filename; 1479 kce.addr = map__rip_2objdump(map, sym->start); 1480 kce.offs = sym->start; 1481 kce.len = sym->end - sym->start; 1482 if (!kcore_extract__create(&kce)) { 1483 delete_extract = true; 1484 strlcpy(symfs_filename, kce.extract_filename, 1485 sizeof(symfs_filename)); 1486 } 1487 } else if (dso__needs_decompress(dso)) { 1488 char tmp[KMOD_DECOMP_LEN]; 1489 1490 if (dso__decompress_kmodule_path(dso, symfs_filename, 1491 tmp, sizeof(tmp)) < 0) 1492 goto out; 1493 1494 strcpy(symfs_filename, tmp); 1495 } 1496 1497 snprintf(command, sizeof(command), 1498 "%s %s%s --start-address=0x%016" PRIx64 1499 " --stop-address=0x%016" PRIx64 1500 " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand", 1501 objdump_path ? objdump_path : "objdump", 1502 disassembler_style ? "-M " : "", 1503 disassembler_style ? disassembler_style : "", 1504 map__rip_2objdump(map, sym->start), 1505 map__rip_2objdump(map, sym->end), 1506 symbol_conf.annotate_asm_raw ? "" : "--no-show-raw", 1507 symbol_conf.annotate_src ? "-S" : "", 1508 symfs_filename, symfs_filename); 1509 1510 pr_debug("Executing: %s\n", command); 1511 1512 err = -1; 1513 if (pipe(stdout_fd) < 0) { 1514 pr_err("Failure creating the pipe to run %s\n", command); 1515 goto out_remove_tmp; 1516 } 1517 1518 pid = fork(); 1519 if (pid < 0) { 1520 pr_err("Failure forking to run %s\n", command); 1521 goto out_close_stdout; 1522 } 1523 1524 if (pid == 0) { 1525 close(stdout_fd[0]); 1526 dup2(stdout_fd[1], 1); 1527 close(stdout_fd[1]); 1528 execl("/bin/sh", "sh", "-c", command, NULL); 1529 perror(command); 1530 exit(-1); 1531 } 1532 1533 close(stdout_fd[1]); 1534 1535 file = fdopen(stdout_fd[0], "r"); 1536 if (!file) { 1537 pr_err("Failure creating FILE stream for %s\n", command); 1538 /* 1539 * If we were using debug info should retry with 1540 * original binary. 
		 */
		goto out_remove_tmp;
	}

	nline = 0;
	while (!feof(file)) {
		/*
		 * The source code line number (lineno) needs to be kept
		 * across calls to symbol__parse_objdump_line(), so that it
		 * can associate it with the instructions till the next one.
		 * See disasm_line__new() and struct disasm_line::line_nr.
		 */
		if (symbol__parse_objdump_line(sym, map, arch, file, privsize,
			    &lineno) < 0)
			break;
		nline++;
	}

	if (nline == 0)
		pr_err("No output from %s\n", command);

	/*
	 * kallsyms does not have symbol sizes so there may be a nop at the end.
	 * Remove it.
	 */
	if (dso__is_kcore(dso))
		delete_last_nop(sym);

	fclose(file);
	err = 0;
out_remove_tmp:
	close(stdout_fd[0]);

	if (dso__needs_decompress(dso))
		unlink(symfs_filename);

	if (delete_extract)
		kcore_extract__delete(&kce);
out:
	return err;

out_close_stdout:
	close(stdout_fd[1]);
	goto out_remove_tmp;
}

static void insert_source_line(struct rb_root *root, struct source_line *src_line)
{
	struct source_line *iter;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	int i, ret;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct source_line, node);

		ret = strcmp(iter->path, src_line->path);
		if (ret == 0) {
			for (i = 0; i < src_line->nr_pcnt; i++)
				iter->samples[i].percent_sum += src_line->samples[i].percent;
			return;
		}

		if (ret < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	for (i = 0; i < src_line->nr_pcnt; i++)
		src_line->samples[i].percent_sum = src_line->samples[i].percent;

	rb_link_node(&src_line->node, parent, p);
	rb_insert_color(&src_line->node, root);
}

static int cmp_source_line(struct source_line *a, struct source_line *b)
{
	int i;

	for (i = 0; i < a->nr_pcnt; i++) {
		if (a->samples[i].percent_sum == b->samples[i].percent_sum)
			continue;
		return a->samples[i].percent_sum > b->samples[i].percent_sum;
	}

	return 0;
}

static void __resort_source_line(struct rb_root *root, struct source_line *src_line)
{
	struct source_line *iter;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct source_line, node);

		if (cmp_source_line(src_line, iter))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&src_line->node, parent, p);
	rb_insert_color(&src_line->node, root);
}

static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
	struct source_line *src_line;
	struct rb_node *node;

	node = rb_first(src_root);
	while (node) {
		struct rb_node *next;

		src_line = rb_entry(node, struct source_line, node);
		next = rb_next(node);
		rb_erase(node, src_root);

		__resort_source_line(dest_root, src_line);
		node = next;
	}
}

static void symbol__free_source_line(struct symbol *sym, int len)
{
	struct annotation *notes = symbol__annotation(sym);
	struct source_line *src_line = notes->src->lines;
	size_t sizeof_src_line;
	int i;

	sizeof_src_line = sizeof(*src_line) +
			  (sizeof(src_line->samples) *
			   (src_line->nr_pcnt - 1));

	for (i = 0; i < len; i++) {
		free_srcline(src_line->path);
		src_line = (void *)src_line + sizeof_src_line;
	}

	zfree(&notes->src->lines);
}

/* Get the filename:line for the colored entries */
static int symbol__get_source_line(struct symbol *sym, struct map *map,
				   struct perf_evsel *evsel,
				   struct rb_root *root, int len)
{
	u64 start;
	int i, k;
	int evidx = evsel->idx;
	struct source_line *src_line;
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);
	struct rb_root tmp_root = RB_ROOT;
	int nr_pcnt = 1;
	u64 nr_samples = h->nr_samples;
	size_t sizeof_src_line = sizeof(struct source_line);

	if (perf_evsel__is_group_event(evsel)) {
		for (i = 1; i < evsel->nr_members; i++) {
			h = annotation__histogram(notes, evidx + i);
			nr_samples += h->nr_samples;
		}
		nr_pcnt = evsel->nr_members;
		sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
	}

	if (!nr_samples)
		return 0;

	src_line = notes->src->lines = calloc(len, sizeof_src_line);
	if (!notes->src->lines)
		return -1;

	start = map__rip_2objdump(map, sym->start);

	for (i = 0; i < len; i++) {
		u64 offset;
		double percent_max = 0.0;

		src_line->nr_pcnt = nr_pcnt;

		for (k = 0; k < nr_pcnt; k++) {
			double percent = 0.0;

			h = annotation__histogram(notes, evidx + k);
			nr_samples = h->addr[i].nr_samples;
			if (h->nr_samples)
				percent = 100.0 * nr_samples / h->nr_samples;

			if (percent > percent_max)
				percent_max = percent;
			src_line->samples[k].percent = percent;
			src_line->samples[k].nr = nr_samples;
		}

		if (percent_max <= 0.5)
			goto next;

		offset = start + i;
		src_line->path = get_srcline(map->dso, offset, NULL,
					     false, true);
		insert_source_line(&tmp_root, src_line);

	next:
		src_line = (void *)src_line + sizeof_src_line;
	}

	resort_source_line(root, &tmp_root);
	return 0;
}

static void print_summary(struct rb_root *root, const char *filename)
{
	struct source_line *src_line;
	struct rb_node *node;

	printf("\nSorted summary for file %s\n", filename);
	printf("----------------------------------------------\n\n");

	if (RB_EMPTY_ROOT(root)) {
		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
		return;
	}

	node = rb_first(root);
	while (node) {
		double percent, percent_max = 0.0;
		const char *color;
		char *path;
		int i;

		src_line = rb_entry(node, struct source_line, node);
		for (i = 0; i < src_line->nr_pcnt; i++) {
			percent = src_line->samples[i].percent_sum;
			color = get_percent_color(percent);
			color_fprintf(stdout, color, " %7.2f", percent);

			if (percent > percent_max)
				percent_max = percent;
		}

		path = src_line->path;
		color = get_percent_color(percent_max);
		color_fprintf(stdout, color, " %s\n", path);

		node = rb_next(node);
	}
}

static void symbol__annotate_hits(struct symbol *sym, struct perf_evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
	u64 len = symbol__size(sym), offset;

	for (offset = 0; offset < len; ++offset)
		if (h->addr[offset].nr_samples
		    != 0)
			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
			       sym->start + offset, h->addr[offset].nr_samples);
	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
}

int symbol__annotate_printf(struct symbol *sym, struct map *map,
			    struct perf_evsel *evsel, bool full_paths,
			    int min_pcnt, int max_lines, int context)
{
	struct dso *dso = map->dso;
	char *filename;
	const char *d_filename;
	const char *evsel_name = perf_evsel__name(evsel);
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel->idx);
	struct disasm_line *pos, *queue = NULL;
	u64 start = map__rip_2objdump(map, sym->start);
	int printed = 2, queue_len = 0;
	int more = 0;
	u64 len;
	int width = symbol_conf.show_total_period ? 12 : 8;
	int graph_dotted_len;

	filename = strdup(dso->long_name);
	if (!filename)
		return -ENOMEM;

	if (full_paths)
		d_filename = filename;
	else
		d_filename = basename(filename);

	len = symbol__size(sym);

	if (perf_evsel__is_group_event(evsel))
		width *= evsel->nr_members;

	graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
				  width, width, symbol_conf.show_total_period ? "Period" :
				  symbol_conf.show_nr_samples ? "Samples" : "Percent",
				  d_filename, evsel_name, h->nr_samples);

	printf("%-*.*s----\n",
	       graph_dotted_len, graph_dotted_len, graph_dotted_line);

	if (verbose > 0)
		symbol__annotate_hits(sym, evsel);

	list_for_each_entry(pos, &notes->src->source, node) {
		if (context && queue == NULL) {
			queue = pos;
			queue_len = 0;
		}

		switch (disasm_line__print(pos, sym, start, evsel, len,
					   min_pcnt, printed, max_lines,
					   queue)) {
		case 0:
			++printed;
			if (context) {
				printed += queue_len;
				queue = NULL;
				queue_len = 0;
			}
			break;
		case 1:
			/* filtered by max_lines */
			++more;
			break;
		case -1:
		default:
			/*
			 * Filtered by min_pcnt or non IP lines when
			 * context != 0
			 */
			if (!context)
				break;
			if (queue_len == context)
				queue = list_entry(queue->node.next, typeof(*queue), node);
			else
				++queue_len;
			break;
		}
	}

	free(filename);

	return more;
}

void symbol__annotate_zero_histogram(struct symbol *sym, int evidx)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);

	memset(h, 0, notes->src->sizeof_sym_hist);
}

void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evidx);
	int len = symbol__size(sym), offset;

	h->nr_samples = 0;
	for (offset = 0; offset < len; ++offset) {
		h->addr[offset].nr_samples = h->addr[offset].nr_samples * 7 / 8;
		h->nr_samples += h->addr[offset].nr_samples;
	}
}

void disasm__purge(struct list_head *head)
{
	struct disasm_line *pos, *n;

	list_for_each_entry_safe(pos, n, head, node) {
		list_del(&pos->node);
		disasm_line__free(pos);
	}
}

static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
	size_t printed;

	if (dl->offset == -1)
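		/* offset == -1 means this is not a disassembly line (source, header), print it as-is */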
return fprintf(fp, "%s\n", dl->line); 1930 1931 printed = fprintf(fp, "%#" PRIx64 " %s", dl->offset, dl->ins.name); 1932 1933 if (dl->ops.raw[0] != '\0') { 1934 printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ", 1935 dl->ops.raw); 1936 } 1937 1938 return printed + fprintf(fp, "\n"); 1939 } 1940 1941 size_t disasm__fprintf(struct list_head *head, FILE *fp) 1942 { 1943 struct disasm_line *pos; 1944 size_t printed = 0; 1945 1946 list_for_each_entry(pos, head, node) 1947 printed += disasm_line__fprintf(pos, fp); 1948 1949 return printed; 1950 } 1951 1952 int symbol__tty_annotate(struct symbol *sym, struct map *map, 1953 struct perf_evsel *evsel, bool print_lines, 1954 bool full_paths, int min_pcnt, int max_lines) 1955 { 1956 struct dso *dso = map->dso; 1957 struct rb_root source_line = RB_ROOT; 1958 u64 len; 1959 1960 if (symbol__disassemble(sym, map, perf_evsel__env_arch(evsel), 1961 0, NULL, NULL) < 0) 1962 return -1; 1963 1964 len = symbol__size(sym); 1965 1966 if (print_lines) { 1967 srcline_full_filename = full_paths; 1968 symbol__get_source_line(sym, map, evsel, &source_line, len); 1969 print_summary(&source_line, dso->long_name); 1970 } 1971 1972 symbol__annotate_printf(sym, map, evsel, full_paths, 1973 min_pcnt, max_lines, 0); 1974 if (print_lines) 1975 symbol__free_source_line(sym, len); 1976 1977 disasm__purge(&symbol__annotation(sym)->src->source); 1978 1979 return 0; 1980 } 1981 1982 bool ui__has_annotation(void) 1983 { 1984 return use_browser == 1 && perf_hpp_list.sym; 1985 } 1986