1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 4 * 5 * Parts came from builtin-annotate.c, see those files for further 6 * copyright notes. 7 */ 8 9 #include <errno.h> 10 #include <inttypes.h> 11 #include <libgen.h> 12 #include <stdlib.h> 13 #include "util.h" // hex_width() 14 #include "ui/ui.h" 15 #include "sort.h" 16 #include "build-id.h" 17 #include "color.h" 18 #include "config.h" 19 #include "disasm.h" 20 #include "dso.h" 21 #include "env.h" 22 #include "map.h" 23 #include "maps.h" 24 #include "symbol.h" 25 #include "srcline.h" 26 #include "units.h" 27 #include "debug.h" 28 #include "debuginfo.h" 29 #include "annotate.h" 30 #include "annotate-data.h" 31 #include "evsel.h" 32 #include "evlist.h" 33 #include "bpf-event.h" 34 #include "bpf-utils.h" 35 #include "block-range.h" 36 #include "string2.h" 37 #include "dwarf-regs.h" 38 #include "util/event.h" 39 #include "util/sharded_mutex.h" 40 #include "arch/common.h" 41 #include "namespaces.h" 42 #include "thread.h" 43 #include "hashmap.h" 44 #include "strbuf.h" 45 #include <regex.h> 46 #include <linux/bitops.h> 47 #include <linux/kernel.h> 48 #include <linux/string.h> 49 #include <linux/zalloc.h> 50 #include <subcmd/parse-options.h> 51 #include <subcmd/run-command.h> 52 #include <math.h> 53 54 /* FIXME: For the HE_COLORSET */ 55 #include "ui/browser.h" 56 57 /* 58 * FIXME: Using the same values as slang.h, 59 * but that header may not be available everywhere 60 */ 61 #define LARROW_CHAR ((unsigned char)',') 62 #define RARROW_CHAR ((unsigned char)'+') 63 #define DARROW_CHAR ((unsigned char)'.') 64 #define UARROW_CHAR ((unsigned char)'-') 65 66 #include <linux/ctype.h> 67 68 /* global annotation options */ 69 struct annotation_options annotate_opts; 70 71 /* Data type collection debug statistics */ 72 struct annotated_data_stat ann_data_stat; 73 LIST_HEAD(ann_insn_stat); 74 75 /* Pseudo data types */ 76 struct annotated_data_type 
stackop_type = { 77 .self = { 78 .type_name = (char *)"(stack operation)", 79 .children = LIST_HEAD_INIT(stackop_type.self.children), 80 }, 81 }; 82 83 struct annotated_data_type canary_type = { 84 .self = { 85 .type_name = (char *)"(stack canary)", 86 .children = LIST_HEAD_INIT(canary_type.self.children), 87 }, 88 }; 89 90 #define NO_TYPE ((struct annotated_data_type *)-1UL) 91 92 /* symbol histogram: key = offset << 16 | evsel->core.idx */ 93 static size_t sym_hist_hash(long key, void *ctx __maybe_unused) 94 { 95 return (key >> 16) + (key & 0xffff); 96 } 97 98 static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused) 99 { 100 return key1 == key2; 101 } 102 103 static struct annotated_source *annotated_source__new(void) 104 { 105 struct annotated_source *src = zalloc(sizeof(*src)); 106 107 if (src != NULL) 108 INIT_LIST_HEAD(&src->source); 109 110 return src; 111 } 112 113 static __maybe_unused void annotated_source__delete(struct annotated_source *src) 114 { 115 struct hashmap_entry *cur; 116 size_t bkt; 117 118 if (src == NULL) 119 return; 120 121 if (src->samples) { 122 hashmap__for_each_entry(src->samples, cur, bkt) 123 zfree(&cur->pvalue); 124 hashmap__free(src->samples); 125 } 126 zfree(&src->histograms); 127 free(src); 128 } 129 130 static int annotated_source__alloc_histograms(struct annotated_source *src, 131 int nr_hists) 132 { 133 src->nr_histograms = nr_hists; 134 src->histograms = calloc(nr_hists, sizeof(*src->histograms)); 135 136 if (src->histograms == NULL) 137 return -1; 138 139 src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL); 140 if (src->samples == NULL) 141 zfree(&src->histograms); 142 143 return src->histograms ? 
0 : -1; 144 } 145 146 void symbol__annotate_zero_histograms(struct symbol *sym) 147 { 148 struct annotation *notes = symbol__annotation(sym); 149 150 annotation__lock(notes); 151 if (notes->src != NULL) { 152 memset(notes->src->histograms, 0, 153 notes->src->nr_histograms * sizeof(*notes->src->histograms)); 154 hashmap__clear(notes->src->samples); 155 } 156 if (notes->branch && notes->branch->cycles_hist) { 157 memset(notes->branch->cycles_hist, 0, 158 symbol__size(sym) * sizeof(struct cyc_hist)); 159 } 160 annotation__unlock(notes); 161 } 162 163 static int __symbol__account_cycles(struct cyc_hist *ch, 164 u64 start, 165 unsigned offset, unsigned cycles, 166 unsigned have_start) 167 { 168 /* 169 * For now we can only account one basic block per 170 * final jump. But multiple could be overlapping. 171 * Always account the longest one. So when 172 * a shorter one has been already seen throw it away. 173 * 174 * We separately always account the full cycles. 175 */ 176 ch[offset].num_aggr++; 177 ch[offset].cycles_aggr += cycles; 178 179 if (cycles > ch[offset].cycles_max) 180 ch[offset].cycles_max = cycles; 181 182 if (ch[offset].cycles_min) { 183 if (cycles && cycles < ch[offset].cycles_min) 184 ch[offset].cycles_min = cycles; 185 } else 186 ch[offset].cycles_min = cycles; 187 188 if (!have_start && ch[offset].have_start) 189 return 0; 190 if (ch[offset].num) { 191 if (have_start && (!ch[offset].have_start || 192 ch[offset].start > start)) { 193 ch[offset].have_start = 0; 194 ch[offset].cycles = 0; 195 ch[offset].num = 0; 196 if (ch[offset].reset < 0xffff) 197 ch[offset].reset++; 198 } else if (have_start && 199 ch[offset].start < start) 200 return 0; 201 } 202 203 if (ch[offset].num < NUM_SPARKS) 204 ch[offset].cycles_spark[ch[offset].num] = cycles; 205 206 ch[offset].have_start = have_start; 207 ch[offset].start = start; 208 ch[offset].cycles += cycles; 209 ch[offset].num++; 210 return 0; 211 } 212 213 static int __symbol__inc_addr_samples(struct map_symbol *ms, 
214 struct annotated_source *src, struct evsel *evsel, u64 addr, 215 struct perf_sample *sample) 216 { 217 struct symbol *sym = ms->sym; 218 long hash_key; 219 u64 offset; 220 struct sym_hist *h; 221 struct sym_hist_entry *entry; 222 223 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr)); 224 225 if ((addr < sym->start || addr >= sym->end) && 226 (addr != sym->end || sym->start != sym->end)) { 227 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n", 228 __func__, __LINE__, sym->name, sym->start, addr, sym->end); 229 return -ERANGE; 230 } 231 232 offset = addr - sym->start; 233 h = annotated_source__histogram(src, evsel); 234 if (h == NULL) { 235 pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n", 236 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC); 237 return -ENOMEM; 238 } 239 240 hash_key = offset << 16 | evsel->core.idx; 241 if (!hashmap__find(src->samples, hash_key, &entry)) { 242 entry = zalloc(sizeof(*entry)); 243 if (entry == NULL) 244 return -ENOMEM; 245 246 if (hashmap__add(src->samples, hash_key, entry) < 0) 247 return -ENOMEM; 248 } 249 250 h->nr_samples++; 251 h->period += sample->period; 252 entry->nr_samples++; 253 entry->period += sample->period; 254 255 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64 256 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n", 257 sym->start, sym->name, addr, addr - sym->start, evsel->core.idx, 258 entry->nr_samples, entry->period); 259 return 0; 260 } 261 262 struct annotated_branch *annotation__get_branch(struct annotation *notes) 263 { 264 if (notes == NULL) 265 return NULL; 266 267 if (notes->branch == NULL) 268 notes->branch = zalloc(sizeof(*notes->branch)); 269 270 return notes->branch; 271 } 272 273 static struct annotated_branch *symbol__find_branch_hist(struct symbol *sym, 274 unsigned int br_cntr_nr) 275 { 276 
struct annotation *notes = symbol__annotation(sym); 277 struct annotated_branch *branch; 278 const size_t size = symbol__size(sym); 279 280 branch = annotation__get_branch(notes); 281 if (branch == NULL) 282 return NULL; 283 284 if (branch->cycles_hist == NULL) { 285 branch->cycles_hist = calloc(size, sizeof(struct cyc_hist)); 286 if (!branch->cycles_hist) 287 return NULL; 288 } 289 290 if (br_cntr_nr && branch->br_cntr == NULL) { 291 branch->br_cntr = calloc(br_cntr_nr * size, sizeof(u64)); 292 if (!branch->br_cntr) 293 return NULL; 294 } 295 296 return branch; 297 } 298 299 struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists) 300 { 301 struct annotation *notes = symbol__annotation(sym); 302 303 if (notes->src == NULL) { 304 notes->src = annotated_source__new(); 305 if (notes->src == NULL) 306 return NULL; 307 goto alloc_histograms; 308 } 309 310 if (notes->src->histograms == NULL) { 311 alloc_histograms: 312 annotated_source__alloc_histograms(notes->src, nr_hists); 313 } 314 315 return notes->src; 316 } 317 318 static int symbol__inc_addr_samples(struct map_symbol *ms, 319 struct evsel *evsel, u64 addr, 320 struct perf_sample *sample) 321 { 322 struct symbol *sym = ms->sym; 323 struct annotated_source *src; 324 325 if (sym == NULL) 326 return 0; 327 src = symbol__hists(sym, evsel->evlist->core.nr_entries); 328 return src ? 
__symbol__inc_addr_samples(ms, src, evsel, addr, sample) : 0; 329 } 330 331 static int symbol__account_br_cntr(struct annotated_branch *branch, 332 struct evsel *evsel, 333 unsigned offset, 334 u64 br_cntr) 335 { 336 unsigned int br_cntr_nr = evsel__leader(evsel)->br_cntr_nr; 337 unsigned int base = evsel__leader(evsel)->br_cntr_idx; 338 unsigned int off = offset * evsel->evlist->nr_br_cntr; 339 u64 *branch_br_cntr = branch->br_cntr; 340 unsigned int i, mask, width; 341 342 if (!br_cntr || !branch_br_cntr) 343 return 0; 344 345 perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width); 346 mask = (1L << width) - 1; 347 for (i = 0; i < br_cntr_nr; i++) { 348 u64 cntr = (br_cntr >> i * width) & mask; 349 350 branch_br_cntr[off + i + base] += cntr; 351 if (cntr == mask) 352 branch_br_cntr[off + i + base] |= ANNOTATION__BR_CNTR_SATURATED_FLAG; 353 } 354 355 return 0; 356 } 357 358 static int symbol__account_cycles(u64 addr, u64 start, struct symbol *sym, 359 unsigned cycles, struct evsel *evsel, 360 u64 br_cntr) 361 { 362 struct annotated_branch *branch; 363 unsigned offset; 364 int ret; 365 366 if (sym == NULL) 367 return 0; 368 branch = symbol__find_branch_hist(sym, evsel->evlist->nr_br_cntr); 369 if (!branch) 370 return -ENOMEM; 371 if (addr < sym->start || addr >= sym->end) 372 return -ERANGE; 373 374 if (start) { 375 if (start < sym->start || start >= sym->end) 376 return -ERANGE; 377 if (start >= addr) 378 start = 0; 379 } 380 offset = addr - sym->start; 381 ret = __symbol__account_cycles(branch->cycles_hist, 382 start ? 
start - sym->start : 0, 383 offset, cycles, 384 !!start); 385 386 if (ret) 387 return ret; 388 389 return symbol__account_br_cntr(branch, evsel, offset, br_cntr); 390 } 391 392 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams, 393 struct addr_map_symbol *start, 394 unsigned cycles, 395 struct evsel *evsel, 396 u64 br_cntr) 397 { 398 u64 saddr = 0; 399 int err; 400 401 if (!cycles) 402 return 0; 403 404 /* 405 * Only set start when IPC can be computed. We can only 406 * compute it when the basic block is completely in a single 407 * function. 408 * Special case the case when the jump is elsewhere, but 409 * it starts on the function start. 410 */ 411 if (start && 412 (start->ms.sym == ams->ms.sym || 413 (ams->ms.sym && 414 start->addr == ams->ms.sym->start + map__start(ams->ms.map)))) 415 saddr = start->al_addr; 416 if (saddr == 0) 417 pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n", 418 ams->addr, 419 start ? start->addr : 0, 420 ams->ms.sym ? 
ams->ms.sym->start + map__start(ams->ms.map) : 0, 421 saddr); 422 err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles, evsel, br_cntr); 423 if (err) 424 pr_debug2("account_cycles failed %d\n", err); 425 return err; 426 } 427 428 struct annotation_line *annotated_source__get_line(struct annotated_source *src, 429 s64 offset) 430 { 431 struct annotation_line *al; 432 433 list_for_each_entry(al, &src->source, node) { 434 if (al->offset == offset) 435 return al; 436 } 437 return NULL; 438 } 439 440 static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end) 441 { 442 struct annotation_line *al; 443 unsigned n_insn = 0; 444 445 al = annotated_source__get_line(notes->src, start); 446 if (al == NULL) 447 return 0; 448 449 list_for_each_entry_from(al, ¬es->src->source, node) { 450 if (al->offset == -1) 451 continue; 452 if ((u64)al->offset > end) 453 break; 454 n_insn++; 455 } 456 return n_insn; 457 } 458 459 static void annotated_branch__delete(struct annotated_branch *branch) 460 { 461 if (branch) { 462 zfree(&branch->cycles_hist); 463 free(branch->br_cntr); 464 free(branch); 465 } 466 } 467 468 static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch) 469 { 470 unsigned n_insn; 471 unsigned int cover_insn = 0; 472 473 n_insn = annotation__count_insn(notes, start, end); 474 if (n_insn && ch->num && ch->cycles) { 475 struct annotation_line *al; 476 struct annotated_branch *branch; 477 float ipc = n_insn / ((double)ch->cycles / (double)ch->num); 478 479 /* Hide data when there are too many overlaps. 
*/ 480 if (ch->reset >= 0x7fff) 481 return; 482 483 al = annotated_source__get_line(notes->src, start); 484 if (al == NULL) 485 return; 486 487 list_for_each_entry_from(al, ¬es->src->source, node) { 488 if (al->offset == -1) 489 continue; 490 if ((u64)al->offset > end) 491 break; 492 if (al->cycles && al->cycles->ipc == 0.0) { 493 al->cycles->ipc = ipc; 494 cover_insn++; 495 } 496 } 497 498 branch = annotation__get_branch(notes); 499 if (cover_insn && branch) { 500 branch->hit_cycles += ch->cycles; 501 branch->hit_insn += n_insn * ch->num; 502 branch->cover_insn += cover_insn; 503 } 504 } 505 } 506 507 static int annotation__compute_ipc(struct annotation *notes, size_t size, 508 struct evsel *evsel) 509 { 510 unsigned int br_cntr_nr = evsel->evlist->nr_br_cntr; 511 int err = 0; 512 s64 offset; 513 514 if (!notes->branch || !notes->branch->cycles_hist) 515 return 0; 516 517 notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1); 518 notes->branch->hit_cycles = 0; 519 notes->branch->hit_insn = 0; 520 notes->branch->cover_insn = 0; 521 522 annotation__lock(notes); 523 for (offset = size - 1; offset >= 0; --offset) { 524 struct cyc_hist *ch; 525 526 ch = ¬es->branch->cycles_hist[offset]; 527 if (ch && ch->cycles) { 528 struct annotation_line *al; 529 530 al = annotated_source__get_line(notes->src, offset); 531 if (al && al->cycles == NULL) { 532 al->cycles = zalloc(sizeof(*al->cycles)); 533 if (al->cycles == NULL) { 534 err = ENOMEM; 535 break; 536 } 537 } 538 if (ch->have_start) 539 annotation__count_and_fill(notes, ch->start, offset, ch); 540 if (al && ch->num_aggr) { 541 al->cycles->avg = ch->cycles_aggr / ch->num_aggr; 542 al->cycles->max = ch->cycles_max; 543 al->cycles->min = ch->cycles_min; 544 } 545 if (al && notes->branch->br_cntr) { 546 if (!al->br_cntr) { 547 al->br_cntr = calloc(br_cntr_nr, sizeof(u64)); 548 if (!al->br_cntr) { 549 err = ENOMEM; 550 break; 551 } 552 } 553 al->num_aggr = ch->num_aggr; 554 al->br_cntr_nr = br_cntr_nr; 555 
al->evsel = evsel; 556 memcpy(al->br_cntr, ¬es->branch->br_cntr[offset * br_cntr_nr], 557 br_cntr_nr * sizeof(u64)); 558 } 559 } 560 } 561 562 if (err) { 563 while (++offset < (s64)size) { 564 struct cyc_hist *ch = ¬es->branch->cycles_hist[offset]; 565 566 if (ch && ch->cycles) { 567 struct annotation_line *al; 568 569 al = annotated_source__get_line(notes->src, offset); 570 if (al) { 571 zfree(&al->cycles); 572 zfree(&al->br_cntr); 573 } 574 } 575 } 576 } 577 578 annotation__unlock(notes); 579 return 0; 580 } 581 582 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample, 583 struct evsel *evsel) 584 { 585 return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample); 586 } 587 588 int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample, 589 struct evsel *evsel, u64 ip) 590 { 591 return symbol__inc_addr_samples(&he->ms, evsel, ip, sample); 592 } 593 594 595 void annotation__exit(struct annotation *notes) 596 { 597 annotated_source__delete(notes->src); 598 annotated_branch__delete(notes->branch); 599 } 600 601 static struct sharded_mutex *sharded_mutex; 602 603 static void annotation__init_sharded_mutex(void) 604 { 605 /* As many mutexes as there are CPUs. 
*/ 606 sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu); 607 } 608 609 static size_t annotation__hash(const struct annotation *notes) 610 { 611 return (size_t)notes; 612 } 613 614 static struct mutex *annotation__get_mutex(const struct annotation *notes) 615 { 616 static pthread_once_t once = PTHREAD_ONCE_INIT; 617 618 pthread_once(&once, annotation__init_sharded_mutex); 619 if (!sharded_mutex) 620 return NULL; 621 622 return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes)); 623 } 624 625 void annotation__lock(struct annotation *notes) 626 NO_THREAD_SAFETY_ANALYSIS 627 { 628 struct mutex *mutex = annotation__get_mutex(notes); 629 630 if (mutex) 631 mutex_lock(mutex); 632 } 633 634 void annotation__unlock(struct annotation *notes) 635 NO_THREAD_SAFETY_ANALYSIS 636 { 637 struct mutex *mutex = annotation__get_mutex(notes); 638 639 if (mutex) 640 mutex_unlock(mutex); 641 } 642 643 bool annotation__trylock(struct annotation *notes) 644 { 645 struct mutex *mutex = annotation__get_mutex(notes); 646 647 if (!mutex) 648 return false; 649 650 return mutex_trylock(mutex); 651 } 652 653 void annotation_line__add(struct annotation_line *al, struct list_head *head) 654 { 655 list_add_tail(&al->node, head); 656 } 657 658 struct annotation_line * 659 annotation_line__next(struct annotation_line *pos, struct list_head *head) 660 { 661 list_for_each_entry_continue(pos, head, node) 662 if (pos->offset >= 0) 663 return pos; 664 665 return NULL; 666 } 667 668 static const char *annotate__address_color(struct block_range *br) 669 { 670 double cov = block_range__coverage(br); 671 672 if (cov >= 0) { 673 /* mark red for >75% coverage */ 674 if (cov > 0.75) 675 return PERF_COLOR_RED; 676 677 /* mark dull for <1% coverage */ 678 if (cov < 0.01) 679 return PERF_COLOR_NORMAL; 680 } 681 682 return PERF_COLOR_MAGENTA; 683 } 684 685 static const char *annotate__asm_color(struct block_range *br) 686 { 687 double cov = block_range__coverage(br); 688 689 if (cov >= 0) 
{ 690 /* mark dull for <1% coverage */ 691 if (cov < 0.01) 692 return PERF_COLOR_NORMAL; 693 } 694 695 return PERF_COLOR_BLUE; 696 } 697 698 static void annotate__branch_printf(struct block_range *br, u64 addr) 699 { 700 bool emit_comment = true; 701 702 if (!br) 703 return; 704 705 #if 1 706 if (br->is_target && br->start == addr) { 707 struct block_range *branch = br; 708 double p; 709 710 /* 711 * Find matching branch to our target. 712 */ 713 while (!branch->is_branch) 714 branch = block_range__next(branch); 715 716 p = 100 *(double)br->entry / branch->coverage; 717 718 if (p > 0.1) { 719 if (emit_comment) { 720 emit_comment = false; 721 printf("\t#"); 722 } 723 724 /* 725 * The percentage of coverage joined at this target in relation 726 * to the next branch. 727 */ 728 printf(" +%.2f%%", p); 729 } 730 } 731 #endif 732 if (br->is_branch && br->end == addr) { 733 double p = 100*(double)br->taken / br->coverage; 734 735 if (p > 0.1) { 736 if (emit_comment) { 737 emit_comment = false; 738 printf("\t#"); 739 } 740 741 /* 742 * The percentage of coverage leaving at this branch, and 743 * its prediction ratio. 
744 */ 745 printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken); 746 } 747 } 748 } 749 750 static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width) 751 { 752 s64 offset = dl->al.offset; 753 const u64 addr = start + offset; 754 struct block_range *br; 755 756 br = block_range__find(addr); 757 color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr); 758 color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line); 759 annotate__branch_printf(br, addr); 760 return 0; 761 } 762 763 static struct annotated_data_type * 764 __hist_entry__get_data_type(struct hist_entry *he, const struct arch *arch, 765 struct debuginfo *dbg, struct disasm_line *dl, 766 int *type_offset); 767 768 static bool needs_type_info(struct annotated_data_type *data_type) 769 { 770 if (data_type == NULL || data_type == NO_TYPE) 771 return false; 772 773 if (verbose) 774 return true; 775 776 return (data_type != &stackop_type) && (data_type != &canary_type); 777 } 778 779 static int 780 annotation_line__print(struct annotation_line *al, struct annotation_print_data *apd, 781 struct annotation_options *opts, int printed, 782 struct annotation_line *queue) 783 { 784 struct symbol *sym = apd->he->ms.sym; 785 struct disasm_line *dl = container_of(al, struct disasm_line, al); 786 struct annotation *notes = symbol__annotation(sym); 787 static const char *prev_line; 788 int max_lines = opts->max_lines; 789 int percent_type = opts->percent_type; 790 791 if (al->offset != -1) { 792 double max_percent = 0.0; 793 int i, nr_percent = 1; 794 const char *color; 795 796 for (i = 0; i < al->data_nr; i++) { 797 double percent; 798 799 percent = annotation_data__percent(&al->data[i], 800 percent_type); 801 802 if (percent > max_percent) 803 max_percent = percent; 804 } 805 806 if (al->data_nr > nr_percent) 807 nr_percent = al->data_nr; 808 809 if (max_percent < opts->min_pcnt) 810 return -1; 811 812 if (max_lines && printed >= max_lines) 
813 return 1; 814 815 if (queue != NULL) { 816 struct annotation_options queue_opts = { 817 .max_lines = 1, 818 .percent_type = percent_type, 819 }; 820 821 list_for_each_entry_from(queue, ¬es->src->source, node) { 822 if (queue == al) 823 break; 824 annotation_line__print(queue, apd, &queue_opts, 825 /*printed=*/0, /*queue=*/NULL); 826 } 827 } 828 829 color = get_percent_color(max_percent); 830 831 for (i = 0; i < nr_percent; i++) { 832 struct annotation_data *data = &al->data[i]; 833 double percent; 834 835 percent = annotation_data__percent(data, percent_type); 836 color = get_percent_color(percent); 837 838 if (symbol_conf.show_total_period) 839 color_fprintf(stdout, color, " %11" PRIu64, 840 data->he.period); 841 else if (symbol_conf.show_nr_samples) 842 color_fprintf(stdout, color, " %7" PRIu64, 843 data->he.nr_samples); 844 else 845 color_fprintf(stdout, color, " %7.2f", percent); 846 } 847 848 printf(" : "); 849 850 disasm_line__print(dl, notes->src->start, apd->addr_fmt_width); 851 852 if (opts->code_with_type && apd->dbg) { 853 struct annotated_data_type *data_type; 854 int offset = 0; 855 856 data_type = __hist_entry__get_data_type(apd->he, apd->arch, 857 apd->dbg, dl, &offset); 858 if (needs_type_info(data_type)) { 859 char buf[4096]; 860 861 printf("\t\t# data-type: %s", 862 data_type->self.type_name); 863 864 if (data_type != &stackop_type && 865 data_type != &canary_type) 866 printf(" +%#x", offset); 867 868 if (annotated_data_type__get_member_name(data_type, 869 buf, 870 sizeof(buf), 871 offset)) 872 printf(" (%s)", buf); 873 } 874 } 875 876 /* 877 * Also color the filename and line if needed, with 878 * the same color than the percentage. 
Don't print it 879 * twice for close colored addr with the same filename:line 880 */ 881 if (al->path) { 882 if (!prev_line || strcmp(prev_line, al->path)) { 883 color_fprintf(stdout, color, " // %s", al->path); 884 prev_line = al->path; 885 } 886 } 887 888 printf("\n"); 889 } else if (max_lines && printed >= max_lines) 890 return 1; 891 else { 892 int width = annotation__pcnt_width(notes); 893 894 if (queue) 895 return -1; 896 897 if (!*al->line) 898 printf(" %*s:\n", width, " "); 899 else 900 printf(" %*s: %-*d %s\n", width, " ", apd->addr_fmt_width, 901 al->line_nr, al->line); 902 } 903 904 return 0; 905 } 906 907 static void calc_percent(struct annotation *notes, 908 struct evsel *evsel, 909 struct annotation_data *data, 910 s64 offset, s64 end) 911 { 912 struct hists *hists = evsel__hists(evsel); 913 struct sym_hist *sym_hist = annotation__histogram(notes, evsel); 914 unsigned int hits = 0; 915 u64 period = 0; 916 917 while (offset < end) { 918 struct sym_hist_entry *entry; 919 920 entry = annotated_source__hist_entry(notes->src, evsel, offset); 921 if (entry) { 922 hits += entry->nr_samples; 923 period += entry->period; 924 } 925 ++offset; 926 } 927 928 if (sym_hist->nr_samples) { 929 data->he.period = period; 930 data->he.nr_samples = hits; 931 data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples; 932 } 933 934 if (hists->stats.nr_non_filtered_samples) 935 data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples; 936 937 if (sym_hist->period) 938 data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period; 939 940 if (hists->stats.total_period) 941 data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period; 942 } 943 944 static void annotation__calc_percent(struct annotation *notes, 945 struct evsel *leader, s64 len) 946 { 947 struct annotation_line *al, *next; 948 struct evsel *evsel; 949 950 list_for_each_entry(al, ¬es->src->source, node) { 951 s64 end; 952 int i = 0; 953 
954 if (al->offset == -1) 955 continue; 956 957 next = annotation_line__next(al, ¬es->src->source); 958 end = next ? next->offset : len; 959 960 for_each_group_evsel(evsel, leader) { 961 struct annotation_data *data; 962 963 BUG_ON(i >= al->data_nr); 964 965 if (symbol_conf.skip_empty && 966 evsel__hists(evsel)->stats.nr_samples == 0) 967 continue; 968 969 data = &al->data[i++]; 970 971 calc_percent(notes, evsel, data, al->offset, end); 972 } 973 } 974 } 975 976 void symbol__calc_percent(struct symbol *sym, struct evsel *evsel) 977 { 978 struct annotation *notes = symbol__annotation(sym); 979 980 annotation__calc_percent(notes, evsel, symbol__size(sym)); 981 } 982 983 int thread__get_arch(struct thread *thread, const struct arch **parch) 984 { 985 const struct arch *arch; 986 struct machine *machine; 987 uint16_t e_machine; 988 989 if (!thread) { 990 *parch = NULL; 991 return -1; 992 } 993 994 machine = maps__machine(thread__maps(thread)); 995 e_machine = thread__e_machine(thread, machine); 996 arch = arch__find(e_machine, machine->env ? 
machine->env->cpuid : NULL); 997 if (arch == NULL) { 998 pr_err("%s: unsupported arch %d\n", __func__, e_machine); 999 return errno; 1000 } 1001 if (parch) 1002 *parch = arch; 1003 1004 return 0; 1005 } 1006 1007 int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, 1008 const struct arch **parch) 1009 { 1010 struct symbol *sym = ms->sym; 1011 struct annotation *notes = symbol__annotation(sym); 1012 struct annotate_args args = { 1013 .options = &annotate_opts, 1014 }; 1015 const struct arch *arch = NULL; 1016 int err, nr; 1017 1018 err = thread__get_arch(ms->thread, &arch); 1019 if (err) 1020 return err; 1021 1022 if (parch) 1023 *parch = arch; 1024 1025 if (notes->src && !list_empty(¬es->src->source)) 1026 return 0; 1027 1028 args.arch = arch; 1029 args.ms = ms; 1030 1031 if (notes->src == NULL) { 1032 notes->src = annotated_source__new(); 1033 if (notes->src == NULL) 1034 return -1; 1035 } 1036 1037 nr = 0; 1038 if (evsel__is_group_event(evsel)) { 1039 struct evsel *pos; 1040 1041 for_each_group_evsel(pos, evsel) { 1042 if (symbol_conf.skip_empty && 1043 evsel__hists(pos)->stats.nr_samples == 0) 1044 continue; 1045 nr++; 1046 } 1047 } 1048 notes->src->nr_events = nr ? 
nr : 1; 1049 1050 if (annotate_opts.full_addr) 1051 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start); 1052 else 1053 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start); 1054 1055 return symbol__disassemble(sym, &args); 1056 } 1057 1058 static void insert_source_line(struct rb_root *root, struct annotation_line *al) 1059 { 1060 struct annotation_line *iter; 1061 struct rb_node **p = &root->rb_node; 1062 struct rb_node *parent = NULL; 1063 unsigned int percent_type = annotate_opts.percent_type; 1064 int i, ret; 1065 1066 while (*p != NULL) { 1067 parent = *p; 1068 iter = rb_entry(parent, struct annotation_line, rb_node); 1069 1070 ret = strcmp(iter->path, al->path); 1071 if (ret == 0) { 1072 for (i = 0; i < al->data_nr; i++) { 1073 iter->data[i].percent_sum += annotation_data__percent(&al->data[i], 1074 percent_type); 1075 } 1076 return; 1077 } 1078 1079 if (ret < 0) 1080 p = &(*p)->rb_left; 1081 else 1082 p = &(*p)->rb_right; 1083 } 1084 1085 for (i = 0; i < al->data_nr; i++) { 1086 al->data[i].percent_sum = annotation_data__percent(&al->data[i], 1087 percent_type); 1088 } 1089 1090 rb_link_node(&al->rb_node, parent, p); 1091 rb_insert_color(&al->rb_node, root); 1092 } 1093 1094 static int cmp_source_line(struct annotation_line *a, struct annotation_line *b) 1095 { 1096 int i; 1097 1098 for (i = 0; i < a->data_nr; i++) { 1099 if (a->data[i].percent_sum == b->data[i].percent_sum) 1100 continue; 1101 return a->data[i].percent_sum > b->data[i].percent_sum; 1102 } 1103 1104 return 0; 1105 } 1106 1107 static void __resort_source_line(struct rb_root *root, struct annotation_line *al) 1108 { 1109 struct annotation_line *iter; 1110 struct rb_node **p = &root->rb_node; 1111 struct rb_node *parent = NULL; 1112 1113 while (*p != NULL) { 1114 parent = *p; 1115 iter = rb_entry(parent, struct annotation_line, rb_node); 1116 1117 if (cmp_source_line(al, iter)) 1118 p = &(*p)->rb_left; 1119 else 1120 p = &(*p)->rb_right; 1121 } 1122 1123 
	rb_link_node(&al->rb_node, parent, p);
	rb_insert_color(&al->rb_node, root);
}

/*
 * Drain all entries from @src_root and re-insert them into @dest_root via
 * __resort_source_line(), i.e. re-sort the source lines (by their summed
 * percentages) into the destination tree.
 */
static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
	struct annotation_line *al;
	struct rb_node *node;

	node = rb_first(src_root);
	while (node) {
		struct rb_node *next;

		al = rb_entry(node, struct annotation_line, rb_node);
		next = rb_next(node);
		/* Save the successor before erasing: rb_next() needs a linked node. */
		rb_erase(node, src_root);

		__resort_source_line(dest_root, al);
		node = next;
	}
}

/*
 * Print the per-source-line percentage summary (one colored column per
 * event in the line's data[] array, then the source path) for @filename.
 */
static void print_summary(struct rb_root *root, const char *filename)
{
	struct annotation_line *al;
	struct rb_node *node;

	printf("\nSorted summary for file %s\n", filename);
	printf("----------------------------------------------\n\n");

	if (RB_EMPTY_ROOT(root)) {
		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
		return;
	}

	node = rb_first(root);
	while (node) {
		double percent, percent_max = 0.0;
		const char *color;
		char *path;
		int i;

		al = rb_entry(node, struct annotation_line, rb_node);
		for (i = 0; i < al->data_nr; i++) {
			percent = al->data[i].percent_sum;
			color = get_percent_color(percent);
			color_fprintf(stdout, color, " %7.2f", percent);

			if (percent > percent_max)
				percent_max = percent;
		}

		path = al->path;
		/* The path is colored by the hottest event's percentage. */
		color = get_percent_color(percent_max);
		color_fprintf(stdout, color, " %s\n", path);

		node = rb_next(node);
	}
}

/*
 * Verbose (-v) dump of the raw histogram: one "address: nr_samples" line
 * per offset that has samples, followed by the symbol-wide total.
 */
static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel);
	u64 len = symbol__size(sym), offset;

	for (offset = 0; offset < len; ++offset) {
		struct sym_hist_entry *entry;

		entry = annotated_source__hist_entry(notes->src, evsel, offset);
		if (entry && entry->nr_samples != 0)
			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
			       sym->start + offset, entry->nr_samples);
	}
	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
}

/*
 * Width (in characters) needed to print the largest instruction address of
 * the symbol.  Walks the line list backwards so the first asm line found
 * (offset != -1) is the one with the highest offset.
 */
static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
{
	char bf[32];
	struct annotation_line *line;

	list_for_each_entry_reverse(line, lines, node) {
		if (line->offset != -1)
			return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
	}

	return 0;
}

/*
 * Print the annotated disassembly of @he's symbol for @evsel to stdout
 * (perf annotate --stdio).  Honors annotate_opts (context lines, max_lines,
 * min_pcnt, full_path...).  Returns the number of lines filtered out by
 * max_lines ("more"), or -ENOMEM.
 */
int hist_entry__annotate_printf(struct hist_entry *he, struct evsel *evsel)
{
	struct map_symbol *ms = &he->ms;
	struct map *map = ms->map;
	struct symbol *sym = ms->sym;
	struct dso *dso = map__dso(map);
	char *filename;
	const char *d_filename;
	const char *evsel_name = evsel__name(evsel);
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel);
	struct annotation_line *pos, *queue = NULL;
	struct annotation_options *opts = &annotate_opts;
	struct annotation_print_data apd = {
		.he = he,
		.evsel = evsel,
	};
	int printed = 2, queue_len = 0;
	int more = 0;
	bool context = opts->context;
	int width = annotation__pcnt_width(notes);
	int graph_dotted_len;
	char buf[512];

	filename = strdup(dso__long_name(dso));
	if (!filename)
		return -ENOMEM;

	if (opts->full_path)
		d_filename = filename;
	else
		d_filename = basename(filename);

	if (evsel__is_group_event(evsel)) {
		evsel__group_desc(evsel, buf, sizeof(buf));
		evsel_name = buf;
	}

	graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
				  "percent: %s)\n",
				  width, width, symbol_conf.show_total_period ? "Period" :
				  symbol_conf.show_nr_samples ? "Samples" : "Percent",
				  d_filename, evsel_name, h->nr_samples,
				  percent_type_str(opts->percent_type));

	printf("%-*.*s----\n",
	       graph_dotted_len, graph_dotted_len, graph_dotted_line);

	if (verbose > 0)
		symbol__annotate_hits(sym, evsel);

	apd.addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source,
							      notes->src->start);
	thread__get_arch(ms->thread, &apd.arch);
	apd.dbg = dso__debuginfo(dso);

	list_for_each_entry(pos, &notes->src->source, node) {
		int err;

		/*
		 * "queue" remembers up to @context lines preceding a printed
		 * line so they can be shown as context (like grep -C).
		 */
		if (context && queue == NULL) {
			queue = pos;
			queue_len = 0;
		}

		err = annotation_line__print(pos, &apd, opts, printed, queue);

		switch (err) {
		case 0:
			++printed;
			if (context) {
				printed += queue_len;
				queue = NULL;
				queue_len = 0;
			}
			break;
		case 1:
			/* filtered by max_lines */
			++more;
			break;
		case -1:
		default:
			/*
			 * Filtered by min_pcnt or non IP lines when
			 * context != 0
			 */
			if (!context)
				break;
			/* Keep a sliding window of at most @context queued lines. */
			if (queue_len == context)
				queue = list_entry(queue->node.next, typeof(*queue), node);
			else
				++queue_len;
			break;
		}
	}

	debuginfo__delete(apd.dbg);
	free(filename);

	return more;
}

/* No-op color hooks: plain FILE output has no highlighting. */
static void FILE__set_percent_color(void *fp __maybe_unused,
				    double percent __maybe_unused,
				    bool current __maybe_unused)
{
}

static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
					 int nr __maybe_unused, bool current __maybe_unused)
{
	return 0;
}

static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
{
	return 0;
}

/* printf hook for the annotation_write_ops vtable, writing to a FILE *. */
static void FILE__printf(void *fp, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vfprintf(fp, fmt, args);
	va_end(args);
}

/*
 * Translate the slang-style arrow characters (see the *ARROW_CHAR defines)
 * into UTF-8 arrows for FILE output.
 */
static void FILE__write_graph(void *fp, int graph)
{
	const char *s;
	switch (graph) {

	case DARROW_CHAR: s = "↓"; break;
	case UARROW_CHAR: s = "↑"; break;
	case LARROW_CHAR: s = "←"; break;
	case RARROW_CHAR: s = "→"; break;
	default:		s = "?"; break;
	}

	fputs(s, fp);
}

/*
 * Write every non-filtered annotation line of @sym to @fp using the FILE__*
 * write ops.  When code_with_type is enabled, debuginfo for the dso is
 * opened (and released) here so data-type info can be appended per line.
 */
static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp,
				     struct annotation_print_data *apd)
{
	struct annotation *notes = symbol__annotation(sym);
	struct annotation_write_ops wops = {
		.first_line		 = true,
		.obj			 = fp,
		.set_color		 = FILE__set_color,
		.set_percent_color	 = FILE__set_percent_color,
		.set_jumps_percent_color = FILE__set_jumps_percent_color,
		.printf			 = FILE__printf,
		.write_graph		 = FILE__write_graph,
	};
	struct annotation_line *al;

	if (annotate_opts.code_with_type) {
		thread__get_arch(apd->he->ms.thread, &apd->arch);
		apd->dbg = dso__debuginfo(map__dso(apd->he->ms.map));
	}

	list_for_each_entry(al, &notes->src->source, node) {
		if (annotation_line__filter(al))
			continue;
		annotation_line__write(al, notes, &wops, apd);
		fputc('\n', fp);
		wops.first_line = false;
	}

	if (annotate_opts.code_with_type)
		debuginfo__delete(apd->dbg);

	return 0;
}

/*
 * Dump the annotation of @ms->sym for @evsel into a "<symname>.annotation"
 * file in the current directory.  Returns 0 on success, -1 on failure.
 */
int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel,
				struct hist_entry *he)
{
	const char *ev_name = evsel__name(evsel);
	char buf[1024];
	char *filename;
	int err = -1;
	FILE *fp;
	struct annotation_print_data apd = {
		.he = he,
		.evsel = evsel,
	};

	if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0)
		return -1;

	fp = fopen(filename, "w");
	if (fp == NULL)
		goto out_free_filename;

	if (evsel__is_group_event(evsel)) {
		evsel__group_desc(evsel, buf, sizeof(buf));
		ev_name = buf;
	}

	fprintf(fp, "%s() %s\nEvent: %s\n\n",
		ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name);
	symbol__annotate_fprintf2(ms->sym, fp, &apd);

	fclose(fp);
	err = 0;
out_free_filename:
	free(filename);
	return err;
}

/*
 * Zero the event histograms of @sym.  Note the memset spans all
 * nr_histograms entries starting at the one for @evsel.
 */
void symbol__annotate_zero_histogram(struct symbol *sym, struct evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel);

	memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms);
}

/*
 * Age the histogram for @evsel: scale every per-offset sample count by 7/8
 * and recompute the symbol-wide total (used by top-like continuous modes).
 */
void symbol__annotate_decay_histogram(struct symbol *sym, struct evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel);
	struct annotation_line *al;

	h->nr_samples = 0;
	list_for_each_entry(al, &notes->src->source, node) {
		struct sym_hist_entry *entry;

		if (al->offset == -1)
			continue;

		entry = annotated_source__hist_entry(notes->src, evsel, al->offset);
		if (entry == NULL)
			continue;

		entry->nr_samples = entry->nr_samples * 7 / 8;
		h->nr_samples += entry->nr_samples;
	}
}

/* Free all annotation/disasm lines of @as and mark it as not yet sourced. */
void annotated_source__purge(struct annotated_source *as)
{
	struct annotation_line *al, *n;

	list_for_each_entry_safe(al, n, &as->source, node) {
		list_del_init(&al->node);
		disasm_line__free(disasm_line(al));
	}
	as->tried_source = false;
}

/*
 * Print one disasm line: source lines (offset == -1) verbatim, asm lines as
 * "offset insn [operands]".  Returns the number of characters written.
 */
static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp)
{
	size_t printed;

	if (dl->al.offset == -1)
		return fprintf(fp, "%s\n", dl->al.line);

	printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name);

	if (dl->ops.raw[0] != '\0') {
		/* Pad the mnemonic to column 6 before the raw operands. */
		printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ",
				   dl->ops.raw);
	}

	return printed + fprintf(fp, "\n");
}

/* Print a whole list of disasm lines; returns total characters written. */
size_t disasm__fprintf(struct list_head *head, FILE *fp)
{
	struct disasm_line *pos;
	size_t printed = 0;

	list_for_each_entry(pos, head, al.node)
		printed += disasm_line__fprintf(pos, fp);

	return printed;
}

/*
 * A jump is a valid *local* jump only if it is a jump instruction with a
 * resolvable offset that lands inside @sym.
 */
bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym)
{
	if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) ||
	    !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 ||
	    dl->ops.target.offset >= (s64)symbol__size(sym))
		return false;

	return true;
}

/*
 * Count, for every line, how many local jumps target it (jump_sources) and
 * track the maximum, which sizes the jump-count column.
 */
static void
annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym)
{
	struct annotation_line *al;

	/* PLT symbols contain external offsets */
	if (strstr(sym->name, "@plt"))
		return;

	list_for_each_entry(al, &notes->src->source, node) {
		struct disasm_line *dl;
		struct annotation_line *target;

		dl = disasm_line(al);

		if (!disasm_line__is_valid_local_jump(dl, sym))
			continue;

		target = annotated_source__get_line(notes->src,
						    dl->ops.target.offset);
		/*
		 * FIXME: Oops, no jump target? Buggy disassembler? Or do we
		 * have to adjust to the previous offset?
		 */
		if (target == NULL)
			continue;

		if (++target->jump_sources > notes->src->max_jump_sources)
			notes->src->max_jump_sources = target->jump_sources;
	}
}

/*
 * Assign browser indices: idx counts every line, idx_asm only asm lines
 * (source-only lines get idx_asm == -1).  Also records the longest line.
 */
static void annotation__set_index(struct annotation *notes)
{
	struct annotation_line *al;
	struct annotated_source *src = notes->src;

	src->widths.max_line_len = 0;
	src->nr_entries = 0;
	src->nr_asm_entries = 0;

	list_for_each_entry(al, &src->source, node) {
		size_t line_len = strlen(al->line);

		if (src->widths.max_line_len < line_len)
			src->widths.max_line_len = line_len;
		al->idx = src->nr_entries++;
		if (al->offset != -1)
			al->idx_asm = src->nr_asm_entries++;
		else
			al->idx_asm = -1;
	}
}

/* Column width for the jump-sources count @n. */
static inline int width_jumps(int n)
{
	if (n >= 100)
		return 5;
	if (n / 10)
		return 2;
	return 1;
}

/* Length of the longest instruction mnemonic among the asm lines. */
static int annotation__max_ins_name(struct annotation *notes)
{
	int max_name = 0, len;
	struct annotation_line *al;

	list_for_each_entry(al, &notes->src->source, node) {
		if (al->offset == -1)
			continue;

		len = strlen(disasm_line(al)->ins.name);
		if (max_name < len)
			max_name = len;
	}

	return max_name;
}

/* Compute the initial column widths (addresses, jumps, mnemonics) for @sym. */
static void
annotation__init_column_widths(struct annotation *notes, struct symbol *sym)
{
	notes->src->widths.addr = notes->src->widths.target =
		notes->src->widths.min_addr = hex_width(symbol__size(sym));
	notes->src->widths.max_addr = hex_width(sym->end);
	notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources);
	notes->src->widths.max_ins_name = annotation__max_ins_name(notes);
}

/*
 * Recompute the address/target column widths according to the current
 * display options (offset-relative vs full 64-bit vs absolute addresses).
 */
void annotation__update_column_widths(struct annotation *notes)
{
	if (annotate_opts.use_offset)
		notes->src->widths.target = notes->src->widths.min_addr;
	else if (annotate_opts.full_addr)
		notes->src->widths.target = BITS_PER_LONG / 4;
	else
		notes->src->widths.target = notes->src->widths.max_addr;

	notes->src->widths.addr = notes->src->widths.target;

	if (annotate_opts.show_nr_jumps)
		notes->src->widths.addr += notes->src->widths.jumps + 1;
}

/*
 * Toggle between showing memory addresses and objdump addresses, adjusting
 * the start address and the column widths accordingly.
 */
void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms)
{
	annotate_opts.full_addr = !annotate_opts.full_addr;

	if (annotate_opts.full_addr)
		notes->src->start = map__objdump_2mem(ms->map, ms->sym->start);
	else
		notes->src->start = map__rip_2objdump(ms->map, ms->sym->start);

	annotation__update_column_widths(notes);
}

/*
 * Collect source lines whose hottest event exceeds 0.5% into @root (sorted),
 * resolving each line's source file:line via get_srcline().
 */
static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms,
				   struct rb_root *root)
{
	struct annotation_line *al;
	struct rb_root tmp_root = RB_ROOT;

	list_for_each_entry(al, &notes->src->source, node) {
		double percent_max = 0.0;
		u64 addr;
		int i;

		for (i = 0; i < al->data_nr; i++) {
			double percent;

			percent = annotation_data__percent(&al->data[i],
							   annotate_opts.percent_type);

			if (percent > percent_max)
				percent_max = percent;
		}

		if (percent_max <= 0.5)
			continue;

		addr = map__rip_2objdump(ms->map, ms->sym->start);
		al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL,
				       false, true, ms->sym->start + al->offset);
		insert_source_line(&tmp_root, al);
	}

	resort_source_line(root, &tmp_root);
}

/* Thin wrapper: calc_lines on the symbol's annotation. */
static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root)
{
	struct annotation *notes = symbol__annotation(ms->sym);

	annotation__calc_lines(notes, ms, root);
}

/*
 * Annotate and print @he's symbol on a tty using the "2" (write-ops based)
 * output path, optionally preceded by the sorted source-line summary.
 */
int hist_entry__tty_annotate2(struct hist_entry *he, struct evsel *evsel)
{
	struct map_symbol *ms = &he->ms;
	struct dso *dso = map__dso(ms->map);
	struct symbol *sym = ms->sym;
	struct rb_root source_line = RB_ROOT;
	struct hists *hists = evsel__hists(evsel);
	struct annotation_print_data apd = {
		.he = he,
		.evsel = evsel,
	};
	char buf[1024];
	int err;

	err = symbol__annotate2(ms, evsel, NULL);
	if (err) {
		char msg[BUFSIZ];

		dso__set_annotate_warned(dso);
		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
		return -1;
	}

	if (annotate_opts.print_lines) {
		srcline_full_filename = annotate_opts.full_path;
		symbol__calc_lines(ms, &source_line);
		print_summary(&source_line, dso__long_name(dso));
	}

	hists__scnprintf_title(hists, buf, sizeof(buf));
	fprintf(stdout, "%s, [percent: %s]\n%s() %s\n",
		buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso));
	symbol__annotate_fprintf2(sym, stdout, &apd);

	annotated_source__purge(symbol__annotation(sym)->src);

	return 0;
}

/*
 * Annotate and print @he's symbol on a tty using the older printf-based
 * output path (hist_entry__annotate_printf).
 */
int hist_entry__tty_annotate(struct hist_entry *he, struct evsel *evsel)
{
	struct map_symbol *ms = &he->ms;
	struct dso *dso = map__dso(ms->map);
	struct symbol *sym = ms->sym;
	struct rb_root source_line = RB_ROOT;
	int err;

	err = symbol__annotate(ms, evsel, NULL);
	if (err) {
		char msg[BUFSIZ];

		dso__set_annotate_warned(dso);
		symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
		ui__error("Couldn't annotate %s:\n%s", sym->name, msg);
		return -1;
	}

	symbol__calc_percent(sym, evsel);

	if (annotate_opts.print_lines) {
		srcline_full_filename = annotate_opts.full_path;
		symbol__calc_lines(ms, &source_line);
		print_summary(&source_line, dso__long_name(dso));
	}

	hist_entry__annotate_printf(he, evsel);

	annotated_source__purge(symbol__annotation(sym)->src);

	return 0;
}

/* True when running the TUI browser with the symbol sort key enabled. */
bool ui__has_annotation(void)
{
	return use_browser == 1 && perf_hpp_list.sym;
}


/* Hottest percentage among the line's per-event data[] entries. */
static double annotation_line__max_percent(struct annotation_line *al,
					   unsigned int percent_type)
{
	double percent_max = 0.0;
	int i;

	for (i = 0; i < al->data_nr; i++) {
		double percent;

		percent = annotation_data__percent(&al->data[i],
						   percent_type);

		if (percent > percent_max)
			percent_max = percent;
	}

	return percent_max;
}

/*
 * Emit the jump/call/return arrow glyph (or padding) for @dl, then format
 * the disassembled line into @bf.  Returns characters consumed including
 * the two-character arrow prefix.
 */
static int disasm_line__write(struct disasm_line *dl, struct annotation *notes,
			      void *obj, char *bf, size_t size,
			      void (*obj__printf)(void *obj, const char *fmt, ...),
			      void (*obj__write_graph)(void *obj, int graph))
{
	if (dl->ins.ops && dl->ins.ops->scnprintf) {
		if (ins__is_jump(&dl->ins)) {
			bool fwd;

			/* Jumps leaving the function are drawn like calls. */
			if (dl->ops.target.outside)
				goto call_like;
			fwd = dl->ops.target.offset > dl->al.offset;
			obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_call(&dl->ins)) {
call_like:
			obj__write_graph(obj, RARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_ret(&dl->ins)) {
			obj__write_graph(obj, LARROW_CHAR);
			obj__printf(obj, " ");
		} else {
			obj__printf(obj, "  ");
		}
	} else {
		obj__printf(obj, "  ");
	}

	return disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
				      notes->src->widths.max_ins_name) + 2;
}

/* Format the "(Average IPC: ..., IPC Coverage: ...)" trailer into @bf. */
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
{
	double ipc = 0.0, coverage = 0.0;
	struct annotated_branch *branch = annotation__get_branch(notes);

	if (branch && branch->hit_cycles)
		ipc = branch->hit_insn / ((double)branch->hit_cycles);

	if (branch && branch->total_insn) {
		coverage = branch->cover_insn * 100.0 /
			((double)branch->total_insn);
	}

	scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
		  ipc, coverage);
}

/*
 * Build the branch-counter abbreviation legend ("X = event" lines) into a
 * newly allocated string (*str, caller frees).  With @header, each line is
 * prefixed with '#'.  Returns 0, -ENOTSUP if no branch counters, or -ENOMEM.
 */
int annotation_br_cntr_abbr_list(char **str, struct evsel *evsel, bool header)
{
	struct evsel *pos;
	struct strbuf sb;

	if (evsel->evlist->nr_br_cntr <= 0)
		return -ENOTSUP;

	strbuf_init(&sb, /*hint=*/ 0);

	if (header && strbuf_addf(&sb, "# Branch counter abbr list:\n"))
		goto err;

	evlist__for_each_entry(evsel->evlist, pos) {
		if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
			continue;
		if (header && strbuf_addf(&sb, "#"))
			goto err;

		if (strbuf_addf(&sb, "  %s = %s\n", pos->name, pos->abbr_name))
			goto err;
	}

	if (header && strbuf_addf(&sb, "#"))
		goto err;
	if (strbuf_addf(&sb, "  '-' No event occurs\n"))
		goto err;

	if (header && strbuf_addf(&sb, "#"))
		goto err;
	if (strbuf_addf(&sb, "  '+' Event occurrences may be lost due to branch counter saturated\n"))
		goto err;

	*str = strbuf_detach(&sb, NULL);

	return 0;
err:
	strbuf_release(&sb);
	return -ENOMEM;
}

/* Assume the branch counter saturated at 3 */
#define ANNOTATION_BR_CNTR_SATURATION		3

/*
 * Render the per-line branch counter cell into a newly allocated string
 * (*str, caller frees).  Default mode draws a small histogram of abbr
 * letters ('+' when saturated, '-' when no event); verbose mode prints
 * "ABBR=avg" pairs instead.  Returns 0 or -ENOMEM.
 */
int annotation_br_cntr_entry(char **str, int br_cntr_nr,
			     u64 *br_cntr, int num_aggr,
			     struct evsel *evsel)
{
	struct evsel *pos = evsel ? evlist__first(evsel->evlist) : NULL;
	bool saturated = false;
	int i, j, avg, used;
	struct strbuf sb;

	strbuf_init(&sb, /*hint=*/ 0);
	for (i = 0; i < br_cntr_nr; i++) {
		used = 0;
		/* Round up the per-sample average, masking the saturation flag. */
		avg = ceil((double)(br_cntr[i] & ~ANNOTATION__BR_CNTR_SATURATED_FLAG) /
			   (double)num_aggr);

		/*
		 * A histogram with the abbr name is displayed by default.
		 * With -v, the exact number of branch counter is displayed.
		 */
		if (verbose) {
			/* Find the evsel whose counter index matches slot i. */
			evlist__for_each_entry_from(evsel->evlist, pos) {
				if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
				    (pos->br_cntr_idx == i))
					break;
			}
			if (strbuf_addstr(&sb, pos->abbr_name))
				goto err;

			if (!br_cntr[i]) {
				if (strbuf_addstr(&sb, "=-"))
					goto err;
			} else {
				if (strbuf_addf(&sb, "=%d", avg))
					goto err;
			}
			if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG) {
				if (strbuf_addch(&sb, '+'))
					goto err;
			} else {
				if (strbuf_addch(&sb, ' '))
					goto err;
			}

			if ((i < br_cntr_nr - 1) && strbuf_addch(&sb, ','))
				goto err;
			continue;
		}

		if (strbuf_addch(&sb, '|'))
			goto err;

		if (!br_cntr[i]) {
			if (strbuf_addch(&sb, '-'))
				goto err;
			used++;
		} else {
			evlist__for_each_entry_from(evsel->evlist, pos) {
				if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
				    (pos->br_cntr_idx == i))
					break;
			}
			if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG)
				saturated = true;

			for (j = 0; j < avg; j++, used++) {
				/* Print + if the number of logged events > 3 */
				if (j >= ANNOTATION_BR_CNTR_SATURATION) {
					saturated = true;
					break;
				}
				if (strbuf_addstr(&sb, pos->abbr_name))
					goto err;
			}

			if (saturated) {
				if (strbuf_addch(&sb, '+'))
					goto err;
				used++;
			}
			pos = list_next_entry(pos, core.node);
		}

		/* Pad the cell to a fixed width. */
		for (j = used; j < ANNOTATION_BR_CNTR_SATURATION + 1; j++) {
			if (strbuf_addch(&sb, ' '))
				goto err;
		}
	}

	if (!verbose && strbuf_addch(&sb, br_cntr_nr ? '|' : ' '))
		goto err;

	*str = strbuf_detach(&sb, NULL);

	return 0;
err:
	strbuf_release(&sb);
	return -ENOMEM;
}

/* Cached result of a data-type lookup, keyed by instruction offset. */
struct type_hash_entry {
	struct annotated_data_type *type;
	int offset;
};

/*
 * Append a "# data-type: ..." annotation for @dl into @buf, consulting (and
 * populating) apd->type_hash as a per-offset cache.  Returns the number of
 * characters written (1 for the bare padding space when disabled or no
 * type info is needed).
 */
static int disasm_line__snprint_type_info(struct disasm_line *dl,
					  char *buf, int len,
					  struct annotation_print_data *apd)
{
	struct annotated_data_type *data_type = NULL;
	struct type_hash_entry *entry = NULL;
	char member[256];
	int offset = 0;
	int printed;

	scnprintf(buf, len, " ");

	if (!annotate_opts.code_with_type || apd->dbg == NULL)
		return 1;

	if (apd->type_hash) {
		hashmap__find(apd->type_hash, dl->al.offset, &entry);
		if (entry != NULL) {
			data_type = entry->type;
			offset = entry->offset;
		}
	}

	if (data_type == NULL)
		data_type = __hist_entry__get_data_type(apd->he, apd->arch, apd->dbg, dl, &offset);

	/* Cache the lookup result, even a negative one. */
	if (apd->type_hash && entry == NULL) {
		entry = malloc(sizeof(*entry));
		if (entry != NULL) {
			entry->type = data_type;
			entry->offset = offset;
			hashmap__add(apd->type_hash, dl->al.offset, entry);
		}
	}

	if (!needs_type_info(data_type))
		return 1;

	printed = scnprintf(buf, len, "\t\t# data-type: %s", data_type->self.type_name);

	/* Pseudo types (stack op / canary) carry no meaningful offset. */
	if (data_type != &stackop_type && data_type != &canary_type && len > printed)
		printed += scnprintf(buf + printed, len - printed, " +%#x", offset);

	if (annotated_data_type__get_member_name(data_type, member, sizeof(member), offset) &&
	    len > printed) {
		printed += scnprintf(buf + printed, len - printed, " (%s)", member);
	}
	return printed;
}

/*
 * Render one annotation line through the write-ops vtable: percentage
 * columns, optional IPC/cycles/branch-counter columns, then the address
 * and disassembly (or the raw source line).  Column titles are shown on
 * the first non-sample line when @wops->first_line is set.
 */
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
			    const struct annotation_write_ops *wops,
			    struct annotation_print_data *apd)
{
	bool current_entry = wops->current_entry;
	bool change_color = wops->change_color;
	double percent_max = annotation_line__max_percent(al, annotate_opts.percent_type);
	int width = wops->width;
	int pcnt_width = annotation__pcnt_width(notes);
	int cycles_width = annotation__cycles_width(notes);
	bool show_title = false;
	char bf[256];
	int printed;
	void *obj = wops->obj;
	int (*obj__set_color)(void *obj, int color) = wops->set_color;
	void (*obj__set_percent_color)(void *obj, double percent, bool current) = wops->set_percent_color;
	int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current) = wops->set_jumps_percent_color;
	void (*obj__printf)(void *obj, const char *fmt, ...) = wops->printf;
	void (*obj__write_graph)(void *obj, int graph) = wops->write_graph;

	if (wops->first_line && (al->offset == -1 || percent_max == 0.0)) {
		if (notes->branch && al->cycles) {
			if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
				show_title = true;
		} else
			show_title = true;
	}

	if (al->offset != -1 && percent_max != 0.0) {
		int i;

		for (i = 0; i < al->data_nr; i++) {
			double percent;

			percent = annotation_data__percent(&al->data[i],
							   annotate_opts.percent_type);

			obj__set_percent_color(obj, percent, current_entry);
			if (symbol_conf.show_total_period) {
				obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
			} else if (symbol_conf.show_nr_samples) {
				obj__printf(obj, "%7" PRIu64 " ",
						   al->data[i].he.nr_samples);
			} else {
				obj__printf(obj, "%7.2f ", percent);
			}
		}
	} else {
		obj__set_percent_color(obj, 0, current_entry);

		if (!show_title)
			obj__printf(obj, "%-*s", pcnt_width, " ");
		else {
			obj__printf(obj, "%-*s", pcnt_width,
					   symbol_conf.show_total_period ? "Period" :
					   symbol_conf.show_nr_samples ? "Samples" : "Percent");
		}
	}
	width -= pcnt_width;

	if (notes->branch) {
		if (al->cycles && al->cycles->ipc)
			obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
		else if (!show_title)
			obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
		else
			obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");

		if (!annotate_opts.show_minmax_cycle) {
			if (al->cycles && al->cycles->avg)
				obj__printf(obj, "%*" PRIu64 " ",
					   ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
			else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__CYCLES_WIDTH, " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__CYCLES_WIDTH - 1,
					    "Cycle");
		} else {
			if (al->cycles) {
				char str[32];

				scnprintf(str, sizeof(str),
					"%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
					al->cycles->avg, al->cycles->min,
					al->cycles->max);

				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    str);
			} else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__MINMAX_CYCLES_WIDTH,
					    " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    "Cycle(min/max)");
		}

		if (annotate_opts.show_br_cntr) {
			if (show_title) {
				obj__printf(obj, "%*s ",
					    ANNOTATION__BR_CNTR_WIDTH,
					    "Branch Counter");
			} else {
				char *buf;

				if (!annotation_br_cntr_entry(&buf, al->br_cntr_nr, al->br_cntr,
							      al->num_aggr, al->evsel)) {
					obj__printf(obj, "%*s ", ANNOTATION__BR_CNTR_WIDTH, buf);
					free(buf);
				}
			}
		}

		if (show_title && !*al->line) {
			ipc_coverage_string(bf, sizeof(bf), notes);
			obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
		}
	}
	width -= cycles_width;

	obj__printf(obj, " ");
	width -= 1;

	if (!*al->line)
		obj__printf(obj, "%-*s", width, " ");
	else if (al->offset == -1) {
		/* Pure source line: optional line number, then the text. */
		if (al->line_nr && annotate_opts.show_linenr)
			printed = scnprintf(bf, sizeof(bf), "%-*d ",
					    notes->src->widths.addr + 1, al->line_nr);
		else
			printed = scnprintf(bf, sizeof(bf), "%-*s  ",
					    notes->src->widths.addr, " ");
		obj__printf(obj, bf);
		width -= printed;
		obj__printf(obj, "%-*s", width, al->line);
	} else {
		u64 addr = al->offset;
		int color = -1;

		if (!annotate_opts.use_offset)
			addr += notes->src->start;

		if (!annotate_opts.use_offset) {
			printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
		} else {
			/* Offset display depends on offset_level and jump targets. */
			if (al->jump_sources &&
			    annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
				if (annotate_opts.show_nr_jumps) {
					int prev;
					printed = scnprintf(bf, sizeof(bf), "%*d ",
							    notes->src->widths.jumps,
							    al->jump_sources);
					prev = obj__set_jumps_percent_color(obj, al->jump_sources,
									    current_entry);
					obj__printf(obj, bf);
					obj__set_color(obj, prev);
				}
print_addr:
				printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
						    notes->src->widths.target, addr);
			} else if (ins__is_call(&disasm_line(al)->ins) &&
				   annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
				goto print_addr;
			} else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
				goto print_addr;
			} else {
				printed = scnprintf(bf, sizeof(bf), "%-*s  ",
						    notes->src->widths.addr, " ");
			}
		}

		if (change_color)
			color = obj__set_color(obj, HE_COLORSET_ADDR);
		obj__printf(obj, bf);
		if (change_color)
			obj__set_color(obj, color);

		width -= printed;

		printed = disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf),
					     obj__printf, obj__write_graph);

		obj__printf(obj, "%s", bf);
		width -= printed;

		disasm_line__snprint_type_info(disasm_line(al), bf, sizeof(bf), apd);
		obj__printf(obj, "%-*s", width, bf);
	}

}

/*
 * Full annotation for the "2" output path: disassemble, compute per-event
 * percentages, index the lines, mark jump targets, compute IPC and column
 * widths.  Sets sym->annotate2 on success; returns 0 or a negative error.
 */
int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
		      const struct arch **parch)
{
	struct symbol *sym = ms->sym;
	struct annotation *notes = symbol__annotation(sym);
	size_t size = symbol__size(sym);
	int err;

	err = symbol__annotate(ms, evsel, parch);
	if (err)
		return err;

	symbol__calc_percent(sym, evsel);

	annotation__set_index(notes);
	annotation__mark_jump_targets(notes, sym);

	err = annotation__compute_ipc(notes, size, evsel);
	if (err)
		return err;

	annotation__init_column_widths(notes, sym);
	annotation__update_column_widths(notes);
	sym->annotate2 = 1;

	return 0;
}

/* Names for enum perf_disassembler, used by config parsing below. */
const char * const perf_disassembler__strs[] = {
	[PERF_DISASM_UNKNOWN]  = "unknown",
	[PERF_DISASM_LLVM]     = "llvm",
	[PERF_DISASM_CAPSTONE] = "capstone",
	[PERF_DISASM_OBJDUMP]  = "objdump",
};


/* Append @dis to the first free slot in options->disassemblers, skipping duplicates. */
static void annotation_options__add_disassembler(struct annotation_options *options,
						 enum perf_disassembler dis)
{
	for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers); i++) {
		if (options->disassemblers[i] == dis) {
			/* Disassembler is already present then don't add again. */
			return;
		}
		if (options->disassemblers[i] == PERF_DISASM_UNKNOWN) {
			/* Found a free slot. */
			options->disassemblers[i] = dis;
			return;
		}
	}
	pr_err("Failed to add disassembler %d\n", dis);
}

/*
 * Parse a comma-separated disassembler list (e.g. "llvm,objdump") and add
 * each recognized name.  Returns 0, or -1 on an unknown name.
 */
static int annotation_options__add_disassemblers_str(struct annotation_options *options,
						     const char *str)
{
	while (str && *str != '\0') {
		const char *comma = strchr(str, ',');
		int len = comma ? comma - str : (int)strlen(str);
		bool match = false;

		for (u8 i = 0; i < ARRAY_SIZE(perf_disassembler__strs); i++) {
			const char *dis_str = perf_disassembler__strs[i];

			if (len == (int)strlen(dis_str) && !strncmp(str, dis_str, len)) {
				annotation_options__add_disassembler(options, i);
				match = true;
				break;
			}
		}
		if (!match) {
			pr_err("Invalid disassembler '%.*s'\n", len, str);
			return -1;
		}
		str = comma ? comma + 1 : NULL;
	}
	return 0;
}

/*
 * perf_config() callback: apply "annotate.*" variables from the perf config
 * file onto @data (a struct annotation_options).  Unknown annotate.*
 * variables are ignored with a debug message.
 */
static int annotation__config(const char *var, const char *value, void *data)
{
	struct annotation_options *opt = data;

	if (!strstarts(var, "annotate."))
		return 0;

	if (!strcmp(var, "annotate.offset_level")) {
		perf_config_u8(&opt->offset_level, "offset_level", value);

		/* Clamp into the valid [MIN, MAX] offset-level range. */
		if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
		else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
	} else if (!strcmp(var, "annotate.disassemblers")) {
		int err = annotation_options__add_disassemblers_str(opt, value);

		if (err)
			return err;
	} else if (!strcmp(var, "annotate.hide_src_code")) {
		opt->hide_src_code = perf_config_bool("hide_src_code", value);
	} else if (!strcmp(var, "annotate.jump_arrows")) {
		opt->jump_arrows = perf_config_bool("jump_arrows", value);
	} else if (!strcmp(var, "annotate.show_linenr")) {
		opt->show_linenr = perf_config_bool("show_linenr", value);
	} else if (!strcmp(var, "annotate.show_nr_jumps")) {
		opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
	} else if (!strcmp(var, "annotate.show_nr_samples")) {
		symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
								value);
	} else if (!strcmp(var, "annotate.show_total_period")) {
		symbol_conf.show_total_period = perf_config_bool("show_total_period",
								value);
	} else if (!strcmp(var, "annotate.use_offset")) {
		opt->use_offset = perf_config_bool("use_offset", value);
	} else if (!strcmp(var, "annotate.disassembler_style")) {
		opt->disassembler_style = strdup(value);
		if (!opt->disassembler_style) {
			pr_err("Not enough memory for annotate.disassembler_style\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.objdump")) {
		opt->objdump_path = strdup(value);
		if (!opt->objdump_path) {
			pr_err("Not enough memory for annotate.objdump\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.addr2line")) {
		symbol_conf.addr2line_path = strdup(value);
		if (!symbol_conf.addr2line_path) {
			pr_err("Not enough memory for annotate.addr2line\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.demangle")) {
		symbol_conf.demangle = perf_config_bool("demangle", value);
	} else if (!strcmp(var, "annotate.demangle_kernel")) {
		symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
	} else {
		pr_debug("%s variable unknown, ignoring...", var);
	}

	return 0;
}

/* Reset the global annotation options to their built-in defaults. */
void annotation_options__init(void)
{
	struct annotation_options *opt = &annotate_opts;

	memset(opt, 0, sizeof(*opt));

	/* Default values. */
	opt->use_offset = true;
	opt->jump_arrows = true;
	opt->annotate_src = true;
	opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
	opt->percent_type = PERCENT_PERIOD_LOCAL;
	opt->hide_src_code = true;
	opt->hide_src_code_on_title = true;
}

/* Free the strdup()ed option strings. */
void annotation_options__exit(void)
{
	zfree(&annotate_opts.disassembler_style);
	zfree(&annotate_opts.objdump_path);
}

/*
 * Fill in the default disassembler priority list (LLVM, then capstone, then
 * objdump, subject to build-time support) unless the config already set one.
 */
static void annotation_options__default_init_disassemblers(struct annotation_options *options)
{
	if (options->disassemblers[0] != PERF_DISASM_UNKNOWN) {
		/* Already initialized. */
		return;
	}
#ifdef HAVE_LIBLLVM_SUPPORT
	annotation_options__add_disassembler(options, PERF_DISASM_LLVM);
#endif
#ifdef HAVE_LIBCAPSTONE_SUPPORT
	annotation_options__add_disassembler(options, PERF_DISASM_CAPSTONE);
#endif
	annotation_options__add_disassembler(options, PERF_DISASM_OBJDUMP);
}

/* Load annotate.* config values and finish disassembler defaults. */
void annotation_config__init(void)
{
	perf_config(annotation__config, &annotate_opts);
	annotation_options__default_init_disassemblers(&annotate_opts);
}

/*
 * Map a ("period"|"hits", "local"|"global") pair to a PERCENT_* constant,
 * or (unsigned int)-1 when it doesn't match.
 */
static unsigned int parse_percent_type(char *str1, char *str2)
{
	unsigned int type = (unsigned int) -1;

	if (!strcmp("period", str1)) {
		if (!strcmp("local", str2))
			type = PERCENT_PERIOD_LOCAL;
		else if (!strcmp("global", str2))
			type = PERCENT_PERIOD_GLOBAL;
	}

	if (!strcmp("hits", str1)) {
		if (!strcmp("local", str2))
			type = PERCENT_HITS_LOCAL;
		else if (!strcmp("global", str2))
			type = PERCENT_HITS_GLOBAL;
	}

	return type;
}

/*
 * Option callback for --percent-type: accepts "X-Y" in either order
 * (e.g. "local-period" or "period-local") and stores the parsed type in
 * annotate_opts.  Returns 0 on success, -1/-ENOMEM on failure.
 */
int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
				int unset __maybe_unused)
{
	unsigned int type;
	char *str1, *str2;
	int err = -1;

	str1 = strdup(_str);
	if (!str1)
		return -ENOMEM;

	str2 = strchr(str1, '-');
	if (!str2)
		goto out;

	*str2++ = 0;

	/* Try both orderings of the two halves. */
	type = parse_percent_type(str1, str2);
	if (type == (unsigned int) -1)
		type = parse_percent_type(str2, str1);
	if (type != (unsigned int) -1) {
		annotate_opts.percent_type = type;
		err = 0;
	}

out:
	free(str1);
	return err;
}

/* Validate inter-option constraints of the annotate options. */
int annotate_check_args(void)
{
	struct annotation_options *args = &annotate_opts;

	if (args->prefix_strip && !args->prefix) {
		pr_err("--prefix-strip requires --prefix\n");
		return -1;
	}
	return 0;
}

/*
 * Get register number and access
 * offset from the given instruction.
 * It assumes AT&T x86 asm format like OFFSET(REG).  Maybe it needs
 * to revisit the format when it handles different architecture.
 * Fills @reg and @offset when return 0.
 */
static int extract_reg_offset(const struct arch *arch, const char *str,
			      struct annotated_op_loc *op_loc)
{
	char *p;
	char *regname;

	/* This arch does not mark registers with a prefix char: cannot parse. */
	if (arch->objdump.register_char == 0)
		return -1;

	/*
	 * It should start from offset, but it's possible to skip 0
	 * in the asm.  So 0(%rax) should be same as (%rax).
	 *
	 * However, it also start with a segment select register like
	 * %gs:0x18(%rbx).  In that case it should skip the part.
	 */
	if (*str == arch->objdump.register_char) {
		if (arch__is_x86(arch)) {
			/* FIXME: Handle other segment registers */
			if (!strncmp(str, "%gs:", 4))
				op_loc->segment = INSN_SEG_X86_GS;
		}

		/* Skip the segment register part up to the offset digits or '(' */
		while (*str && !isdigit(*str) &&
		       *str != arch->objdump.memory_ref_char)
			str++;
	}

	/* Missing offset parses as 0, matching the "(%rax)" shorthand above */
	op_loc->offset = strtol(str, &p, 0);

	/* Locate the first register inside the parentheses */
	p = strchr(p, arch->objdump.register_char);
	if (p == NULL)
		return -1;

	/*
	 * NOTE(review): the whole string tail (including any trailing
	 * ",index,scale)") is duplicated and handed to get_dwarf_regnum() —
	 * presumably it parses only the leading register name; confirm.
	 */
	regname = strdup(p);
	if (regname == NULL)
		return -1;

	op_loc->reg1 = get_dwarf_regnum(regname, arch->id.e_machine, arch->id.e_flags);
	free(regname);

	/* Get the second register */
	if (op_loc->multi_regs) {
		p = strchr(p + 1, arch->objdump.register_char);
		if (p == NULL)
			return -1;

		regname = strdup(p);
		if (regname == NULL)
			return -1;

		op_loc->reg2 = get_dwarf_regnum(regname, arch->id.e_machine, arch->id.e_flags);
		free(regname);
	}
	return 0;
}

/**
 * annotate_get_insn_location - Get location of instruction
 * @arch: the architecture info
 * @dl: the target instruction
 * @loc: a buffer to save the data
 *
 * Get detailed location info (register and offset) in the
 * instruction.
 * It needs both source and target operand and whether it accesses a
 * memory location.  The offset field is meaningful only when the
 * corresponding mem flag is set.  The reg2 field is meaningful only
 * when multi_regs flag is set.
 *
 * Some examples on x86:
 *
 *   mov  (%rax), %rcx	# src_reg1 = rax, src_mem = 1, src_offset = 0
 *			# dst_reg1 = rcx, dst_mem = 0
 *
 *   mov  0x18, %r8	# src_reg1 = -1, src_mem = 0
 *			# dst_reg1 = r8, dst_mem = 0
 *
 *   mov  %rsi, 8(%rbx,%rcx,4)	# src_reg1 = rsi, src_mem = 0, src_multi_regs = 0
 *				# dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
 *				# dst_multi_regs = 1, dst_offset = 8
 */
int annotate_get_insn_location(const struct arch *arch, struct disasm_line *dl,
			       struct annotated_insn_loc *loc)
{
	struct ins_operands *ops;
	struct annotated_op_loc *op_loc;
	int i;

	/* For "lock"-prefixed insns the real operands hang off the locked insn */
	if (ins__is_lock(&dl->ins))
		ops = dl->ops.locked.ops;
	else
		ops = &dl->ops;

	if (ops == NULL)
		return -1;

	memset(loc, 0, sizeof(*loc));

	/* Iterate the source operand (i == 0) then the target (i == INSN_OP_TARGET) */
	for_each_insn_op_loc(loc, i, op_loc) {
		const char *insn_str = ops->source.raw;
		bool multi_regs = ops->source.multi_regs;
		bool mem_ref = ops->source.mem_ref;

		if (i == INSN_OP_TARGET) {
			insn_str = ops->target.raw;
			multi_regs = ops->target.multi_regs;
			mem_ref = ops->target.mem_ref;
		}

		/* Invalidate the register by default */
		op_loc->reg1 = -1;
		op_loc->reg2 = -1;

		/*
		 * No operand text: only powerpc can continue, since it
		 * decodes the raw instruction word below instead of text.
		 */
		if (insn_str == NULL) {
			if (!arch__is_powerpc(arch))
				continue;
		}

		/*
		 * For powerpc, call get_powerpc_regs function which extracts the
		 * required fields for op_loc, ie reg1, reg2, offset from the
		 * raw instruction.
		 */
		if (arch__is_powerpc(arch)) {
			op_loc->mem_ref = mem_ref;
			op_loc->multi_regs = multi_regs;
			get_powerpc_regs(dl->raw.raw_insn, !i, op_loc);
		} else if (strchr(insn_str, arch->objdump.memory_ref_char)) {
			/* Memory operand like "8(%rbx)": parse offset and register(s) */
			op_loc->mem_ref = true;
			op_loc->multi_regs = multi_regs;
			extract_reg_offset(arch, insn_str, op_loc);
		} else {
			char *s, *p = NULL;

			if (arch__is_x86(arch)) {
				/* FIXME: Handle other segment registers */
				if (!strncmp(insn_str, "%gs:", 4)) {
					op_loc->segment = INSN_SEG_X86_GS;
					op_loc->offset = strtol(insn_str + 4,
								&p, 0);
					/* Mark as immediate only if digits were consumed */
					if (p && p != insn_str + 4)
						op_loc->imm = true;
					continue;
				}
			}

			s = strdup(insn_str);
			if (s == NULL)
				return -1;

			if (*s == arch->objdump.register_char) {
				/* Plain register operand */
				op_loc->reg1 = get_dwarf_regnum(s,
								arch->id.e_machine,
								arch->id.e_flags);
			} else if (*s == arch->objdump.imm_char) {
				/* Immediate operand like "$0x10" */
				op_loc->offset = strtol(s + 1, &p, 0);
				if (p && p != s + 1)
					op_loc->imm = true;
			}
			free(s);
		}
	}

	return 0;
}

/*
 * Find the disasm_line of @sym whose offset matches @ip.  Returns NULL if
 * no instruction starts at that address.
 */
static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip,
					    bool allow_update)
{
	struct disasm_line *dl;
	struct annotation *notes;

	notes = symbol__annotation(sym);

	list_for_each_entry(dl, &notes->src->source, al.node) {
		/* Skip comment/source lines which carry no instruction offset */
		if (dl->al.offset == -1)
			continue;

		if (sym->start + dl->al.offset == ip) {
			/*
			 * llvm-objdump places "lock" in a separate line and
			 * in that case, we want to get the next line.
2638 */ 2639 if (ins__is_lock(&dl->ins) && 2640 *dl->ops.raw == '\0' && allow_update) { 2641 ip++; 2642 continue; 2643 } 2644 return dl; 2645 } 2646 } 2647 return NULL; 2648 } 2649 2650 static struct annotated_item_stat *annotate_data_stat(struct list_head *head, 2651 const char *name) 2652 { 2653 struct annotated_item_stat *istat; 2654 2655 list_for_each_entry(istat, head, list) { 2656 if (!strcmp(istat->name, name)) 2657 return istat; 2658 } 2659 2660 istat = zalloc(sizeof(*istat)); 2661 if (istat == NULL) 2662 return NULL; 2663 2664 istat->name = strdup(name); 2665 if ((istat->name == NULL) || (!strlen(istat->name))) { 2666 free(istat); 2667 return NULL; 2668 } 2669 2670 list_add_tail(&istat->list, head); 2671 return istat; 2672 } 2673 2674 static bool is_stack_operation(const struct arch *arch, struct disasm_line *dl) 2675 { 2676 if (arch__is_x86(arch)) { 2677 if (!strncmp(dl->ins.name, "push", 4) || 2678 !strncmp(dl->ins.name, "pop", 3) || 2679 !strncmp(dl->ins.name, "call", 4) || 2680 !strncmp(dl->ins.name, "ret", 3)) 2681 return true; 2682 } 2683 2684 return false; 2685 } 2686 2687 static bool is_stack_canary(const struct arch *arch, struct annotated_op_loc *loc) 2688 { 2689 /* On x86_64, %gs:40 is used for stack canary */ 2690 if (arch__is_x86(arch)) { 2691 if (loc->segment == INSN_SEG_X86_GS && loc->imm && 2692 loc->offset == 40) 2693 return true; 2694 } 2695 2696 return false; 2697 } 2698 2699 /** 2700 * Returns true if the instruction has a memory operand without 2701 * performing a load/store 2702 */ 2703 static bool is_address_gen_insn(const struct arch *arch, struct disasm_line *dl) 2704 { 2705 if (arch__is_x86(arch)) { 2706 if (!strncmp(dl->ins.name, "lea", 3)) 2707 return true; 2708 } 2709 2710 return false; 2711 } 2712 2713 static struct disasm_line * 2714 annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr) 2715 { 2716 struct list_head *sources = ¬es->src->source; 2717 struct disasm_line *prev; 2718 2719 if (curr == 
	    list_first_entry(sources, struct disasm_line, al.node))
		return NULL;

	/* Walk backwards past comment/source lines (offset == -1) */
	prev = list_prev_entry(curr, al.node);
	while (prev->al.offset == -1 &&
	       prev != list_first_entry(sources, struct disasm_line, al.node))
		prev = list_prev_entry(prev, al.node);

	/* Only non-instruction lines before @curr */
	if (prev->al.offset == -1)
		return NULL;

	return prev;
}

/*
 * Return the next line that carries an instruction (offset != -1),
 * or NULL when @curr is the last such line.
 */
static struct disasm_line *
annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr)
{
	struct list_head *sources = &notes->src->source;
	struct disasm_line *next;

	if (curr == list_last_entry(sources, struct disasm_line, al.node))
		return NULL;

	/* Walk forwards past comment/source lines (offset == -1) */
	next = list_next_entry(curr, al.node);
	while (next->al.offset == -1 &&
	       next != list_last_entry(sources, struct disasm_line, al.node))
		next = list_next_entry(next, al.node);

	/* Only non-instruction lines after @curr */
	if (next->al.offset == -1)
		return NULL;

	return next;
}

/*
 * Resolve a PC-relative operand of @dl at address @ip to an objdump-style
 * address, given the displacement @offset.
 */
u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
			struct disasm_line *dl)
{
	struct annotation *notes;
	struct disasm_line *next;
	u64 addr;

	notes = symbol__annotation(ms->sym);
	/*
	 * PC-relative addressing starts from the next instruction address
	 * But the IP is for the current instruction.  Since disasm_line
	 * doesn't have the instruction size, calculate it using the next
	 * disasm_line.  If it's the last one, we can use symbol's end
	 * address directly.
	 */
	next = annotation__next_asm_line(notes, dl);
	if (next == NULL)
		addr = ms->sym->end + offset;
	else
		addr = ip + (next->al.offset - dl->al.offset) + offset;

	return map__rip_2objdump(ms->map, addr);
}

/* One-entry cache: debuginfo of the most recently seen DSO */
static struct debuginfo_cache {
	struct dso *dso;
	struct debuginfo *dbg;
} di_cache;

/* Drop the cached DSO reference and its debuginfo handle. */
void debuginfo_cache__delete(void)
{
	dso__put(di_cache.dso);
	di_cache.dso = NULL;

	debuginfo__delete(di_cache.dbg);
	di_cache.dbg = NULL;
}

/*
 * Resolve the data type accessed by @dl using DWARF info in @dbg.
 * Returns the resolved type (offset in *@type_offset), NO_TYPE when
 * resolution failed permanently, or NULL when the caller should retry
 * with a possibly-fused previous instruction.
 */
static struct annotated_data_type *
__hist_entry__get_data_type(struct hist_entry *he, const struct arch *arch,
			    struct debuginfo *dbg, struct disasm_line *dl,
			    int *type_offset)
{
	struct map_symbol *ms = &he->ms;
	struct annotated_insn_loc loc;
	struct annotated_op_loc *op_loc;
	struct annotated_data_type *mem_type;
	struct annotated_item_stat *istat;
	int i;

	istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
	if (istat == NULL) {
		ann_data_stat.no_insn++;
		return NO_TYPE;
	}

	if (annotate_get_insn_location(arch, dl, &loc) < 0) {
		ann_data_stat.no_insn_ops++;
		istat->bad++;
		return NO_TYPE;
	}

	/* push/pop/call/ret: report the pseudo "(stack operation)" type */
	if (is_stack_operation(arch, dl)) {
		istat->good++;
		*type_offset = 0;
		return &stackop_type;
	}

	/* e.g. lea: has a memory operand but performs no actual access */
	if (is_address_gen_insn(arch, dl)) {
		istat->bad++;
		ann_data_stat.no_mem_ops++;
		return NO_TYPE;
	}

	for_each_insn_op_loc(&loc, i, op_loc) {
		struct data_loc_info dloc = {
			.arch = arch,
			.thread = he->thread,
			.ms = ms,
			.ip = ms->sym->start + dl->al.offset,
			.cpumode = he->cpumode,
			.op = op_loc,
			.di = dbg,
		};

		/* Only memory-referencing or segment-prefixed operands matter */
		if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
			continue;

		/* PC-relative addressing */
		if (op_loc->reg1 == DWARF_REG_PC) {
			dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip,
							    op_loc->offset,
							    dl);
		}

		/* This CPU access in kernel - pretend PC-relative addressing */
		if (dso__kernel(map__dso(ms->map)) && arch__is_x86(arch) &&
		    op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) {
			dloc.var_addr = op_loc->offset;
			op_loc->reg1 = DWARF_REG_PC;
		}

		mem_type = find_data_type(&dloc);

		/* %gs:40 on x86_64: report the pseudo "(stack canary)" type */
		if (mem_type == NULL && is_stack_canary(arch, op_loc)) {
			istat->good++;
			*type_offset = 0;
			return &canary_type;
		}

		if (mem_type)
			istat->good++;
		else
			istat->bad++;

		if (symbol_conf.annotate_data_sample) {
			struct evsel *evsel = hists_to_evsel(he->hists);

			annotated_data_type__update_samples(mem_type, evsel,
							    dloc.type_offset,
							    he->stat.nr_events,
							    he->stat.period);
		}
		*type_offset = dloc.type_offset;
		/* Only the first interesting operand is considered */
		return mem_type ?: NO_TYPE;
	}

	/* retry with a fused instruction */
	return NULL;
}

/**
 * hist_entry__get_data_type - find data type for given hist entry
 * @he: hist entry
 *
 * This function first annotates the instruction at @he->ip and extracts
 * register and offset info from it.  Then it searches the DWARF debug
 * info to get a variable and type information using the address, register,
 * and offset.
 */
struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
{
	struct map_symbol *ms = &he->ms;
	struct evsel *evsel = hists_to_evsel(he->hists);
	const struct arch *arch;
	struct disasm_line *dl;
	struct annotated_data_type *mem_type;
	struct annotated_item_stat *istat;
	u64 ip = he->ip;

	ann_data_stat.total++;

	if (ms->map == NULL || ms->sym == NULL) {
		ann_data_stat.no_sym++;
		return NULL;
	}

	/* Annotation infra was never initialized; counted as no_sym too */
	if (!symbol_conf.init_annotation) {
		ann_data_stat.no_sym++;
		return NULL;
	}

	/*
	 * di_cache holds a pair of values, but code below assumes
	 * di_cache.dso can be compared/updated and di_cache.dbg can be
	 * read/updated independently from each other. That assumption only
	 * holds in single threaded code.
	 */
	assert(perf_singlethreaded);

	/* Refill the one-entry debuginfo cache when the DSO changed */
	if (map__dso(ms->map) != di_cache.dso) {
		dso__put(di_cache.dso);
		di_cache.dso = dso__get(map__dso(ms->map));

		debuginfo__delete(di_cache.dbg);
		di_cache.dbg = dso__debuginfo(di_cache.dso);
	}

	if (di_cache.dbg == NULL) {
		ann_data_stat.no_dbginfo++;
		return NULL;
	}

	/* Make sure it has the disasm of the function */
	if (symbol__annotate(ms, evsel, &arch) < 0) {
		ann_data_stat.no_insn++;
		return NULL;
	}

	/*
	 * Get a disasm to extract the location from the insn.
	 * This is too slow...
	 */
	dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true);
	if (dl == NULL) {
		ann_data_stat.no_insn++;
		return NULL;
	}

retry:
	mem_type = __hist_entry__get_data_type(he, arch, di_cache.dbg, dl,
					       &he->mem_type_off);
	/* NO_TYPE means a permanent failure; NULL (below) means retry */
	if (mem_type)
		return mem_type == NO_TYPE ? NULL : mem_type;

	/*
	 * Some instructions can be fused and the actual memory access came
	 * from the previous instruction.
	 */
	if (dl->al.offset > 0) {
		struct annotation *notes;
		struct disasm_line *prev_dl;

		notes = symbol__annotation(ms->sym);
		prev_dl = annotation__prev_asm_line(notes, dl);

		/* Retry the type lookup on the first half of the fused pair */
		if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
			dl = prev_dl;
			goto retry;
		}
	}

	ann_data_stat.no_mem_ops++;
	istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
	if (istat)
		istat->bad++;
	return NULL;
}

/* Basic block traversal (BFS) data structure */
struct basic_block_data {
	struct list_head queue;
	struct list_head visited;
};

/*
 * During the traversal, it needs to know the parent block where the current
 * block started from.  Note that a single basic block can be parent of
 * two child basic blocks (in case of condition jump).
 */
struct basic_block_link {
	struct list_head node;
	struct basic_block_link *parent;
	struct annotated_basic_block *bb;
};

/* Check any of basic block in the list already has the offset */
static bool basic_block_has_offset(struct list_head *head, s64 offset)
{
	struct basic_block_link *link;

	list_for_each_entry(link, head, node) {
		s64 begin_offset = link->bb->begin->al.offset;
		s64 end_offset = link->bb->end->al.offset;

		/* Inclusive range check against the block's [begin, end] */
		if (begin_offset <= offset && offset <= end_offset)
			return true;
	}
	return false;
}

/* True when @dl is in neither the visited nor the pending (queue) blocks */
static bool is_new_basic_block(struct basic_block_data *bb_data,
			       struct disasm_line *dl)
{
	s64 offset = dl->al.offset;

	if (basic_block_has_offset(&bb_data->visited, offset))
		return false;
	if (basic_block_has_offset(&bb_data->queue, offset))
		return false;
	return true;
}

/* Add a basic block starting from dl and link it to the parent */
static int add_basic_block(struct basic_block_data *bb_data,
			   struct basic_block_link
							*parent,
			   struct disasm_line *dl)
{
	struct annotated_basic_block *bb;
	struct basic_block_link *link;

	if (dl == NULL)
		return -1;

	/* Already queued or visited: nothing to do, not an error */
	if (!is_new_basic_block(bb_data, dl))
		return 0;

	bb = zalloc(sizeof(*bb));
	if (bb == NULL)
		return -1;

	/* A new block initially covers just this one instruction */
	bb->begin = dl;
	bb->end = dl;
	INIT_LIST_HEAD(&bb->list);

	link = malloc(sizeof(*link));
	if (link == NULL) {
		free(bb);
		return -1;
	}

	link->bb = bb;
	link->parent = parent;
	list_add_tail(&link->node, &bb_data->queue);
	return 0;
}

/* Returns true when it finds the target in the current basic block */
static bool process_basic_block(struct basic_block_data *bb_data,
				struct basic_block_link *link,
				struct symbol *sym, u64 target)
{
	struct disasm_line *dl, *next_dl, *last_dl;
	struct annotation *notes = symbol__annotation(sym);
	bool found = false;

	dl = link->bb->begin;
	/* Check if it's already visited */
	if (basic_block_has_offset(&bb_data->visited, dl->al.offset))
		return false;

	/* Find the last real instruction of the function */
	last_dl = list_last_entry(&notes->src->source,
				  struct disasm_line, al.node);
	if (last_dl->al.offset == -1)
		last_dl = annotation__prev_asm_line(notes, last_dl);

	if (last_dl == NULL)
		return false;

	/* Extend the block instruction by instruction from its begin line */
	list_for_each_entry_from(dl, &notes->src->source, al.node) {
		/* Skip comment or debug info line */
		if (dl->al.offset == -1)
			continue;
		/* Found the target instruction */
		if (sym->start + dl->al.offset == target) {
			found = true;
			break;
		}
		/* End of the function, finish the block */
		if (dl == last_dl)
			break;
		/* 'return' instruction finishes the block */
		if (ins__is_ret(&dl->ins))
			break;
		/* normal instructions are part of the basic block */
		if (!ins__is_jump(&dl->ins))
			continue;
		/* jump to a different function, tail call or return */
		if
(dl->ops.target.outside)
			break;
		/* jump instruction creates new basic block(s) */
		next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset,
					   /*allow_update=*/false);
		if (next_dl)
			add_basic_block(bb_data, link, next_dl);

		/*
		 * FIXME: determine conditional jumps properly.
		 * Conditional jumps create another basic block with the
		 * next disasm line.
		 */
		if (!strstr(dl->ins.name, "jmp")) {
			next_dl = annotation__next_asm_line(notes, dl);
			if (next_dl)
				add_basic_block(bb_data, link, next_dl);
		}
		break;

	}
	/* The block ends at the last instruction examined above */
	link->bb->end = dl;
	return found;
}

/*
 * Once it found a target basic block, build a proper linked list of basic
 * blocks by following the link recursively.
 */
static void link_found_basic_blocks(struct basic_block_link *link,
				    struct list_head *head)
{
	while (link) {
		struct basic_block_link *parent = link->parent;

		/* Move the block to @head; ownership of bb leaves the link */
		list_move(&link->bb->list, head);
		list_del(&link->node);
		free(link);

		link = parent;
	}
}

/* Free all remaining links and their blocks in both BFS lists. */
static void delete_basic_blocks(struct basic_block_data *bb_data)
{
	struct basic_block_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
		list_del(&link->node);
		zfree(&link->bb);
		free(link);
	}

	list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
		list_del(&link->node);
		zfree(&link->bb);
		free(link);
	}
}

/**
 * annotate_get_basic_blocks - Get basic blocks for given address range
 * @sym: symbol to annotate
 * @src: source address
 * @dst: destination address
 * @head: list head to save basic blocks
 *
 * This function traverses disasm_lines from @src to @dst and save them in a
 * list of annotated_basic_block to @head.  It uses BFS to find the shortest
 * path between two.  The basic_block_link is to maintain parent links so
The basic_block_link is to maintain parent links so 3170 * that it can build a list of blocks from the start. 3171 */ 3172 int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst, 3173 struct list_head *head) 3174 { 3175 struct basic_block_data bb_data = { 3176 .queue = LIST_HEAD_INIT(bb_data.queue), 3177 .visited = LIST_HEAD_INIT(bb_data.visited), 3178 }; 3179 struct basic_block_link *link; 3180 struct disasm_line *dl; 3181 int ret = -1; 3182 3183 dl = find_disasm_line(sym, src, /*allow_update=*/false); 3184 if (dl == NULL) 3185 return -1; 3186 3187 if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0) 3188 return -1; 3189 3190 /* Find shortest path from src to dst using BFS */ 3191 while (!list_empty(&bb_data.queue)) { 3192 link = list_first_entry(&bb_data.queue, struct basic_block_link, node); 3193 3194 if (process_basic_block(&bb_data, link, sym, dst)) { 3195 link_found_basic_blocks(link, head); 3196 ret = 0; 3197 break; 3198 } 3199 list_move(&link->node, &bb_data.visited); 3200 } 3201 delete_basic_blocks(&bb_data); 3202 return ret; 3203 } 3204