1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 4 * 5 * Parts came from builtin-annotate.c, see those files for further 6 * copyright notes. 7 */ 8 9 #include <errno.h> 10 #include <inttypes.h> 11 #include <libgen.h> 12 #include <stdlib.h> 13 #include "util.h" // hex_width() 14 #include "ui/ui.h" 15 #include "sort.h" 16 #include "build-id.h" 17 #include "color.h" 18 #include "config.h" 19 #include "disasm.h" 20 #include "dso.h" 21 #include "env.h" 22 #include "map.h" 23 #include "maps.h" 24 #include "symbol.h" 25 #include "srcline.h" 26 #include "units.h" 27 #include "debug.h" 28 #include "debuginfo.h" 29 #include "annotate.h" 30 #include "annotate-data.h" 31 #include "evsel.h" 32 #include "evlist.h" 33 #include "bpf-event.h" 34 #include "bpf-utils.h" 35 #include "block-range.h" 36 #include "string2.h" 37 #include "dwarf-regs.h" 38 #include "util/event.h" 39 #include "util/sharded_mutex.h" 40 #include "arch/common.h" 41 #include "namespaces.h" 42 #include "thread.h" 43 #include "hashmap.h" 44 #include "strbuf.h" 45 #include <regex.h> 46 #include <linux/bitops.h> 47 #include <linux/kernel.h> 48 #include <linux/string.h> 49 #include <linux/zalloc.h> 50 #include <subcmd/parse-options.h> 51 #include <subcmd/run-command.h> 52 #include <math.h> 53 54 /* FIXME: For the HE_COLORSET */ 55 #include "ui/browser.h" 56 57 /* 58 * FIXME: Using the same values as slang.h, 59 * but that header may not be available everywhere 60 */ 61 #define LARROW_CHAR ((unsigned char)',') 62 #define RARROW_CHAR ((unsigned char)'+') 63 #define DARROW_CHAR ((unsigned char)'.') 64 #define UARROW_CHAR ((unsigned char)'-') 65 66 #include <linux/ctype.h> 67 68 /* global annotation options */ 69 struct annotation_options annotate_opts; 70 71 /* Data type collection debug statistics */ 72 struct annotated_data_stat ann_data_stat; 73 LIST_HEAD(ann_insn_stat); 74 75 /* Pseudo data types */ 76 struct annotated_data_type stackop_type = { 77 .self = { 78 .type_name = (char *)"(stack operation)", 79 .children = LIST_HEAD_INIT(stackop_type.self.children), 80 }, 81 }; 82 83 struct annotated_data_type canary_type = { 84 .self = { 85 .type_name = (char *)"(stack canary)", 86 .children = LIST_HEAD_INIT(canary_type.self.children), 87 }, 88 }; 89 90 #define NO_TYPE ((struct annotated_data_type *)-1UL) 91 92 /* symbol histogram: key = offset << 16 | evsel->core.idx */ 93 static size_t sym_hist_hash(long key, void *ctx __maybe_unused) 94 { 95 return (key >> 16) + (key & 0xffff); 96 } 97 98 static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused) 99 { 100 return key1 == key2; 101 } 102 103 static struct annotated_source *annotated_source__new(void) 104 { 105 struct annotated_source *src = zalloc(sizeof(*src)); 106 107 if (src != NULL) 108 INIT_LIST_HEAD(&src->source); 109 110 return src; 111 } 112 113 static __maybe_unused void annotated_source__delete(struct annotated_source *src) 114 { 115 struct hashmap_entry *cur; 116 size_t bkt; 117 118 if (src == NULL) 119 return; 120 121 if (src->samples) { 122 hashmap__for_each_entry(src->samples, cur, bkt) 123 zfree(&cur->pvalue); 124 hashmap__free(src->samples); 125 } 126 zfree(&src->histograms); 127 free(src); 128 } 129 130 static int annotated_source__alloc_histograms(struct annotated_source *src, 131 int nr_hists) 132 { 133 src->nr_histograms = nr_hists; 134 src->histograms = calloc(nr_hists, sizeof(*src->histograms)); 135 136 if (src->histograms == NULL) 137 return -1; 138 139 src->samples = 
hashmap__new(sym_hist_hash, sym_hist_equal, NULL); 140 if (src->samples == NULL) 141 zfree(&src->histograms); 142 143 return src->histograms ? 0 : -1; 144 } 145 146 void symbol__annotate_zero_histograms(struct symbol *sym) 147 { 148 struct annotation *notes = symbol__annotation(sym); 149 150 annotation__lock(notes); 151 if (notes->src != NULL) { 152 memset(notes->src->histograms, 0, 153 notes->src->nr_histograms * sizeof(*notes->src->histograms)); 154 hashmap__clear(notes->src->samples); 155 } 156 if (notes->branch && notes->branch->cycles_hist) { 157 memset(notes->branch->cycles_hist, 0, 158 symbol__size(sym) * sizeof(struct cyc_hist)); 159 } 160 annotation__unlock(notes); 161 } 162 163 static int __symbol__account_cycles(struct cyc_hist *ch, 164 u64 start, 165 unsigned offset, unsigned cycles, 166 unsigned have_start) 167 { 168 /* 169 * For now we can only account one basic block per 170 * final jump. But multiple could be overlapping. 171 * Always account the longest one. So when 172 * a shorter one has been already seen throw it away. 173 * 174 * We separately always account the full cycles. 175 */ 176 ch[offset].num_aggr++; 177 ch[offset].cycles_aggr += cycles; 178 179 if (cycles > ch[offset].cycles_max) 180 ch[offset].cycles_max = cycles; 181 182 if (ch[offset].cycles_min) { 183 if (cycles && cycles < ch[offset].cycles_min) 184 ch[offset].cycles_min = cycles; 185 } else 186 ch[offset].cycles_min = cycles; 187 188 if (!have_start && ch[offset].have_start) 189 return 0; 190 if (ch[offset].num) { 191 if (have_start && (!ch[offset].have_start || 192 ch[offset].start > start)) { 193 ch[offset].have_start = 0; 194 ch[offset].cycles = 0; 195 ch[offset].num = 0; 196 if (ch[offset].reset < 0xffff) 197 ch[offset].reset++; 198 } else if (have_start && 199 ch[offset].start < start) 200 return 0; 201 } 202 203 if (ch[offset].num < NUM_SPARKS) 204 ch[offset].cycles_spark[ch[offset].num] = cycles; 205 206 ch[offset].have_start = have_start; 207 ch[offset].start = start; 208 ch[offset].cycles += cycles; 209 ch[offset].num++; 210 return 0; 211 } 212 213 static int __symbol__inc_addr_samples(struct map_symbol *ms, 214 struct annotated_source *src, struct evsel *evsel, u64 addr, 215 struct perf_sample *sample) 216 { 217 struct symbol *sym = ms->sym; 218 long hash_key; 219 u64 offset; 220 struct sym_hist *h; 221 struct sym_hist_entry *entry; 222 223 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr)); 224 225 if ((addr < sym->start || addr >= sym->end) && 226 (addr != sym->end || sym->start != sym->end)) { 227 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n", 228 __func__, __LINE__, sym->name, sym->start, addr, sym->end); 229 return -ERANGE; 230 } 231 232 offset = addr - sym->start; 233 h = annotated_source__histogram(src, evsel); 234 if (h == NULL) { 235 pr_debug("%s(%d): ENOMEM! 
sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n", 236 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC); 237 return -ENOMEM; 238 } 239 240 hash_key = offset << 16 | evsel->core.idx; 241 if (!hashmap__find(src->samples, hash_key, &entry)) { 242 entry = zalloc(sizeof(*entry)); 243 if (entry == NULL) 244 return -ENOMEM; 245 246 if (hashmap__add(src->samples, hash_key, entry) < 0) 247 return -ENOMEM; 248 } 249 250 h->nr_samples++; 251 h->period += sample->period; 252 entry->nr_samples++; 253 entry->period += sample->period; 254 255 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64 256 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n", 257 sym->start, sym->name, addr, addr - sym->start, evsel->core.idx, 258 entry->nr_samples, entry->period); 259 return 0; 260 } 261 262 struct annotated_branch *annotation__get_branch(struct annotation *notes) 263 { 264 if (notes == NULL) 265 return NULL; 266 267 if (notes->branch == NULL) 268 notes->branch = zalloc(sizeof(*notes->branch)); 269 270 return notes->branch; 271 } 272 273 static struct annotated_branch *symbol__find_branch_hist(struct symbol *sym, 274 unsigned int br_cntr_nr) 275 { 276 struct annotation *notes = symbol__annotation(sym); 277 struct annotated_branch *branch; 278 const size_t size = symbol__size(sym); 279 280 branch = annotation__get_branch(notes); 281 if (branch == NULL) 282 return NULL; 283 284 if (branch->cycles_hist == NULL) { 285 branch->cycles_hist = calloc(size, sizeof(struct cyc_hist)); 286 if (!branch->cycles_hist) 287 return NULL; 288 } 289 290 if (br_cntr_nr && branch->br_cntr == NULL) { 291 branch->br_cntr = calloc(br_cntr_nr * size, sizeof(u64)); 292 if (!branch->br_cntr) 293 return NULL; 294 } 295 296 return branch; 297 } 298 299 struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists) 300 { 301 struct annotation *notes = symbol__annotation(sym); 302 303 if (notes->src == NULL) { 304 notes->src = annotated_source__new(); 305 if (notes->src == NULL) 306 return NULL; 307 goto alloc_histograms; 308 } 309 310 if (notes->src->histograms == NULL) { 311 alloc_histograms: 312 annotated_source__alloc_histograms(notes->src, nr_hists); 313 } 314 315 return notes->src; 316 } 317 318 static int symbol__inc_addr_samples(struct map_symbol *ms, 319 struct evsel *evsel, u64 addr, 320 struct perf_sample *sample) 321 { 322 struct symbol *sym = ms->sym; 323 struct annotated_source *src; 324 325 if (sym == NULL) 326 return 0; 327 src = symbol__hists(sym, evsel->evlist->core.nr_entries); 328 return src ? 
__symbol__inc_addr_samples(ms, src, evsel, addr, sample) : 0; 329 } 330 331 static int symbol__account_br_cntr(struct annotated_branch *branch, 332 struct evsel *evsel, 333 unsigned offset, 334 u64 br_cntr) 335 { 336 unsigned int br_cntr_nr = evsel__leader(evsel)->br_cntr_nr; 337 unsigned int base = evsel__leader(evsel)->br_cntr_idx; 338 unsigned int off = offset * evsel->evlist->nr_br_cntr; 339 u64 *branch_br_cntr = branch->br_cntr; 340 unsigned int i, mask, width; 341 342 if (!br_cntr || !branch_br_cntr) 343 return 0; 344 345 perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width); 346 mask = (1L << width) - 1; 347 for (i = 0; i < br_cntr_nr; i++) { 348 u64 cntr = (br_cntr >> i * width) & mask; 349 350 branch_br_cntr[off + i + base] += cntr; 351 if (cntr == mask) 352 branch_br_cntr[off + i + base] |= ANNOTATION__BR_CNTR_SATURATED_FLAG; 353 } 354 355 return 0; 356 } 357 358 static int symbol__account_cycles(u64 addr, u64 start, struct symbol *sym, 359 unsigned cycles, struct evsel *evsel, 360 u64 br_cntr) 361 { 362 struct annotated_branch *branch; 363 unsigned offset; 364 int ret; 365 366 if (sym == NULL) 367 return 0; 368 branch = symbol__find_branch_hist(sym, evsel->evlist->nr_br_cntr); 369 if (!branch) 370 return -ENOMEM; 371 if (addr < sym->start || addr >= sym->end) 372 return -ERANGE; 373 374 if (start) { 375 if (start < sym->start || start >= sym->end) 376 return -ERANGE; 377 if (start >= addr) 378 start = 0; 379 } 380 offset = addr - sym->start; 381 ret = __symbol__account_cycles(branch->cycles_hist, 382 start ? start - sym->start : 0, 383 offset, cycles, 384 !!start); 385 386 if (ret) 387 return ret; 388 389 return symbol__account_br_cntr(branch, evsel, offset, br_cntr); 390 } 391 392 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams, 393 struct addr_map_symbol *start, 394 unsigned cycles, 395 struct evsel *evsel, 396 u64 br_cntr) 397 { 398 u64 saddr = 0; 399 int err; 400 401 if (!cycles) 402 return 0; 403 404 /* 405 * Only set start when IPC can be computed. We can only 406 * compute it when the basic block is completely in a single 407 * function. 408 * Special case the case when the jump is elsewhere, but 409 * it starts on the function start. 410 */ 411 if (start && 412 (start->ms.sym == ams->ms.sym || 413 (ams->ms.sym && 414 start->addr == ams->ms.sym->start + map__start(ams->ms.map)))) 415 saddr = start->al_addr; 416 if (saddr == 0) 417 pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n", 418 ams->addr, 419 start ? start->addr : 0, 420 ams->ms.sym ? 
ams->ms.sym->start + map__start(ams->ms.map) : 0, 421 saddr); 422 err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles, evsel, br_cntr); 423 if (err) 424 pr_debug2("account_cycles failed %d\n", err); 425 return err; 426 } 427 428 struct annotation_line *annotated_source__get_line(struct annotated_source *src, 429 s64 offset) 430 { 431 struct annotation_line *al; 432 433 list_for_each_entry(al, &src->source, node) { 434 if (al->offset == offset) 435 return al; 436 } 437 return NULL; 438 } 439 440 static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end) 441 { 442 struct annotation_line *al; 443 unsigned n_insn = 0; 444 445 al = annotated_source__get_line(notes->src, start); 446 if (al == NULL) 447 return 0; 448 449 list_for_each_entry_from(al, ¬es->src->source, node) { 450 if (al->offset == -1) 451 continue; 452 if ((u64)al->offset > end) 453 break; 454 n_insn++; 455 } 456 return n_insn; 457 } 458 459 static void annotated_branch__delete(struct annotated_branch *branch) 460 { 461 if (branch) { 462 zfree(&branch->cycles_hist); 463 free(branch->br_cntr); 464 free(branch); 465 } 466 } 467 468 static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch) 469 { 470 unsigned n_insn; 471 unsigned int cover_insn = 0; 472 473 n_insn = annotation__count_insn(notes, start, end); 474 if (n_insn && ch->num && ch->cycles) { 475 struct annotation_line *al; 476 struct annotated_branch *branch; 477 float ipc = n_insn / ((double)ch->cycles / (double)ch->num); 478 479 /* Hide data when there are too many overlaps. */ 480 if (ch->reset >= 0x7fff) 481 return; 482 483 al = annotated_source__get_line(notes->src, start); 484 if (al == NULL) 485 return; 486 487 list_for_each_entry_from(al, ¬es->src->source, node) { 488 if (al->offset == -1) 489 continue; 490 if ((u64)al->offset > end) 491 break; 492 if (al->cycles && al->cycles->ipc == 0.0) { 493 al->cycles->ipc = ipc; 494 cover_insn++; 495 } 496 } 497 498 branch = annotation__get_branch(notes); 499 if (cover_insn && branch) { 500 branch->hit_cycles += ch->cycles; 501 branch->hit_insn += n_insn * ch->num; 502 branch->cover_insn += cover_insn; 503 } 504 } 505 } 506 507 static int annotation__compute_ipc(struct annotation *notes, size_t size, 508 struct evsel *evsel) 509 { 510 unsigned int br_cntr_nr = evsel->evlist->nr_br_cntr; 511 int err = 0; 512 s64 offset; 513 514 if (!notes->branch || !notes->branch->cycles_hist) 515 return 0; 516 517 notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1); 518 notes->branch->hit_cycles = 0; 519 notes->branch->hit_insn = 0; 520 notes->branch->cover_insn = 0; 521 522 annotation__lock(notes); 523 for (offset = size - 1; offset >= 0; --offset) { 524 struct cyc_hist *ch; 525 526 ch = ¬es->branch->cycles_hist[offset]; 527 if (ch && ch->cycles) { 528 struct annotation_line *al; 529 530 al = annotated_source__get_line(notes->src, offset); 531 if (al && al->cycles == NULL) { 532 al->cycles = zalloc(sizeof(*al->cycles)); 533 if (al->cycles == NULL) { 534 err = ENOMEM; 535 break; 536 } 537 } 538 if (ch->have_start) 539 annotation__count_and_fill(notes, ch->start, offset, ch); 540 if (al && ch->num_aggr) { 541 al->cycles->avg = ch->cycles_aggr / ch->num_aggr; 542 al->cycles->max = ch->cycles_max; 543 al->cycles->min = ch->cycles_min; 544 } 545 if (al && notes->branch->br_cntr) { 546 if (!al->br_cntr) { 547 al->br_cntr = calloc(br_cntr_nr, sizeof(u64)); 548 if (!al->br_cntr) { 549 err = ENOMEM; 550 break; 551 } 552 } 553 al->num_aggr = 
ch->num_aggr; 554 al->br_cntr_nr = br_cntr_nr; 555 al->evsel = evsel; 556 memcpy(al->br_cntr, ¬es->branch->br_cntr[offset * br_cntr_nr], 557 br_cntr_nr * sizeof(u64)); 558 } 559 } 560 } 561 562 if (err) { 563 while (++offset < (s64)size) { 564 struct cyc_hist *ch = ¬es->branch->cycles_hist[offset]; 565 566 if (ch && ch->cycles) { 567 struct annotation_line *al; 568 569 al = annotated_source__get_line(notes->src, offset); 570 if (al) { 571 zfree(&al->cycles); 572 zfree(&al->br_cntr); 573 } 574 } 575 } 576 } 577 578 annotation__unlock(notes); 579 return 0; 580 } 581 582 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample, 583 struct evsel *evsel) 584 { 585 return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample); 586 } 587 588 int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample, 589 struct evsel *evsel, u64 ip) 590 { 591 return symbol__inc_addr_samples(&he->ms, evsel, ip, sample); 592 } 593 594 595 void annotation__exit(struct annotation *notes) 596 { 597 annotated_source__delete(notes->src); 598 annotated_branch__delete(notes->branch); 599 } 600 601 static struct sharded_mutex *sharded_mutex; 602 603 static void annotation__init_sharded_mutex(void) 604 { 605 /* As many mutexes as there are CPUs. */ 606 sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu); 607 } 608 609 static size_t annotation__hash(const struct annotation *notes) 610 { 611 return (size_t)notes; 612 } 613 614 static struct mutex *annotation__get_mutex(const struct annotation *notes) 615 { 616 static pthread_once_t once = PTHREAD_ONCE_INIT; 617 618 pthread_once(&once, annotation__init_sharded_mutex); 619 if (!sharded_mutex) 620 return NULL; 621 622 return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes)); 623 } 624 625 void annotation__lock(struct annotation *notes) 626 NO_THREAD_SAFETY_ANALYSIS 627 { 628 struct mutex *mutex = annotation__get_mutex(notes); 629 630 if (mutex) 631 mutex_lock(mutex); 632 } 633 634 void annotation__unlock(struct annotation *notes) 635 NO_THREAD_SAFETY_ANALYSIS 636 { 637 struct mutex *mutex = annotation__get_mutex(notes); 638 639 if (mutex) 640 mutex_unlock(mutex); 641 } 642 643 bool annotation__trylock(struct annotation *notes) 644 { 645 struct mutex *mutex = annotation__get_mutex(notes); 646 647 if (!mutex) 648 return false; 649 650 return mutex_trylock(mutex); 651 } 652 653 void annotation_line__add(struct annotation_line *al, struct list_head *head) 654 { 655 list_add_tail(&al->node, head); 656 } 657 658 struct annotation_line * 659 annotation_line__next(struct annotation_line *pos, struct list_head *head) 660 { 661 list_for_each_entry_continue(pos, head, node) 662 if (pos->offset >= 0) 663 return pos; 664 665 return NULL; 666 } 667 668 static const char *annotate__address_color(struct block_range *br) 669 { 670 double cov = block_range__coverage(br); 671 672 if (cov >= 0) { 673 /* mark red for >75% coverage */ 674 if (cov > 0.75) 675 return PERF_COLOR_RED; 676 677 /* mark dull for <1% coverage */ 678 if (cov < 0.01) 679 return PERF_COLOR_NORMAL; 680 } 681 682 return PERF_COLOR_MAGENTA; 683 } 684 685 static const char *annotate__asm_color(struct block_range *br) 686 { 687 double cov = block_range__coverage(br); 688 689 if (cov >= 0) { 690 /* mark dull for <1% coverage */ 691 if (cov < 0.01) 692 return PERF_COLOR_NORMAL; 693 } 694 695 return PERF_COLOR_BLUE; 696 } 697 698 static void annotate__branch_printf(struct block_range *br, u64 addr) 699 { 700 bool emit_comment = true; 701 702 if 
(!br) 703 return; 704 705 #if 1 706 if (br->is_target && br->start == addr) { 707 struct block_range *branch = br; 708 double p; 709 710 /* 711 * Find matching branch to our target. 712 */ 713 while (!branch->is_branch) 714 branch = block_range__next(branch); 715 716 p = 100 *(double)br->entry / branch->coverage; 717 718 if (p > 0.1) { 719 if (emit_comment) { 720 emit_comment = false; 721 printf("\t#"); 722 } 723 724 /* 725 * The percentage of coverage joined at this target in relation 726 * to the next branch. 727 */ 728 printf(" +%.2f%%", p); 729 } 730 } 731 #endif 732 if (br->is_branch && br->end == addr) { 733 double p = 100*(double)br->taken / br->coverage; 734 735 if (p > 0.1) { 736 if (emit_comment) { 737 emit_comment = false; 738 printf("\t#"); 739 } 740 741 /* 742 * The percentage of coverage leaving at this branch, and 743 * its prediction ratio. 744 */ 745 printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken); 746 } 747 } 748 } 749 750 static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width) 751 { 752 s64 offset = dl->al.offset; 753 const u64 addr = start + offset; 754 struct block_range *br; 755 756 br = block_range__find(addr); 757 color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr); 758 color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line); 759 annotate__branch_printf(br, addr); 760 return 0; 761 } 762 763 static struct annotated_data_type * 764 __hist_entry__get_data_type(struct hist_entry *he, struct arch *arch, 765 struct debuginfo *dbg, struct disasm_line *dl, 766 int *type_offset); 767 768 static bool needs_type_info(struct annotated_data_type *data_type) 769 { 770 if (data_type == NULL || data_type == NO_TYPE) 771 return false; 772 773 if (verbose) 774 return true; 775 776 return (data_type != &stackop_type) && (data_type != &canary_type); 777 } 778 779 static int 780 annotation_line__print(struct annotation_line *al, struct annotation_print_data *apd, 781 struct annotation_options *opts, int printed, 782 struct annotation_line *queue) 783 { 784 struct symbol *sym = apd->he->ms.sym; 785 struct disasm_line *dl = container_of(al, struct disasm_line, al); 786 struct annotation *notes = symbol__annotation(sym); 787 static const char *prev_line; 788 int max_lines = opts->max_lines; 789 int percent_type = opts->percent_type; 790 791 if (al->offset != -1) { 792 double max_percent = 0.0; 793 int i, nr_percent = 1; 794 const char *color; 795 796 for (i = 0; i < al->data_nr; i++) { 797 double percent; 798 799 percent = annotation_data__percent(&al->data[i], 800 percent_type); 801 802 if (percent > max_percent) 803 max_percent = percent; 804 } 805 806 if (al->data_nr > nr_percent) 807 nr_percent = al->data_nr; 808 809 if (max_percent < opts->min_pcnt) 810 return -1; 811 812 if (max_lines && printed >= max_lines) 813 return 1; 814 815 if (queue != NULL) { 816 struct annotation_options queue_opts = { 817 .max_lines = 1, 818 .percent_type = percent_type, 819 }; 820 821 list_for_each_entry_from(queue, ¬es->src->source, node) { 822 if (queue == al) 823 break; 824 annotation_line__print(queue, apd, &queue_opts, 825 /*printed=*/0, /*queue=*/NULL); 826 } 827 } 828 829 color = get_percent_color(max_percent); 830 831 for (i = 0; i < nr_percent; i++) { 832 struct annotation_data *data = &al->data[i]; 833 double percent; 834 835 percent = annotation_data__percent(data, percent_type); 836 color = get_percent_color(percent); 837 838 if (symbol_conf.show_total_period) 839 color_fprintf(stdout, color, " 
%11" PRIu64, 840 data->he.period); 841 else if (symbol_conf.show_nr_samples) 842 color_fprintf(stdout, color, " %7" PRIu64, 843 data->he.nr_samples); 844 else 845 color_fprintf(stdout, color, " %7.2f", percent); 846 } 847 848 printf(" : "); 849 850 disasm_line__print(dl, notes->src->start, apd->addr_fmt_width); 851 852 if (opts->code_with_type && apd->dbg) { 853 struct annotated_data_type *data_type; 854 int offset = 0; 855 856 data_type = __hist_entry__get_data_type(apd->he, apd->arch, 857 apd->dbg, dl, &offset); 858 if (needs_type_info(data_type)) { 859 char buf[4096]; 860 861 printf("\t\t# data-type: %s", 862 data_type->self.type_name); 863 864 if (data_type != &stackop_type && 865 data_type != &canary_type) 866 printf(" +%#x", offset); 867 868 if (annotated_data_type__get_member_name(data_type, 869 buf, 870 sizeof(buf), 871 offset)) 872 printf(" (%s)", buf); 873 } 874 } 875 876 /* 877 * Also color the filename and line if needed, with 878 * the same color than the percentage. Don't print it 879 * twice for close colored addr with the same filename:line 880 */ 881 if (al->path) { 882 if (!prev_line || strcmp(prev_line, al->path)) { 883 color_fprintf(stdout, color, " // %s", al->path); 884 prev_line = al->path; 885 } 886 } 887 888 printf("\n"); 889 } else if (max_lines && printed >= max_lines) 890 return 1; 891 else { 892 int width = annotation__pcnt_width(notes); 893 894 if (queue) 895 return -1; 896 897 if (!*al->line) 898 printf(" %*s:\n", width, " "); 899 else 900 printf(" %*s: %-*d %s\n", width, " ", apd->addr_fmt_width, 901 al->line_nr, al->line); 902 } 903 904 return 0; 905 } 906 907 static void calc_percent(struct annotation *notes, 908 struct evsel *evsel, 909 struct annotation_data *data, 910 s64 offset, s64 end) 911 { 912 struct hists *hists = evsel__hists(evsel); 913 struct sym_hist *sym_hist = annotation__histogram(notes, evsel); 914 unsigned int hits = 0; 915 u64 period = 0; 916 917 while (offset < end) { 918 struct sym_hist_entry *entry; 919 920 entry = annotated_source__hist_entry(notes->src, evsel, offset); 921 if (entry) { 922 hits += entry->nr_samples; 923 period += entry->period; 924 } 925 ++offset; 926 } 927 928 if (sym_hist->nr_samples) { 929 data->he.period = period; 930 data->he.nr_samples = hits; 931 data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples; 932 } 933 934 if (hists->stats.nr_non_filtered_samples) 935 data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples; 936 937 if (sym_hist->period) 938 data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period; 939 940 if (hists->stats.total_period) 941 data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period; 942 } 943 944 static void annotation__calc_percent(struct annotation *notes, 945 struct evsel *leader, s64 len) 946 { 947 struct annotation_line *al, *next; 948 struct evsel *evsel; 949 950 list_for_each_entry(al, ¬es->src->source, node) { 951 s64 end; 952 int i = 0; 953 954 if (al->offset == -1) 955 continue; 956 957 next = annotation_line__next(al, ¬es->src->source); 958 end = next ? 
next->offset : len; 959 960 for_each_group_evsel(evsel, leader) { 961 struct annotation_data *data; 962 963 BUG_ON(i >= al->data_nr); 964 965 if (symbol_conf.skip_empty && 966 evsel__hists(evsel)->stats.nr_samples == 0) 967 continue; 968 969 data = &al->data[i++]; 970 971 calc_percent(notes, evsel, data, al->offset, end); 972 } 973 } 974 } 975 976 void symbol__calc_percent(struct symbol *sym, struct evsel *evsel) 977 { 978 struct annotation *notes = symbol__annotation(sym); 979 980 annotation__calc_percent(notes, evsel, symbol__size(sym)); 981 } 982 983 static int evsel__get_arch(struct evsel *evsel, struct arch **parch) 984 { 985 struct perf_env *env = evsel__env(evsel); 986 const char *arch_name = perf_env__arch(env); 987 struct arch *arch; 988 int err; 989 990 if (!arch_name) { 991 *parch = NULL; 992 return errno; 993 } 994 995 *parch = arch = arch__find(arch_name); 996 if (arch == NULL) { 997 pr_err("%s: unsupported arch %s\n", __func__, arch_name); 998 return ENOTSUP; 999 } 1000 1001 if (arch->init) { 1002 err = arch->init(arch, env ? env->cpuid : NULL); 1003 if (err) { 1004 pr_err("%s: failed to initialize %s arch priv area\n", 1005 __func__, arch->name); 1006 return err; 1007 } 1008 } 1009 return 0; 1010 } 1011 1012 int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, 1013 struct arch **parch) 1014 { 1015 struct symbol *sym = ms->sym; 1016 struct annotation *notes = symbol__annotation(sym); 1017 struct annotate_args args = { 1018 .evsel = evsel, 1019 .options = &annotate_opts, 1020 }; 1021 struct arch *arch = NULL; 1022 int err, nr; 1023 1024 err = evsel__get_arch(evsel, &arch); 1025 if (err < 0) 1026 return err; 1027 1028 if (parch) 1029 *parch = arch; 1030 1031 if (notes->src && !list_empty(¬es->src->source)) 1032 return 0; 1033 1034 args.arch = arch; 1035 args.ms = *ms; 1036 1037 if (notes->src == NULL) { 1038 notes->src = annotated_source__new(); 1039 if (notes->src == NULL) 1040 return -1; 1041 } 1042 1043 nr = 0; 1044 if (evsel__is_group_event(evsel)) { 1045 struct evsel *pos; 1046 1047 for_each_group_evsel(pos, evsel) { 1048 if (symbol_conf.skip_empty && 1049 evsel__hists(pos)->stats.nr_samples == 0) 1050 continue; 1051 nr++; 1052 } 1053 } 1054 notes->src->nr_events = nr ? 
nr : 1; 1055 1056 if (annotate_opts.full_addr) 1057 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start); 1058 else 1059 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start); 1060 1061 return symbol__disassemble(sym, &args); 1062 } 1063 1064 static void insert_source_line(struct rb_root *root, struct annotation_line *al) 1065 { 1066 struct annotation_line *iter; 1067 struct rb_node **p = &root->rb_node; 1068 struct rb_node *parent = NULL; 1069 unsigned int percent_type = annotate_opts.percent_type; 1070 int i, ret; 1071 1072 while (*p != NULL) { 1073 parent = *p; 1074 iter = rb_entry(parent, struct annotation_line, rb_node); 1075 1076 ret = strcmp(iter->path, al->path); 1077 if (ret == 0) { 1078 for (i = 0; i < al->data_nr; i++) { 1079 iter->data[i].percent_sum += annotation_data__percent(&al->data[i], 1080 percent_type); 1081 } 1082 return; 1083 } 1084 1085 if (ret < 0) 1086 p = &(*p)->rb_left; 1087 else 1088 p = &(*p)->rb_right; 1089 } 1090 1091 for (i = 0; i < al->data_nr; i++) { 1092 al->data[i].percent_sum = annotation_data__percent(&al->data[i], 1093 percent_type); 1094 } 1095 1096 rb_link_node(&al->rb_node, parent, p); 1097 rb_insert_color(&al->rb_node, root); 1098 } 1099 1100 static int cmp_source_line(struct annotation_line *a, struct annotation_line *b) 1101 { 1102 int i; 1103 1104 for (i = 0; i < a->data_nr; i++) { 1105 if (a->data[i].percent_sum == b->data[i].percent_sum) 1106 continue; 1107 return a->data[i].percent_sum > b->data[i].percent_sum; 1108 } 1109 1110 return 0; 1111 } 1112 1113 static void __resort_source_line(struct rb_root *root, struct annotation_line *al) 1114 { 1115 struct annotation_line *iter; 1116 struct rb_node **p = &root->rb_node; 1117 struct rb_node *parent = NULL; 1118 1119 while (*p != NULL) { 1120 parent = *p; 1121 iter = rb_entry(parent, struct annotation_line, rb_node); 1122 1123 if (cmp_source_line(al, iter)) 1124 p = &(*p)->rb_left; 1125 else 1126 p = &(*p)->rb_right; 1127 } 1128 1129 rb_link_node(&al->rb_node, parent, p); 1130 rb_insert_color(&al->rb_node, root); 1131 } 1132 1133 static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root) 1134 { 1135 struct annotation_line *al; 1136 struct rb_node *node; 1137 1138 node = rb_first(src_root); 1139 while (node) { 1140 struct rb_node *next; 1141 1142 al = rb_entry(node, struct annotation_line, rb_node); 1143 next = rb_next(node); 1144 rb_erase(node, src_root); 1145 1146 __resort_source_line(dest_root, al); 1147 node = next; 1148 } 1149 } 1150 1151 static void print_summary(struct rb_root *root, const char *filename) 1152 { 1153 struct annotation_line *al; 1154 struct rb_node *node; 1155 1156 printf("\nSorted summary for file %s\n", filename); 1157 printf("----------------------------------------------\n\n"); 1158 1159 if (RB_EMPTY_ROOT(root)) { 1160 printf(" Nothing higher than %1.1f%%\n", MIN_GREEN); 1161 return; 1162 } 1163 1164 node = rb_first(root); 1165 while (node) { 1166 double percent, percent_max = 0.0; 1167 const char *color; 1168 char *path; 1169 int i; 1170 1171 al = rb_entry(node, struct annotation_line, rb_node); 1172 for (i = 0; i < al->data_nr; i++) { 1173 percent = al->data[i].percent_sum; 1174 color = get_percent_color(percent); 1175 color_fprintf(stdout, color, " %7.2f", percent); 1176 1177 if (percent > percent_max) 1178 percent_max = percent; 1179 } 1180 1181 path = al->path; 1182 color = get_percent_color(percent_max); 1183 color_fprintf(stdout, color, " %s\n", path); 1184 1185 node = rb_next(node); 1186 } 1187 } 1188 1189 static void 
symbol__annotate_hits(struct symbol *sym, struct evsel *evsel) 1190 { 1191 struct annotation *notes = symbol__annotation(sym); 1192 struct sym_hist *h = annotation__histogram(notes, evsel); 1193 u64 len = symbol__size(sym), offset; 1194 1195 for (offset = 0; offset < len; ++offset) { 1196 struct sym_hist_entry *entry; 1197 1198 entry = annotated_source__hist_entry(notes->src, evsel, offset); 1199 if (entry && entry->nr_samples != 0) 1200 printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2, 1201 sym->start + offset, entry->nr_samples); 1202 } 1203 printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples); 1204 } 1205 1206 static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start) 1207 { 1208 char bf[32]; 1209 struct annotation_line *line; 1210 1211 list_for_each_entry_reverse(line, lines, node) { 1212 if (line->offset != -1) 1213 return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset); 1214 } 1215 1216 return 0; 1217 } 1218 1219 int hist_entry__annotate_printf(struct hist_entry *he, struct evsel *evsel) 1220 { 1221 struct map_symbol *ms = &he->ms; 1222 struct map *map = ms->map; 1223 struct symbol *sym = ms->sym; 1224 struct dso *dso = map__dso(map); 1225 char *filename; 1226 const char *d_filename; 1227 const char *evsel_name = evsel__name(evsel); 1228 struct annotation *notes = symbol__annotation(sym); 1229 struct sym_hist *h = annotation__histogram(notes, evsel); 1230 struct annotation_line *pos, *queue = NULL; 1231 struct annotation_options *opts = &annotate_opts; 1232 struct annotation_print_data apd = { 1233 .he = he, 1234 .evsel = evsel, 1235 }; 1236 int printed = 2, queue_len = 0; 1237 int more = 0; 1238 bool context = opts->context; 1239 int width = annotation__pcnt_width(notes); 1240 int graph_dotted_len; 1241 char buf[512]; 1242 1243 filename = strdup(dso__long_name(dso)); 1244 if (!filename) 1245 return -ENOMEM; 1246 1247 if (opts->full_path) 1248 d_filename = filename; 1249 else 1250 d_filename = basename(filename); 1251 1252 if (evsel__is_group_event(evsel)) { 1253 evsel__group_desc(evsel, buf, sizeof(buf)); 1254 evsel_name = buf; 1255 } 1256 1257 graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, " 1258 "percent: %s)\n", 1259 width, width, symbol_conf.show_total_period ? "Period" : 1260 symbol_conf.show_nr_samples ? 
"Samples" : "Percent", 1261 d_filename, evsel_name, h->nr_samples, 1262 percent_type_str(opts->percent_type)); 1263 1264 printf("%-*.*s----\n", 1265 graph_dotted_len, graph_dotted_len, graph_dotted_line); 1266 1267 if (verbose > 0) 1268 symbol__annotate_hits(sym, evsel); 1269 1270 apd.addr_fmt_width = annotated_source__addr_fmt_width(¬es->src->source, 1271 notes->src->start); 1272 evsel__get_arch(evsel, &apd.arch); 1273 apd.dbg = dso__debuginfo(dso); 1274 1275 list_for_each_entry(pos, ¬es->src->source, node) { 1276 int err; 1277 1278 if (context && queue == NULL) { 1279 queue = pos; 1280 queue_len = 0; 1281 } 1282 1283 err = annotation_line__print(pos, &apd, opts, printed, queue); 1284 1285 switch (err) { 1286 case 0: 1287 ++printed; 1288 if (context) { 1289 printed += queue_len; 1290 queue = NULL; 1291 queue_len = 0; 1292 } 1293 break; 1294 case 1: 1295 /* filtered by max_lines */ 1296 ++more; 1297 break; 1298 case -1: 1299 default: 1300 /* 1301 * Filtered by min_pcnt or non IP lines when 1302 * context != 0 1303 */ 1304 if (!context) 1305 break; 1306 if (queue_len == context) 1307 queue = list_entry(queue->node.next, typeof(*queue), node); 1308 else 1309 ++queue_len; 1310 break; 1311 } 1312 } 1313 1314 debuginfo__delete(apd.dbg); 1315 free(filename); 1316 1317 return more; 1318 } 1319 1320 static void FILE__set_percent_color(void *fp __maybe_unused, 1321 double percent __maybe_unused, 1322 bool current __maybe_unused) 1323 { 1324 } 1325 1326 static int FILE__set_jumps_percent_color(void *fp __maybe_unused, 1327 int nr __maybe_unused, bool current __maybe_unused) 1328 { 1329 return 0; 1330 } 1331 1332 static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused) 1333 { 1334 return 0; 1335 } 1336 1337 static void FILE__printf(void *fp, const char *fmt, ...) 
1338 { 1339 va_list args; 1340 1341 va_start(args, fmt); 1342 vfprintf(fp, fmt, args); 1343 va_end(args); 1344 } 1345 1346 static void FILE__write_graph(void *fp, int graph) 1347 { 1348 const char *s; 1349 switch (graph) { 1350 1351 case DARROW_CHAR: s = "↓"; break; 1352 case UARROW_CHAR: s = "↑"; break; 1353 case LARROW_CHAR: s = "←"; break; 1354 case RARROW_CHAR: s = "→"; break; 1355 default: s = "?"; break; 1356 } 1357 1358 fputs(s, fp); 1359 } 1360 1361 static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp, 1362 struct annotation_print_data *apd) 1363 { 1364 struct annotation *notes = symbol__annotation(sym); 1365 struct annotation_write_ops wops = { 1366 .first_line = true, 1367 .obj = fp, 1368 .set_color = FILE__set_color, 1369 .set_percent_color = FILE__set_percent_color, 1370 .set_jumps_percent_color = FILE__set_jumps_percent_color, 1371 .printf = FILE__printf, 1372 .write_graph = FILE__write_graph, 1373 }; 1374 struct annotation_line *al; 1375 1376 if (annotate_opts.code_with_type) { 1377 evsel__get_arch(apd->evsel, &apd->arch); 1378 apd->dbg = dso__debuginfo(map__dso(apd->he->ms.map)); 1379 } 1380 1381 list_for_each_entry(al, ¬es->src->source, node) { 1382 if (annotation_line__filter(al)) 1383 continue; 1384 annotation_line__write(al, notes, &wops, apd); 1385 fputc('\n', fp); 1386 wops.first_line = false; 1387 } 1388 1389 if (annotate_opts.code_with_type) 1390 debuginfo__delete(apd->dbg); 1391 1392 return 0; 1393 } 1394 1395 int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel, 1396 struct hist_entry *he) 1397 { 1398 const char *ev_name = evsel__name(evsel); 1399 char buf[1024]; 1400 char *filename; 1401 int err = -1; 1402 FILE *fp; 1403 struct annotation_print_data apd = { 1404 .he = he, 1405 .evsel = evsel, 1406 }; 1407 1408 if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0) 1409 return -1; 1410 1411 fp = fopen(filename, "w"); 1412 if (fp == NULL) 1413 goto out_free_filename; 1414 1415 if (evsel__is_group_event(evsel)) { 1416 evsel__group_desc(evsel, buf, sizeof(buf)); 1417 ev_name = buf; 1418 } 1419 1420 fprintf(fp, "%s() %s\nEvent: %s\n\n", 1421 ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name); 1422 symbol__annotate_fprintf2(ms->sym, fp, &apd); 1423 1424 fclose(fp); 1425 err = 0; 1426 out_free_filename: 1427 free(filename); 1428 return err; 1429 } 1430 1431 void symbol__annotate_zero_histogram(struct symbol *sym, struct evsel *evsel) 1432 { 1433 struct annotation *notes = symbol__annotation(sym); 1434 struct sym_hist *h = annotation__histogram(notes, evsel); 1435 1436 memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms); 1437 } 1438 1439 void symbol__annotate_decay_histogram(struct symbol *sym, struct evsel *evsel) 1440 { 1441 struct annotation *notes = symbol__annotation(sym); 1442 struct sym_hist *h = annotation__histogram(notes, evsel); 1443 struct annotation_line *al; 1444 1445 h->nr_samples = 0; 1446 list_for_each_entry(al, ¬es->src->source, node) { 1447 struct sym_hist_entry *entry; 1448 1449 if (al->offset == -1) 1450 continue; 1451 1452 entry = annotated_source__hist_entry(notes->src, evsel, al->offset); 1453 if (entry == NULL) 1454 continue; 1455 1456 entry->nr_samples = entry->nr_samples * 7 / 8; 1457 h->nr_samples += entry->nr_samples; 1458 } 1459 } 1460 1461 void annotated_source__purge(struct annotated_source *as) 1462 { 1463 struct annotation_line *al, *n; 1464 1465 list_for_each_entry_safe(al, n, &as->source, node) { 1466 list_del_init(&al->node); 1467 disasm_line__free(disasm_line(al)); 
1468 } 1469 as->tried_source = false; 1470 } 1471 1472 static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp) 1473 { 1474 size_t printed; 1475 1476 if (dl->al.offset == -1) 1477 return fprintf(fp, "%s\n", dl->al.line); 1478 1479 printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name); 1480 1481 if (dl->ops.raw[0] != '\0') { 1482 printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ", 1483 dl->ops.raw); 1484 } 1485 1486 return printed + fprintf(fp, "\n"); 1487 } 1488 1489 size_t disasm__fprintf(struct list_head *head, FILE *fp) 1490 { 1491 struct disasm_line *pos; 1492 size_t printed = 0; 1493 1494 list_for_each_entry(pos, head, al.node) 1495 printed += disasm_line__fprintf(pos, fp); 1496 1497 return printed; 1498 } 1499 1500 bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym) 1501 { 1502 if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) || 1503 !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 || 1504 dl->ops.target.offset >= (s64)symbol__size(sym)) 1505 return false; 1506 1507 return true; 1508 } 1509 1510 static void 1511 annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym) 1512 { 1513 struct annotation_line *al; 1514 1515 /* PLT symbols contain external offsets */ 1516 if (strstr(sym->name, "@plt")) 1517 return; 1518 1519 list_for_each_entry(al, ¬es->src->source, node) { 1520 struct disasm_line *dl; 1521 struct annotation_line *target; 1522 1523 dl = disasm_line(al); 1524 1525 if (!disasm_line__is_valid_local_jump(dl, sym)) 1526 continue; 1527 1528 target = annotated_source__get_line(notes->src, 1529 dl->ops.target.offset); 1530 /* 1531 * FIXME: Oops, no jump target? Buggy disassembler? Or do we 1532 * have to adjust to the previous offset? 1533 */ 1534 if (target == NULL) 1535 continue; 1536 1537 if (++target->jump_sources > notes->src->max_jump_sources) 1538 notes->src->max_jump_sources = target->jump_sources; 1539 } 1540 } 1541 1542 static void annotation__set_index(struct annotation *notes) 1543 { 1544 struct annotation_line *al; 1545 struct annotated_source *src = notes->src; 1546 1547 src->widths.max_line_len = 0; 1548 src->nr_entries = 0; 1549 src->nr_asm_entries = 0; 1550 1551 list_for_each_entry(al, &src->source, node) { 1552 size_t line_len = strlen(al->line); 1553 1554 if (src->widths.max_line_len < line_len) 1555 src->widths.max_line_len = line_len; 1556 al->idx = src->nr_entries++; 1557 if (al->offset != -1) 1558 al->idx_asm = src->nr_asm_entries++; 1559 else 1560 al->idx_asm = -1; 1561 } 1562 } 1563 1564 static inline int width_jumps(int n) 1565 { 1566 if (n >= 100) 1567 return 5; 1568 if (n / 10) 1569 return 2; 1570 return 1; 1571 } 1572 1573 static int annotation__max_ins_name(struct annotation *notes) 1574 { 1575 int max_name = 0, len; 1576 struct annotation_line *al; 1577 1578 list_for_each_entry(al, ¬es->src->source, node) { 1579 if (al->offset == -1) 1580 continue; 1581 1582 len = strlen(disasm_line(al)->ins.name); 1583 if (max_name < len) 1584 max_name = len; 1585 } 1586 1587 return max_name; 1588 } 1589 1590 static void 1591 annotation__init_column_widths(struct annotation *notes, struct symbol *sym) 1592 { 1593 notes->src->widths.addr = notes->src->widths.target = 1594 notes->src->widths.min_addr = hex_width(symbol__size(sym)); 1595 notes->src->widths.max_addr = hex_width(sym->end); 1596 notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources); 1597 notes->src->widths.max_ins_name = annotation__max_ins_name(notes); 1598 } 1599 1600 void 
annotation__update_column_widths(struct annotation *notes) 1601 { 1602 if (annotate_opts.use_offset) 1603 notes->src->widths.target = notes->src->widths.min_addr; 1604 else if (annotate_opts.full_addr) 1605 notes->src->widths.target = BITS_PER_LONG / 4; 1606 else 1607 notes->src->widths.target = notes->src->widths.max_addr; 1608 1609 notes->src->widths.addr = notes->src->widths.target; 1610 1611 if (annotate_opts.show_nr_jumps) 1612 notes->src->widths.addr += notes->src->widths.jumps + 1; 1613 } 1614 1615 void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms) 1616 { 1617 annotate_opts.full_addr = !annotate_opts.full_addr; 1618 1619 if (annotate_opts.full_addr) 1620 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start); 1621 else 1622 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start); 1623 1624 annotation__update_column_widths(notes); 1625 } 1626 1627 static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms, 1628 struct rb_root *root) 1629 { 1630 struct annotation_line *al; 1631 struct rb_root tmp_root = RB_ROOT; 1632 1633 list_for_each_entry(al, ¬es->src->source, node) { 1634 double percent_max = 0.0; 1635 u64 addr; 1636 int i; 1637 1638 for (i = 0; i < al->data_nr; i++) { 1639 double percent; 1640 1641 percent = annotation_data__percent(&al->data[i], 1642 annotate_opts.percent_type); 1643 1644 if (percent > percent_max) 1645 percent_max = percent; 1646 } 1647 1648 if (percent_max <= 0.5) 1649 continue; 1650 1651 addr = map__rip_2objdump(ms->map, ms->sym->start); 1652 al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL, 1653 false, true, ms->sym->start + al->offset); 1654 insert_source_line(&tmp_root, al); 1655 } 1656 1657 resort_source_line(root, &tmp_root); 1658 } 1659 1660 static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root) 1661 { 1662 struct annotation *notes = symbol__annotation(ms->sym); 1663 1664 annotation__calc_lines(notes, ms, root); 1665 } 1666 1667 int hist_entry__tty_annotate2(struct hist_entry *he, struct evsel *evsel) 1668 { 1669 struct map_symbol *ms = &he->ms; 1670 struct dso *dso = map__dso(ms->map); 1671 struct symbol *sym = ms->sym; 1672 struct rb_root source_line = RB_ROOT; 1673 struct hists *hists = evsel__hists(evsel); 1674 struct annotation_print_data apd = { 1675 .he = he, 1676 .evsel = evsel, 1677 }; 1678 char buf[1024]; 1679 int err; 1680 1681 err = symbol__annotate2(ms, evsel, NULL); 1682 if (err) { 1683 char msg[BUFSIZ]; 1684 1685 dso__set_annotate_warned(dso); 1686 symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); 1687 ui__error("Couldn't annotate %s:\n%s", sym->name, msg); 1688 return -1; 1689 } 1690 1691 if (annotate_opts.print_lines) { 1692 srcline_full_filename = annotate_opts.full_path; 1693 symbol__calc_lines(ms, &source_line); 1694 print_summary(&source_line, dso__long_name(dso)); 1695 } 1696 1697 hists__scnprintf_title(hists, buf, sizeof(buf)); 1698 fprintf(stdout, "%s, [percent: %s]\n%s() %s\n", 1699 buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso)); 1700 symbol__annotate_fprintf2(sym, stdout, &apd); 1701 1702 annotated_source__purge(symbol__annotation(sym)->src); 1703 1704 return 0; 1705 } 1706 1707 int hist_entry__tty_annotate(struct hist_entry *he, struct evsel *evsel) 1708 { 1709 struct map_symbol *ms = &he->ms; 1710 struct dso *dso = map__dso(ms->map); 1711 struct symbol *sym = ms->sym; 1712 struct rb_root source_line = RB_ROOT; 1713 int err; 1714 1715 err = symbol__annotate(ms, evsel, NULL); 
1716 if (err) { 1717 char msg[BUFSIZ]; 1718 1719 dso__set_annotate_warned(dso); 1720 symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); 1721 ui__error("Couldn't annotate %s:\n%s", sym->name, msg); 1722 return -1; 1723 } 1724 1725 symbol__calc_percent(sym, evsel); 1726 1727 if (annotate_opts.print_lines) { 1728 srcline_full_filename = annotate_opts.full_path; 1729 symbol__calc_lines(ms, &source_line); 1730 print_summary(&source_line, dso__long_name(dso)); 1731 } 1732 1733 hist_entry__annotate_printf(he, evsel); 1734 1735 annotated_source__purge(symbol__annotation(sym)->src); 1736 1737 return 0; 1738 } 1739 1740 bool ui__has_annotation(void) 1741 { 1742 return use_browser == 1 && perf_hpp_list.sym; 1743 } 1744 1745 1746 static double annotation_line__max_percent(struct annotation_line *al, 1747 unsigned int percent_type) 1748 { 1749 double percent_max = 0.0; 1750 int i; 1751 1752 for (i = 0; i < al->data_nr; i++) { 1753 double percent; 1754 1755 percent = annotation_data__percent(&al->data[i], 1756 percent_type); 1757 1758 if (percent > percent_max) 1759 percent_max = percent; 1760 } 1761 1762 return percent_max; 1763 } 1764 1765 static int disasm_line__write(struct disasm_line *dl, struct annotation *notes, 1766 void *obj, char *bf, size_t size, 1767 void (*obj__printf)(void *obj, const char *fmt, ...), 1768 void (*obj__write_graph)(void *obj, int graph)) 1769 { 1770 if (dl->ins.ops && dl->ins.ops->scnprintf) { 1771 if (ins__is_jump(&dl->ins)) { 1772 bool fwd; 1773 1774 if (dl->ops.target.outside) 1775 goto call_like; 1776 fwd = dl->ops.target.offset > dl->al.offset; 1777 obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR); 1778 obj__printf(obj, " "); 1779 } else if (ins__is_call(&dl->ins)) { 1780 call_like: 1781 obj__write_graph(obj, RARROW_CHAR); 1782 obj__printf(obj, " "); 1783 } else if (ins__is_ret(&dl->ins)) { 1784 obj__write_graph(obj, LARROW_CHAR); 1785 obj__printf(obj, " "); 1786 } else { 1787 obj__printf(obj, " "); 1788 } 1789 } else { 1790 obj__printf(obj, " "); 1791 } 1792 1793 return disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset, 1794 notes->src->widths.max_ins_name) + 2; 1795 } 1796 1797 static void ipc_coverage_string(char *bf, int size, struct annotation *notes) 1798 { 1799 double ipc = 0.0, coverage = 0.0; 1800 struct annotated_branch *branch = annotation__get_branch(notes); 1801 1802 if (branch && branch->hit_cycles) 1803 ipc = branch->hit_insn / ((double)branch->hit_cycles); 1804 1805 if (branch && branch->total_insn) { 1806 coverage = branch->cover_insn * 100.0 / 1807 ((double)branch->total_insn); 1808 } 1809 1810 scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)", 1811 ipc, coverage); 1812 } 1813 1814 int annotation_br_cntr_abbr_list(char **str, struct evsel *evsel, bool header) 1815 { 1816 struct evsel *pos; 1817 struct strbuf sb; 1818 1819 if (evsel->evlist->nr_br_cntr <= 0) 1820 return -ENOTSUP; 1821 1822 strbuf_init(&sb, /*hint=*/ 0); 1823 1824 if (header && strbuf_addf(&sb, "# Branch counter abbr list:\n")) 1825 goto err; 1826 1827 evlist__for_each_entry(evsel->evlist, pos) { 1828 if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS)) 1829 continue; 1830 if (header && strbuf_addf(&sb, "#")) 1831 goto err; 1832 1833 if (strbuf_addf(&sb, " %s = %s\n", pos->name, pos->abbr_name)) 1834 goto err; 1835 } 1836 1837 if (header && strbuf_addf(&sb, "#")) 1838 goto err; 1839 if (strbuf_addf(&sb, " '-' No event occurs\n")) 1840 goto err; 1841 1842 if (header && strbuf_addf(&sb, "#")) 1843 goto err; 1844 if 
(strbuf_addf(&sb, " '+' Event occurrences may be lost due to branch counter saturated\n")) 1845 goto err; 1846 1847 *str = strbuf_detach(&sb, NULL); 1848 1849 return 0; 1850 err: 1851 strbuf_release(&sb); 1852 return -ENOMEM; 1853 } 1854 1855 /* Assume the branch counter saturated at 3 */ 1856 #define ANNOTATION_BR_CNTR_SATURATION 3 1857 1858 int annotation_br_cntr_entry(char **str, int br_cntr_nr, 1859 u64 *br_cntr, int num_aggr, 1860 struct evsel *evsel) 1861 { 1862 struct evsel *pos = evsel ? evlist__first(evsel->evlist) : NULL; 1863 bool saturated = false; 1864 int i, j, avg, used; 1865 struct strbuf sb; 1866 1867 strbuf_init(&sb, /*hint=*/ 0); 1868 for (i = 0; i < br_cntr_nr; i++) { 1869 used = 0; 1870 avg = ceil((double)(br_cntr[i] & ~ANNOTATION__BR_CNTR_SATURATED_FLAG) / 1871 (double)num_aggr); 1872 1873 /* 1874 * A histogram with the abbr name is displayed by default. 1875 * With -v, the exact number of branch counter is displayed. 1876 */ 1877 if (verbose) { 1878 evlist__for_each_entry_from(evsel->evlist, pos) { 1879 if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) && 1880 (pos->br_cntr_idx == i)) 1881 break; 1882 } 1883 if (strbuf_addstr(&sb, pos->abbr_name)) 1884 goto err; 1885 1886 if (!br_cntr[i]) { 1887 if (strbuf_addstr(&sb, "=-")) 1888 goto err; 1889 } else { 1890 if (strbuf_addf(&sb, "=%d", avg)) 1891 goto err; 1892 } 1893 if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG) { 1894 if (strbuf_addch(&sb, '+')) 1895 goto err; 1896 } else { 1897 if (strbuf_addch(&sb, ' ')) 1898 goto err; 1899 } 1900 1901 if ((i < br_cntr_nr - 1) && strbuf_addch(&sb, ',')) 1902 goto err; 1903 continue; 1904 } 1905 1906 if (strbuf_addch(&sb, '|')) 1907 goto err; 1908 1909 if (!br_cntr[i]) { 1910 if (strbuf_addch(&sb, '-')) 1911 goto err; 1912 used++; 1913 } else { 1914 evlist__for_each_entry_from(evsel->evlist, pos) { 1915 if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) && 1916 (pos->br_cntr_idx == i)) 1917 break; 1918 } 1919 if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG) 1920 saturated = true; 1921 1922 for (j = 0; j < avg; j++, used++) { 1923 /* Print + if the number of logged events > 3 */ 1924 if (j >= ANNOTATION_BR_CNTR_SATURATION) { 1925 saturated = true; 1926 break; 1927 } 1928 if (strbuf_addstr(&sb, pos->abbr_name)) 1929 goto err; 1930 } 1931 1932 if (saturated) { 1933 if (strbuf_addch(&sb, '+')) 1934 goto err; 1935 used++; 1936 } 1937 pos = list_next_entry(pos, core.node); 1938 } 1939 1940 for (j = used; j < ANNOTATION_BR_CNTR_SATURATION + 1; j++) { 1941 if (strbuf_addch(&sb, ' ')) 1942 goto err; 1943 } 1944 } 1945 1946 if (!verbose && strbuf_addch(&sb, br_cntr_nr ? 
'|' : ' ')) 1947 goto err; 1948 1949 *str = strbuf_detach(&sb, NULL); 1950 1951 return 0; 1952 err: 1953 strbuf_release(&sb); 1954 return -ENOMEM; 1955 } 1956 1957 struct type_hash_entry { 1958 struct annotated_data_type *type; 1959 int offset; 1960 }; 1961 1962 static int disasm_line__snprint_type_info(struct disasm_line *dl, 1963 char *buf, int len, 1964 struct annotation_print_data *apd) 1965 { 1966 struct annotated_data_type *data_type = NULL; 1967 struct type_hash_entry *entry = NULL; 1968 char member[256]; 1969 int offset = 0; 1970 int printed; 1971 1972 scnprintf(buf, len, " "); 1973 1974 if (!annotate_opts.code_with_type || apd->dbg == NULL) 1975 return 1; 1976 1977 if (apd->type_hash) { 1978 hashmap__find(apd->type_hash, dl->al.offset, &entry); 1979 if (entry != NULL) { 1980 data_type = entry->type; 1981 offset = entry->offset; 1982 } 1983 } 1984 1985 if (data_type == NULL) 1986 data_type = __hist_entry__get_data_type(apd->he, apd->arch, apd->dbg, dl, &offset); 1987 1988 if (apd->type_hash && entry == NULL) { 1989 entry = malloc(sizeof(*entry)); 1990 if (entry != NULL) { 1991 entry->type = data_type; 1992 entry->offset = offset; 1993 hashmap__add(apd->type_hash, dl->al.offset, entry); 1994 } 1995 } 1996 1997 if (!needs_type_info(data_type)) 1998 return 1; 1999 2000 printed = scnprintf(buf, len, "\t\t# data-type: %s", data_type->self.type_name); 2001 2002 if (data_type != &stackop_type && data_type != &canary_type && len > printed) 2003 printed += scnprintf(buf + printed, len - printed, " +%#x", offset); 2004 2005 if (annotated_data_type__get_member_name(data_type, member, sizeof(member), offset) && 2006 len > printed) { 2007 printed += scnprintf(buf + printed, len - printed, " (%s)", member); 2008 } 2009 return printed; 2010 } 2011 2012 void annotation_line__write(struct annotation_line *al, struct annotation *notes, 2013 const struct annotation_write_ops *wops, 2014 struct annotation_print_data *apd) 2015 { 2016 bool current_entry = wops->current_entry; 2017 bool change_color = wops->change_color; 2018 double percent_max = annotation_line__max_percent(al, annotate_opts.percent_type); 2019 int width = wops->width; 2020 int pcnt_width = annotation__pcnt_width(notes); 2021 int cycles_width = annotation__cycles_width(notes); 2022 bool show_title = false; 2023 char bf[256]; 2024 int printed; 2025 void *obj = wops->obj; 2026 int (*obj__set_color)(void *obj, int color) = wops->set_color; 2027 void (*obj__set_percent_color)(void *obj, double percent, bool current) = wops->set_percent_color; 2028 int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current) = wops->set_jumps_percent_color; 2029 void (*obj__printf)(void *obj, const char *fmt, ...) 
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
			    const struct annotation_write_ops *wops,
			    struct annotation_print_data *apd)
{
	bool current_entry = wops->current_entry;
	bool change_color = wops->change_color;
	double percent_max = annotation_line__max_percent(al, annotate_opts.percent_type);
	int width = wops->width;
	int pcnt_width = annotation__pcnt_width(notes);
	int cycles_width = annotation__cycles_width(notes);
	bool show_title = false;
	char bf[256];
	int printed;
	void *obj = wops->obj;
	int (*obj__set_color)(void *obj, int color) = wops->set_color;
	void (*obj__set_percent_color)(void *obj, double percent, bool current) = wops->set_percent_color;
	int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current) = wops->set_jumps_percent_color;
	void (*obj__printf)(void *obj, const char *fmt, ...) = wops->printf;
	void (*obj__write_graph)(void *obj, int graph) = wops->write_graph;

	if (wops->first_line && (al->offset == -1 || percent_max == 0.0)) {
		if (notes->branch && al->cycles) {
			if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
				show_title = true;
		} else
			show_title = true;
	}

	if (al->offset != -1 && percent_max != 0.0) {
		int i;

		for (i = 0; i < al->data_nr; i++) {
			double percent;

			percent = annotation_data__percent(&al->data[i],
							   annotate_opts.percent_type);

			obj__set_percent_color(obj, percent, current_entry);
			if (symbol_conf.show_total_period) {
				obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
			} else if (symbol_conf.show_nr_samples) {
				obj__printf(obj, "%7" PRIu64 " ",
					    al->data[i].he.nr_samples);
			} else {
				obj__printf(obj, "%7.2f ", percent);
			}
		}
	} else {
		obj__set_percent_color(obj, 0, current_entry);

		if (!show_title)
			obj__printf(obj, "%-*s", pcnt_width, " ");
		else {
			obj__printf(obj, "%-*s", pcnt_width,
				    symbol_conf.show_total_period ? "Period" :
				    symbol_conf.show_nr_samples ? "Samples" : "Percent");
		}
	}
	width -= pcnt_width;

	if (notes->branch) {
		if (al->cycles && al->cycles->ipc)
			obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
		else if (!show_title)
			obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
		else
			obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");

		if (!annotate_opts.show_minmax_cycle) {
			if (al->cycles && al->cycles->avg)
				obj__printf(obj, "%*" PRIu64 " ",
					    ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
			else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__CYCLES_WIDTH, " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__CYCLES_WIDTH - 1,
					    "Cycle");
		} else {
			if (al->cycles) {
				char str[32];

				scnprintf(str, sizeof(str),
					  "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
					  al->cycles->avg, al->cycles->min,
					  al->cycles->max);

				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    str);
			} else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__MINMAX_CYCLES_WIDTH,
					    " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    "Cycle(min/max)");
		}

		if (annotate_opts.show_br_cntr) {
			if (show_title) {
				obj__printf(obj, "%*s ",
					    ANNOTATION__BR_CNTR_WIDTH,
					    "Branch Counter");
			} else {
				char *buf;

				if (!annotation_br_cntr_entry(&buf, al->br_cntr_nr, al->br_cntr,
							      al->num_aggr, al->evsel)) {
					obj__printf(obj, "%*s ", ANNOTATION__BR_CNTR_WIDTH, buf);
					free(buf);
				}
			}
		}

		if (show_title && !*al->line) {
			ipc_coverage_string(bf, sizeof(bf), notes);
			obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
		}
	}
	width -= cycles_width;

	obj__printf(obj, " ");
	width -= 1;

	if (!*al->line)
		obj__printf(obj, "%-*s", width, " ");
	else if (al->offset == -1) {
		if (al->line_nr && annotate_opts.show_linenr)
			printed = scnprintf(bf, sizeof(bf), "%-*d ",
					    notes->src->widths.addr + 1, al->line_nr);
		else
			printed = scnprintf(bf, sizeof(bf), "%-*s ",
					    notes->src->widths.addr, " ");
		obj__printf(obj, bf);
		width -= printed;
		obj__printf(obj, "%-*s", width, al->line);
	} else {
		u64 addr = al->offset;
		int color = -1;

		if (!annotate_opts.use_offset)
			addr += notes->src->start;

		if (!annotate_opts.use_offset) {
			printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
		} else {
			if (al->jump_sources &&
			    annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
				if (annotate_opts.show_nr_jumps) {
					int prev;
					printed = scnprintf(bf, sizeof(bf), "%*d ",
							    notes->src->widths.jumps,
							    al->jump_sources);
					prev = obj__set_jumps_percent_color(obj, al->jump_sources,
									    current_entry);
					obj__printf(obj, bf);
					obj__set_color(obj, prev);
				}
print_addr:
				printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
						    notes->src->widths.target, addr);
			} else if (ins__is_call(&disasm_line(al)->ins) &&
				   annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
				goto print_addr;
			} else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
				goto print_addr;
			} else {
				printed = scnprintf(bf, sizeof(bf), "%-*s ",
						    notes->src->widths.addr, " ");
			}
		}

		if (change_color)
			color = obj__set_color(obj, HE_COLORSET_ADDR);
		obj__printf(obj, bf);
		if (change_color)
			obj__set_color(obj, color);

		width -= printed;

		printed = disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf),
					     obj__printf, obj__write_graph);

		obj__printf(obj, "%s", bf);
		width -= printed;

		disasm_line__snprint_type_info(disasm_line(al), bf, sizeof(bf), apd);
		obj__printf(obj, "%-*s", width, bf);
	}

}

int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
		      struct arch **parch)
{
	struct symbol *sym = ms->sym;
	struct annotation *notes = symbol__annotation(sym);
	size_t size = symbol__size(sym);
	int err;

	err = symbol__annotate(ms, evsel, parch);
	if (err)
		return err;

	symbol__calc_percent(sym, evsel);

	annotation__set_index(notes);
	annotation__mark_jump_targets(notes, sym);

	err = annotation__compute_ipc(notes, size, evsel);
	if (err)
		return err;

	annotation__init_column_widths(notes, sym);
	annotation__update_column_widths(notes);
	sym->annotate2 = 1;

	return 0;
}

const char * const perf_disassembler__strs[] = {
	[PERF_DISASM_UNKNOWN] = "unknown",
	[PERF_DISASM_LLVM] = "llvm",
	[PERF_DISASM_CAPSTONE] = "capstone",
	[PERF_DISASM_OBJDUMP] = "objdump",
};


static void annotation_options__add_disassembler(struct annotation_options *options,
						 enum perf_disassembler dis)
{
	for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers); i++) {
		if (options->disassemblers[i] == dis) {
			/* The disassembler is already present, don't add it again. */
			return;
		}
		if (options->disassemblers[i] == PERF_DISASM_UNKNOWN) {
			/* Found a free slot. */
			options->disassemblers[i] = dis;
			return;
		}
	}
	pr_err("Failed to add disassembler %d\n", dis);
}

static int annotation_options__add_disassemblers_str(struct annotation_options *options,
						     const char *str)
{
	while (str && *str != '\0') {
		const char *comma = strchr(str, ',');
		int len = comma ? comma - str : (int)strlen(str);
		bool match = false;

		for (u8 i = 0; i < ARRAY_SIZE(perf_disassembler__strs); i++) {
			const char *dis_str = perf_disassembler__strs[i];

			if (len == (int)strlen(dis_str) && !strncmp(str, dis_str, len)) {
				annotation_options__add_disassembler(options, i);
				match = true;
				break;
			}
		}
		if (!match) {
			pr_err("Invalid disassembler '%.*s'\n", len, str);
			return -1;
		}
		str = comma ? comma + 1 : NULL;
	}
	return 0;
}
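
/*
 * Example (illustrative, values assumed): with
 *
 *	[annotate]
 *		disassemblers = llvm,objdump
 *
 * in the perf config, the loop above matches each comma-separated token
 * against perf_disassembler__strs[] and ends up with
 *
 *	options->disassemblers[0] == PERF_DISASM_LLVM
 *	options->disassemblers[1] == PERF_DISASM_OBJDUMP
 *
 * while an unknown token such as "gas" makes it return -1 after printing
 * "Invalid disassembler 'gas'".
 */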
static int annotation__config(const char *var, const char *value, void *data)
{
	struct annotation_options *opt = data;

	if (!strstarts(var, "annotate."))
		return 0;

	if (!strcmp(var, "annotate.offset_level")) {
		perf_config_u8(&opt->offset_level, "offset_level", value);

		if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
		else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
	} else if (!strcmp(var, "annotate.disassemblers")) {
		int err = annotation_options__add_disassemblers_str(opt, value);

		if (err)
			return err;
	} else if (!strcmp(var, "annotate.hide_src_code")) {
		opt->hide_src_code = perf_config_bool("hide_src_code", value);
	} else if (!strcmp(var, "annotate.jump_arrows")) {
		opt->jump_arrows = perf_config_bool("jump_arrows", value);
	} else if (!strcmp(var, "annotate.show_linenr")) {
		opt->show_linenr = perf_config_bool("show_linenr", value);
	} else if (!strcmp(var, "annotate.show_nr_jumps")) {
		opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
	} else if (!strcmp(var, "annotate.show_nr_samples")) {
		symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
							       value);
	} else if (!strcmp(var, "annotate.show_total_period")) {
		symbol_conf.show_total_period = perf_config_bool("show_total_period",
								 value);
	} else if (!strcmp(var, "annotate.use_offset")) {
		opt->use_offset = perf_config_bool("use_offset", value);
	} else if (!strcmp(var, "annotate.disassembler_style")) {
		opt->disassembler_style = strdup(value);
		if (!opt->disassembler_style) {
			pr_err("Not enough memory for annotate.disassembler_style\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.objdump")) {
		opt->objdump_path = strdup(value);
		if (!opt->objdump_path) {
			pr_err("Not enough memory for annotate.objdump\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.addr2line")) {
		symbol_conf.addr2line_path = strdup(value);
		if (!symbol_conf.addr2line_path) {
			pr_err("Not enough memory for annotate.addr2line\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.demangle")) {
		symbol_conf.demangle = perf_config_bool("demangle", value);
	} else if (!strcmp(var, "annotate.demangle_kernel")) {
		symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
	} else {
		pr_debug("%s variable unknown, ignoring...", var);
	}

	return 0;
}
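
/*
 * Illustration only: annotation__config() is fed each key of the user's
 * config files by perf_config() below, so a ~/.perfconfig fragment like
 *
 *	[annotate]
 *		hide_src_code = false
 *		show_nr_jumps = true
 *		offset_level = 2
 *		disassemblers = llvm,objdump
 *
 * ends up toggling the matching annotate_opts/symbol_conf fields.  The
 * particular values shown here are made up for the example.
 */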
void annotation_options__init(void)
{
	struct annotation_options *opt = &annotate_opts;

	memset(opt, 0, sizeof(*opt));

	/* Default values. */
	opt->use_offset = true;
	opt->jump_arrows = true;
	opt->annotate_src = true;
	opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
	opt->percent_type = PERCENT_PERIOD_LOCAL;
	opt->hide_src_code = true;
	opt->hide_src_code_on_title = true;
}

void annotation_options__exit(void)
{
	zfree(&annotate_opts.disassembler_style);
	zfree(&annotate_opts.objdump_path);
}

static void annotation_options__default_init_disassemblers(struct annotation_options *options)
{
	if (options->disassemblers[0] != PERF_DISASM_UNKNOWN) {
		/* Already initialized. */
		return;
	}
#ifdef HAVE_LIBLLVM_SUPPORT
	annotation_options__add_disassembler(options, PERF_DISASM_LLVM);
#endif
#ifdef HAVE_LIBCAPSTONE_SUPPORT
	annotation_options__add_disassembler(options, PERF_DISASM_CAPSTONE);
#endif
	annotation_options__add_disassembler(options, PERF_DISASM_OBJDUMP);
}

void annotation_config__init(void)
{
	perf_config(annotation__config, &annotate_opts);
	annotation_options__default_init_disassemblers(&annotate_opts);
}

static unsigned int parse_percent_type(char *str1, char *str2)
{
	unsigned int type = (unsigned int) -1;

	if (!strcmp("period", str1)) {
		if (!strcmp("local", str2))
			type = PERCENT_PERIOD_LOCAL;
		else if (!strcmp("global", str2))
			type = PERCENT_PERIOD_GLOBAL;
	}

	if (!strcmp("hits", str1)) {
		if (!strcmp("local", str2))
			type = PERCENT_HITS_LOCAL;
		else if (!strcmp("global", str2))
			type = PERCENT_HITS_GLOBAL;
	}

	return type;
}

int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
				int unset __maybe_unused)
{
	unsigned int type;
	char *str1, *str2;
	int err = -1;

	str1 = strdup(_str);
	if (!str1)
		return -ENOMEM;

	str2 = strchr(str1, '-');
	if (!str2)
		goto out;

	*str2++ = 0;

	type = parse_percent_type(str1, str2);
	if (type == (unsigned int) -1)
		type = parse_percent_type(str2, str1);
	if (type != (unsigned int) -1) {
		annotate_opts.percent_type = type;
		err = 0;
	}

out:
	free(str1);
	return err;
}

int annotate_check_args(void)
{
	struct annotation_options *args = &annotate_opts;

	if (args->prefix_strip && !args->prefix) {
		pr_err("--prefix-strip requires --prefix\n");
		return -1;
	}
	return 0;
}

/*
 * Get the register number and access offset from the given instruction
 * operand.  It assumes AT&T x86 asm format like OFFSET(REG).  This may need
 * to be revisited when other architectures are handled.
 * Fills @op_loc (reg1/reg2 and offset) and returns 0 on success.
 */
static int extract_reg_offset(struct arch *arch, const char *str,
			      struct annotated_op_loc *op_loc)
{
	char *p;
	char *regname;

	if (arch->objdump.register_char == 0)
		return -1;

	/*
	 * It should start with the offset, but it's possible to skip a 0
	 * in the asm, so 0(%rax) is the same as (%rax).
	 *
	 * However, it can also start with a segment selector register like
	 * %gs:0x18(%rbx).  In that case, skip that part.
	 */
	if (*str == arch->objdump.register_char) {
		if (arch__is(arch, "x86")) {
			/* FIXME: Handle other segment registers */
			if (!strncmp(str, "%gs:", 4))
				op_loc->segment = INSN_SEG_X86_GS;
		}

		while (*str && !isdigit(*str) &&
		       *str != arch->objdump.memory_ref_char)
			str++;
	}

	op_loc->offset = strtol(str, &p, 0);

	p = strchr(p, arch->objdump.register_char);
	if (p == NULL)
		return -1;

	regname = strdup(p);
	if (regname == NULL)
		return -1;

	op_loc->reg1 = get_dwarf_regnum(regname, arch->e_machine, arch->e_flags);
	free(regname);

	/* Get the second register */
	if (op_loc->multi_regs) {
		p = strchr(p + 1, arch->objdump.register_char);
		if (p == NULL)
			return -1;

		regname = strdup(p);
		if (regname == NULL)
			return -1;

		op_loc->reg2 = get_dwarf_regnum(regname, arch->e_machine, arch->e_flags);
		free(regname);
	}
	return 0;
}
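
/*
 * Worked example (x86, AT&T syntax, for illustration): given the operand
 * string "%gs:0x18(%rbx)", the code above sets op_loc->segment to
 * INSN_SEG_X86_GS, skips ahead to "0x18(%rbx)", parses offset = 0x18 with
 * strtol() and resolves reg1 from "%rbx" via get_dwarf_regnum().  For a two
 * register form like "0x8(%rbx,%rcx,4)" with op_loc->multi_regs set, reg2 is
 * taken from the second register character onwards, i.e. "%rcx,4)".  The
 * concrete DWARF register numbers depend on get_dwarf_regnum() for the
 * target architecture.
 */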
/**
 * annotate_get_insn_location - Get location of instruction
 * @arch: the architecture info
 * @dl: the target instruction
 * @loc: a buffer to save the data
 *
 * Get detailed location info (register and offset) in the instruction.
 * It needs both source and target operands and whether it accesses a
 * memory location.  The offset field is meaningful only when the
 * corresponding mem flag is set.  The reg2 field is meaningful only
 * when the multi_regs flag is set.
 *
 * Some examples on x86:
 *
 *   mov  (%rax), %rcx          # src_reg1 = rax, src_mem = 1, src_offset = 0
 *                              # dst_reg1 = rcx, dst_mem = 0
 *
 *   mov  0x18, %r8             # src_reg1 = -1, src_mem = 0
 *                              # dst_reg1 = r8, dst_mem = 0
 *
 *   mov  %rsi, 8(%rbx,%rcx,4)  # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0
 *                              # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1
 *                              # dst_multi_regs = 1, dst_offset = 8
 */
int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl,
			       struct annotated_insn_loc *loc)
{
	struct ins_operands *ops;
	struct annotated_op_loc *op_loc;
	int i;

	if (ins__is_lock(&dl->ins))
		ops = dl->ops.locked.ops;
	else
		ops = &dl->ops;

	if (ops == NULL)
		return -1;

	memset(loc, 0, sizeof(*loc));

	for_each_insn_op_loc(loc, i, op_loc) {
		const char *insn_str = ops->source.raw;
		bool multi_regs = ops->source.multi_regs;
		bool mem_ref = ops->source.mem_ref;

		if (i == INSN_OP_TARGET) {
			insn_str = ops->target.raw;
			multi_regs = ops->target.multi_regs;
			mem_ref = ops->target.mem_ref;
		}

		/* Invalidate the register by default */
		op_loc->reg1 = -1;
		op_loc->reg2 = -1;

		if (insn_str == NULL) {
			if (!arch__is(arch, "powerpc"))
				continue;
		}

		/*
		 * For powerpc, call the get_powerpc_regs() function, which
		 * extracts the required fields for op_loc (i.e. reg1, reg2
		 * and offset) from the raw instruction.
		 */
		if (arch__is(arch, "powerpc")) {
			op_loc->mem_ref = mem_ref;
			op_loc->multi_regs = multi_regs;
			get_powerpc_regs(dl->raw.raw_insn, !i, op_loc);
		} else if (strchr(insn_str, arch->objdump.memory_ref_char)) {
			op_loc->mem_ref = true;
			op_loc->multi_regs = multi_regs;
			extract_reg_offset(arch, insn_str, op_loc);
		} else {
			char *s, *p = NULL;

			if (arch__is(arch, "x86")) {
				/* FIXME: Handle other segment registers */
				if (!strncmp(insn_str, "%gs:", 4)) {
					op_loc->segment = INSN_SEG_X86_GS;
					op_loc->offset = strtol(insn_str + 4,
								&p, 0);
					if (p && p != insn_str + 4)
						op_loc->imm = true;
					continue;
				}
			}

			s = strdup(insn_str);
			if (s == NULL)
				return -1;

			if (*s == arch->objdump.register_char)
				op_loc->reg1 = get_dwarf_regnum(s, arch->e_machine, arch->e_flags);
			else if (*s == arch->objdump.imm_char) {
				op_loc->offset = strtol(s + 1, &p, 0);
				if (p && p != s + 1)
					op_loc->imm = true;
			}
			free(s);
		}
	}

	return 0;
}
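
/*
 * Illustration only: a typical caller (see __hist_entry__get_data_type()
 * below for the real one) walks the operand locations roughly like this:
 *
 *	struct annotated_insn_loc loc;
 *	struct annotated_op_loc *op_loc;
 *	int i;
 *
 *	if (annotate_get_insn_location(arch, dl, &loc) < 0)
 *		return;		// could not parse the operands
 *
 *	for_each_insn_op_loc(&loc, i, op_loc) {
 *		if (!op_loc->mem_ref)	// only memory accesses are interesting
 *			continue;
 *		// use op_loc->reg1 / op_loc->reg2 / op_loc->offset here
 *	}
 */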
static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip,
					    bool allow_update)
{
	struct disasm_line *dl;
	struct annotation *notes;

	notes = symbol__annotation(sym);

	list_for_each_entry(dl, &notes->src->source, al.node) {
		if (dl->al.offset == -1)
			continue;

		if (sym->start + dl->al.offset == ip) {
			/*
			 * llvm-objdump places "lock" in a separate line and
			 * in that case, we want to get the next line.
			 */
			if (ins__is_lock(&dl->ins) &&
			    *dl->ops.raw == '\0' && allow_update) {
				ip++;
				continue;
			}
			return dl;
		}
	}
	return NULL;
}

static struct annotated_item_stat *annotate_data_stat(struct list_head *head,
						      const char *name)
{
	struct annotated_item_stat *istat;

	list_for_each_entry(istat, head, list) {
		if (!strcmp(istat->name, name))
			return istat;
	}

	istat = zalloc(sizeof(*istat));
	if (istat == NULL)
		return NULL;

	istat->name = strdup(name);
	if ((istat->name == NULL) || (!strlen(istat->name))) {
		free(istat);
		return NULL;
	}

	list_add_tail(&istat->list, head);
	return istat;
}

static bool is_stack_operation(struct arch *arch, struct disasm_line *dl)
{
	if (arch__is(arch, "x86")) {
		if (!strncmp(dl->ins.name, "push", 4) ||
		    !strncmp(dl->ins.name, "pop", 3) ||
		    !strncmp(dl->ins.name, "call", 4) ||
		    !strncmp(dl->ins.name, "ret", 3))
			return true;
	}

	return false;
}

static bool is_stack_canary(struct arch *arch, struct annotated_op_loc *loc)
{
	/* On x86_64, %gs:40 is used for the stack canary */
	if (arch__is(arch, "x86")) {
		if (loc->segment == INSN_SEG_X86_GS && loc->imm &&
		    loc->offset == 40)
			return true;
	}

	return false;
}

static struct disasm_line *
annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr)
{
	struct list_head *sources = &notes->src->source;
	struct disasm_line *prev;

	if (curr == list_first_entry(sources, struct disasm_line, al.node))
		return NULL;

	prev = list_prev_entry(curr, al.node);
	while (prev->al.offset == -1 &&
	       prev != list_first_entry(sources, struct disasm_line, al.node))
		prev = list_prev_entry(prev, al.node);

	if (prev->al.offset == -1)
		return NULL;

	return prev;
}

static struct disasm_line *
annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr)
{
	struct list_head *sources = &notes->src->source;
	struct disasm_line *next;

	if (curr == list_last_entry(sources, struct disasm_line, al.node))
		return NULL;

	next = list_next_entry(curr, al.node);
	while (next->al.offset == -1 &&
	       next != list_last_entry(sources, struct disasm_line, al.node))
		next = list_next_entry(next, al.node);

	if (next->al.offset == -1)
		return NULL;

	return next;
}

u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset,
			struct disasm_line *dl)
{
	struct annotation *notes;
	struct disasm_line *next;
	u64 addr;

	notes = symbol__annotation(ms->sym);
	/*
	 * PC-relative addressing starts from the next instruction address,
	 * but the IP is for the current instruction.  Since disasm_line
	 * doesn't have the instruction size, calculate it using the next
	 * disasm_line.  If it's the last one, we can use the symbol's end
	 * address directly.
	 */
	next = annotation__next_asm_line(notes, dl);
	if (next == NULL)
		addr = ms->sym->end + offset;
	else
		addr = ip + (next->al.offset - dl->al.offset) + offset;

	return map__rip_2objdump(ms->map, addr);
}
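
/*
 * Worked example (made up numbers): for an instruction at ip 0x1000 with
 * dl->al.offset 0x40, a following instruction at offset 0x47 and a
 * displacement of 0x200 in the operand, the computation above yields
 *
 *	addr = 0x1000 + (0x47 - 0x40) + 0x200 = 0x1207
 *
 * i.e. the displacement is applied to the address of the *next* instruction,
 * and the result is then converted to the objdump address space with
 * map__rip_2objdump().
 */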
static struct debuginfo_cache {
	struct dso *dso;
	struct debuginfo *dbg;
} di_cache;

void debuginfo_cache__delete(void)
{
	dso__put(di_cache.dso);
	di_cache.dso = NULL;

	debuginfo__delete(di_cache.dbg);
	di_cache.dbg = NULL;
}

static struct annotated_data_type *
__hist_entry__get_data_type(struct hist_entry *he, struct arch *arch,
			    struct debuginfo *dbg, struct disasm_line *dl,
			    int *type_offset)
{
	struct map_symbol *ms = &he->ms;
	struct annotated_insn_loc loc;
	struct annotated_op_loc *op_loc;
	struct annotated_data_type *mem_type;
	struct annotated_item_stat *istat;
	int i;

	istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
	if (istat == NULL) {
		ann_data_stat.no_insn++;
		return NO_TYPE;
	}

	if (annotate_get_insn_location(arch, dl, &loc) < 0) {
		ann_data_stat.no_insn_ops++;
		istat->bad++;
		return NO_TYPE;
	}

	if (is_stack_operation(arch, dl)) {
		istat->good++;
		*type_offset = 0;
		return &stackop_type;
	}

	for_each_insn_op_loc(&loc, i, op_loc) {
		struct data_loc_info dloc = {
			.arch = arch,
			.thread = he->thread,
			.ms = ms,
			.ip = ms->sym->start + dl->al.offset,
			.cpumode = he->cpumode,
			.op = op_loc,
			.di = dbg,
		};

		if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE)
			continue;

		/* PC-relative addressing */
		if (op_loc->reg1 == DWARF_REG_PC) {
			dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip,
							    op_loc->offset, dl);
		}

		/* This-CPU access in the kernel - pretend it's PC-relative addressing */
		if (dso__kernel(map__dso(ms->map)) && arch__is(arch, "x86") &&
		    op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) {
			dloc.var_addr = op_loc->offset;
			op_loc->reg1 = DWARF_REG_PC;
		}

		mem_type = find_data_type(&dloc);

		if (mem_type == NULL && is_stack_canary(arch, op_loc)) {
			istat->good++;
			*type_offset = 0;
			return &canary_type;
		}

		if (mem_type)
			istat->good++;
		else
			istat->bad++;

		if (symbol_conf.annotate_data_sample) {
			struct evsel *evsel = hists_to_evsel(he->hists);

			annotated_data_type__update_samples(mem_type, evsel,
							    dloc.type_offset,
							    he->stat.nr_events,
							    he->stat.period);
		}
		*type_offset = dloc.type_offset;
		return mem_type ?: NO_TYPE;
	}

	/* retry with a fused instruction */
	return NULL;
}

/**
 * hist_entry__get_data_type - find data type for given hist entry
 * @he: hist entry
 *
 * This function first annotates the instruction at @he->ip and extracts
 * register and offset info from it.  Then it searches the DWARF debug
 * info to get a variable and type information using the address, register,
 * and offset.
 */
struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he)
{
	struct map_symbol *ms = &he->ms;
	struct evsel *evsel = hists_to_evsel(he->hists);
	struct arch *arch;
	struct disasm_line *dl;
	struct annotated_data_type *mem_type;
	struct annotated_item_stat *istat;
	u64 ip = he->ip;

	ann_data_stat.total++;

	if (ms->map == NULL || ms->sym == NULL) {
		ann_data_stat.no_sym++;
		return NULL;
	}

	if (!symbol_conf.init_annotation) {
		ann_data_stat.no_sym++;
		return NULL;
	}

	/*
	 * di_cache holds a pair of values, but the code below assumes
	 * di_cache.dso can be compared/updated and di_cache.dbg can be
	 * read/updated independently from each other.  That assumption only
	 * holds in single threaded code.
	 */
	assert(perf_singlethreaded);

	if (map__dso(ms->map) != di_cache.dso) {
		dso__put(di_cache.dso);
		di_cache.dso = dso__get(map__dso(ms->map));

		debuginfo__delete(di_cache.dbg);
		di_cache.dbg = dso__debuginfo(di_cache.dso);
	}

	if (di_cache.dbg == NULL) {
		ann_data_stat.no_dbginfo++;
		return NULL;
	}

	/* Make sure it has the disasm of the function */
	if (symbol__annotate(ms, evsel, &arch) < 0) {
		ann_data_stat.no_insn++;
		return NULL;
	}

	/*
	 * Get a disasm to extract the location from the insn.
	 * This is too slow...
	 */
	dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true);
	if (dl == NULL) {
		ann_data_stat.no_insn++;
		return NULL;
	}

retry:
	mem_type = __hist_entry__get_data_type(he, arch, di_cache.dbg, dl,
					       &he->mem_type_off);
	if (mem_type)
		return mem_type == NO_TYPE ? NULL : mem_type;

	/*
	 * Some instructions can be fused and the actual memory access came
	 * from the previous instruction.
	 */
	if (dl->al.offset > 0) {
		struct annotation *notes;
		struct disasm_line *prev_dl;

		notes = symbol__annotation(ms->sym);
		prev_dl = annotation__prev_asm_line(notes, dl);

		if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) {
			dl = prev_dl;
			goto retry;
		}
	}

	ann_data_stat.no_mem_ops++;
	istat = annotate_data_stat(&ann_insn_stat, dl->ins.name);
	if (istat)
		istat->bad++;
	return NULL;
}
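
/*
 * Example of the "fused instruction" retry above (x86, illustrative): with
 *
 *	cmpl	$0x0, 0x10(%rax)
 *	je	...
 *
 * the two instructions may be macro-fused, so a sample can land on the 'je'
 * even though the memory access belongs to the 'cmpl'.  When the sampled
 * line has no memory operand, ins__is_fused() lets us step back to the
 * previous disasm_line and resolve the data type from there instead.
 */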
/* Basic block traversal (BFS) data structure */
struct basic_block_data {
	struct list_head queue;
	struct list_head visited;
};

/*
 * During the traversal, it needs to know the parent block where the current
 * block started from.  Note that a single basic block can be the parent of
 * two child basic blocks (in case of a conditional jump).
 */
struct basic_block_link {
	struct list_head node;
	struct basic_block_link *parent;
	struct annotated_basic_block *bb;
};

/* Check if any basic block in the list already covers the offset */
static bool basic_block_has_offset(struct list_head *head, s64 offset)
{
	struct basic_block_link *link;

	list_for_each_entry(link, head, node) {
		s64 begin_offset = link->bb->begin->al.offset;
		s64 end_offset = link->bb->end->al.offset;

		if (begin_offset <= offset && offset <= end_offset)
			return true;
	}
	return false;
}

static bool is_new_basic_block(struct basic_block_data *bb_data,
			       struct disasm_line *dl)
{
	s64 offset = dl->al.offset;

	if (basic_block_has_offset(&bb_data->visited, offset))
		return false;
	if (basic_block_has_offset(&bb_data->queue, offset))
		return false;
	return true;
}

/* Add a basic block starting from dl and link it to the parent */
static int add_basic_block(struct basic_block_data *bb_data,
			   struct basic_block_link *parent,
			   struct disasm_line *dl)
{
	struct annotated_basic_block *bb;
	struct basic_block_link *link;

	if (dl == NULL)
		return -1;

	if (!is_new_basic_block(bb_data, dl))
		return 0;

	bb = zalloc(sizeof(*bb));
	if (bb == NULL)
		return -1;

	bb->begin = dl;
	bb->end = dl;
	INIT_LIST_HEAD(&bb->list);

	link = malloc(sizeof(*link));
	if (link == NULL) {
		free(bb);
		return -1;
	}

	link->bb = bb;
	link->parent = parent;
	list_add_tail(&link->node, &bb_data->queue);
	return 0;
}
/* Returns true when it finds the target in the current basic block */
static bool process_basic_block(struct basic_block_data *bb_data,
				struct basic_block_link *link,
				struct symbol *sym, u64 target)
{
	struct disasm_line *dl, *next_dl, *last_dl;
	struct annotation *notes = symbol__annotation(sym);
	bool found = false;

	dl = link->bb->begin;
	/* Check if it's already visited */
	if (basic_block_has_offset(&bb_data->visited, dl->al.offset))
		return false;

	last_dl = list_last_entry(&notes->src->source,
				  struct disasm_line, al.node);
	if (last_dl->al.offset == -1)
		last_dl = annotation__prev_asm_line(notes, last_dl);

	if (last_dl == NULL)
		return false;

	list_for_each_entry_from(dl, &notes->src->source, al.node) {
		/* Skip comment or debug info lines */
		if (dl->al.offset == -1)
			continue;
		/* Found the target instruction */
		if (sym->start + dl->al.offset == target) {
			found = true;
			break;
		}
		/* End of the function, finish the block */
		if (dl == last_dl)
			break;
		/* A 'return' instruction finishes the block */
		if (ins__is_ret(&dl->ins))
			break;
		/* Normal instructions are part of the basic block */
		if (!ins__is_jump(&dl->ins))
			continue;
		/* Jump to a different function, tail call or return */
		if (dl->ops.target.outside)
			break;
		/* A jump instruction creates new basic block(s) */
		next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset,
					   /*allow_update=*/false);
		if (next_dl)
			add_basic_block(bb_data, link, next_dl);

		/*
		 * FIXME: determine conditional jumps properly.
		 * Conditional jumps create another basic block with the
		 * next disasm line.
		 */
		if (!strstr(dl->ins.name, "jmp")) {
			next_dl = annotation__next_asm_line(notes, dl);
			if (next_dl)
				add_basic_block(bb_data, link, next_dl);
		}
		break;

	}
	link->bb->end = dl;
	return found;
}

/*
 * Once it has found the target basic block, build a proper linked list of
 * basic blocks by following the parent link recursively.
 */
static void link_found_basic_blocks(struct basic_block_link *link,
				    struct list_head *head)
{
	while (link) {
		struct basic_block_link *parent = link->parent;

		list_move(&link->bb->list, head);
		list_del(&link->node);
		free(link);

		link = parent;
	}
}

static void delete_basic_blocks(struct basic_block_data *bb_data)
{
	struct basic_block_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &bb_data->queue, node) {
		list_del(&link->node);
		zfree(&link->bb);
		free(link);
	}

	list_for_each_entry_safe(link, tmp, &bb_data->visited, node) {
		list_del(&link->node);
		zfree(&link->bb);
		free(link);
	}
}

/**
 * annotate_get_basic_blocks - Get basic blocks for given address range
 * @sym: symbol to annotate
 * @src: source address
 * @dst: destination address
 * @head: list head to save basic blocks
 *
 * This function traverses disasm_lines from @src to @dst and saves them in a
 * list of annotated_basic_block at @head.  It uses BFS to find the shortest
 * path between the two.  The basic_block_link is to maintain parent links so
 * that it can build a list of blocks from the start.
 */
int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst,
			      struct list_head *head)
{
	struct basic_block_data bb_data = {
		.queue = LIST_HEAD_INIT(bb_data.queue),
		.visited = LIST_HEAD_INIT(bb_data.visited),
	};
	struct basic_block_link *link;
	struct disasm_line *dl;
	int ret = -1;

	dl = find_disasm_line(sym, src, /*allow_update=*/false);
	if (dl == NULL)
		return -1;

	if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0)
		return -1;

	/* Find the shortest path from src to dst using BFS */
	while (!list_empty(&bb_data.queue)) {
		link = list_first_entry(&bb_data.queue, struct basic_block_link, node);

		if (process_basic_block(&bb_data, link, sym, dst)) {
			link_found_basic_blocks(link, head);
			ret = 0;
			break;
		}
		list_move(&link->node, &bb_data.visited);
	}
	delete_basic_blocks(&bb_data);
	return ret;
}
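
/*
 * Usage sketch (illustration only; the variable names and the ownership
 * notes are assumptions based on the code above): a caller that wants the
 * blocks between a branch and its target could do roughly
 *
 *	LIST_HEAD(blocks);
 *	struct annotated_basic_block *bb;
 *
 *	if (annotate_get_basic_blocks(sym, branch_off, target_off, &blocks) == 0) {
 *		list_for_each_entry(bb, &blocks, list) {
 *			// each block spans bb->begin->al.offset .. bb->end->al.offset
 *		}
 *	}
 *
 * The disasm_lines referenced by each block still belong to the symbol's
 * annotation; only the annotated_basic_block wrappers moved onto the list
 * would need to be freed by the caller.
 */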