1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 4 * 5 * Parts came from builtin-annotate.c, see those files for further 6 * copyright notes. 7 */ 8 9 #include <errno.h> 10 #include <inttypes.h> 11 #include <stdlib.h> 12 #include "util.h" // hex_width() 13 #include "ui/ui.h" 14 #include "sort.h" 15 #include "build-id.h" 16 #include "color.h" 17 #include "config.h" 18 #include "disasm.h" 19 #include "dso.h" 20 #include "env.h" 21 #include "map.h" 22 #include "maps.h" 23 #include "symbol.h" 24 #include "srcline.h" 25 #include "units.h" 26 #include "debug.h" 27 #include "debuginfo.h" 28 #include "annotate.h" 29 #include "annotate-data.h" 30 #include "evsel.h" 31 #include "evlist.h" 32 #include "bpf-event.h" 33 #include "bpf-utils.h" 34 #include "block-range.h" 35 #include "string2.h" 36 #include "dwarf-regs.h" 37 #include "util/event.h" 38 #include "util/sharded_mutex.h" 39 #include "arch/common.h" 40 #include "namespaces.h" 41 #include "thread.h" 42 #include "hashmap.h" 43 #include "strbuf.h" 44 #include <regex.h> 45 #include <linux/bitops.h> 46 #include <linux/err.h> 47 #include <linux/kernel.h> 48 #include <linux/string.h> 49 #include <linux/zalloc.h> 50 #include <subcmd/parse-options.h> 51 #include <subcmd/run-command.h> 52 #include <math.h> 53 54 /* FIXME: For the HE_COLORSET */ 55 #include "ui/browser.h" 56 57 /* 58 * FIXME: Using the same values as slang.h, 59 * but that header may not be available everywhere 60 */ 61 #define LARROW_CHAR ((unsigned char)',') 62 #define RARROW_CHAR ((unsigned char)'+') 63 #define DARROW_CHAR ((unsigned char)'.') 64 #define UARROW_CHAR ((unsigned char)'-') 65 66 #include <linux/ctype.h> 67 68 /* global annotation options */ 69 struct annotation_options annotate_opts; 70 71 /* Data type collection debug statistics */ 72 struct annotated_data_stat ann_data_stat; 73 LIST_HEAD(ann_insn_stat); 74 75 /* Pseudo data types */ 76 struct annotated_data_type 
stackop_type = { 77 .self = { 78 .type_name = (char *)"(stack operation)", 79 .children = LIST_HEAD_INIT(stackop_type.self.children), 80 }, 81 }; 82 83 struct annotated_data_type canary_type = { 84 .self = { 85 .type_name = (char *)"(stack canary)", 86 .children = LIST_HEAD_INIT(canary_type.self.children), 87 }, 88 }; 89 90 #define NO_TYPE ((struct annotated_data_type *)-1UL) 91 92 /* symbol histogram: key = offset << 16 | evsel->core.idx */ 93 static size_t sym_hist_hash(long key, void *ctx __maybe_unused) 94 { 95 return (key >> 16) + (key & 0xffff); 96 } 97 98 static bool sym_hist_equal(long key1, long key2, void *ctx __maybe_unused) 99 { 100 return key1 == key2; 101 } 102 103 static struct annotated_source *annotated_source__new(void) 104 { 105 struct annotated_source *src = zalloc(sizeof(*src)); 106 107 if (src != NULL) 108 INIT_LIST_HEAD(&src->source); 109 110 return src; 111 } 112 113 static __maybe_unused void annotated_source__delete(struct annotated_source *src) 114 { 115 struct hashmap_entry *cur; 116 size_t bkt; 117 118 if (src == NULL) 119 return; 120 121 if (src->samples) { 122 hashmap__for_each_entry(src->samples, cur, bkt) 123 zfree(&cur->pvalue); 124 hashmap__free(src->samples); 125 } 126 zfree(&src->histograms); 127 free(src); 128 } 129 130 static int annotated_source__alloc_histograms(struct annotated_source *src, 131 int nr_hists) 132 { 133 src->nr_histograms = nr_hists; 134 src->histograms = calloc(nr_hists, sizeof(*src->histograms)); 135 136 if (src->histograms == NULL) 137 return -1; 138 139 src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL); 140 if (IS_ERR(src->samples)) { 141 zfree(&src->histograms); 142 src->samples = NULL; 143 } 144 145 return src->histograms ? 
0 : -1; 146 } 147 148 void symbol__annotate_zero_histograms(struct symbol *sym) 149 { 150 struct annotation *notes = symbol__annotation(sym); 151 152 annotation__lock(notes); 153 if (notes->src != NULL) { 154 memset(notes->src->histograms, 0, 155 notes->src->nr_histograms * sizeof(*notes->src->histograms)); 156 hashmap__clear(notes->src->samples); 157 } 158 if (notes->branch && notes->branch->cycles_hist) { 159 memset(notes->branch->cycles_hist, 0, 160 symbol__size(sym) * sizeof(struct cyc_hist)); 161 } 162 annotation__unlock(notes); 163 } 164 165 static int __symbol__account_cycles(struct cyc_hist *ch, 166 u64 start, 167 unsigned offset, unsigned cycles, 168 unsigned have_start) 169 { 170 /* 171 * For now we can only account one basic block per 172 * final jump. But multiple could be overlapping. 173 * Always account the longest one. So when 174 * a shorter one has been already seen throw it away. 175 * 176 * We separately always account the full cycles. 177 */ 178 ch[offset].num_aggr++; 179 ch[offset].cycles_aggr += cycles; 180 181 if (cycles > ch[offset].cycles_max) 182 ch[offset].cycles_max = cycles; 183 184 if (ch[offset].cycles_min) { 185 if (cycles && cycles < ch[offset].cycles_min) 186 ch[offset].cycles_min = cycles; 187 } else 188 ch[offset].cycles_min = cycles; 189 190 if (!have_start && ch[offset].have_start) 191 return 0; 192 if (ch[offset].num) { 193 if (have_start && (!ch[offset].have_start || 194 ch[offset].start > start)) { 195 ch[offset].have_start = 0; 196 ch[offset].cycles = 0; 197 ch[offset].num = 0; 198 if (ch[offset].reset < 0xffff) 199 ch[offset].reset++; 200 } else if (have_start && 201 ch[offset].start < start) 202 return 0; 203 } 204 205 if (ch[offset].num < NUM_SPARKS) 206 ch[offset].cycles_spark[ch[offset].num] = cycles; 207 208 ch[offset].have_start = have_start; 209 ch[offset].start = start; 210 ch[offset].cycles += cycles; 211 ch[offset].num++; 212 return 0; 213 } 214 215 static int __symbol__inc_addr_samples(struct map_symbol *ms, 
216 struct annotated_source *src, struct evsel *evsel, u64 addr, 217 struct perf_sample *sample) 218 { 219 struct symbol *sym = ms->sym; 220 long hash_key; 221 u64 offset; 222 struct sym_hist *h; 223 struct sym_hist_entry *entry; 224 225 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr)); 226 227 if ((addr < sym->start || addr >= sym->end) && 228 (addr != sym->end || sym->start != sym->end)) { 229 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n", 230 __func__, __LINE__, sym->name, sym->start, addr, sym->end); 231 return -ERANGE; 232 } 233 234 offset = addr - sym->start; 235 h = annotated_source__histogram(src, evsel); 236 if (h == NULL) { 237 pr_debug("%s(%d): ENOMEM! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n", 238 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC); 239 return -ENOMEM; 240 } 241 242 hash_key = offset << 16 | evsel->core.idx; 243 if (!hashmap__find(src->samples, hash_key, &entry)) { 244 entry = zalloc(sizeof(*entry)); 245 if (entry == NULL) 246 return -ENOMEM; 247 248 if (hashmap__add(src->samples, hash_key, entry) < 0) 249 return -ENOMEM; 250 } 251 252 h->nr_samples++; 253 h->period += sample->period; 254 entry->nr_samples++; 255 entry->period += sample->period; 256 257 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64 258 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n", 259 sym->start, sym->name, addr, addr - sym->start, evsel->core.idx, 260 entry->nr_samples, entry->period); 261 return 0; 262 } 263 264 struct annotated_branch *annotation__get_branch(struct annotation *notes) 265 { 266 if (notes == NULL) 267 return NULL; 268 269 if (notes->branch == NULL) 270 notes->branch = zalloc(sizeof(*notes->branch)); 271 272 return notes->branch; 273 } 274 275 static struct annotated_branch *symbol__find_branch_hist(struct symbol *sym, 276 unsigned int br_cntr_nr) 277 { 278 
struct annotation *notes = symbol__annotation(sym); 279 struct annotated_branch *branch; 280 const size_t size = symbol__size(sym); 281 282 branch = annotation__get_branch(notes); 283 if (branch == NULL) 284 return NULL; 285 286 if (branch->cycles_hist == NULL) { 287 branch->cycles_hist = calloc(size, sizeof(struct cyc_hist)); 288 if (!branch->cycles_hist) 289 return NULL; 290 } 291 292 if (br_cntr_nr && branch->br_cntr == NULL) { 293 branch->br_cntr = calloc(br_cntr_nr * size, sizeof(u64)); 294 if (!branch->br_cntr) 295 return NULL; 296 } 297 298 return branch; 299 } 300 301 struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists) 302 { 303 struct annotation *notes = symbol__annotation(sym); 304 305 if (notes->src == NULL) { 306 notes->src = annotated_source__new(); 307 if (notes->src == NULL) 308 return NULL; 309 goto alloc_histograms; 310 } 311 312 if (notes->src->histograms == NULL) { 313 alloc_histograms: 314 annotated_source__alloc_histograms(notes->src, nr_hists); 315 } 316 317 return notes->src; 318 } 319 320 static int symbol__inc_addr_samples(struct map_symbol *ms, 321 struct evsel *evsel, u64 addr, 322 struct perf_sample *sample) 323 { 324 struct symbol *sym = ms->sym; 325 struct annotated_source *src; 326 327 if (sym == NULL) 328 return 0; 329 src = symbol__hists(sym, evsel->evlist->core.nr_entries); 330 return src ? 
__symbol__inc_addr_samples(ms, src, evsel, addr, sample) : 0; 331 } 332 333 static int symbol__account_br_cntr(struct annotated_branch *branch, 334 struct evsel *evsel, 335 unsigned offset, 336 u64 br_cntr) 337 { 338 unsigned int br_cntr_nr = evsel__leader(evsel)->br_cntr_nr; 339 unsigned int base = evsel__leader(evsel)->br_cntr_idx; 340 unsigned int off = offset * evsel->evlist->nr_br_cntr; 341 u64 *branch_br_cntr = branch->br_cntr; 342 unsigned int i, mask, width; 343 344 if (!br_cntr || !branch_br_cntr) 345 return 0; 346 347 perf_env__find_br_cntr_info(evsel__env(evsel), NULL, &width); 348 mask = (1L << width) - 1; 349 for (i = 0; i < br_cntr_nr; i++) { 350 u64 cntr = (br_cntr >> i * width) & mask; 351 352 branch_br_cntr[off + i + base] += cntr; 353 if (cntr == mask) 354 branch_br_cntr[off + i + base] |= ANNOTATION__BR_CNTR_SATURATED_FLAG; 355 } 356 357 return 0; 358 } 359 360 static int symbol__account_cycles(u64 addr, u64 start, struct symbol *sym, 361 unsigned cycles, struct evsel *evsel, 362 u64 br_cntr) 363 { 364 struct annotated_branch *branch; 365 unsigned offset; 366 int ret; 367 368 if (sym == NULL) 369 return 0; 370 branch = symbol__find_branch_hist(sym, evsel->evlist->nr_br_cntr); 371 if (!branch) 372 return -ENOMEM; 373 if (addr < sym->start || addr >= sym->end) 374 return -ERANGE; 375 376 if (start) { 377 if (start < sym->start || start >= sym->end) 378 return -ERANGE; 379 if (start >= addr) 380 start = 0; 381 } 382 offset = addr - sym->start; 383 ret = __symbol__account_cycles(branch->cycles_hist, 384 start ? 
start - sym->start : 0, 385 offset, cycles, 386 !!start); 387 388 if (ret) 389 return ret; 390 391 return symbol__account_br_cntr(branch, evsel, offset, br_cntr); 392 } 393 394 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams, 395 struct addr_map_symbol *start, 396 unsigned cycles, 397 struct evsel *evsel, 398 u64 br_cntr) 399 { 400 u64 saddr = 0; 401 int err; 402 403 if (!cycles) 404 return 0; 405 406 /* 407 * Only set start when IPC can be computed. We can only 408 * compute it when the basic block is completely in a single 409 * function. 410 * Special case the case when the jump is elsewhere, but 411 * it starts on the function start. 412 */ 413 if (start && 414 (start->ms.sym == ams->ms.sym || 415 (ams->ms.sym && 416 start->addr == ams->ms.sym->start + map__start(ams->ms.map)))) 417 saddr = start->al_addr; 418 if (saddr == 0) 419 pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n", 420 ams->addr, 421 start ? start->addr : 0, 422 ams->ms.sym ? 
ams->ms.sym->start + map__start(ams->ms.map) : 0, 423 saddr); 424 err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles, evsel, br_cntr); 425 if (err) 426 pr_debug2("account_cycles failed %d\n", err); 427 return err; 428 } 429 430 struct annotation_line *annotated_source__get_line(struct annotated_source *src, 431 s64 offset) 432 { 433 struct annotation_line *al; 434 435 list_for_each_entry(al, &src->source, node) { 436 if (al->offset == offset) 437 return al; 438 } 439 return NULL; 440 } 441 442 static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end) 443 { 444 struct annotation_line *al; 445 unsigned n_insn = 0; 446 447 al = annotated_source__get_line(notes->src, start); 448 if (al == NULL) 449 return 0; 450 451 list_for_each_entry_from(al, ¬es->src->source, node) { 452 if (al->offset == -1) 453 continue; 454 if ((u64)al->offset > end) 455 break; 456 n_insn++; 457 } 458 return n_insn; 459 } 460 461 static void annotated_branch__delete(struct annotated_branch *branch) 462 { 463 if (branch) { 464 zfree(&branch->cycles_hist); 465 free(branch->br_cntr); 466 free(branch); 467 } 468 } 469 470 static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch) 471 { 472 unsigned n_insn; 473 unsigned int cover_insn = 0; 474 475 n_insn = annotation__count_insn(notes, start, end); 476 if (n_insn && ch->num && ch->cycles) { 477 struct annotation_line *al; 478 struct annotated_branch *branch; 479 float ipc = n_insn / ((double)ch->cycles / (double)ch->num); 480 481 /* Hide data when there are too many overlaps. 
*/ 482 if (ch->reset >= 0x7fff) 483 return; 484 485 al = annotated_source__get_line(notes->src, start); 486 if (al == NULL) 487 return; 488 489 list_for_each_entry_from(al, ¬es->src->source, node) { 490 if (al->offset == -1) 491 continue; 492 if ((u64)al->offset > end) 493 break; 494 if (al->cycles && al->cycles->ipc == 0.0) { 495 al->cycles->ipc = ipc; 496 cover_insn++; 497 } 498 } 499 500 branch = annotation__get_branch(notes); 501 if (cover_insn && branch) { 502 branch->hit_cycles += ch->cycles; 503 branch->hit_insn += n_insn * ch->num; 504 branch->cover_insn += cover_insn; 505 } 506 } 507 } 508 509 static int annotation__compute_ipc(struct annotation *notes, size_t size, 510 struct evsel *evsel) 511 { 512 unsigned int br_cntr_nr = evsel->evlist->nr_br_cntr; 513 int err = 0; 514 s64 offset; 515 516 if (!notes->branch || !notes->branch->cycles_hist) 517 return 0; 518 519 notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1); 520 notes->branch->hit_cycles = 0; 521 notes->branch->hit_insn = 0; 522 notes->branch->cover_insn = 0; 523 524 annotation__lock(notes); 525 for (offset = size - 1; offset >= 0; --offset) { 526 struct cyc_hist *ch; 527 528 ch = ¬es->branch->cycles_hist[offset]; 529 if (ch && ch->cycles) { 530 struct annotation_line *al; 531 532 al = annotated_source__get_line(notes->src, offset); 533 if (al && al->cycles == NULL) { 534 al->cycles = zalloc(sizeof(*al->cycles)); 535 if (al->cycles == NULL) { 536 err = ENOMEM; 537 break; 538 } 539 } 540 if (ch->have_start) 541 annotation__count_and_fill(notes, ch->start, offset, ch); 542 if (al && ch->num_aggr) { 543 al->cycles->avg = ch->cycles_aggr / ch->num_aggr; 544 al->cycles->max = ch->cycles_max; 545 al->cycles->min = ch->cycles_min; 546 } 547 if (al && notes->branch->br_cntr) { 548 if (!al->br_cntr) { 549 al->br_cntr = calloc(br_cntr_nr, sizeof(u64)); 550 if (!al->br_cntr) { 551 err = ENOMEM; 552 break; 553 } 554 } 555 al->num_aggr = ch->num_aggr; 556 al->br_cntr_nr = br_cntr_nr; 557 
al->evsel = evsel; 558 memcpy(al->br_cntr, ¬es->branch->br_cntr[offset * br_cntr_nr], 559 br_cntr_nr * sizeof(u64)); 560 } 561 } 562 } 563 564 if (err) { 565 while (++offset < (s64)size) { 566 struct cyc_hist *ch = ¬es->branch->cycles_hist[offset]; 567 568 if (ch && ch->cycles) { 569 struct annotation_line *al; 570 571 al = annotated_source__get_line(notes->src, offset); 572 if (al) { 573 zfree(&al->cycles); 574 zfree(&al->br_cntr); 575 } 576 } 577 } 578 } 579 580 annotation__unlock(notes); 581 return 0; 582 } 583 584 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample, 585 struct evsel *evsel) 586 { 587 return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample); 588 } 589 590 int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample, 591 struct evsel *evsel, u64 ip) 592 { 593 return symbol__inc_addr_samples(&he->ms, evsel, ip, sample); 594 } 595 596 597 void annotation__exit(struct annotation *notes) 598 { 599 annotated_source__delete(notes->src); 600 annotated_branch__delete(notes->branch); 601 } 602 603 static struct sharded_mutex *sharded_mutex; 604 605 static void annotation__init_sharded_mutex(void) 606 { 607 /* As many mutexes as there are CPUs. 
	 */
	sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu);
}

/* Identity hash: the annotation's address picks its mutex shard. */
static size_t annotation__hash(const struct annotation *notes)
{
	return (size_t)notes;
}

/*
 * Return the shard mutex protecting @notes, lazily initializing the
 * shard pool exactly once.  May return NULL if pool creation failed,
 * in which case locking silently becomes a no-op.
 */
static struct mutex *annotation__get_mutex(const struct annotation *notes)
{
	static pthread_once_t once = PTHREAD_ONCE_INIT;

	pthread_once(&once, annotation__init_sharded_mutex);
	if (!sharded_mutex)
		return NULL;

	return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes));
}

void annotation__lock(struct annotation *notes)
	NO_THREAD_SAFETY_ANALYSIS
{
	struct mutex *mutex = annotation__get_mutex(notes);

	if (mutex)
		mutex_lock(mutex);
}

void annotation__unlock(struct annotation *notes)
	NO_THREAD_SAFETY_ANALYSIS
{
	struct mutex *mutex = annotation__get_mutex(notes);

	if (mutex)
		mutex_unlock(mutex);
}

/* Returns false both when the lock is contended and when no mutex exists. */
bool annotation__trylock(struct annotation *notes)
{
	struct mutex *mutex = annotation__get_mutex(notes);

	if (!mutex)
		return false;

	return mutex_trylock(mutex);
}

void annotation_line__add(struct annotation_line *al, struct list_head *head)
{
	list_add_tail(&al->node, head);
}

/* Next line after @pos that maps to an instruction (offset >= 0), or NULL. */
struct annotation_line *
annotation_line__next(struct annotation_line *pos, struct list_head *head)
{
	list_for_each_entry_continue(pos, head, node)
		if (pos->offset >= 0)
			return pos;

	return NULL;
}

/* Pick the address column color from the block's sample coverage. */
static const char *annotate__address_color(struct block_range *br)
{
	double cov = block_range__coverage(br);

	if (cov >= 0) {
		/* mark red for >75% coverage */
		if (cov > 0.75)
			return PERF_COLOR_RED;

		/* mark dull for <1% coverage */
		if (cov < 0.01)
			return PERF_COLOR_NORMAL;
	}

	return PERF_COLOR_MAGENTA;
}

static const char *annotate__asm_color(struct block_range *br)
{
	double cov = block_range__coverage(br);

	if (cov >= 0) {
		/* mark dull for <1% coverage */
		if (cov < 0.01)
			return PERF_COLOR_NORMAL;
	}

	return PERF_COLOR_BLUE;
}

/*
 * Print branch join/leave percentages as a trailing "#"-comment for the
 * instruction at @addr, when it is a block target and/or branch point.
 */
static void annotate__branch_printf(struct block_range *br, u64 addr)
{
	bool emit_comment = true;

	if (!br)
		return;

#if 1
	if (br->is_target && br->start == addr) {
		struct block_range *branch = br;
		double p;

		/*
		 * Find matching branch to our target.
		 */
		/*
		 * NOTE(review): assumes a branch range always follows a target
		 * range (block_range__next() never returning NULL here) and
		 * that branch->coverage is nonzero — confirm against
		 * block-range construction in block-range.c.
		 */
		while (!branch->is_branch)
			branch = block_range__next(branch);

		p = 100 *(double)br->entry / branch->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage joined at this target in relation
			 * to the next branch.
			 */
			printf(" +%.2f%%", p);
		}
	}
#endif
	if (br->is_branch && br->end == addr) {
		double p = 100*(double)br->taken / br->coverage;

		if (p > 0.1) {
			if (emit_comment) {
				emit_comment = false;
				printf("\t#");
			}

			/*
			 * The percentage of coverage leaving at this branch, and
			 * its prediction ratio.
746 */ 747 printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken); 748 } 749 } 750 } 751 752 static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width) 753 { 754 s64 offset = dl->al.offset; 755 const u64 addr = start + offset; 756 struct block_range *br; 757 758 br = block_range__find(addr); 759 color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr); 760 color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line); 761 annotate__branch_printf(br, addr); 762 return 0; 763 } 764 765 static struct annotated_data_type * 766 __hist_entry__get_data_type(struct hist_entry *he, const struct arch *arch, 767 struct debuginfo *dbg, struct disasm_line *dl, 768 int *type_offset); 769 770 static bool needs_type_info(struct annotated_data_type *data_type) 771 { 772 if (data_type == NULL || data_type == NO_TYPE) 773 return false; 774 775 if (verbose) 776 return true; 777 778 return (data_type != &stackop_type) && (data_type != &canary_type); 779 } 780 781 static int 782 annotation_line__print(struct annotation_line *al, struct annotation_print_data *apd, 783 struct annotation_options *opts, int printed, 784 struct annotation_line *queue) 785 { 786 struct symbol *sym = apd->he->ms.sym; 787 struct disasm_line *dl = container_of(al, struct disasm_line, al); 788 struct annotation *notes = symbol__annotation(sym); 789 static const char *prev_line; 790 int max_lines = opts->max_lines; 791 int percent_type = opts->percent_type; 792 793 if (al->offset != -1) { 794 double max_percent = 0.0; 795 int i, nr_percent = 1; 796 const char *color; 797 798 for (i = 0; i < al->data_nr; i++) { 799 double percent; 800 801 percent = annotation_data__percent(&al->data[i], 802 percent_type); 803 804 if (percent > max_percent) 805 max_percent = percent; 806 } 807 808 if (al->data_nr > nr_percent) 809 nr_percent = al->data_nr; 810 811 if (max_percent < opts->min_pcnt) 812 return -1; 813 814 if (max_lines && printed >= max_lines) 
815 return 1; 816 817 if (queue != NULL) { 818 struct annotation_options queue_opts = { 819 .max_lines = 1, 820 .percent_type = percent_type, 821 }; 822 823 list_for_each_entry_from(queue, ¬es->src->source, node) { 824 if (queue == al) 825 break; 826 annotation_line__print(queue, apd, &queue_opts, 827 /*printed=*/0, /*queue=*/NULL); 828 } 829 } 830 831 color = get_percent_color(max_percent); 832 833 for (i = 0; i < nr_percent; i++) { 834 struct annotation_data *data = &al->data[i]; 835 double percent; 836 837 percent = annotation_data__percent(data, percent_type); 838 color = get_percent_color(percent); 839 840 if (symbol_conf.show_total_period) 841 color_fprintf(stdout, color, " %11" PRIu64, 842 data->he.period); 843 else if (symbol_conf.show_nr_samples) 844 color_fprintf(stdout, color, " %7" PRIu64, 845 data->he.nr_samples); 846 else 847 color_fprintf(stdout, color, " %7.2f", percent); 848 } 849 850 printf(" : "); 851 852 disasm_line__print(dl, notes->src->start, apd->addr_fmt_width); 853 854 if (opts->code_with_type && apd->dbg) { 855 struct annotated_data_type *data_type; 856 int offset = 0; 857 858 data_type = __hist_entry__get_data_type(apd->he, apd->arch, 859 apd->dbg, dl, &offset); 860 if (needs_type_info(data_type)) { 861 char buf[4096]; 862 863 printf("\t\t# data-type: %s", 864 data_type->self.type_name); 865 866 if (data_type != &stackop_type && 867 data_type != &canary_type) 868 printf(" +%#x", offset); 869 870 if (annotated_data_type__get_member_name(data_type, 871 buf, 872 sizeof(buf), 873 offset)) 874 printf(" (%s)", buf); 875 } 876 } 877 878 /* 879 * Also color the filename and line if needed, with 880 * the same color than the percentage. 
Don't print it 881 * twice for close colored addr with the same filename:line 882 */ 883 if (al->path) { 884 if (!prev_line || strcmp(prev_line, al->path)) { 885 color_fprintf(stdout, color, " // %s", al->path); 886 prev_line = al->path; 887 } 888 } 889 890 printf("\n"); 891 } else if (max_lines && printed >= max_lines) 892 return 1; 893 else { 894 int width = annotation__pcnt_width(notes); 895 896 if (queue) 897 return -1; 898 899 if (!*al->line) 900 printf(" %*s:\n", width, " "); 901 else 902 printf(" %*s: %-*d %s\n", width, " ", apd->addr_fmt_width, 903 al->line_nr, al->line); 904 } 905 906 return 0; 907 } 908 909 static void calc_percent(struct annotation *notes, 910 struct evsel *evsel, 911 struct annotation_data *data, 912 s64 offset, s64 end) 913 { 914 struct hists *hists = evsel__hists(evsel); 915 struct sym_hist *sym_hist = annotation__histogram(notes, evsel); 916 unsigned int hits = 0; 917 u64 period = 0; 918 919 while (offset < end) { 920 struct sym_hist_entry *entry; 921 922 entry = annotated_source__hist_entry(notes->src, evsel, offset); 923 if (entry) { 924 hits += entry->nr_samples; 925 period += entry->period; 926 } 927 ++offset; 928 } 929 930 if (sym_hist->nr_samples) { 931 data->he.period = period; 932 data->he.nr_samples = hits; 933 data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples; 934 } 935 936 if (hists->stats.nr_non_filtered_samples) 937 data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples; 938 939 if (sym_hist->period) 940 data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period; 941 942 if (hists->stats.total_period) 943 data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period; 944 } 945 946 static void annotation__calc_percent(struct annotation *notes, 947 struct evsel *leader, s64 len) 948 { 949 struct annotation_line *al, *next; 950 struct evsel *evsel; 951 952 list_for_each_entry(al, ¬es->src->source, node) { 953 s64 end; 954 int i = 0; 955 
956 if (al->offset == -1) 957 continue; 958 959 next = annotation_line__next(al, ¬es->src->source); 960 end = next ? next->offset : len; 961 962 for_each_group_evsel(evsel, leader) { 963 struct annotation_data *data; 964 965 BUG_ON(i >= al->data_nr); 966 967 if (symbol_conf.skip_empty && 968 evsel__hists(evsel)->stats.nr_samples == 0) 969 continue; 970 971 data = &al->data[i++]; 972 973 calc_percent(notes, evsel, data, al->offset, end); 974 } 975 } 976 } 977 978 void symbol__calc_percent(struct symbol *sym, struct evsel *evsel) 979 { 980 struct annotation *notes = symbol__annotation(sym); 981 982 annotation__calc_percent(notes, evsel, symbol__size(sym)); 983 } 984 985 int thread__get_arch(struct thread *thread, const struct arch **parch) 986 { 987 const struct arch *arch; 988 struct machine *machine; 989 uint32_t e_flags; 990 uint16_t e_machine; 991 992 if (!thread) { 993 *parch = NULL; 994 return -1; 995 } 996 997 machine = maps__machine(thread__maps(thread)); 998 e_machine = thread__e_machine(thread, machine, &e_flags); 999 arch = arch__find(e_machine, e_flags, machine->env ? 
machine->env->cpuid : NULL); 1000 if (arch == NULL) { 1001 pr_err("%s: unsupported arch %d\n", __func__, e_machine); 1002 return errno; 1003 } 1004 if (parch) 1005 *parch = arch; 1006 1007 return 0; 1008 } 1009 1010 int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, 1011 const struct arch **parch) 1012 { 1013 struct symbol *sym = ms->sym; 1014 struct annotation *notes = symbol__annotation(sym); 1015 struct annotate_args args = { 1016 .options = &annotate_opts, 1017 }; 1018 const struct arch *arch = NULL; 1019 int err, nr; 1020 1021 err = thread__get_arch(ms->thread, &arch); 1022 if (err) 1023 return err; 1024 1025 if (parch) 1026 *parch = arch; 1027 1028 if (notes->src && !list_empty(¬es->src->source)) 1029 return 0; 1030 1031 args.arch = arch; 1032 args.ms = ms; 1033 1034 if (notes->src == NULL) { 1035 notes->src = annotated_source__new(); 1036 if (notes->src == NULL) 1037 return -1; 1038 } 1039 1040 nr = 0; 1041 if (evsel__is_group_event(evsel)) { 1042 struct evsel *pos; 1043 1044 for_each_group_evsel(pos, evsel) { 1045 if (symbol_conf.skip_empty && 1046 evsel__hists(pos)->stats.nr_samples == 0) 1047 continue; 1048 nr++; 1049 } 1050 } 1051 notes->src->nr_events = nr ? 
nr : 1; 1052 1053 if (annotate_opts.full_addr) 1054 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start); 1055 else 1056 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start); 1057 1058 return symbol__disassemble(sym, &args); 1059 } 1060 1061 static void insert_source_line(struct rb_root *root, struct annotation_line *al) 1062 { 1063 struct annotation_line *iter; 1064 struct rb_node **p = &root->rb_node; 1065 struct rb_node *parent = NULL; 1066 unsigned int percent_type = annotate_opts.percent_type; 1067 int i, ret; 1068 1069 while (*p != NULL) { 1070 parent = *p; 1071 iter = rb_entry(parent, struct annotation_line, rb_node); 1072 1073 ret = strcmp(iter->path, al->path); 1074 if (ret == 0) { 1075 for (i = 0; i < al->data_nr; i++) { 1076 iter->data[i].percent_sum += annotation_data__percent(&al->data[i], 1077 percent_type); 1078 } 1079 return; 1080 } 1081 1082 if (ret < 0) 1083 p = &(*p)->rb_left; 1084 else 1085 p = &(*p)->rb_right; 1086 } 1087 1088 for (i = 0; i < al->data_nr; i++) { 1089 al->data[i].percent_sum = annotation_data__percent(&al->data[i], 1090 percent_type); 1091 } 1092 1093 rb_link_node(&al->rb_node, parent, p); 1094 rb_insert_color(&al->rb_node, root); 1095 } 1096 1097 static int cmp_source_line(struct annotation_line *a, struct annotation_line *b) 1098 { 1099 int i; 1100 1101 for (i = 0; i < a->data_nr; i++) { 1102 if (a->data[i].percent_sum == b->data[i].percent_sum) 1103 continue; 1104 return a->data[i].percent_sum > b->data[i].percent_sum; 1105 } 1106 1107 return 0; 1108 } 1109 1110 static void __resort_source_line(struct rb_root *root, struct annotation_line *al) 1111 { 1112 struct annotation_line *iter; 1113 struct rb_node **p = &root->rb_node; 1114 struct rb_node *parent = NULL; 1115 1116 while (*p != NULL) { 1117 parent = *p; 1118 iter = rb_entry(parent, struct annotation_line, rb_node); 1119 1120 if (cmp_source_line(al, iter)) 1121 p = &(*p)->rb_left; 1122 else 1123 p = &(*p)->rb_right; 1124 } 1125 1126 
	rb_link_node(&al->rb_node, parent, p);
	rb_insert_color(&al->rb_node, root);
}

/*
 * Drain every annotation_line out of @src_root and re-insert it into
 * @dest_root via __resort_source_line(), i.e. re-sort the tree on the
 * destination's ordering.  @src_root is left empty.
 */
static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root)
{
	struct annotation_line *al;
	struct rb_node *node;

	node = rb_first(src_root);
	while (node) {
		struct rb_node *next;

		al = rb_entry(node, struct annotation_line, rb_node);
		next = rb_next(node);
		rb_erase(node, src_root);

		__resort_source_line(dest_root, al);
		node = next;
	}
}

/*
 * Print the sorted per-source-line summary for @filename: one row per
 * annotation_line in @root, with one colored percentage column per
 * data entry followed by the source path/line.
 */
static void print_summary(struct rb_root *root, const char *filename)
{
	struct annotation_line *al;
	struct rb_node *node;

	printf("\nSorted summary for file %s\n", filename);
	printf("----------------------------------------------\n\n");

	if (RB_EMPTY_ROOT(root)) {
		printf(" Nothing higher than %1.1f%%\n", MIN_GREEN);
		return;
	}

	node = rb_first(root);
	while (node) {
		double percent, percent_max = 0.0;
		const char *color;
		char *path;
		int i;

		al = rb_entry(node, struct annotation_line, rb_node);
		for (i = 0; i < al->data_nr; i++) {
			percent = al->data[i].percent_sum;
			color = get_percent_color(percent);
			color_fprintf(stdout, color, " %7.2f", percent);

			/* The path line is colored by the hottest column. */
			if (percent > percent_max)
				percent_max = percent;
		}

		path = al->path;
		color = get_percent_color(percent_max);
		color_fprintf(stdout, color, " %s\n", path);

		node = rb_next(node);
	}
}

/*
 * Debug dump (verbose mode): print "address: nr_samples" for every
 * offset of @sym that has samples for @evsel, then the symbol total.
 */
static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel)
{
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel);
	u64 len = symbol__size(sym), offset;

	for (offset = 0; offset < len; ++offset) {
		struct sym_hist_entry *entry;

		entry = annotated_source__hist_entry(notes->src, evsel, offset);
		if (entry && entry->nr_samples != 0)
			printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2,
			       sym->start + offset, entry->nr_samples);
	}
	printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples);
}

/*
 * Width (in characters) needed to print the highest address among the
 * lines: found by formatting the last line that has a valid offset.
 * Returns 0 when no line carries an offset.
 */
static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start)
{
	char bf[32];
	struct annotation_line *line;

	list_for_each_entry_reverse(line, lines, node) {
		if (line->offset != -1)
			return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset);
	}

	return 0;
}

/*
 * Print the full annotation of @he's symbol for @evsel to stdout:
 * a header line, optional verbose hit dump, then every annotation
 * line (honoring --context queueing and min_pcnt/max_lines filters).
 * Returns the number of lines suppressed by max_lines filtering,
 * or -ENOMEM.
 */
int hist_entry__annotate_printf(struct hist_entry *he, struct evsel *evsel)
{
	struct map_symbol *ms = &he->ms;
	struct map *map = ms->map;
	struct symbol *sym = ms->sym;
	struct dso *dso = map__dso(map);
	char *filename;
	const char *d_filename;
	const char *evsel_name = evsel__name(evsel);
	struct annotation *notes = symbol__annotation(sym);
	struct sym_hist *h = annotation__histogram(notes, evsel);
	struct annotation_line *pos, *queue = NULL;
	struct annotation_options *opts = &annotate_opts;
	struct annotation_print_data apd = {
		.he = he,
		.evsel = evsel,
	};
	int printed = 2, queue_len = 0;
	int more = 0;
	bool context = opts->context;
	int width = annotation__pcnt_width(notes);
	int graph_dotted_len;
	char buf[512];

	filename = strdup(dso__long_name(dso));
	if (!filename)
		return -ENOMEM;

	if (opts->full_path)
		d_filename = filename;
	else
		d_filename = perf_basename(filename);

	if (evsel__is_group_event(evsel)) {
		evsel__group_desc(evsel, buf, sizeof(buf));
		evsel_name = buf;
	}

	graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, "
				  "percent: %s)\n",
				  width, width, symbol_conf.show_total_period ? "Period" :
				  symbol_conf.show_nr_samples ? "Samples" : "Percent",
				  d_filename, evsel_name, h->nr_samples,
				  percent_type_str(opts->percent_type));

	printf("%-*.*s----\n",
	       graph_dotted_len, graph_dotted_len, graph_dotted_line);

	if (verbose > 0)
		symbol__annotate_hits(sym, evsel);

	apd.addr_fmt_width = annotated_source__addr_fmt_width(&notes->src->source,
							      notes->src->start);
	thread__get_arch(ms->thread, &apd.arch);
	apd.dbg = dso__debuginfo(dso);

	list_for_each_entry(pos, &notes->src->source, node) {
		int err;

		/* Start buffering a fresh run of context lines. */
		if (context && queue == NULL) {
			queue = pos;
			queue_len = 0;
		}

		err = annotation_line__print(pos, &apd, opts, printed, queue);

		switch (err) {
		case 0:
			++printed;
			if (context) {
				printed += queue_len;
				queue = NULL;
				queue_len = 0;
			}
			break;
		case 1:
			/* filtered by max_lines */
			++more;
			break;
		case -1:
		default:
			/*
			 * Filtered by min_pcnt or non IP lines when
			 * context != 0
			 */
			if (!context)
				break;
			/* Keep at most 'context' queued lines, sliding the window. */
			if (queue_len == context)
				queue = list_entry(queue->node.next, typeof(*queue), node);
			else
				++queue_len;
			break;
		}
	}

	debuginfo__delete(apd.dbg);
	free(filename);

	return more;
}

/* No-op color/print callbacks used when writing annotations to a FILE. */
static void FILE__set_percent_color(void *fp __maybe_unused,
				    double percent __maybe_unused,
				    bool current __maybe_unused)
{
}

static int FILE__set_jumps_percent_color(void *fp __maybe_unused,
					 int nr __maybe_unused, bool current __maybe_unused)
{
	return 0;
}

static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused)
{
	return 0;
}

static void FILE__printf(void *fp, const char *fmt, ...)
1335 { 1336 va_list args; 1337 1338 va_start(args, fmt); 1339 vfprintf(fp, fmt, args); 1340 va_end(args); 1341 } 1342 1343 static void FILE__write_graph(void *fp, int graph) 1344 { 1345 const char *s; 1346 switch (graph) { 1347 1348 case DARROW_CHAR: s = "↓"; break; 1349 case UARROW_CHAR: s = "↑"; break; 1350 case LARROW_CHAR: s = "←"; break; 1351 case RARROW_CHAR: s = "→"; break; 1352 default: s = "?"; break; 1353 } 1354 1355 fputs(s, fp); 1356 } 1357 1358 static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp, 1359 struct annotation_print_data *apd) 1360 { 1361 struct annotation *notes = symbol__annotation(sym); 1362 struct annotation_write_ops wops = { 1363 .first_line = true, 1364 .obj = fp, 1365 .set_color = FILE__set_color, 1366 .set_percent_color = FILE__set_percent_color, 1367 .set_jumps_percent_color = FILE__set_jumps_percent_color, 1368 .printf = FILE__printf, 1369 .write_graph = FILE__write_graph, 1370 }; 1371 struct annotation_line *al; 1372 1373 if (annotate_opts.code_with_type) { 1374 thread__get_arch(apd->he->ms.thread, &apd->arch); 1375 apd->dbg = dso__debuginfo(map__dso(apd->he->ms.map)); 1376 } 1377 1378 list_for_each_entry(al, ¬es->src->source, node) { 1379 if (annotation_line__filter(al)) 1380 continue; 1381 annotation_line__write(al, notes, &wops, apd); 1382 fputc('\n', fp); 1383 wops.first_line = false; 1384 } 1385 1386 if (annotate_opts.code_with_type) 1387 debuginfo__delete(apd->dbg); 1388 1389 return 0; 1390 } 1391 1392 int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel, 1393 struct hist_entry *he) 1394 { 1395 const char *ev_name = evsel__name(evsel); 1396 char buf[1024]; 1397 char *filename; 1398 int err = -1; 1399 FILE *fp; 1400 struct annotation_print_data apd = { 1401 .he = he, 1402 .evsel = evsel, 1403 }; 1404 1405 if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0) 1406 return -1; 1407 1408 fp = fopen(filename, "w"); 1409 if (fp == NULL) 1410 goto out_free_filename; 1411 1412 if 
(evsel__is_group_event(evsel)) { 1413 evsel__group_desc(evsel, buf, sizeof(buf)); 1414 ev_name = buf; 1415 } 1416 1417 fprintf(fp, "%s() %s\nEvent: %s\n\n", 1418 ms->sym->name, dso__long_name(map__dso(ms->map)), ev_name); 1419 symbol__annotate_fprintf2(ms->sym, fp, &apd); 1420 1421 fclose(fp); 1422 err = 0; 1423 out_free_filename: 1424 free(filename); 1425 return err; 1426 } 1427 1428 void symbol__annotate_zero_histogram(struct symbol *sym, struct evsel *evsel) 1429 { 1430 struct annotation *notes = symbol__annotation(sym); 1431 struct sym_hist *h = annotation__histogram(notes, evsel); 1432 1433 memset(h, 0, sizeof(*notes->src->histograms) * notes->src->nr_histograms); 1434 } 1435 1436 void symbol__annotate_decay_histogram(struct symbol *sym, struct evsel *evsel) 1437 { 1438 struct annotation *notes = symbol__annotation(sym); 1439 struct sym_hist *h = annotation__histogram(notes, evsel); 1440 struct annotation_line *al; 1441 1442 h->nr_samples = 0; 1443 list_for_each_entry(al, ¬es->src->source, node) { 1444 struct sym_hist_entry *entry; 1445 1446 if (al->offset == -1) 1447 continue; 1448 1449 entry = annotated_source__hist_entry(notes->src, evsel, al->offset); 1450 if (entry == NULL) 1451 continue; 1452 1453 entry->nr_samples = entry->nr_samples * 7 / 8; 1454 h->nr_samples += entry->nr_samples; 1455 } 1456 } 1457 1458 void annotated_source__purge(struct annotated_source *as) 1459 { 1460 struct annotation_line *al, *n; 1461 1462 list_for_each_entry_safe(al, n, &as->source, node) { 1463 list_del_init(&al->node); 1464 disasm_line__free(disasm_line(al)); 1465 } 1466 as->tried_source = false; 1467 } 1468 1469 static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp) 1470 { 1471 size_t printed; 1472 1473 if (dl->al.offset == -1) 1474 return fprintf(fp, "%s\n", dl->al.line); 1475 1476 printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name); 1477 1478 if (dl->ops.raw[0] != '\0') { 1479 printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ", 1480 
dl->ops.raw); 1481 } 1482 1483 return printed + fprintf(fp, "\n"); 1484 } 1485 1486 size_t disasm__fprintf(struct list_head *head, FILE *fp) 1487 { 1488 struct disasm_line *pos; 1489 size_t printed = 0; 1490 1491 list_for_each_entry(pos, head, al.node) 1492 printed += disasm_line__fprintf(pos, fp); 1493 1494 return printed; 1495 } 1496 1497 bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym) 1498 { 1499 if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) || 1500 !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 || 1501 dl->ops.target.offset >= (s64)symbol__size(sym)) 1502 return false; 1503 1504 return true; 1505 } 1506 1507 static void 1508 annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym) 1509 { 1510 struct annotation_line *al; 1511 1512 /* PLT symbols contain external offsets */ 1513 if (strstr(sym->name, "@plt")) 1514 return; 1515 1516 list_for_each_entry(al, ¬es->src->source, node) { 1517 struct disasm_line *dl; 1518 struct annotation_line *target; 1519 1520 dl = disasm_line(al); 1521 1522 if (!disasm_line__is_valid_local_jump(dl, sym)) 1523 continue; 1524 1525 target = annotated_source__get_line(notes->src, 1526 dl->ops.target.offset); 1527 /* 1528 * FIXME: Oops, no jump target? Buggy disassembler? Or do we 1529 * have to adjust to the previous offset? 
1530 */ 1531 if (target == NULL) 1532 continue; 1533 1534 if (++target->jump_sources > notes->src->max_jump_sources) 1535 notes->src->max_jump_sources = target->jump_sources; 1536 } 1537 } 1538 1539 static void annotation__set_index(struct annotation *notes) 1540 { 1541 struct annotation_line *al; 1542 struct annotated_source *src = notes->src; 1543 1544 src->widths.max_line_len = 0; 1545 src->nr_entries = 0; 1546 src->nr_asm_entries = 0; 1547 1548 list_for_each_entry(al, &src->source, node) { 1549 size_t line_len = strlen(al->line); 1550 1551 if (src->widths.max_line_len < line_len) 1552 src->widths.max_line_len = line_len; 1553 al->idx = src->nr_entries++; 1554 if (al->offset != -1) 1555 al->idx_asm = src->nr_asm_entries++; 1556 else 1557 al->idx_asm = -1; 1558 } 1559 } 1560 1561 static inline int width_jumps(int n) 1562 { 1563 if (n >= 100) 1564 return 5; 1565 if (n / 10) 1566 return 2; 1567 return 1; 1568 } 1569 1570 static int annotation__max_ins_name(struct annotation *notes) 1571 { 1572 int max_name = 0, len; 1573 struct annotation_line *al; 1574 1575 list_for_each_entry(al, ¬es->src->source, node) { 1576 if (al->offset == -1) 1577 continue; 1578 1579 len = strlen(disasm_line(al)->ins.name); 1580 if (max_name < len) 1581 max_name = len; 1582 } 1583 1584 return max_name; 1585 } 1586 1587 static void 1588 annotation__init_column_widths(struct annotation *notes, struct symbol *sym) 1589 { 1590 notes->src->widths.addr = notes->src->widths.target = 1591 notes->src->widths.min_addr = hex_width(symbol__size(sym)); 1592 notes->src->widths.max_addr = hex_width(sym->end); 1593 notes->src->widths.jumps = width_jumps(notes->src->max_jump_sources); 1594 notes->src->widths.max_ins_name = annotation__max_ins_name(notes); 1595 } 1596 1597 void annotation__update_column_widths(struct annotation *notes) 1598 { 1599 if (annotate_opts.use_offset) 1600 notes->src->widths.target = notes->src->widths.min_addr; 1601 else if (annotate_opts.full_addr) 1602 notes->src->widths.target = 
BITS_PER_LONG / 4; 1603 else 1604 notes->src->widths.target = notes->src->widths.max_addr; 1605 1606 notes->src->widths.addr = notes->src->widths.target; 1607 1608 if (annotate_opts.show_nr_jumps) 1609 notes->src->widths.addr += notes->src->widths.jumps + 1; 1610 } 1611 1612 void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms) 1613 { 1614 annotate_opts.full_addr = !annotate_opts.full_addr; 1615 1616 if (annotate_opts.full_addr) 1617 notes->src->start = map__objdump_2mem(ms->map, ms->sym->start); 1618 else 1619 notes->src->start = map__rip_2objdump(ms->map, ms->sym->start); 1620 1621 annotation__update_column_widths(notes); 1622 } 1623 1624 static void annotation__calc_lines(struct annotation *notes, struct map_symbol *ms, 1625 struct rb_root *root) 1626 { 1627 struct annotation_line *al; 1628 struct rb_root tmp_root = RB_ROOT; 1629 1630 list_for_each_entry(al, ¬es->src->source, node) { 1631 double percent_max = 0.0; 1632 u64 addr; 1633 int i; 1634 1635 for (i = 0; i < al->data_nr; i++) { 1636 double percent; 1637 1638 percent = annotation_data__percent(&al->data[i], 1639 annotate_opts.percent_type); 1640 1641 if (percent > percent_max) 1642 percent_max = percent; 1643 } 1644 1645 if (percent_max <= 0.5) 1646 continue; 1647 1648 addr = map__rip_2objdump(ms->map, ms->sym->start); 1649 al->path = get_srcline(map__dso(ms->map), addr + al->offset, NULL, 1650 false, true, ms->sym->start + al->offset); 1651 insert_source_line(&tmp_root, al); 1652 } 1653 1654 resort_source_line(root, &tmp_root); 1655 } 1656 1657 static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root) 1658 { 1659 struct annotation *notes = symbol__annotation(ms->sym); 1660 1661 annotation__calc_lines(notes, ms, root); 1662 } 1663 1664 int hist_entry__tty_annotate2(struct hist_entry *he, struct evsel *evsel) 1665 { 1666 struct map_symbol *ms = &he->ms; 1667 struct dso *dso = map__dso(ms->map); 1668 struct symbol *sym = ms->sym; 1669 struct rb_root source_line 
= RB_ROOT; 1670 struct hists *hists = evsel__hists(evsel); 1671 struct annotation_print_data apd = { 1672 .he = he, 1673 .evsel = evsel, 1674 }; 1675 char buf[1024]; 1676 int err; 1677 1678 err = symbol__annotate2(ms, evsel, NULL); 1679 if (err) { 1680 char msg[BUFSIZ]; 1681 1682 dso__set_annotate_warned(dso); 1683 symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); 1684 ui__error("Couldn't annotate %s:\n%s", sym->name, msg); 1685 return -1; 1686 } 1687 1688 if (annotate_opts.print_lines) { 1689 srcline_full_filename = annotate_opts.full_path; 1690 symbol__calc_lines(ms, &source_line); 1691 print_summary(&source_line, dso__long_name(dso)); 1692 } 1693 1694 hists__scnprintf_title(hists, buf, sizeof(buf)); 1695 fprintf(stdout, "%s, [percent: %s]\n%s() %s\n", 1696 buf, percent_type_str(annotate_opts.percent_type), sym->name, dso__long_name(dso)); 1697 symbol__annotate_fprintf2(sym, stdout, &apd); 1698 1699 annotated_source__purge(symbol__annotation(sym)->src); 1700 1701 return 0; 1702 } 1703 1704 int hist_entry__tty_annotate(struct hist_entry *he, struct evsel *evsel) 1705 { 1706 struct map_symbol *ms = &he->ms; 1707 struct dso *dso = map__dso(ms->map); 1708 struct symbol *sym = ms->sym; 1709 struct rb_root source_line = RB_ROOT; 1710 int err; 1711 1712 err = symbol__annotate(ms, evsel, NULL); 1713 if (err) { 1714 char msg[BUFSIZ]; 1715 1716 dso__set_annotate_warned(dso); 1717 symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); 1718 ui__error("Couldn't annotate %s:\n%s", sym->name, msg); 1719 return -1; 1720 } 1721 1722 symbol__calc_percent(sym, evsel); 1723 1724 if (annotate_opts.print_lines) { 1725 srcline_full_filename = annotate_opts.full_path; 1726 symbol__calc_lines(ms, &source_line); 1727 print_summary(&source_line, dso__long_name(dso)); 1728 } 1729 1730 hist_entry__annotate_printf(he, evsel); 1731 1732 annotated_source__purge(symbol__annotation(sym)->src); 1733 1734 return 0; 1735 } 1736 1737 bool ui__has_annotation(void) 1738 { 1739 return 
	use_browser == 1 && perf_hpp_list.sym;
}


/* Hottest percentage among the line's data columns for @percent_type. */
static double annotation_line__max_percent(struct annotation_line *al,
					   unsigned int percent_type)
{
	double percent_max = 0.0;
	int i;

	for (i = 0; i < al->data_nr; i++) {
		double percent;

		percent = annotation_data__percent(&al->data[i],
						   percent_type);

		if (percent > percent_max)
			percent_max = percent;
	}

	return percent_max;
}

/*
 * Emit the 2-character control-flow glyph column (jump/call/return
 * arrow or padding) and then the disassembled line itself into @bf.
 * Returns the number of characters produced including the glyph pair.
 */
static int disasm_line__write(struct disasm_line *dl, struct annotation *notes,
			      void *obj, char *bf, size_t size,
			      void (*obj__printf)(void *obj, const char *fmt, ...),
			      void (*obj__write_graph)(void *obj, int graph))
{
	if (dl->ins.ops && dl->ins.ops->scnprintf) {
		if (ins__is_jump(&dl->ins)) {
			bool fwd;

			/* Jumps leaving the function render like calls. */
			if (dl->ops.target.outside)
				goto call_like;
			fwd = dl->ops.target.offset > dl->al.offset;
			obj__write_graph(obj, fwd ? DARROW_CHAR : UARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_call(&dl->ins)) {
call_like:
			obj__write_graph(obj, RARROW_CHAR);
			obj__printf(obj, " ");
		} else if (ins__is_ret(&dl->ins)) {
			obj__write_graph(obj, LARROW_CHAR);
			obj__printf(obj, " ");
		} else {
			obj__printf(obj, "  ");
		}
	} else {
		obj__printf(obj, "  ");
	}

	return disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset,
				      notes->src->widths.max_ins_name) + 2;
}

/* Format the symbol-wide "(Average IPC ..., IPC Coverage ...)" banner. */
static void ipc_coverage_string(char *bf, int size, struct annotation *notes)
{
	double ipc = 0.0, coverage = 0.0;
	struct annotated_branch *branch = annotation__get_branch(notes);

	if (branch && branch->hit_cycles)
		ipc = branch->hit_insn / ((double)branch->hit_cycles);

	if (branch && branch->total_insn) {
		coverage = branch->cover_insn * 100.0 /
			((double)branch->total_insn);
	}

	scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)",
		  ipc, coverage);
}

/*
 * Build the branch-counter abbreviation legend into *@str (caller
 * frees).  With @header, each line is prefixed with '#'.  Returns 0,
 * -ENOTSUP when the evlist has no branch counters, or -ENOMEM.
 */
int annotation_br_cntr_abbr_list(char **str, struct evsel *evsel, bool header)
{
	struct evsel *pos;
	struct strbuf sb;

	if (evsel->evlist->nr_br_cntr <= 0)
		return -ENOTSUP;

	strbuf_init(&sb, /*hint=*/ 0);

	if (header && strbuf_addf(&sb, "# Branch counter abbr list:\n"))
		goto err;

	evlist__for_each_entry(evsel->evlist, pos) {
		if (!(pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
			continue;
		if (header && strbuf_addf(&sb, "#"))
			goto err;

		if (strbuf_addf(&sb, "  %s = %s\n", pos->name, pos->abbr_name))
			goto err;
	}

	if (header && strbuf_addf(&sb, "#"))
		goto err;
	if (strbuf_addf(&sb, "  '-' No event occurs\n"))
		goto err;

	if (header && strbuf_addf(&sb, "#"))
		goto err;
	if (strbuf_addf(&sb, "  '+' Event occurrences may be lost due to branch counter saturated\n"))
		goto err;

	*str = strbuf_detach(&sb, NULL);

	return 0;
err:
	strbuf_release(&sb);
	return -ENOMEM;
}

/* Assume the branch counter saturated at 3 */
#define ANNOTATION_BR_CNTR_SATURATION		3

/*
 * Render one line's branch-counter cell into *@str (caller frees).
 * Default mode draws a small histogram of abbr names per counter;
 * verbose mode prints "abbr=count" pairs instead.  '-' means no
 * event, '+' marks saturation.  Returns 0 or -ENOMEM.
 */
int annotation_br_cntr_entry(char **str, int br_cntr_nr,
			     u64 *br_cntr, int num_aggr,
			     struct evsel *evsel)
{
	struct evsel *pos = evsel ? evlist__first(evsel->evlist) : NULL;
	bool saturated = false;
	int i, j, avg, used;
	struct strbuf sb;

	strbuf_init(&sb, /*hint=*/ 0);
	for (i = 0; i < br_cntr_nr; i++) {
		used = 0;
		avg = ceil((double)(br_cntr[i] & ~ANNOTATION__BR_CNTR_SATURATED_FLAG) /
			   (double)num_aggr);

		/*
		 * A histogram with the abbr name is displayed by default.
		 * With -v, the exact number of branch counter is displayed.
		 */
		if (verbose) {
			/* Find the evsel whose counter index matches slot i. */
			evlist__for_each_entry_from(evsel->evlist, pos) {
				if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
				    (pos->br_cntr_idx == i))
					break;
			}
			if (strbuf_addstr(&sb, pos->abbr_name))
				goto err;

			if (!br_cntr[i]) {
				if (strbuf_addstr(&sb, "=-"))
					goto err;
			} else {
				if (strbuf_addf(&sb, "=%d", avg))
					goto err;
			}
			if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG) {
				if (strbuf_addch(&sb, '+'))
					goto err;
			} else {
				if (strbuf_addch(&sb, ' '))
					goto err;
			}

			if ((i < br_cntr_nr - 1) && strbuf_addch(&sb, ','))
				goto err;
			continue;
		}

		if (strbuf_addch(&sb, '|'))
			goto err;

		if (!br_cntr[i]) {
			if (strbuf_addch(&sb, '-'))
				goto err;
			used++;
		} else {
			evlist__for_each_entry_from(evsel->evlist, pos) {
				if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
				    (pos->br_cntr_idx == i))
					break;
			}
			if (br_cntr[i] & ANNOTATION__BR_CNTR_SATURATED_FLAG)
				saturated = true;

			for (j = 0; j < avg; j++, used++) {
				/* Print + if the number of logged events > 3 */
				if (j >= ANNOTATION_BR_CNTR_SATURATION) {
					saturated = true;
					break;
				}
				if (strbuf_addstr(&sb, pos->abbr_name))
					goto err;
			}

			if (saturated) {
				if (strbuf_addch(&sb, '+'))
					goto err;
				used++;
			}
			pos = list_next_entry(pos, core.node);
		}

		/* Pad the cell to a fixed width. */
		for (j = used; j < ANNOTATION_BR_CNTR_SATURATION + 1; j++) {
			if (strbuf_addch(&sb, ' '))
				goto err;
		}
	}

	if (!verbose && strbuf_addch(&sb, br_cntr_nr ? '|' : ' '))
		goto err;

	*str = strbuf_detach(&sb, NULL);

	return 0;
err:
	strbuf_release(&sb);
	return -ENOMEM;
}

/* Per-offset cache entry for resolved data types (see apd->type_hash). */
struct type_hash_entry {
	struct annotated_data_type *type;
	int offset;
};

/*
 * Append a "# data-type: ..." trailer for @dl into @buf, resolving (and
 * caching) the accessed data type via debuginfo.  Returns the number of
 * characters written (1 for the single-space placeholder when type info
 * is disabled, unavailable or not needed).
 */
static int disasm_line__snprint_type_info(struct disasm_line *dl,
					  char *buf, int len,
					  struct annotation_print_data *apd)
{
	struct annotated_data_type *data_type = NULL;
	struct type_hash_entry *entry = NULL;
	char member[256];
	int offset = 0;
	int printed;

	scnprintf(buf, len, " ");

	if (!annotate_opts.code_with_type || apd->dbg == NULL)
		return 1;

	if (apd->type_hash) {
		hashmap__find(apd->type_hash, dl->al.offset, &entry);
		if (entry != NULL) {
			data_type = entry->type;
			offset = entry->offset;
		}
	}

	if (data_type == NULL)
		data_type = __hist_entry__get_data_type(apd->he, apd->arch, apd->dbg, dl, &offset);

	/* Cache the lookup result, even a failed one (NULL type). */
	if (apd->type_hash && entry == NULL) {
		entry = malloc(sizeof(*entry));
		if (entry != NULL) {
			entry->type = data_type;
			entry->offset = offset;
			hashmap__add(apd->type_hash, dl->al.offset, entry);
		}
	}

	if (!needs_type_info(data_type))
		return 1;

	printed = scnprintf(buf, len, "\t\t# data-type: %s", data_type->self.type_name);

	if (data_type != &stackop_type && data_type != &canary_type && len > printed)
		printed += scnprintf(buf + printed, len - printed, " +%#x", offset);

	if (annotated_data_type__get_member_name(data_type, member, sizeof(member), offset) &&
	    len > printed) {
		printed += scnprintf(buf + printed, len - printed, " (%s)", member);
	}
	return printed;
}

/*
 * Write one annotation line through the @wops callbacks: percentage
 * columns, optional branch/cycles columns, then the address/source and
 * disassembly, tracking the remaining width as each column is emitted.
 */
void annotation_line__write(struct annotation_line *al, struct annotation *notes,
			    const struct annotation_write_ops *wops,
			    struct annotation_print_data *apd)
{
	bool current_entry = wops->current_entry;
	bool change_color = wops->change_color;
	double percent_max = annotation_line__max_percent(al, annotate_opts.percent_type);
	int width = wops->width;
	int pcnt_width = annotation__pcnt_width(notes);
	int cycles_width = annotation__cycles_width(notes);
	bool show_title = false;
	char bf[256];
	int printed;
	void *obj = wops->obj;
	int (*obj__set_color)(void *obj, int color) = wops->set_color;
	void (*obj__set_percent_color)(void *obj, double percent, bool current) = wops->set_percent_color;
	int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current) = wops->set_jumps_percent_color;
	void (*obj__printf)(void *obj, const char *fmt, ...) = wops->printf;
	void (*obj__write_graph)(void *obj, int graph) = wops->write_graph;

	/* Column titles are shown on the first line without data. */
	if (wops->first_line && (al->offset == -1 || percent_max == 0.0)) {
		if (notes->branch && al->cycles) {
			if (al->cycles->ipc == 0.0 && al->cycles->avg == 0)
				show_title = true;
		} else
			show_title = true;
	}

	if (al->offset != -1 && percent_max != 0.0) {
		int i;

		for (i = 0; i < al->data_nr; i++) {
			double percent;

			percent = annotation_data__percent(&al->data[i],
							   annotate_opts.percent_type);

			obj__set_percent_color(obj, percent, current_entry);
			if (symbol_conf.show_total_period) {
				obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
			} else if (symbol_conf.show_nr_samples) {
				obj__printf(obj, "%7" PRIu64 " ",
					    al->data[i].he.nr_samples);
			} else {
				obj__printf(obj, "%7.2f ", percent);
			}
		}
	} else {
		obj__set_percent_color(obj, 0, current_entry);

		if (!show_title)
			obj__printf(obj, "%-*s", pcnt_width, " ");
		else {
			obj__printf(obj, "%-*s", pcnt_width,
				    symbol_conf.show_total_period ? "Period" :
				    symbol_conf.show_nr_samples ? "Samples" : "Percent");
		}
	}
	width -= pcnt_width;

	if (notes->branch) {
		/* IPC column. */
		if (al->cycles && al->cycles->ipc)
			obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc);
		else if (!show_title)
			obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " ");
		else
			obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC");

		/* Cycles column, either "avg" or "avg(min/max)". */
		if (!annotate_opts.show_minmax_cycle) {
			if (al->cycles && al->cycles->avg)
				obj__printf(obj, "%*" PRIu64 " ",
					   ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg);
			else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__CYCLES_WIDTH, " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__CYCLES_WIDTH - 1,
					    "Cycle");
		} else {
			if (al->cycles) {
				char str[32];

				scnprintf(str, sizeof(str),
					  "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")",
					  al->cycles->avg, al->cycles->min,
					  al->cycles->max);

				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    str);
			} else if (!show_title)
				obj__printf(obj, "%*s",
					    ANNOTATION__MINMAX_CYCLES_WIDTH,
					    " ");
			else
				obj__printf(obj, "%*s ",
					    ANNOTATION__MINMAX_CYCLES_WIDTH - 1,
					    "Cycle(min/max)");
		}

		if (annotate_opts.show_br_cntr) {
			if (show_title) {
				obj__printf(obj, "%*s ",
					    ANNOTATION__BR_CNTR_WIDTH,
					    "Branch Counter");
			} else {
				char *buf;

				if (!annotation_br_cntr_entry(&buf, al->br_cntr_nr, al->br_cntr,
							      al->num_aggr, al->evsel)) {
					obj__printf(obj, "%*s ", ANNOTATION__BR_CNTR_WIDTH, buf);
					free(buf);
				}
			}
		}

		if (show_title && !*al->line) {
			ipc_coverage_string(bf, sizeof(bf), notes);
			obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf);
		}
	}
	width -= cycles_width;

	obj__printf(obj, " ");
	width -= 1;

	if (!*al->line)
		obj__printf(obj, "%-*s", width, " ");
	else if (al->offset == -1) {
		/* Source line: optional line number, then the text. */
		if (al->line_nr && annotate_opts.show_linenr)
			printed = scnprintf(bf, sizeof(bf), "%-*d ",
					    notes->src->widths.addr + 1, al->line_nr);
		else
			printed = scnprintf(bf, sizeof(bf), "%-*s ",
					    notes->src->widths.addr, " ");
		obj__printf(obj, bf);
		width -= printed;
		obj__printf(obj, "%-*s", width, al->line);
	} else {
		/* Disassembly line: address/offset, glyph, insn, type info. */
		u64 addr = al->offset;
		int color = -1;

		if (!annotate_opts.use_offset)
			addr += notes->src->start;

		if (!annotate_opts.use_offset) {
			printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr);
		} else {
			if (al->jump_sources &&
			    annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) {
				if (annotate_opts.show_nr_jumps) {
					int prev;
					printed = scnprintf(bf, sizeof(bf), "%*d ",
							    notes->src->widths.jumps,
							    al->jump_sources);
					prev = obj__set_jumps_percent_color(obj, al->jump_sources,
									    current_entry);
					obj__printf(obj, bf);
					obj__set_color(obj, prev);
				}
print_addr:
				printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ",
						    notes->src->widths.target, addr);
			} else if (ins__is_call(&disasm_line(al)->ins) &&
				   annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) {
				goto print_addr;
			} else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) {
				goto print_addr;
			} else {
				printed = scnprintf(bf, sizeof(bf), "%-*s ",
						    notes->src->widths.addr, " ");
			}
		}

		if (change_color)
			color = obj__set_color(obj, HE_COLORSET_ADDR);
		obj__printf(obj, bf);
		if (change_color)
			obj__set_color(obj, color);

		width -= printed;

		printed = disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf),
					     obj__printf, obj__write_graph);

		obj__printf(obj, "%s", bf);
		width -= printed;

		disasm_line__snprint_type_info(disasm_line(al), bf, sizeof(bf), apd);
		obj__printf(obj, "%-*s", width, bf);
	}

}

int
symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
		  const struct arch **parch)
{
	struct symbol *sym = ms->sym;
	struct annotation *notes = symbol__annotation(sym);
	size_t size = symbol__size(sym);
	int err;

	/* Disassemble, compute percentages, then size the display columns. */
	err = symbol__annotate(ms, evsel, parch);
	if (err)
		return err;

	symbol__calc_percent(sym, evsel);

	annotation__set_index(notes);
	annotation__mark_jump_targets(notes, sym);

	err = annotation__compute_ipc(notes, size, evsel);
	if (err)
		return err;

	annotation__init_column_widths(notes, sym);
	annotation__update_column_widths(notes);
	sym->annotate2 = 1;

	return 0;
}

/* Names matching enum perf_disassembler, for config parsing/printing. */
const char * const perf_disassembler__strs[] = {
	[PERF_DISASM_UNKNOWN]  = "unknown",
	[PERF_DISASM_LLVM]     = "llvm",
	[PERF_DISASM_CAPSTONE] = "capstone",
	[PERF_DISASM_OBJDUMP]  = "objdump",
};


/* Append @dis to the ordered disassembler list, ignoring duplicates. */
static void annotation_options__add_disassembler(struct annotation_options *options,
						 enum perf_disassembler dis)
{
	for (u8 i = 0; i < ARRAY_SIZE(options->disassemblers); i++) {
		if (options->disassemblers[i] == dis) {
			/* Disassembler is already present then don't add again. */
			return;
		}
		if (options->disassemblers[i] == PERF_DISASM_UNKNOWN) {
			/* Found a free slot. */
			options->disassemblers[i] = dis;
			return;
		}
	}
	pr_err("Failed to add disassembler %d\n", dis);
}

/* Parse a comma-separated disassembler list (e.g. "llvm,objdump"). */
static int annotation_options__add_disassemblers_str(struct annotation_options *options,
						     const char *str)
{
	while (str && *str != '\0') {
		const char *comma = strchr(str, ',');
		int len = comma ? comma - str : (int)strlen(str);
		bool match = false;

		for (u8 i = 0; i < ARRAY_SIZE(perf_disassembler__strs); i++) {
			const char *dis_str = perf_disassembler__strs[i];

			if (len == (int)strlen(dis_str) && !strncmp(str, dis_str, len)) {
				annotation_options__add_disassembler(options, i);
				match = true;
				break;
			}
		}
		if (!match) {
			pr_err("Invalid disassembler '%.*s'\n", len, str);
			return -1;
		}
		str = comma ? comma + 1 : NULL;
	}
	return 0;
}

/* perf_config() callback handling the "annotate.*" variables. */
static int annotation__config(const char *var, const char *value, void *data)
{
	struct annotation_options *opt = data;

	if (!strstarts(var, "annotate."))
		return 0;

	if (!strcmp(var, "annotate.offset_level")) {
		perf_config_u8(&opt->offset_level, "offset_level", value);

		/* Clamp to the supported range. */
		if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
		else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
			opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
	} else if (!strcmp(var, "annotate.disassemblers")) {
		int err = annotation_options__add_disassemblers_str(opt, value);

		if (err)
			return err;
	} else if (!strcmp(var, "annotate.hide_src_code")) {
		opt->hide_src_code = perf_config_bool("hide_src_code", value);
	} else if (!strcmp(var, "annotate.jump_arrows")) {
		opt->jump_arrows = perf_config_bool("jump_arrows", value);
	} else if (!strcmp(var, "annotate.show_linenr")) {
		opt->show_linenr = perf_config_bool("show_linenr", value);
	} else if (!strcmp(var, "annotate.show_nr_jumps")) {
		opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
	} else if (!strcmp(var, "annotate.show_nr_samples")) {
		symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
								value);
	} else if (!strcmp(var, "annotate.show_total_period")) {
		symbol_conf.show_total_period = perf_config_bool("show_total_period",
								value);
	} else if (!strcmp(var, "annotate.use_offset")) {
		opt->use_offset = perf_config_bool("use_offset", value);
	} else if (!strcmp(var, "annotate.disassembler_style")) {
		opt->disassembler_style = strdup(value);
		if (!opt->disassembler_style) {
			pr_err("Not enough memory for annotate.disassembler_style\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.objdump")) {
		opt->objdump_path = strdup(value);
		if (!opt->objdump_path) {
			pr_err("Not enough memory for annotate.objdump\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.addr2line")) {
		symbol_conf.addr2line_path = strdup(value);
		if (!symbol_conf.addr2line_path) {
			pr_err("Not enough memory for annotate.addr2line\n");
			return -1;
		}
	} else if (!strcmp(var, "annotate.demangle")) {
		symbol_conf.demangle = perf_config_bool("demangle", value);
	} else if (!strcmp(var, "annotate.demangle_kernel")) {
		symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
	} else {
		pr_debug("%s variable unknown, ignoring...", var);
	}

	return 0;
}

/* Reset the global annotation options to their built-in defaults. */
void annotation_options__init(void)
{
	struct annotation_options *opt = &annotate_opts;

	memset(opt, 0, sizeof(*opt));

	/* Default values. */
	opt->use_offset = true;
	opt->jump_arrows = true;
	opt->annotate_src = true;
	opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS;
	opt->percent_type = PERCENT_PERIOD_LOCAL;
	opt->hide_src_code = true;
	opt->hide_src_code_on_title = true;
}

void annotation_options__exit(void)
{
	zfree(&annotate_opts.disassembler_style);
	zfree(&annotate_opts.objdump_path);
}

/* Fill the disassembler list with compiled-in defaults, best first. */
static void annotation_options__default_init_disassemblers(struct annotation_options *options)
{
	if (options->disassemblers[0] != PERF_DISASM_UNKNOWN) {
		/* Already initialized. */
		return;
	}
#ifdef HAVE_LIBLLVM_SUPPORT
	annotation_options__add_disassembler(options, PERF_DISASM_LLVM);
#endif
#ifdef HAVE_LIBCAPSTONE_SUPPORT
	annotation_options__add_disassembler(options, PERF_DISASM_CAPSTONE);
#endif
	annotation_options__add_disassembler(options, PERF_DISASM_OBJDUMP);
}

void annotation_config__init(void)
{
	perf_config(annotation__config, &annotate_opts);
	annotation_options__default_init_disassemblers(&annotate_opts);
}

/* Map a ("period"|"hits", "local"|"global") pair to a percent type. */
static unsigned int parse_percent_type(char *str1, char *str2)
{
	unsigned int type = (unsigned int) -1;

	if (!strcmp("period", str1)) {
		if (!strcmp("local", str2))
			type = PERCENT_PERIOD_LOCAL;
		else if (!strcmp("global", str2))
			type = PERCENT_PERIOD_GLOBAL;
	}

	if (!strcmp("hits", str1)) {
		if (!strcmp("local", str2))
			type = PERCENT_HITS_LOCAL;
		else if (!strcmp("global", str2))
			type = PERCENT_HITS_GLOBAL;
	}

	return type;
}

/*
 * Option callback for --percent-type: accepts "kind-scope" in either
 * order (e.g. "local-period" or "period-local").
 */
int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str,
				int unset __maybe_unused)
{
	unsigned int type;
	char *str1, *str2;
	int err = -1;

	str1 = strdup(_str);
	if (!str1)
		return -ENOMEM;

	str2 = strchr(str1, '-');
	if (!str2)
		goto out;

	*str2++ = 0;

	type = parse_percent_type(str1, str2);
	if (type == (unsigned int) -1)
		type = parse_percent_type(str2, str1);
	if (type != (unsigned int) -1) {
		annotate_opts.percent_type = type;
		err = 0;
	}

out:
	free(str1);
	return err;
}

int annotate_check_args(void)
{
	struct annotation_options *args = &annotate_opts;

	if (args->prefix_strip && !args->prefix) {
		pr_err("--prefix-strip requires --prefix\n");
		return -1;
	}
	return 0;
}

/*
 * Resolve the DWARF register number of the first register named after
 * the arch's register sigil in @str (e.g. "%rax" on x86), or -1.
 */
static int arch__dwarf_regnum(const struct arch *arch, const char *str)
{
	const char *p;
	char *regname, *q;
	int reg;

	p = strchr(str, arch->objdump.register_char);
	if (p == NULL)
		return -1;

	regname = strdup(p);
	if (regname == NULL)
		return -1;

	/* Cut the register name at the first delimiter. */
	q = strpbrk(regname, ",) ");
	if (q)
		*q = '\0';

	reg = get_dwarf_regnum(regname, arch->id.e_machine, arch->id.e_flags);
	free(regname);
	return reg;
}

/*
 * Get register number and access offset from the given instruction.
 * It assumes AT&T x86 asm format like OFFSET(REG).  Maybe it needs
 * to revisit the format when it handles different architecture.
 * Fills @reg and @offset when return 0.
 */
static int extract_reg_offset(const struct arch *arch, const char *str,
			      struct annotated_op_loc *op_loc)
{
	char *p;

	if (arch->objdump.register_char == 0)
		return -1;

	/*
	 * It should start from offset, but it's possible to skip 0
	 * in the asm.  So 0(%rax) should be same as (%rax).
	 *
	 * However, it also start with a segment select register like
	 * %gs:0x18(%rbx).  In that case it should skip the part.
	 */
	if (*str == arch->objdump.register_char) {
		if (arch__is_x86(arch)) {
			/* FIXME: Handle other segment registers */
			if (!strncmp(str, "%gs:", 4))
				op_loc->segment = INSN_SEG_X86_GS;
		}

		while (*str && !isdigit(*str) &&
		       *str != arch->objdump.memory_ref_char)
			str++;
	}

	op_loc->offset = strtol(str, &p, 0);
	op_loc->reg1 = arch__dwarf_regnum(arch, p);
	if (op_loc->reg1 == -1)
		return -1;

	/* Get the second register */
	if (op_loc->multi_regs)
		op_loc->reg2 = arch__dwarf_regnum(arch, p + 1);

	return 0;
}

/**
 * annotate_get_insn_location - Get location of instruction
 * @arch: the architecture info
 * @dl: the target instruction
 * @loc: a buffer to save the data
 *
 * Get detailed location info (register and offset) in the instruction.
 * It needs both source and target operand and whether it accesses a
 * memory location.  The offset field is meaningful only when the
 * corresponding mem flag is set.  The reg2 field is meaningful only
 * when multi_regs flag is set.
2531 * 2532 * Some examples on x86: 2533 * 2534 * mov (%rax), %rcx # src_reg1 = rax, src_mem = 1, src_offset = 0 2535 * # dst_reg1 = rcx, dst_mem = 0 2536 * 2537 * mov 0x18, %r8 # src_reg1 = -1, src_mem = 0 2538 * # dst_reg1 = r8, dst_mem = 0 2539 * 2540 * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, src_multi_regs = 0 2541 * # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1 2542 * # dst_multi_regs = 1, dst_offset = 8 2543 */ 2544 int annotate_get_insn_location(const struct arch *arch, struct disasm_line *dl, 2545 struct annotated_insn_loc *loc) 2546 { 2547 struct ins_operands *ops; 2548 struct annotated_op_loc *op_loc; 2549 int i; 2550 2551 if (ins__is_lock(&dl->ins)) 2552 ops = dl->ops.locked.ops; 2553 else 2554 ops = &dl->ops; 2555 2556 if (ops == NULL) 2557 return -1; 2558 2559 memset(loc, 0, sizeof(*loc)); 2560 2561 for_each_insn_op_loc(loc, i, op_loc) { 2562 const char *insn_str = ops->source.raw; 2563 bool multi_regs = ops->source.multi_regs; 2564 bool mem_ref = ops->source.mem_ref; 2565 2566 if (i == INSN_OP_TARGET) { 2567 insn_str = ops->target.raw; 2568 multi_regs = ops->target.multi_regs; 2569 mem_ref = ops->target.mem_ref; 2570 } 2571 2572 /* Invalidate the register by default */ 2573 op_loc->reg1 = -1; 2574 op_loc->reg2 = -1; 2575 2576 if (insn_str == NULL) { 2577 if (!arch__is_powerpc(arch)) 2578 continue; 2579 } 2580 2581 /* 2582 * For powerpc, call get_powerpc_regs function which extracts the 2583 * required fields for op_loc, ie reg1, reg2, offset from the 2584 * raw instruction. 
2585 */ 2586 if (arch__is_powerpc(arch)) { 2587 op_loc->mem_ref = mem_ref; 2588 op_loc->multi_regs = multi_regs; 2589 get_powerpc_regs(dl->raw.raw_insn, !i, op_loc); 2590 } else if (strchr(insn_str, arch->objdump.memory_ref_char)) { 2591 op_loc->mem_ref = true; 2592 op_loc->multi_regs = multi_regs; 2593 extract_reg_offset(arch, insn_str, op_loc); 2594 } else { 2595 const char *s = insn_str; 2596 char *p = NULL; 2597 2598 if (arch__is_x86(arch)) { 2599 /* FIXME: Handle other segment registers */ 2600 if (!strncmp(insn_str, "%gs:", 4)) { 2601 op_loc->segment = INSN_SEG_X86_GS; 2602 op_loc->offset = strtol(insn_str + 4, 2603 &p, 0); 2604 if (p && p != insn_str + 4) 2605 op_loc->imm = true; 2606 continue; 2607 } 2608 } 2609 2610 if (*s == arch->objdump.register_char) { 2611 op_loc->reg1 = arch__dwarf_regnum(arch, s); 2612 } 2613 else if (*s == arch->objdump.imm_char) { 2614 op_loc->offset = strtol(s + 1, &p, 0); 2615 if (p && p != s + 1) 2616 op_loc->imm = true; 2617 } 2618 } 2619 } 2620 2621 return 0; 2622 } 2623 2624 static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip, 2625 bool allow_update) 2626 { 2627 struct disasm_line *dl; 2628 struct annotation *notes; 2629 2630 notes = symbol__annotation(sym); 2631 2632 list_for_each_entry(dl, ¬es->src->source, al.node) { 2633 if (dl->al.offset == -1) 2634 continue; 2635 2636 if (sym->start + dl->al.offset == ip) { 2637 /* 2638 * llvm-objdump places "lock" in a separate line and 2639 * in that case, we want to get the next line. 
2640 */ 2641 if (ins__is_lock(&dl->ins) && 2642 *dl->ops.raw == '\0' && allow_update) { 2643 ip++; 2644 continue; 2645 } 2646 return dl; 2647 } 2648 } 2649 return NULL; 2650 } 2651 2652 static struct annotated_item_stat *annotate_data_stat(struct list_head *head, 2653 const char *name) 2654 { 2655 struct annotated_item_stat *istat; 2656 2657 list_for_each_entry(istat, head, list) { 2658 if (!strcmp(istat->name, name)) 2659 return istat; 2660 } 2661 2662 istat = zalloc(sizeof(*istat)); 2663 if (istat == NULL) 2664 return NULL; 2665 2666 istat->name = strdup(name); 2667 if ((istat->name == NULL) || (!strlen(istat->name))) { 2668 free(istat); 2669 return NULL; 2670 } 2671 2672 list_add_tail(&istat->list, head); 2673 return istat; 2674 } 2675 2676 static bool is_stack_operation(const struct arch *arch, struct disasm_line *dl) 2677 { 2678 if (arch__is_x86(arch)) { 2679 if (!strncmp(dl->ins.name, "push", 4) || 2680 !strncmp(dl->ins.name, "pop", 3) || 2681 !strncmp(dl->ins.name, "call", 4) || 2682 !strncmp(dl->ins.name, "ret", 3)) 2683 return true; 2684 } 2685 2686 return false; 2687 } 2688 2689 static bool is_stack_canary(const struct arch *arch, struct annotated_op_loc *loc) 2690 { 2691 /* On x86_64, %gs:40 is used for stack canary */ 2692 if (arch__is_x86(arch)) { 2693 if (loc->segment == INSN_SEG_X86_GS && loc->imm && 2694 loc->offset == 40) 2695 return true; 2696 } 2697 2698 return false; 2699 } 2700 2701 /** 2702 * Returns true if the instruction has a memory operand without 2703 * performing a load/store 2704 */ 2705 static bool is_address_gen_insn(const struct arch *arch, struct disasm_line *dl) 2706 { 2707 if (arch__is_x86(arch)) { 2708 if (!strncmp(dl->ins.name, "lea", 3)) 2709 return true; 2710 } 2711 2712 return false; 2713 } 2714 2715 static struct disasm_line * 2716 annotation__prev_asm_line(struct annotation *notes, struct disasm_line *curr) 2717 { 2718 struct list_head *sources = ¬es->src->source; 2719 struct disasm_line *prev; 2720 2721 if (curr == 
list_first_entry(sources, struct disasm_line, al.node)) 2722 return NULL; 2723 2724 prev = list_prev_entry(curr, al.node); 2725 while (prev->al.offset == -1 && 2726 prev != list_first_entry(sources, struct disasm_line, al.node)) 2727 prev = list_prev_entry(prev, al.node); 2728 2729 if (prev->al.offset == -1) 2730 return NULL; 2731 2732 return prev; 2733 } 2734 2735 static struct disasm_line * 2736 annotation__next_asm_line(struct annotation *notes, struct disasm_line *curr) 2737 { 2738 struct list_head *sources = ¬es->src->source; 2739 struct disasm_line *next; 2740 2741 if (curr == list_last_entry(sources, struct disasm_line, al.node)) 2742 return NULL; 2743 2744 next = list_next_entry(curr, al.node); 2745 while (next->al.offset == -1 && 2746 next != list_last_entry(sources, struct disasm_line, al.node)) 2747 next = list_next_entry(next, al.node); 2748 2749 if (next->al.offset == -1) 2750 return NULL; 2751 2752 return next; 2753 } 2754 2755 u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset, 2756 struct disasm_line *dl) 2757 { 2758 struct annotation *notes; 2759 struct disasm_line *next; 2760 u64 addr; 2761 2762 notes = symbol__annotation(ms->sym); 2763 /* 2764 * PC-relative addressing starts from the next instruction address 2765 * But the IP is for the current instruction. Since disasm_line 2766 * doesn't have the instruction size, calculate it using the next 2767 * disasm_line. If it's the last one, we can use symbol's end 2768 * address directly. 
2769 */ 2770 next = annotation__next_asm_line(notes, dl); 2771 if (next == NULL) 2772 addr = ms->sym->end + offset; 2773 else 2774 addr = ip + (next->al.offset - dl->al.offset) + offset; 2775 2776 return map__rip_2objdump(ms->map, addr); 2777 } 2778 2779 static struct debuginfo_cache { 2780 struct dso *dso; 2781 struct debuginfo *dbg; 2782 } di_cache; 2783 2784 void debuginfo_cache__delete(void) 2785 { 2786 dso__put(di_cache.dso); 2787 di_cache.dso = NULL; 2788 2789 debuginfo__delete(di_cache.dbg); 2790 di_cache.dbg = NULL; 2791 } 2792 2793 static struct annotated_data_type * 2794 __hist_entry__get_data_type(struct hist_entry *he, const struct arch *arch, 2795 struct debuginfo *dbg, struct disasm_line *dl, 2796 int *type_offset) 2797 { 2798 struct map_symbol *ms = &he->ms; 2799 struct annotated_insn_loc loc; 2800 struct annotated_op_loc *op_loc; 2801 struct annotated_data_type *mem_type; 2802 struct annotated_item_stat *istat; 2803 int i; 2804 2805 istat = annotate_data_stat(&ann_insn_stat, dl->ins.name); 2806 if (istat == NULL) { 2807 ann_data_stat.no_insn++; 2808 return NO_TYPE; 2809 } 2810 2811 if (annotate_get_insn_location(arch, dl, &loc) < 0) { 2812 ann_data_stat.no_insn_ops++; 2813 istat->bad++; 2814 return NO_TYPE; 2815 } 2816 2817 if (is_stack_operation(arch, dl)) { 2818 istat->good++; 2819 *type_offset = 0; 2820 return &stackop_type; 2821 } 2822 2823 if (is_address_gen_insn(arch, dl)) { 2824 istat->bad++; 2825 ann_data_stat.no_mem_ops++; 2826 return NO_TYPE; 2827 } 2828 2829 for_each_insn_op_loc(&loc, i, op_loc) { 2830 struct data_loc_info dloc = { 2831 .arch = arch, 2832 .thread = he->thread, 2833 .ms = ms, 2834 .ip = ms->sym->start + dl->al.offset, 2835 .cpumode = he->cpumode, 2836 .op = op_loc, 2837 .di = dbg, 2838 }; 2839 2840 if (!op_loc->mem_ref && op_loc->segment == INSN_SEG_NONE) 2841 continue; 2842 2843 /* PC-relative addressing */ 2844 if (op_loc->reg1 == DWARF_REG_PC) { 2845 dloc.var_addr = annotate_calc_pcrel(ms, dloc.ip, 2846 op_loc->offset, 
dl); 2847 } 2848 2849 /* This CPU access in kernel - pretend PC-relative addressing */ 2850 if (dso__kernel(map__dso(ms->map)) && arch__is_x86(arch) && 2851 op_loc->segment == INSN_SEG_X86_GS && op_loc->imm) { 2852 dloc.var_addr = op_loc->offset; 2853 op_loc->reg1 = DWARF_REG_PC; 2854 } 2855 2856 mem_type = find_data_type(&dloc); 2857 2858 if (mem_type == NULL && is_stack_canary(arch, op_loc)) { 2859 istat->good++; 2860 *type_offset = 0; 2861 return &canary_type; 2862 } 2863 2864 if (mem_type) 2865 istat->good++; 2866 else 2867 istat->bad++; 2868 2869 if (symbol_conf.annotate_data_sample) { 2870 struct evsel *evsel = hists_to_evsel(he->hists); 2871 2872 annotated_data_type__update_samples(mem_type, evsel, 2873 dloc.type_offset, 2874 he->stat.nr_events, 2875 he->stat.period); 2876 } 2877 *type_offset = dloc.type_offset; 2878 return mem_type ?: NO_TYPE; 2879 } 2880 2881 /* retry with a fused instruction */ 2882 return NULL; 2883 } 2884 2885 /** 2886 * hist_entry__get_data_type - find data type for given hist entry 2887 * @he: hist entry 2888 * 2889 * This function first annotates the instruction at @he->ip and extracts 2890 * register and offset info from it. Then it searches the DWARF debug 2891 * info to get a variable and type information using the address, register, 2892 * and offset. 
2893 */ 2894 struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) 2895 { 2896 struct map_symbol *ms = &he->ms; 2897 struct evsel *evsel = hists_to_evsel(he->hists); 2898 const struct arch *arch; 2899 struct disasm_line *dl; 2900 struct annotated_data_type *mem_type; 2901 struct annotated_item_stat *istat; 2902 u64 ip = he->ip; 2903 2904 ann_data_stat.total++; 2905 2906 if (ms->map == NULL || ms->sym == NULL) { 2907 ann_data_stat.no_sym++; 2908 return NULL; 2909 } 2910 2911 if (!symbol_conf.init_annotation) { 2912 ann_data_stat.no_sym++; 2913 return NULL; 2914 } 2915 2916 /* 2917 * di_cache holds a pair of values, but code below assumes 2918 * di_cache.dso can be compared/updated and di_cache.dbg can be 2919 * read/updated independently from each other. That assumption only 2920 * holds in single threaded code. 2921 */ 2922 assert(perf_singlethreaded); 2923 2924 if (map__dso(ms->map) != di_cache.dso) { 2925 dso__put(di_cache.dso); 2926 di_cache.dso = dso__get(map__dso(ms->map)); 2927 2928 debuginfo__delete(di_cache.dbg); 2929 di_cache.dbg = dso__debuginfo(di_cache.dso); 2930 } 2931 2932 if (di_cache.dbg == NULL) { 2933 ann_data_stat.no_dbginfo++; 2934 return NULL; 2935 } 2936 2937 /* Make sure it has the disasm of the function */ 2938 if (symbol__annotate(ms, evsel, &arch) < 0) { 2939 ann_data_stat.no_insn++; 2940 return NULL; 2941 } 2942 2943 /* 2944 * Get a disasm to extract the location from the insn. 2945 * This is too slow... 2946 */ 2947 dl = find_disasm_line(ms->sym, ip, /*allow_update=*/true); 2948 if (dl == NULL) { 2949 ann_data_stat.no_insn++; 2950 return NULL; 2951 } 2952 2953 retry: 2954 mem_type = __hist_entry__get_data_type(he, arch, di_cache.dbg, dl, 2955 &he->mem_type_off); 2956 if (mem_type) 2957 return mem_type == NO_TYPE ? NULL : mem_type; 2958 2959 /* 2960 * Some instructions can be fused and the actual memory access came 2961 * from the previous instruction. 
2962 */ 2963 if (dl->al.offset > 0) { 2964 struct annotation *notes; 2965 struct disasm_line *prev_dl; 2966 2967 notes = symbol__annotation(ms->sym); 2968 prev_dl = annotation__prev_asm_line(notes, dl); 2969 2970 if (prev_dl && ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) { 2971 dl = prev_dl; 2972 goto retry; 2973 } 2974 } 2975 2976 ann_data_stat.no_mem_ops++; 2977 istat = annotate_data_stat(&ann_insn_stat, dl->ins.name); 2978 if (istat) 2979 istat->bad++; 2980 return NULL; 2981 } 2982 2983 /* Basic block traversal (BFS) data structure */ 2984 struct basic_block_data { 2985 struct list_head queue; 2986 struct list_head visited; 2987 }; 2988 2989 /* 2990 * During the traversal, it needs to know the parent block where the current 2991 * block block started from. Note that single basic block can be parent of 2992 * two child basic blocks (in case of condition jump). 2993 */ 2994 struct basic_block_link { 2995 struct list_head node; 2996 struct basic_block_link *parent; 2997 struct annotated_basic_block *bb; 2998 }; 2999 3000 /* Check any of basic block in the list already has the offset */ 3001 static bool basic_block_has_offset(struct list_head *head, s64 offset) 3002 { 3003 struct basic_block_link *link; 3004 3005 list_for_each_entry(link, head, node) { 3006 s64 begin_offset = link->bb->begin->al.offset; 3007 s64 end_offset = link->bb->end->al.offset; 3008 3009 if (begin_offset <= offset && offset <= end_offset) 3010 return true; 3011 } 3012 return false; 3013 } 3014 3015 static bool is_new_basic_block(struct basic_block_data *bb_data, 3016 struct disasm_line *dl) 3017 { 3018 s64 offset = dl->al.offset; 3019 3020 if (basic_block_has_offset(&bb_data->visited, offset)) 3021 return false; 3022 if (basic_block_has_offset(&bb_data->queue, offset)) 3023 return false; 3024 return true; 3025 } 3026 3027 /* Add a basic block starting from dl and link it to the parent */ 3028 static int add_basic_block(struct basic_block_data *bb_data, 3029 struct basic_block_link 
*parent, 3030 struct disasm_line *dl) 3031 { 3032 struct annotated_basic_block *bb; 3033 struct basic_block_link *link; 3034 3035 if (dl == NULL) 3036 return -1; 3037 3038 if (!is_new_basic_block(bb_data, dl)) 3039 return 0; 3040 3041 bb = zalloc(sizeof(*bb)); 3042 if (bb == NULL) 3043 return -1; 3044 3045 bb->begin = dl; 3046 bb->end = dl; 3047 INIT_LIST_HEAD(&bb->list); 3048 3049 link = malloc(sizeof(*link)); 3050 if (link == NULL) { 3051 free(bb); 3052 return -1; 3053 } 3054 3055 link->bb = bb; 3056 link->parent = parent; 3057 list_add_tail(&link->node, &bb_data->queue); 3058 return 0; 3059 } 3060 3061 /* Returns true when it finds the target in the current basic block */ 3062 static bool process_basic_block(struct basic_block_data *bb_data, 3063 struct basic_block_link *link, 3064 struct symbol *sym, u64 target) 3065 { 3066 struct disasm_line *dl, *next_dl, *last_dl; 3067 struct annotation *notes = symbol__annotation(sym); 3068 bool found = false; 3069 3070 dl = link->bb->begin; 3071 /* Check if it's already visited */ 3072 if (basic_block_has_offset(&bb_data->visited, dl->al.offset)) 3073 return false; 3074 3075 last_dl = list_last_entry(¬es->src->source, 3076 struct disasm_line, al.node); 3077 if (last_dl->al.offset == -1) 3078 last_dl = annotation__prev_asm_line(notes, last_dl); 3079 3080 if (last_dl == NULL) 3081 return false; 3082 3083 list_for_each_entry_from(dl, ¬es->src->source, al.node) { 3084 /* Skip comment or debug info line */ 3085 if (dl->al.offset == -1) 3086 continue; 3087 /* Found the target instruction */ 3088 if (sym->start + dl->al.offset == target) { 3089 found = true; 3090 break; 3091 } 3092 /* End of the function, finish the block */ 3093 if (dl == last_dl) 3094 break; 3095 /* 'return' instruction finishes the block */ 3096 if (ins__is_ret(&dl->ins)) 3097 break; 3098 /* normal instructions are part of the basic block */ 3099 if (!ins__is_jump(&dl->ins)) 3100 continue; 3101 /* jump to a different function, tail call or return */ 3102 if 
(dl->ops.target.outside) 3103 break; 3104 /* jump instruction creates new basic block(s) */ 3105 next_dl = find_disasm_line(sym, sym->start + dl->ops.target.offset, 3106 /*allow_update=*/false); 3107 if (next_dl) 3108 add_basic_block(bb_data, link, next_dl); 3109 3110 /* 3111 * FIXME: determine conditional jumps properly. 3112 * Conditional jumps create another basic block with the 3113 * next disasm line. 3114 */ 3115 if (!strstr(dl->ins.name, "jmp")) { 3116 next_dl = annotation__next_asm_line(notes, dl); 3117 if (next_dl) 3118 add_basic_block(bb_data, link, next_dl); 3119 } 3120 break; 3121 3122 } 3123 link->bb->end = dl; 3124 return found; 3125 } 3126 3127 /* 3128 * It founds a target basic block, build a proper linked list of basic blocks 3129 * by following the link recursively. 3130 */ 3131 static void link_found_basic_blocks(struct basic_block_link *link, 3132 struct list_head *head) 3133 { 3134 while (link) { 3135 struct basic_block_link *parent = link->parent; 3136 3137 list_move(&link->bb->list, head); 3138 list_del(&link->node); 3139 free(link); 3140 3141 link = parent; 3142 } 3143 } 3144 3145 static void delete_basic_blocks(struct basic_block_data *bb_data) 3146 { 3147 struct basic_block_link *link, *tmp; 3148 3149 list_for_each_entry_safe(link, tmp, &bb_data->queue, node) { 3150 list_del(&link->node); 3151 zfree(&link->bb); 3152 free(link); 3153 } 3154 3155 list_for_each_entry_safe(link, tmp, &bb_data->visited, node) { 3156 list_del(&link->node); 3157 zfree(&link->bb); 3158 free(link); 3159 } 3160 } 3161 3162 /** 3163 * annotate_get_basic_blocks - Get basic blocks for given address range 3164 * @sym: symbol to annotate 3165 * @src: source address 3166 * @dst: destination address 3167 * @head: list head to save basic blocks 3168 * 3169 * This function traverses disasm_lines from @src to @dst and save them in a 3170 * list of annotated_basic_block to @head. It uses BFS to find the shortest 3171 * path between two. 
The basic_block_link is to maintain parent links so 3172 * that it can build a list of blocks from the start. 3173 */ 3174 int annotate_get_basic_blocks(struct symbol *sym, s64 src, s64 dst, 3175 struct list_head *head) 3176 { 3177 struct basic_block_data bb_data = { 3178 .queue = LIST_HEAD_INIT(bb_data.queue), 3179 .visited = LIST_HEAD_INIT(bb_data.visited), 3180 }; 3181 struct basic_block_link *link; 3182 struct disasm_line *dl; 3183 int ret = -1; 3184 3185 dl = find_disasm_line(sym, src, /*allow_update=*/false); 3186 if (dl == NULL) 3187 return -1; 3188 3189 if (add_basic_block(&bb_data, /*parent=*/NULL, dl) < 0) 3190 return -1; 3191 3192 /* Find shortest path from src to dst using BFS */ 3193 while (!list_empty(&bb_data.queue)) { 3194 link = list_first_entry(&bb_data.queue, struct basic_block_link, node); 3195 3196 if (process_basic_block(&bb_data, link, sym, dst)) { 3197 link_found_basic_blocks(link, head); 3198 ret = 0; 3199 break; 3200 } 3201 list_move(&link->node, &bb_data.visited); 3202 } 3203 delete_basic_blocks(&bb_data); 3204 return ret; 3205 } 3206