1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 4 * 5 * Parts came from builtin-annotate.c, see those files for further 6 * copyright notes. 7 */ 8 9 #include <errno.h> 10 #include <inttypes.h> 11 #include <libgen.h> 12 #include <stdlib.h> 13 #include "util.h" // hex_width() 14 #include "ui/ui.h" 15 #include "sort.h" 16 #include "build-id.h" 17 #include "color.h" 18 #include "config.h" 19 #include "dso.h" 20 #include "env.h" 21 #include "map.h" 22 #include "maps.h" 23 #include "symbol.h" 24 #include "srcline.h" 25 #include "units.h" 26 #include "debug.h" 27 #include "annotate.h" 28 #include "annotate-data.h" 29 #include "evsel.h" 30 #include "evlist.h" 31 #include "bpf-event.h" 32 #include "bpf-utils.h" 33 #include "block-range.h" 34 #include "string2.h" 35 #include "dwarf-regs.h" 36 #include "util/event.h" 37 #include "util/sharded_mutex.h" 38 #include "arch/common.h" 39 #include "namespaces.h" 40 #include "thread.h" 41 #include <regex.h> 42 #include <linux/bitops.h> 43 #include <linux/kernel.h> 44 #include <linux/string.h> 45 #include <linux/zalloc.h> 46 #include <subcmd/parse-options.h> 47 #include <subcmd/run-command.h> 48 49 /* FIXME: For the HE_COLORSET */ 50 #include "ui/browser.h" 51 52 /* 53 * FIXME: Using the same values as slang.h, 54 * but that header may not be available everywhere 55 */ 56 #define LARROW_CHAR ((unsigned char)',') 57 #define RARROW_CHAR ((unsigned char)'+') 58 #define DARROW_CHAR ((unsigned char)'.') 59 #define UARROW_CHAR ((unsigned char)'-') 60 61 #include <linux/ctype.h> 62 63 /* global annotation options */ 64 struct annotation_options annotate_opts; 65 66 static regex_t file_lineno; 67 68 static struct ins_ops *ins__find(struct arch *arch, const char *name); 69 static void ins__sort(struct arch *arch); 70 static int disasm_line__parse(char *line, const char **namep, char **rawp); 71 static int call__scnprintf(struct ins *ins, char *bf, size_t size, 72 struct ins_operands *ops, int max_ins_name); 73 static int jump__scnprintf(struct ins *ins, char *bf, size_t size, 74 struct ins_operands *ops, int max_ins_name); 75 76 struct arch { 77 const char *name; 78 struct ins *instructions; 79 size_t nr_instructions; 80 size_t nr_instructions_allocated; 81 struct ins_ops *(*associate_instruction_ops)(struct arch *arch, const char *name); 82 bool sorted_instructions; 83 bool initialized; 84 const char *insn_suffix; 85 void *priv; 86 unsigned int model; 87 unsigned int family; 88 int (*init)(struct arch *arch, char *cpuid); 89 bool (*ins_is_fused)(struct arch *arch, const char *ins1, 90 const char *ins2); 91 struct { 92 char comment_char; 93 char skip_functions_char; 94 char register_char; 95 char memory_ref_char; 96 } objdump; 97 }; 98 99 static struct ins_ops call_ops; 100 static struct ins_ops dec_ops; 101 static struct ins_ops jump_ops; 102 static struct ins_ops mov_ops; 103 static struct ins_ops nop_ops; 104 static struct ins_ops lock_ops; 105 static struct ins_ops ret_ops; 106 107 /* Data type collection debug statistics */ 108 struct annotated_data_stat ann_data_stat; 109 LIST_HEAD(ann_insn_stat); 110 111 /* Pseudo data types */ 112 struct annotated_data_type stackop_type = { 113 .self = { 114 .type_name = (char *)"(stack operation)", 115 .children = LIST_HEAD_INIT(stackop_type.self.children), 116 }, 117 }; 118 119 static int arch__grow_instructions(struct arch *arch) 120 { 121 struct ins *new_instructions; 122 size_t new_nr_allocated; 123 124 if (arch->nr_instructions_allocated == 
0 && arch->instructions) 125 goto grow_from_non_allocated_table; 126 127 new_nr_allocated = arch->nr_instructions_allocated + 128; 128 new_instructions = realloc(arch->instructions, new_nr_allocated * sizeof(struct ins)); 129 if (new_instructions == NULL) 130 return -1; 131 132 out_update_instructions: 133 arch->instructions = new_instructions; 134 arch->nr_instructions_allocated = new_nr_allocated; 135 return 0; 136 137 grow_from_non_allocated_table: 138 new_nr_allocated = arch->nr_instructions + 128; 139 new_instructions = calloc(new_nr_allocated, sizeof(struct ins)); 140 if (new_instructions == NULL) 141 return -1; 142 143 memcpy(new_instructions, arch->instructions, arch->nr_instructions); 144 goto out_update_instructions; 145 } 146 147 static int arch__associate_ins_ops(struct arch* arch, const char *name, struct ins_ops *ops) 148 { 149 struct ins *ins; 150 151 if (arch->nr_instructions == arch->nr_instructions_allocated && 152 arch__grow_instructions(arch)) 153 return -1; 154 155 ins = &arch->instructions[arch->nr_instructions]; 156 ins->name = strdup(name); 157 if (!ins->name) 158 return -1; 159 160 ins->ops = ops; 161 arch->nr_instructions++; 162 163 ins__sort(arch); 164 return 0; 165 } 166 167 #include "arch/arc/annotate/instructions.c" 168 #include "arch/arm/annotate/instructions.c" 169 #include "arch/arm64/annotate/instructions.c" 170 #include "arch/csky/annotate/instructions.c" 171 #include "arch/loongarch/annotate/instructions.c" 172 #include "arch/mips/annotate/instructions.c" 173 #include "arch/x86/annotate/instructions.c" 174 #include "arch/powerpc/annotate/instructions.c" 175 #include "arch/riscv64/annotate/instructions.c" 176 #include "arch/s390/annotate/instructions.c" 177 #include "arch/sparc/annotate/instructions.c" 178 179 static struct arch architectures[] = { 180 { 181 .name = "arc", 182 .init = arc__annotate_init, 183 }, 184 { 185 .name = "arm", 186 .init = arm__annotate_init, 187 }, 188 { 189 .name = "arm64", 190 .init = arm64__annotate_init, 191 }, 192 { 193 .name = "csky", 194 .init = csky__annotate_init, 195 }, 196 { 197 .name = "mips", 198 .init = mips__annotate_init, 199 .objdump = { 200 .comment_char = '#', 201 }, 202 }, 203 { 204 .name = "x86", 205 .init = x86__annotate_init, 206 .instructions = x86__instructions, 207 .nr_instructions = ARRAY_SIZE(x86__instructions), 208 .insn_suffix = "bwlq", 209 .objdump = { 210 .comment_char = '#', 211 .register_char = '%', 212 .memory_ref_char = '(', 213 }, 214 }, 215 { 216 .name = "powerpc", 217 .init = powerpc__annotate_init, 218 }, 219 { 220 .name = "riscv64", 221 .init = riscv64__annotate_init, 222 }, 223 { 224 .name = "s390", 225 .init = s390__annotate_init, 226 .objdump = { 227 .comment_char = '#', 228 }, 229 }, 230 { 231 .name = "sparc", 232 .init = sparc__annotate_init, 233 .objdump = { 234 .comment_char = '#', 235 }, 236 }, 237 { 238 .name = "loongarch", 239 .init = loongarch__annotate_init, 240 .objdump = { 241 .comment_char = '#', 242 }, 243 }, 244 }; 245 246 static void ins__delete(struct ins_operands *ops) 247 { 248 if (ops == NULL) 249 return; 250 zfree(&ops->source.raw); 251 zfree(&ops->source.name); 252 zfree(&ops->target.raw); 253 zfree(&ops->target.name); 254 } 255 256 static int ins__raw_scnprintf(struct ins *ins, char *bf, size_t size, 257 struct ins_operands *ops, int max_ins_name) 258 { 259 return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->raw); 260 } 261 262 int ins__scnprintf(struct ins *ins, char *bf, size_t size, 263 struct ins_operands *ops, int max_ins_name) 264 { 265 if 
(ins->ops->scnprintf) 266 return ins->ops->scnprintf(ins, bf, size, ops, max_ins_name); 267 268 return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); 269 } 270 271 bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2) 272 { 273 if (!arch || !arch->ins_is_fused) 274 return false; 275 276 return arch->ins_is_fused(arch, ins1, ins2); 277 } 278 279 static int call__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) 280 { 281 char *endptr, *tok, *name; 282 struct map *map = ms->map; 283 struct addr_map_symbol target = { 284 .ms = { .map = map, }, 285 }; 286 287 ops->target.addr = strtoull(ops->raw, &endptr, 16); 288 289 name = strchr(endptr, '<'); 290 if (name == NULL) 291 goto indirect_call; 292 293 name++; 294 295 if (arch->objdump.skip_functions_char && 296 strchr(name, arch->objdump.skip_functions_char)) 297 return -1; 298 299 tok = strchr(name, '>'); 300 if (tok == NULL) 301 return -1; 302 303 *tok = '\0'; 304 ops->target.name = strdup(name); 305 *tok = '>'; 306 307 if (ops->target.name == NULL) 308 return -1; 309 find_target: 310 target.addr = map__objdump_2mem(map, ops->target.addr); 311 312 if (maps__find_ams(ms->maps, &target) == 0 && 313 map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr) 314 ops->target.sym = target.ms.sym; 315 316 return 0; 317 318 indirect_call: 319 tok = strchr(endptr, '*'); 320 if (tok != NULL) { 321 endptr++; 322 323 /* Indirect call can use a non-rip register and offset: callq *0x8(%rbx). 324 * Do not parse such instruction. */ 325 if (strstr(endptr, "(%r") == NULL) 326 ops->target.addr = strtoull(endptr, NULL, 16); 327 } 328 goto find_target; 329 } 330 331 static int call__scnprintf(struct ins *ins, char *bf, size_t size, 332 struct ins_operands *ops, int max_ins_name) 333 { 334 if (ops->target.sym) 335 return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name); 336 337 if (ops->target.addr == 0) 338 return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); 339 340 if (ops->target.name) 341 return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.name); 342 343 return scnprintf(bf, size, "%-*s *%" PRIx64, max_ins_name, ins->name, ops->target.addr); 344 } 345 346 static struct ins_ops call_ops = { 347 .parse = call__parse, 348 .scnprintf = call__scnprintf, 349 }; 350 351 bool ins__is_call(const struct ins *ins) 352 { 353 return ins->ops == &call_ops || ins->ops == &s390_call_ops || ins->ops == &loongarch_call_ops; 354 } 355 356 /* 357 * Prevents from matching commas in the comment section, e.g.: 358 * ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast 359 * 360 * and skip comma as part of function arguments, e.g.: 361 * 1d8b4ac <linemap_lookup(line_maps const*, unsigned int)+0xcc> 362 */ 363 static inline const char *validate_comma(const char *c, struct ins_operands *ops) 364 { 365 if (ops->jump.raw_comment && c > ops->jump.raw_comment) 366 return NULL; 367 368 if (ops->jump.raw_func_start && c > ops->jump.raw_func_start) 369 return NULL; 370 371 return c; 372 } 373 374 static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) 375 { 376 struct map *map = ms->map; 377 struct symbol *sym = ms->sym; 378 struct addr_map_symbol target = { 379 .ms = { .map = map, }, 380 }; 381 const char *c = strchr(ops->raw, ','); 382 u64 start, end; 383 384 ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char); 385 ops->jump.raw_func_start = strchr(ops->raw, 
'<'); 386 387 c = validate_comma(c, ops); 388 389 /* 390 * Examples of lines to parse for the _cpp_lex_token@@Base 391 * function: 392 * 393 * 1159e6c: jne 115aa32 <_cpp_lex_token@@Base+0xf92> 394 * 1159e8b: jne c469be <cpp_named_operator2name@@Base+0xa72> 395 * 396 * The first is a jump to an offset inside the same function, 397 * the second is to another function, i.e. that 0xa72 is an 398 * offset in the cpp_named_operator2name@@base function. 399 */ 400 /* 401 * skip over possible up to 2 operands to get to address, e.g.: 402 * tbnz w0, #26, ffff0000083cd190 <security_file_permission+0xd0> 403 */ 404 if (c++ != NULL) { 405 ops->target.addr = strtoull(c, NULL, 16); 406 if (!ops->target.addr) { 407 c = strchr(c, ','); 408 c = validate_comma(c, ops); 409 if (c++ != NULL) 410 ops->target.addr = strtoull(c, NULL, 16); 411 } 412 } else { 413 ops->target.addr = strtoull(ops->raw, NULL, 16); 414 } 415 416 target.addr = map__objdump_2mem(map, ops->target.addr); 417 start = map__unmap_ip(map, sym->start); 418 end = map__unmap_ip(map, sym->end); 419 420 ops->target.outside = target.addr < start || target.addr > end; 421 422 /* 423 * FIXME: things like this in _cpp_lex_token (gcc's cc1 program): 424 425 cpp_named_operator2name@@Base+0xa72 426 427 * Point to a place that is after the cpp_named_operator2name 428 * boundaries, i.e. in the ELF symbol table for cc1 429 * cpp_named_operator2name is marked as being 32-bytes long, but it in 430 * fact is much larger than that, so we seem to need a symbols__find() 431 * routine that looks for >= current->start and < next_symbol->start, 432 * possibly just for C++ objects? 433 * 434 * For now lets just make some progress by marking jumps to outside the 435 * current function as call like. 436 * 437 * Actual navigation will come next, with further understanding of how 438 * the symbol searching and disassembly should be done. 439 */ 440 if (maps__find_ams(ms->maps, &target) == 0 && 441 map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr) 442 ops->target.sym = target.ms.sym; 443 444 if (!ops->target.outside) { 445 ops->target.offset = target.addr - start; 446 ops->target.offset_avail = true; 447 } else { 448 ops->target.offset_avail = false; 449 } 450 451 return 0; 452 } 453 454 static int jump__scnprintf(struct ins *ins, char *bf, size_t size, 455 struct ins_operands *ops, int max_ins_name) 456 { 457 const char *c; 458 459 if (!ops->target.addr || ops->target.offset < 0) 460 return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); 461 462 if (ops->target.outside && ops->target.sym != NULL) 463 return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, ops->target.sym->name); 464 465 c = strchr(ops->raw, ','); 466 c = validate_comma(c, ops); 467 468 if (c != NULL) { 469 const char *c2 = strchr(c + 1, ','); 470 471 c2 = validate_comma(c2, ops); 472 /* check for 3-op insn */ 473 if (c2 != NULL) 474 c = c2; 475 c++; 476 477 /* mirror arch objdump's space-after-comma style */ 478 if (*c == ' ') 479 c++; 480 } 481 482 return scnprintf(bf, size, "%-*s %.*s%" PRIx64, max_ins_name, 483 ins->name, c ? c - ops->raw : 0, ops->raw, 484 ops->target.offset); 485 } 486 487 static void jump__delete(struct ins_operands *ops __maybe_unused) 488 { 489 /* 490 * The ops->jump.raw_comment and ops->jump.raw_func_start belong to the 491 * raw string, don't free them. 
492 */ 493 } 494 495 static struct ins_ops jump_ops = { 496 .free = jump__delete, 497 .parse = jump__parse, 498 .scnprintf = jump__scnprintf, 499 }; 500 501 bool ins__is_jump(const struct ins *ins) 502 { 503 return ins->ops == &jump_ops || ins->ops == &loongarch_jump_ops; 504 } 505 506 static int comment__symbol(char *raw, char *comment, u64 *addrp, char **namep) 507 { 508 char *endptr, *name, *t; 509 510 if (strstr(raw, "(%rip)") == NULL) 511 return 0; 512 513 *addrp = strtoull(comment, &endptr, 16); 514 if (endptr == comment) 515 return 0; 516 name = strchr(endptr, '<'); 517 if (name == NULL) 518 return -1; 519 520 name++; 521 522 t = strchr(name, '>'); 523 if (t == NULL) 524 return 0; 525 526 *t = '\0'; 527 *namep = strdup(name); 528 *t = '>'; 529 530 return 0; 531 } 532 533 static int lock__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms) 534 { 535 ops->locked.ops = zalloc(sizeof(*ops->locked.ops)); 536 if (ops->locked.ops == NULL) 537 return 0; 538 539 if (disasm_line__parse(ops->raw, &ops->locked.ins.name, &ops->locked.ops->raw) < 0) 540 goto out_free_ops; 541 542 ops->locked.ins.ops = ins__find(arch, ops->locked.ins.name); 543 544 if (ops->locked.ins.ops == NULL) 545 goto out_free_ops; 546 547 if (ops->locked.ins.ops->parse && 548 ops->locked.ins.ops->parse(arch, ops->locked.ops, ms) < 0) 549 goto out_free_ops; 550 551 return 0; 552 553 out_free_ops: 554 zfree(&ops->locked.ops); 555 return 0; 556 } 557 558 static int lock__scnprintf(struct ins *ins, char *bf, size_t size, 559 struct ins_operands *ops, int max_ins_name) 560 { 561 int printed; 562 563 if (ops->locked.ins.ops == NULL) 564 return ins__raw_scnprintf(ins, bf, size, ops, max_ins_name); 565 566 printed = scnprintf(bf, size, "%-*s ", max_ins_name, ins->name); 567 return printed + ins__scnprintf(&ops->locked.ins, bf + printed, 568 size - printed, ops->locked.ops, max_ins_name); 569 } 570 571 static void lock__delete(struct ins_operands *ops) 572 { 573 struct ins *ins = &ops->locked.ins; 574 575 if (ins->ops && ins->ops->free) 576 ins->ops->free(ops->locked.ops); 577 else 578 ins__delete(ops->locked.ops); 579 580 zfree(&ops->locked.ops); 581 zfree(&ops->target.raw); 582 zfree(&ops->target.name); 583 } 584 585 static struct ins_ops lock_ops = { 586 .free = lock__delete, 587 .parse = lock__parse, 588 .scnprintf = lock__scnprintf, 589 }; 590 591 /* 592 * Check if the operand has more than one registers like x86 SIB addressing: 593 * 0x1234(%rax, %rbx, 8) 594 * 595 * But it doesn't care segment selectors like %gs:0x5678(%rcx), so just check 596 * the input string after 'memory_ref_char' if exists. 597 */ 598 static bool check_multi_regs(struct arch *arch, const char *op) 599 { 600 int count = 0; 601 602 if (arch->objdump.register_char == 0) 603 return false; 604 605 if (arch->objdump.memory_ref_char) { 606 op = strchr(op, arch->objdump.memory_ref_char); 607 if (op == NULL) 608 return false; 609 } 610 611 while ((op = strchr(op, arch->objdump.register_char)) != NULL) { 612 count++; 613 op++; 614 } 615 616 return count > 1; 617 } 618 619 static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused) 620 { 621 char *s = strchr(ops->raw, ','), *target, *comment, prev; 622 623 if (s == NULL) 624 return -1; 625 626 *s = '\0'; 627 628 /* 629 * x86 SIB addressing has something like 0x8(%rax, %rcx, 1) 630 * then it needs to have the closing parenthesis. 
631 */ 632 if (strchr(ops->raw, '(')) { 633 *s = ','; 634 s = strchr(ops->raw, ')'); 635 if (s == NULL || s[1] != ',') 636 return -1; 637 *++s = '\0'; 638 } 639 640 ops->source.raw = strdup(ops->raw); 641 *s = ','; 642 643 if (ops->source.raw == NULL) 644 return -1; 645 646 ops->source.multi_regs = check_multi_regs(arch, ops->source.raw); 647 648 target = skip_spaces(++s); 649 comment = strchr(s, arch->objdump.comment_char); 650 651 if (comment != NULL) 652 s = comment - 1; 653 else 654 s = strchr(s, '\0') - 1; 655 656 while (s > target && isspace(s[0])) 657 --s; 658 s++; 659 prev = *s; 660 *s = '\0'; 661 662 ops->target.raw = strdup(target); 663 *s = prev; 664 665 if (ops->target.raw == NULL) 666 goto out_free_source; 667 668 ops->target.multi_regs = check_multi_regs(arch, ops->target.raw); 669 670 if (comment == NULL) 671 return 0; 672 673 comment = skip_spaces(comment); 674 comment__symbol(ops->source.raw, comment + 1, &ops->source.addr, &ops->source.name); 675 comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name); 676 677 return 0; 678 679 out_free_source: 680 zfree(&ops->source.raw); 681 return -1; 682 } 683 684 static int mov__scnprintf(struct ins *ins, char *bf, size_t size, 685 struct ins_operands *ops, int max_ins_name) 686 { 687 return scnprintf(bf, size, "%-*s %s,%s", max_ins_name, ins->name, 688 ops->source.name ?: ops->source.raw, 689 ops->target.name ?: ops->target.raw); 690 } 691 692 static struct ins_ops mov_ops = { 693 .parse = mov__parse, 694 .scnprintf = mov__scnprintf, 695 }; 696 697 static int dec__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms __maybe_unused) 698 { 699 char *target, *comment, *s, prev; 700 701 target = s = ops->raw; 702 703 while (s[0] != '\0' && !isspace(s[0])) 704 ++s; 705 prev = *s; 706 *s = '\0'; 707 708 ops->target.raw = strdup(target); 709 *s = prev; 710 711 if (ops->target.raw == NULL) 712 return -1; 713 714 comment = strchr(s, arch->objdump.comment_char); 715 if (comment == NULL) 716 return 0; 717 718 comment = skip_spaces(comment); 719 comment__symbol(ops->target.raw, comment + 1, &ops->target.addr, &ops->target.name); 720 721 return 0; 722 } 723 724 static int dec__scnprintf(struct ins *ins, char *bf, size_t size, 725 struct ins_operands *ops, int max_ins_name) 726 { 727 return scnprintf(bf, size, "%-*s %s", max_ins_name, ins->name, 728 ops->target.name ?: ops->target.raw); 729 } 730 731 static struct ins_ops dec_ops = { 732 .parse = dec__parse, 733 .scnprintf = dec__scnprintf, 734 }; 735 736 static int nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, 737 struct ins_operands *ops __maybe_unused, int max_ins_name) 738 { 739 return scnprintf(bf, size, "%-*s", max_ins_name, "nop"); 740 } 741 742 static struct ins_ops nop_ops = { 743 .scnprintf = nop__scnprintf, 744 }; 745 746 static struct ins_ops ret_ops = { 747 .scnprintf = ins__raw_scnprintf, 748 }; 749 750 bool ins__is_ret(const struct ins *ins) 751 { 752 return ins->ops == &ret_ops; 753 } 754 755 bool ins__is_lock(const struct ins *ins) 756 { 757 return ins->ops == &lock_ops; 758 } 759 760 static int ins__key_cmp(const void *name, const void *insp) 761 { 762 const struct ins *ins = insp; 763 764 return strcmp(name, ins->name); 765 } 766 767 static int ins__cmp(const void *a, const void *b) 768 { 769 const struct ins *ia = a; 770 const struct ins *ib = b; 771 772 return strcmp(ia->name, ib->name); 773 } 774 775 static void ins__sort(struct arch *arch) 776 { 777 const int nmemb = 
arch->nr_instructions; 778 779 qsort(arch->instructions, nmemb, sizeof(struct ins), ins__cmp); 780 } 781 782 static struct ins_ops *__ins__find(struct arch *arch, const char *name) 783 { 784 struct ins *ins; 785 const int nmemb = arch->nr_instructions; 786 787 if (!arch->sorted_instructions) { 788 ins__sort(arch); 789 arch->sorted_instructions = true; 790 } 791 792 ins = bsearch(name, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp); 793 if (ins) 794 return ins->ops; 795 796 if (arch->insn_suffix) { 797 char tmp[32]; 798 char suffix; 799 size_t len = strlen(name); 800 801 if (len == 0 || len >= sizeof(tmp)) 802 return NULL; 803 804 suffix = name[len - 1]; 805 if (strchr(arch->insn_suffix, suffix) == NULL) 806 return NULL; 807 808 strcpy(tmp, name); 809 tmp[len - 1] = '\0'; /* remove the suffix and check again */ 810 811 ins = bsearch(tmp, arch->instructions, nmemb, sizeof(struct ins), ins__key_cmp); 812 } 813 return ins ? ins->ops : NULL; 814 } 815 816 static struct ins_ops *ins__find(struct arch *arch, const char *name) 817 { 818 struct ins_ops *ops = __ins__find(arch, name); 819 820 if (!ops && arch->associate_instruction_ops) 821 ops = arch->associate_instruction_ops(arch, name); 822 823 return ops; 824 } 825 826 static int arch__key_cmp(const void *name, const void *archp) 827 { 828 const struct arch *arch = archp; 829 830 return strcmp(name, arch->name); 831 } 832 833 static int arch__cmp(const void *a, const void *b) 834 { 835 const struct arch *aa = a; 836 const struct arch *ab = b; 837 838 return strcmp(aa->name, ab->name); 839 } 840 841 static void arch__sort(void) 842 { 843 const int nmemb = ARRAY_SIZE(architectures); 844 845 qsort(architectures, nmemb, sizeof(struct arch), arch__cmp); 846 } 847 848 static struct arch *arch__find(const char *name) 849 { 850 const int nmemb = ARRAY_SIZE(architectures); 851 static bool sorted; 852 853 if (!sorted) { 854 arch__sort(); 855 sorted = true; 856 } 857 858 return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp); 859 } 860 861 bool arch__is(struct arch *arch, const char *name) 862 { 863 return !strcmp(arch->name, name); 864 } 865 866 static struct annotated_source *annotated_source__new(void) 867 { 868 struct annotated_source *src = zalloc(sizeof(*src)); 869 870 if (src != NULL) 871 INIT_LIST_HEAD(&src->source); 872 873 return src; 874 } 875 876 static __maybe_unused void annotated_source__delete(struct annotated_source *src) 877 { 878 if (src == NULL) 879 return; 880 zfree(&src->histograms); 881 free(src); 882 } 883 884 static int annotated_source__alloc_histograms(struct annotated_source *src, 885 size_t size, int nr_hists) 886 { 887 size_t sizeof_sym_hist; 888 889 /* 890 * Add buffer of one element for zero length symbol. 891 * When sample is taken from first instruction of 892 * zero length symbol, perf still resolves it and 893 * shows symbol name in perf report and allows to 894 * annotate it. 895 */ 896 if (size == 0) 897 size = 1; 898 899 /* Check for overflow when calculating sizeof_sym_hist */ 900 if (size > (SIZE_MAX - sizeof(struct sym_hist)) / sizeof(struct sym_hist_entry)) 901 return -1; 902 903 sizeof_sym_hist = (sizeof(struct sym_hist) + size * sizeof(struct sym_hist_entry)); 904 905 /* Check for overflow in zalloc argument */ 906 if (sizeof_sym_hist > SIZE_MAX / nr_hists) 907 return -1; 908 909 src->sizeof_sym_hist = sizeof_sym_hist; 910 src->nr_histograms = nr_hists; 911 src->histograms = calloc(nr_hists, sizeof_sym_hist) ; 912 return src->histograms ? 
0 : -1; 913 } 914 915 void symbol__annotate_zero_histograms(struct symbol *sym) 916 { 917 struct annotation *notes = symbol__annotation(sym); 918 919 annotation__lock(notes); 920 if (notes->src != NULL) { 921 memset(notes->src->histograms, 0, 922 notes->src->nr_histograms * notes->src->sizeof_sym_hist); 923 } 924 if (notes->branch && notes->branch->cycles_hist) { 925 memset(notes->branch->cycles_hist, 0, 926 symbol__size(sym) * sizeof(struct cyc_hist)); 927 } 928 annotation__unlock(notes); 929 } 930 931 static int __symbol__account_cycles(struct cyc_hist *ch, 932 u64 start, 933 unsigned offset, unsigned cycles, 934 unsigned have_start) 935 { 936 /* 937 * For now we can only account one basic block per 938 * final jump. But multiple could be overlapping. 939 * Always account the longest one. So when 940 * a shorter one has been already seen throw it away. 941 * 942 * We separately always account the full cycles. 943 */ 944 ch[offset].num_aggr++; 945 ch[offset].cycles_aggr += cycles; 946 947 if (cycles > ch[offset].cycles_max) 948 ch[offset].cycles_max = cycles; 949 950 if (ch[offset].cycles_min) { 951 if (cycles && cycles < ch[offset].cycles_min) 952 ch[offset].cycles_min = cycles; 953 } else 954 ch[offset].cycles_min = cycles; 955 956 if (!have_start && ch[offset].have_start) 957 return 0; 958 if (ch[offset].num) { 959 if (have_start && (!ch[offset].have_start || 960 ch[offset].start > start)) { 961 ch[offset].have_start = 0; 962 ch[offset].cycles = 0; 963 ch[offset].num = 0; 964 if (ch[offset].reset < 0xffff) 965 ch[offset].reset++; 966 } else if (have_start && 967 ch[offset].start < start) 968 return 0; 969 } 970 971 if (ch[offset].num < NUM_SPARKS) 972 ch[offset].cycles_spark[ch[offset].num] = cycles; 973 974 ch[offset].have_start = have_start; 975 ch[offset].start = start; 976 ch[offset].cycles += cycles; 977 ch[offset].num++; 978 return 0; 979 } 980 981 static int __symbol__inc_addr_samples(struct map_symbol *ms, 982 struct annotated_source *src, int evidx, u64 addr, 983 struct perf_sample *sample) 984 { 985 struct symbol *sym = ms->sym; 986 unsigned offset; 987 struct sym_hist *h; 988 989 pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map__unmap_ip(ms->map, addr)); 990 991 if ((addr < sym->start || addr >= sym->end) && 992 (addr != sym->end || sym->start != sym->end)) { 993 pr_debug("%s(%d): ERANGE! sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 "\n", 994 __func__, __LINE__, sym->name, sym->start, addr, sym->end); 995 return -ERANGE; 996 } 997 998 offset = addr - sym->start; 999 h = annotated_source__histogram(src, evidx); 1000 if (h == NULL) { 1001 pr_debug("%s(%d): ENOMEM! 
sym->name=%s, start=%#" PRIx64 ", addr=%#" PRIx64 ", end=%#" PRIx64 ", func: %d\n", 1002 __func__, __LINE__, sym->name, sym->start, addr, sym->end, sym->type == STT_FUNC); 1003 return -ENOMEM; 1004 } 1005 h->nr_samples++; 1006 h->addr[offset].nr_samples++; 1007 h->period += sample->period; 1008 h->addr[offset].period += sample->period; 1009 1010 pr_debug3("%#" PRIx64 " %s: period++ [addr: %#" PRIx64 ", %#" PRIx64 1011 ", evidx=%d] => nr_samples: %" PRIu64 ", period: %" PRIu64 "\n", 1012 sym->start, sym->name, addr, addr - sym->start, evidx, 1013 h->addr[offset].nr_samples, h->addr[offset].period); 1014 return 0; 1015 } 1016 1017 struct annotated_branch *annotation__get_branch(struct annotation *notes) 1018 { 1019 if (notes == NULL) 1020 return NULL; 1021 1022 if (notes->branch == NULL) 1023 notes->branch = zalloc(sizeof(*notes->branch)); 1024 1025 return notes->branch; 1026 } 1027 1028 static struct cyc_hist *symbol__cycles_hist(struct symbol *sym) 1029 { 1030 struct annotation *notes = symbol__annotation(sym); 1031 struct annotated_branch *branch; 1032 1033 branch = annotation__get_branch(notes); 1034 if (branch == NULL) 1035 return NULL; 1036 1037 if (branch->cycles_hist == NULL) { 1038 const size_t size = symbol__size(sym); 1039 1040 branch->cycles_hist = calloc(size, sizeof(struct cyc_hist)); 1041 } 1042 1043 return branch->cycles_hist; 1044 } 1045 1046 struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists) 1047 { 1048 struct annotation *notes = symbol__annotation(sym); 1049 1050 if (notes->src == NULL) { 1051 notes->src = annotated_source__new(); 1052 if (notes->src == NULL) 1053 return NULL; 1054 goto alloc_histograms; 1055 } 1056 1057 if (notes->src->histograms == NULL) { 1058 alloc_histograms: 1059 annotated_source__alloc_histograms(notes->src, symbol__size(sym), 1060 nr_hists); 1061 } 1062 1063 return notes->src; 1064 } 1065 1066 static int symbol__inc_addr_samples(struct map_symbol *ms, 1067 struct evsel *evsel, u64 addr, 1068 struct perf_sample *sample) 1069 { 1070 struct symbol *sym = ms->sym; 1071 struct annotated_source *src; 1072 1073 if (sym == NULL) 1074 return 0; 1075 src = symbol__hists(sym, evsel->evlist->core.nr_entries); 1076 return src ? __symbol__inc_addr_samples(ms, src, evsel->core.idx, addr, sample) : 0; 1077 } 1078 1079 static int symbol__account_cycles(u64 addr, u64 start, 1080 struct symbol *sym, unsigned cycles) 1081 { 1082 struct cyc_hist *cycles_hist; 1083 unsigned offset; 1084 1085 if (sym == NULL) 1086 return 0; 1087 cycles_hist = symbol__cycles_hist(sym); 1088 if (cycles_hist == NULL) 1089 return -ENOMEM; 1090 if (addr < sym->start || addr >= sym->end) 1091 return -ERANGE; 1092 1093 if (start) { 1094 if (start < sym->start || start >= sym->end) 1095 return -ERANGE; 1096 if (start >= addr) 1097 start = 0; 1098 } 1099 offset = addr - sym->start; 1100 return __symbol__account_cycles(cycles_hist, 1101 start ? start - sym->start : 0, 1102 offset, cycles, 1103 !!start); 1104 } 1105 1106 int addr_map_symbol__account_cycles(struct addr_map_symbol *ams, 1107 struct addr_map_symbol *start, 1108 unsigned cycles) 1109 { 1110 u64 saddr = 0; 1111 int err; 1112 1113 if (!cycles) 1114 return 0; 1115 1116 /* 1117 * Only set start when IPC can be computed. We can only 1118 * compute it when the basic block is completely in a single 1119 * function. 1120 * Special case the case when the jump is elsewhere, but 1121 * it starts on the function start. 
1122 */ 1123 if (start && 1124 (start->ms.sym == ams->ms.sym || 1125 (ams->ms.sym && 1126 start->addr == ams->ms.sym->start + map__start(ams->ms.map)))) 1127 saddr = start->al_addr; 1128 if (saddr == 0) 1129 pr_debug2("BB with bad start: addr %"PRIx64" start %"PRIx64" sym %"PRIx64" saddr %"PRIx64"\n", 1130 ams->addr, 1131 start ? start->addr : 0, 1132 ams->ms.sym ? ams->ms.sym->start + map__start(ams->ms.map) : 0, 1133 saddr); 1134 err = symbol__account_cycles(ams->al_addr, saddr, ams->ms.sym, cycles); 1135 if (err) 1136 pr_debug2("account_cycles failed %d\n", err); 1137 return err; 1138 } 1139 1140 static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 end) 1141 { 1142 unsigned n_insn = 0; 1143 u64 offset; 1144 1145 for (offset = start; offset <= end; offset++) { 1146 if (notes->src->offsets[offset]) 1147 n_insn++; 1148 } 1149 return n_insn; 1150 } 1151 1152 static void annotated_branch__delete(struct annotated_branch *branch) 1153 { 1154 if (branch) { 1155 zfree(&branch->cycles_hist); 1156 free(branch); 1157 } 1158 } 1159 1160 static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch) 1161 { 1162 unsigned n_insn; 1163 unsigned int cover_insn = 0; 1164 u64 offset; 1165 1166 n_insn = annotation__count_insn(notes, start, end); 1167 if (n_insn && ch->num && ch->cycles) { 1168 struct annotated_branch *branch; 1169 float ipc = n_insn / ((double)ch->cycles / (double)ch->num); 1170 1171 /* Hide data when there are too many overlaps. */ 1172 if (ch->reset >= 0x7fff) 1173 return; 1174 1175 for (offset = start; offset <= end; offset++) { 1176 struct annotation_line *al = notes->src->offsets[offset]; 1177 1178 if (al && al->cycles && al->cycles->ipc == 0.0) { 1179 al->cycles->ipc = ipc; 1180 cover_insn++; 1181 } 1182 } 1183 1184 branch = annotation__get_branch(notes); 1185 if (cover_insn && branch) { 1186 branch->hit_cycles += ch->cycles; 1187 branch->hit_insn += n_insn * ch->num; 1188 branch->cover_insn += cover_insn; 1189 } 1190 } 1191 } 1192 1193 static int annotation__compute_ipc(struct annotation *notes, size_t size) 1194 { 1195 int err = 0; 1196 s64 offset; 1197 1198 if (!notes->branch || !notes->branch->cycles_hist) 1199 return 0; 1200 1201 notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1); 1202 notes->branch->hit_cycles = 0; 1203 notes->branch->hit_insn = 0; 1204 notes->branch->cover_insn = 0; 1205 1206 annotation__lock(notes); 1207 for (offset = size - 1; offset >= 0; --offset) { 1208 struct cyc_hist *ch; 1209 1210 ch = &notes->branch->cycles_hist[offset]; 1211 if (ch && ch->cycles) { 1212 struct annotation_line *al; 1213 1214 al = notes->src->offsets[offset]; 1215 if (al && al->cycles == NULL) { 1216 al->cycles = zalloc(sizeof(*al->cycles)); 1217 if (al->cycles == NULL) { 1218 err = ENOMEM; 1219 break; 1220 } 1221 } 1222 if (ch->have_start) 1223 annotation__count_and_fill(notes, ch->start, offset, ch); 1224 if (al && ch->num_aggr) { 1225 al->cycles->avg = ch->cycles_aggr / ch->num_aggr; 1226 al->cycles->max = ch->cycles_max; 1227 al->cycles->min = ch->cycles_min; 1228 } 1229 } 1230 } 1231 1232 if (err) { 1233 while (++offset < (s64)size) { 1234 struct cyc_hist *ch = &notes->branch->cycles_hist[offset]; 1235 1236 if (ch && ch->cycles) { 1237 struct annotation_line *al = notes->src->offsets[offset]; 1238 if (al) 1239 zfree(&al->cycles); 1240 } 1241 } 1242 } 1243 1244 annotation__unlock(notes); 1245 return 0; 1246 } 1247 1248 int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample
*sample, 1249 struct evsel *evsel) 1250 { 1251 return symbol__inc_addr_samples(&ams->ms, evsel, ams->al_addr, sample); 1252 } 1253 1254 int hist_entry__inc_addr_samples(struct hist_entry *he, struct perf_sample *sample, 1255 struct evsel *evsel, u64 ip) 1256 { 1257 return symbol__inc_addr_samples(&he->ms, evsel, ip, sample); 1258 } 1259 1260 static void disasm_line__init_ins(struct disasm_line *dl, struct arch *arch, struct map_symbol *ms) 1261 { 1262 dl->ins.ops = ins__find(arch, dl->ins.name); 1263 1264 if (!dl->ins.ops) 1265 return; 1266 1267 if (dl->ins.ops->parse && dl->ins.ops->parse(arch, &dl->ops, ms) < 0) 1268 dl->ins.ops = NULL; 1269 } 1270 1271 static int disasm_line__parse(char *line, const char **namep, char **rawp) 1272 { 1273 char tmp, *name = skip_spaces(line); 1274 1275 if (name[0] == '\0') 1276 return -1; 1277 1278 *rawp = name + 1; 1279 1280 while ((*rawp)[0] != '\0' && !isspace((*rawp)[0])) 1281 ++*rawp; 1282 1283 tmp = (*rawp)[0]; 1284 (*rawp)[0] = '\0'; 1285 *namep = strdup(name); 1286 1287 if (*namep == NULL) 1288 goto out; 1289 1290 (*rawp)[0] = tmp; 1291 *rawp = strim(*rawp); 1292 1293 return 0; 1294 1295 out: 1296 return -1; 1297 } 1298 1299 struct annotate_args { 1300 struct arch *arch; 1301 struct map_symbol ms; 1302 struct evsel *evsel; 1303 struct annotation_options *options; 1304 s64 offset; 1305 char *line; 1306 int line_nr; 1307 char *fileloc; 1308 }; 1309 1310 static void annotation_line__init(struct annotation_line *al, 1311 struct annotate_args *args, 1312 int nr) 1313 { 1314 al->offset = args->offset; 1315 al->line = strdup(args->line); 1316 al->line_nr = args->line_nr; 1317 al->fileloc = args->fileloc; 1318 al->data_nr = nr; 1319 } 1320 1321 static void annotation_line__exit(struct annotation_line *al) 1322 { 1323 zfree_srcline(&al->path); 1324 zfree(&al->line); 1325 zfree(&al->cycles); 1326 } 1327 1328 static size_t disasm_line_size(int nr) 1329 { 1330 struct annotation_line *al; 1331 1332 return (sizeof(struct disasm_line) + (sizeof(al->data[0]) * nr)); 1333 } 1334 1335 /* 1336 * Allocating the disasm annotation line data with 1337 * following structure: 1338 * 1339 * ------------------------------------------- 1340 * struct disasm_line | struct annotation_line 1341 * ------------------------------------------- 1342 * 1343 * We have 'struct annotation_line' member as last member 1344 * of 'struct disasm_line' to have an easy access. 
1345 */ 1346 static struct disasm_line *disasm_line__new(struct annotate_args *args) 1347 { 1348 struct disasm_line *dl = NULL; 1349 int nr = 1; 1350 1351 if (evsel__is_group_event(args->evsel)) 1352 nr = args->evsel->core.nr_members; 1353 1354 dl = zalloc(disasm_line_size(nr)); 1355 if (!dl) 1356 return NULL; 1357 1358 annotation_line__init(&dl->al, args, nr); 1359 if (dl->al.line == NULL) 1360 goto out_delete; 1361 1362 if (args->offset != -1) { 1363 if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0) 1364 goto out_free_line; 1365 1366 disasm_line__init_ins(dl, args->arch, &args->ms); 1367 } 1368 1369 return dl; 1370 1371 out_free_line: 1372 zfree(&dl->al.line); 1373 out_delete: 1374 free(dl); 1375 return NULL; 1376 } 1377 1378 void disasm_line__free(struct disasm_line *dl) 1379 { 1380 if (dl->ins.ops && dl->ins.ops->free) 1381 dl->ins.ops->free(&dl->ops); 1382 else 1383 ins__delete(&dl->ops); 1384 zfree(&dl->ins.name); 1385 annotation_line__exit(&dl->al); 1386 free(dl); 1387 } 1388 1389 int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name) 1390 { 1391 if (raw || !dl->ins.ops) 1392 return scnprintf(bf, size, "%-*s %s", max_ins_name, dl->ins.name, dl->ops.raw); 1393 1394 return ins__scnprintf(&dl->ins, bf, size, &dl->ops, max_ins_name); 1395 } 1396 1397 void annotation__exit(struct annotation *notes) 1398 { 1399 annotated_source__delete(notes->src); 1400 annotated_branch__delete(notes->branch); 1401 } 1402 1403 static struct sharded_mutex *sharded_mutex; 1404 1405 static void annotation__init_sharded_mutex(void) 1406 { 1407 /* As many mutexes as there are CPUs. */ 1408 sharded_mutex = sharded_mutex__new(cpu__max_present_cpu().cpu); 1409 } 1410 1411 static size_t annotation__hash(const struct annotation *notes) 1412 { 1413 return (size_t)notes; 1414 } 1415 1416 static struct mutex *annotation__get_mutex(const struct annotation *notes) 1417 { 1418 static pthread_once_t once = PTHREAD_ONCE_INIT; 1419 1420 pthread_once(&once, annotation__init_sharded_mutex); 1421 if (!sharded_mutex) 1422 return NULL; 1423 1424 return sharded_mutex__get_mutex(sharded_mutex, annotation__hash(notes)); 1425 } 1426 1427 void annotation__lock(struct annotation *notes) 1428 NO_THREAD_SAFETY_ANALYSIS 1429 { 1430 struct mutex *mutex = annotation__get_mutex(notes); 1431 1432 if (mutex) 1433 mutex_lock(mutex); 1434 } 1435 1436 void annotation__unlock(struct annotation *notes) 1437 NO_THREAD_SAFETY_ANALYSIS 1438 { 1439 struct mutex *mutex = annotation__get_mutex(notes); 1440 1441 if (mutex) 1442 mutex_unlock(mutex); 1443 } 1444 1445 bool annotation__trylock(struct annotation *notes) 1446 { 1447 struct mutex *mutex = annotation__get_mutex(notes); 1448 1449 if (!mutex) 1450 return false; 1451 1452 return mutex_trylock(mutex); 1453 } 1454 1455 1456 static void annotation_line__add(struct annotation_line *al, struct list_head *head) 1457 { 1458 list_add_tail(&al->node, head); 1459 } 1460 1461 struct annotation_line * 1462 annotation_line__next(struct annotation_line *pos, struct list_head *head) 1463 { 1464 list_for_each_entry_continue(pos, head, node) 1465 if (pos->offset >= 0) 1466 return pos; 1467 1468 return NULL; 1469 } 1470 1471 static const char *annotate__address_color(struct block_range *br) 1472 { 1473 double cov = block_range__coverage(br); 1474 1475 if (cov >= 0) { 1476 /* mark red for >75% coverage */ 1477 if (cov > 0.75) 1478 return PERF_COLOR_RED; 1479 1480 /* mark dull for <1% coverage */ 1481 if (cov < 0.01) 1482 return PERF_COLOR_NORMAL; 1483 
} 1484 1485 return PERF_COLOR_MAGENTA; 1486 } 1487 1488 static const char *annotate__asm_color(struct block_range *br) 1489 { 1490 double cov = block_range__coverage(br); 1491 1492 if (cov >= 0) { 1493 /* mark dull for <1% coverage */ 1494 if (cov < 0.01) 1495 return PERF_COLOR_NORMAL; 1496 } 1497 1498 return PERF_COLOR_BLUE; 1499 } 1500 1501 static void annotate__branch_printf(struct block_range *br, u64 addr) 1502 { 1503 bool emit_comment = true; 1504 1505 if (!br) 1506 return; 1507 1508 #if 1 1509 if (br->is_target && br->start == addr) { 1510 struct block_range *branch = br; 1511 double p; 1512 1513 /* 1514 * Find matching branch to our target. 1515 */ 1516 while (!branch->is_branch) 1517 branch = block_range__next(branch); 1518 1519 p = 100 *(double)br->entry / branch->coverage; 1520 1521 if (p > 0.1) { 1522 if (emit_comment) { 1523 emit_comment = false; 1524 printf("\t#"); 1525 } 1526 1527 /* 1528 * The percentage of coverage joined at this target in relation 1529 * to the next branch. 1530 */ 1531 printf(" +%.2f%%", p); 1532 } 1533 } 1534 #endif 1535 if (br->is_branch && br->end == addr) { 1536 double p = 100*(double)br->taken / br->coverage; 1537 1538 if (p > 0.1) { 1539 if (emit_comment) { 1540 emit_comment = false; 1541 printf("\t#"); 1542 } 1543 1544 /* 1545 * The percentage of coverage leaving at this branch, and 1546 * its prediction ratio. 1547 */ 1548 printf(" -%.2f%% (p:%.2f%%)", p, 100*(double)br->pred / br->taken); 1549 } 1550 } 1551 } 1552 1553 static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_width) 1554 { 1555 s64 offset = dl->al.offset; 1556 const u64 addr = start + offset; 1557 struct block_range *br; 1558 1559 br = block_range__find(addr); 1560 color_fprintf(stdout, annotate__address_color(br), " %*" PRIx64 ":", addr_fmt_width, addr); 1561 color_fprintf(stdout, annotate__asm_color(br), "%s", dl->al.line); 1562 annotate__branch_printf(br, addr); 1563 return 0; 1564 } 1565 1566 static int 1567 annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start, 1568 struct evsel *evsel, u64 len, int min_pcnt, int printed, 1569 int max_lines, struct annotation_line *queue, int addr_fmt_width, 1570 int percent_type) 1571 { 1572 struct disasm_line *dl = container_of(al, struct disasm_line, al); 1573 static const char *prev_line; 1574 1575 if (al->offset != -1) { 1576 double max_percent = 0.0; 1577 int i, nr_percent = 1; 1578 const char *color; 1579 struct annotation *notes = symbol__annotation(sym); 1580 1581 for (i = 0; i < al->data_nr; i++) { 1582 double percent; 1583 1584 percent = annotation_data__percent(&al->data[i], 1585 percent_type); 1586 1587 if (percent > max_percent) 1588 max_percent = percent; 1589 } 1590 1591 if (al->data_nr > nr_percent) 1592 nr_percent = al->data_nr; 1593 1594 if (max_percent < min_pcnt) 1595 return -1; 1596 1597 if (max_lines && printed >= max_lines) 1598 return 1; 1599 1600 if (queue != NULL) { 1601 list_for_each_entry_from(queue, &notes->src->source, node) { 1602 if (queue == al) 1603 break; 1604 annotation_line__print(queue, sym, start, evsel, len, 1605 0, 0, 1, NULL, addr_fmt_width, 1606 percent_type); 1607 } 1608 } 1609 1610 color = get_percent_color(max_percent); 1611 1612 for (i = 0; i < nr_percent; i++) { 1613 struct annotation_data *data = &al->data[i]; 1614 double percent; 1615 1616 percent = annotation_data__percent(data, percent_type); 1617 color = get_percent_color(percent); 1618 1619 if (symbol_conf.show_total_period) 1620 color_fprintf(stdout, color, " %11" PRIu64, 1621
data->he.period); 1622 else if (symbol_conf.show_nr_samples) 1623 color_fprintf(stdout, color, " %7" PRIu64, 1624 data->he.nr_samples); 1625 else 1626 color_fprintf(stdout, color, " %7.2f", percent); 1627 } 1628 1629 printf(" : "); 1630 1631 disasm_line__print(dl, start, addr_fmt_width); 1632 1633 /* 1634 * Also color the filename and line if needed, with 1635 * the same color than the percentage. Don't print it 1636 * twice for close colored addr with the same filename:line 1637 */ 1638 if (al->path) { 1639 if (!prev_line || strcmp(prev_line, al->path)) { 1640 color_fprintf(stdout, color, " // %s", al->path); 1641 prev_line = al->path; 1642 } 1643 } 1644 1645 printf("\n"); 1646 } else if (max_lines && printed >= max_lines) 1647 return 1; 1648 else { 1649 int width = symbol_conf.show_total_period ? 12 : 8; 1650 1651 if (queue) 1652 return -1; 1653 1654 if (evsel__is_group_event(evsel)) 1655 width *= evsel->core.nr_members; 1656 1657 if (!*al->line) 1658 printf(" %*s:\n", width, " "); 1659 else 1660 printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line); 1661 } 1662 1663 return 0; 1664 } 1665 1666 /* 1667 * symbol__parse_objdump_line() parses objdump output (with -d --no-show-raw) 1668 * which looks like following 1669 * 1670 * 0000000000415500 <_init>: 1671 * 415500: sub $0x8,%rsp 1672 * 415504: mov 0x2f5ad5(%rip),%rax # 70afe0 <_DYNAMIC+0x2f8> 1673 * 41550b: test %rax,%rax 1674 * 41550e: je 415515 <_init+0x15> 1675 * 415510: callq 416e70 <__gmon_start__@plt> 1676 * 415515: add $0x8,%rsp 1677 * 415519: retq 1678 * 1679 * it will be parsed and saved into struct disasm_line as 1680 * <offset> <name> <ops.raw> 1681 * 1682 * The offset will be a relative offset from the start of the symbol and -1 1683 * means that it's not a disassembly line so should be treated differently. 1684 * The ops.raw part will be parsed further according to type of the instruction. 1685 */ 1686 static int symbol__parse_objdump_line(struct symbol *sym, 1687 struct annotate_args *args, 1688 char *parsed_line, int *line_nr, char **fileloc) 1689 { 1690 struct map *map = args->ms.map; 1691 struct annotation *notes = symbol__annotation(sym); 1692 struct disasm_line *dl; 1693 char *tmp; 1694 s64 line_ip, offset = -1; 1695 regmatch_t match[2]; 1696 1697 /* /filename:linenr ? Save line number and ignore. */ 1698 if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) { 1699 *line_nr = atoi(parsed_line + match[1].rm_so); 1700 free(*fileloc); 1701 *fileloc = strdup(parsed_line); 1702 return 0; 1703 } 1704 1705 /* Process hex address followed by ':'. 
*/ 1706 line_ip = strtoull(parsed_line, &tmp, 16); 1707 if (parsed_line != tmp && tmp[0] == ':' && tmp[1] != '\0') { 1708 u64 start = map__rip_2objdump(map, sym->start), 1709 end = map__rip_2objdump(map, sym->end); 1710 1711 offset = line_ip - start; 1712 if ((u64)line_ip < start || (u64)line_ip >= end) 1713 offset = -1; 1714 else 1715 parsed_line = tmp + 1; 1716 } 1717 1718 args->offset = offset; 1719 args->line = parsed_line; 1720 args->line_nr = *line_nr; 1721 args->fileloc = *fileloc; 1722 args->ms.sym = sym; 1723 1724 dl = disasm_line__new(args); 1725 (*line_nr)++; 1726 1727 if (dl == NULL) 1728 return -1; 1729 1730 if (!disasm_line__has_local_offset(dl)) { 1731 dl->ops.target.offset = dl->ops.target.addr - 1732 map__rip_2objdump(map, sym->start); 1733 dl->ops.target.offset_avail = true; 1734 } 1735 1736 /* kcore has no symbols, so add the call target symbol */ 1737 if (dl->ins.ops && ins__is_call(&dl->ins) && !dl->ops.target.sym) { 1738 struct addr_map_symbol target = { 1739 .addr = dl->ops.target.addr, 1740 .ms = { .map = map, }, 1741 }; 1742 1743 if (!maps__find_ams(args->ms.maps, &target) && 1744 target.ms.sym->start == target.al_addr) 1745 dl->ops.target.sym = target.ms.sym; 1746 } 1747 1748 annotation_line__add(&dl->al, &notes->src->source); 1749 return 0; 1750 } 1751 1752 static __attribute__((constructor)) void symbol__init_regexpr(void) 1753 { 1754 regcomp(&file_lineno, "^/[^:]+:([0-9]+)", REG_EXTENDED); 1755 } 1756 1757 static void delete_last_nop(struct symbol *sym) 1758 { 1759 struct annotation *notes = symbol__annotation(sym); 1760 struct list_head *list = &notes->src->source; 1761 struct disasm_line *dl; 1762 1763 while (!list_empty(list)) { 1764 dl = list_entry(list->prev, struct disasm_line, al.node); 1765 1766 if (dl->ins.ops) { 1767 if (dl->ins.ops != &nop_ops) 1768 return; 1769 } else { 1770 if (!strstr(dl->al.line, " nop ") && 1771 !strstr(dl->al.line, " nopl ") && 1772 !strstr(dl->al.line, " nopw ")) 1773 return; 1774 } 1775 1776 list_del_init(&dl->al.node); 1777 disasm_line__free(dl); 1778 } 1779 } 1780 1781 int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen) 1782 { 1783 struct dso *dso = map__dso(ms->map); 1784 1785 BUG_ON(buflen == 0); 1786 1787 if (errnum >= 0) { 1788 str_error_r(errnum, buf, buflen); 1789 return 0; 1790 } 1791 1792 switch (errnum) { 1793 case SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX: { 1794 char bf[SBUILD_ID_SIZE + 15] = " with build id "; 1795 char *build_id_msg = NULL; 1796 1797 if (dso->has_build_id) { 1798 build_id__sprintf(&dso->bid, bf + 15); 1799 build_id_msg = bf; 1800 } 1801 scnprintf(buf, buflen, 1802 "No vmlinux file%s\nwas found in the path.\n\n" 1803 "Note that annotation using /proc/kcore requires CAP_SYS_RAWIO capability.\n\n" 1804 "Please use:\n\n" 1805 " perf buildid-cache -vu vmlinux\n\n" 1806 "or:\n\n" 1807 " --vmlinux vmlinux\n", build_id_msg ?: ""); 1808 } 1809 break; 1810 case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF: 1811 scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation"); 1812 break; 1813 case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP: 1814 scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions."); 1815 break; 1816 case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING: 1817 scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization."); 1818 break; 1819 case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE: 1820 scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name); 1821 break; 1822
case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF: 1823 scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.", 1824 dso->long_name); 1825 break; 1826 default: 1827 scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum); 1828 break; 1829 } 1830 1831 return 0; 1832 } 1833 1834 static int dso__disassemble_filename(struct dso *dso, char *filename, size_t filename_size) 1835 { 1836 char linkname[PATH_MAX]; 1837 char *build_id_filename; 1838 char *build_id_path = NULL; 1839 char *pos; 1840 int len; 1841 1842 if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && 1843 !dso__is_kcore(dso)) 1844 return SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX; 1845 1846 build_id_filename = dso__build_id_filename(dso, NULL, 0, false); 1847 if (build_id_filename) { 1848 __symbol__join_symfs(filename, filename_size, build_id_filename); 1849 free(build_id_filename); 1850 } else { 1851 if (dso->has_build_id) 1852 return ENOMEM; 1853 goto fallback; 1854 } 1855 1856 build_id_path = strdup(filename); 1857 if (!build_id_path) 1858 return ENOMEM; 1859 1860 /* 1861 * old style build-id cache has name of XX/XXXXXXX.. while 1862 * new style has XX/XXXXXXX../{elf,kallsyms,vdso}. 1863 * extract the build-id part of dirname in the new style only. 1864 */ 1865 pos = strrchr(build_id_path, '/'); 1866 if (pos && strlen(pos) < SBUILD_ID_SIZE - 2) 1867 dirname(build_id_path); 1868 1869 if (dso__is_kcore(dso)) 1870 goto fallback; 1871 1872 len = readlink(build_id_path, linkname, sizeof(linkname) - 1); 1873 if (len < 0) 1874 goto fallback; 1875 1876 linkname[len] = '\0'; 1877 if (strstr(linkname, DSO__NAME_KALLSYMS) || 1878 access(filename, R_OK)) { 1879 fallback: 1880 /* 1881 * If we don't have build-ids or the build-id file isn't in the 1882 * cache, or is just a kallsyms file, well, lets hope that this 1883 * DSO is the same as when 'perf record' ran. 
1884 */ 1885 if (dso->kernel && dso->long_name[0] == '/') 1886 snprintf(filename, filename_size, "%s", dso->long_name); 1887 else 1888 __symbol__join_symfs(filename, filename_size, dso->long_name); 1889 1890 mutex_lock(&dso->lock); 1891 if (access(filename, R_OK) && errno == ENOENT && dso->nsinfo) { 1892 char *new_name = dso__filename_with_chroot(dso, filename); 1893 if (new_name) { 1894 strlcpy(filename, new_name, filename_size); 1895 free(new_name); 1896 } 1897 } 1898 mutex_unlock(&dso->lock); 1899 } 1900 1901 free(build_id_path); 1902 return 0; 1903 } 1904 1905 #if defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) 1906 #define PACKAGE "perf" 1907 #include <bfd.h> 1908 #include <dis-asm.h> 1909 #include <bpf/bpf.h> 1910 #include <bpf/btf.h> 1911 #include <bpf/libbpf.h> 1912 #include <linux/btf.h> 1913 #include <tools/dis-asm-compat.h> 1914 1915 static int symbol__disassemble_bpf(struct symbol *sym, 1916 struct annotate_args *args) 1917 { 1918 struct annotation *notes = symbol__annotation(sym); 1919 struct bpf_prog_linfo *prog_linfo = NULL; 1920 struct bpf_prog_info_node *info_node; 1921 int len = sym->end - sym->start; 1922 disassembler_ftype disassemble; 1923 struct map *map = args->ms.map; 1924 struct perf_bpil *info_linear; 1925 struct disassemble_info info; 1926 struct dso *dso = map__dso(map); 1927 int pc = 0, count, sub_id; 1928 struct btf *btf = NULL; 1929 char tpath[PATH_MAX]; 1930 size_t buf_size; 1931 int nr_skip = 0; 1932 char *buf; 1933 bfd *bfdf; 1934 int ret; 1935 FILE *s; 1936 1937 if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO) 1938 return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE; 1939 1940 pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__, 1941 sym->name, sym->start, sym->end - sym->start); 1942 1943 memset(tpath, 0, sizeof(tpath)); 1944 perf_exe(tpath, sizeof(tpath)); 1945 1946 bfdf = bfd_openr(tpath, NULL); 1947 if (bfdf == NULL) 1948 abort(); 1949 1950 if (!bfd_check_format(bfdf, bfd_object)) 1951 abort(); 1952 1953 s = open_memstream(&buf, &buf_size); 1954 if (!s) { 1955 ret = errno; 1956 goto out; 1957 } 1958 init_disassemble_info_compat(&info, s, 1959 (fprintf_ftype) fprintf, 1960 fprintf_styled); 1961 info.arch = bfd_get_arch(bfdf); 1962 info.mach = bfd_get_mach(bfdf); 1963 1964 info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, 1965 dso->bpf_prog.id); 1966 if (!info_node) { 1967 ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF; 1968 goto out; 1969 } 1970 info_linear = info_node->info_linear; 1971 sub_id = dso->bpf_prog.sub_id; 1972 1973 info.buffer = (void *)(uintptr_t)(info_linear->info.jited_prog_insns); 1974 info.buffer_length = info_linear->info.jited_prog_len; 1975 1976 if (info_linear->info.nr_line_info) 1977 prog_linfo = bpf_prog_linfo__new(&info_linear->info); 1978 1979 if (info_linear->info.btf_id) { 1980 struct btf_node *node; 1981 1982 node = perf_env__find_btf(dso->bpf_prog.env, 1983 info_linear->info.btf_id); 1984 if (node) 1985 btf = btf__new((__u8 *)(node->data), 1986 node->data_size); 1987 } 1988 1989 disassemble_init_for_target(&info); 1990 1991 #ifdef DISASM_FOUR_ARGS_SIGNATURE 1992 disassemble = disassembler(info.arch, 1993 bfd_big_endian(bfdf), 1994 info.mach, 1995 bfdf); 1996 #else 1997 disassemble = disassembler(bfdf); 1998 #endif 1999 if (disassemble == NULL) 2000 abort(); 2001 2002 fflush(s); 2003 do { 2004 const struct bpf_line_info *linfo = NULL; 2005 struct disasm_line *dl; 2006 size_t prev_buf_size; 2007 const char *srcline; 2008 u64 addr; 2009 2010 addr = pc + ((u64 
*)(uintptr_t)(info_linear->info.jited_ksyms))[sub_id]; 2011 count = disassemble(pc, &info); 2012 2013 if (prog_linfo) 2014 linfo = bpf_prog_linfo__lfind_addr_func(prog_linfo, 2015 addr, sub_id, 2016 nr_skip); 2017 2018 if (linfo && btf) { 2019 srcline = btf__name_by_offset(btf, linfo->line_off); 2020 nr_skip++; 2021 } else 2022 srcline = NULL; 2023 2024 fprintf(s, "\n"); 2025 prev_buf_size = buf_size; 2026 fflush(s); 2027 2028 if (!annotate_opts.hide_src_code && srcline) { 2029 args->offset = -1; 2030 args->line = strdup(srcline); 2031 args->line_nr = 0; 2032 args->fileloc = NULL; 2033 args->ms.sym = sym; 2034 dl = disasm_line__new(args); 2035 if (dl) { 2036 annotation_line__add(&dl->al, 2037 &notes->src->source); 2038 } 2039 } 2040 2041 args->offset = pc; 2042 args->line = buf + prev_buf_size; 2043 args->line_nr = 0; 2044 args->fileloc = NULL; 2045 args->ms.sym = sym; 2046 dl = disasm_line__new(args); 2047 if (dl) 2048 annotation_line__add(&dl->al, &notes->src->source); 2049 2050 pc += count; 2051 } while (count > 0 && pc < len); 2052 2053 ret = 0; 2054 out: 2055 free(prog_linfo); 2056 btf__free(btf); 2057 fclose(s); 2058 bfd_close(bfdf); 2059 return ret; 2060 } 2061 #else // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) 2062 static int symbol__disassemble_bpf(struct symbol *sym __maybe_unused, 2063 struct annotate_args *args __maybe_unused) 2064 { 2065 return SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF; 2066 } 2067 #endif // defined(HAVE_LIBBFD_SUPPORT) && defined(HAVE_LIBBPF_SUPPORT) 2068 2069 static int 2070 symbol__disassemble_bpf_image(struct symbol *sym, 2071 struct annotate_args *args) 2072 { 2073 struct annotation *notes = symbol__annotation(sym); 2074 struct disasm_line *dl; 2075 2076 args->offset = -1; 2077 args->line = strdup("to be implemented"); 2078 args->line_nr = 0; 2079 args->fileloc = NULL; 2080 dl = disasm_line__new(args); 2081 if (dl) 2082 annotation_line__add(&dl->al, &notes->src->source); 2083 2084 zfree(&args->line); 2085 return 0; 2086 } 2087 2088 /* 2089 * Possibly create a new version of line with tabs expanded. Returns the 2090 * existing or new line, storage is updated if a new line is allocated. If 2091 * allocation fails then NULL is returned. 2092 */ 2093 static char *expand_tabs(char *line, char **storage, size_t *storage_len) 2094 { 2095 size_t i, src, dst, len, new_storage_len, num_tabs; 2096 char *new_line; 2097 size_t line_len = strlen(line); 2098 2099 for (num_tabs = 0, i = 0; i < line_len; i++) 2100 if (line[i] == '\t') 2101 num_tabs++; 2102 2103 if (num_tabs == 0) 2104 return line; 2105 2106 /* 2107 * Space for the line and '\0', less the leading and trailing 2108 * spaces. Each tab may introduce 7 additional spaces. 2109 */ 2110 new_storage_len = line_len + 1 + (num_tabs * 7); 2111 2112 new_line = malloc(new_storage_len); 2113 if (new_line == NULL) { 2114 pr_err("Failure allocating memory for tab expansion\n"); 2115 return NULL; 2116 } 2117 2118 /* 2119 * Copy regions starting at src and expand tabs. If there are two 2120 * adjacent tabs then 'src == i', the memcpy is of size 0 and the spaces 2121 * are inserted. 2122 */ 2123 for (i = 0, src = 0, dst = 0; i < line_len && num_tabs; i++) { 2124 if (line[i] == '\t') { 2125 len = i - src; 2126 memcpy(&new_line[dst], &line[src], len); 2127 dst += len; 2128 new_line[dst++] = ' '; 2129 while (dst % 8 != 0) 2130 new_line[dst++] = ' '; 2131 src = i + 1; 2132 num_tabs--; 2133 } 2134 } 2135 2136 /* Expand the last region.
*/ 2137 len = line_len - src; 2138 memcpy(&new_line[dst], &line[src], len); 2139 dst += len; 2140 new_line[dst] = '\0'; 2141 2142 free(*storage); 2143 *storage = new_line; 2144 *storage_len = new_storage_len; 2145 return new_line; 2146 2147 } 2148 2149 static int symbol__disassemble(struct symbol *sym, struct annotate_args *args) 2150 { 2151 struct annotation_options *opts = &annotate_opts; 2152 struct map *map = args->ms.map; 2153 struct dso *dso = map__dso(map); 2154 char *command; 2155 FILE *file; 2156 char symfs_filename[PATH_MAX]; 2157 struct kcore_extract kce; 2158 bool delete_extract = false; 2159 bool decomp = false; 2160 int lineno = 0; 2161 char *fileloc = NULL; 2162 int nline; 2163 char *line; 2164 size_t line_len; 2165 const char *objdump_argv[] = { 2166 "/bin/sh", 2167 "-c", 2168 NULL, /* Will be the objdump command to run. */ 2169 "--", 2170 NULL, /* Will be the symfs path. */ 2171 NULL, 2172 }; 2173 struct child_process objdump_process; 2174 int err = dso__disassemble_filename(dso, symfs_filename, sizeof(symfs_filename)); 2175 2176 if (err) 2177 return err; 2178 2179 pr_debug("%s: filename=%s, sym=%s, start=%#" PRIx64 ", end=%#" PRIx64 "\n", __func__, 2180 symfs_filename, sym->name, map__unmap_ip(map, sym->start), 2181 map__unmap_ip(map, sym->end)); 2182 2183 pr_debug("annotating [%p] %30s : [%p] %30s\n", 2184 dso, dso->long_name, sym, sym->name); 2185 2186 if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) { 2187 return symbol__disassemble_bpf(sym, args); 2188 } else if (dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE) { 2189 return symbol__disassemble_bpf_image(sym, args); 2190 } else if (dso__is_kcore(dso)) { 2191 kce.kcore_filename = symfs_filename; 2192 kce.addr = map__rip_2objdump(map, sym->start); 2193 kce.offs = sym->start; 2194 kce.len = sym->end - sym->start; 2195 if (!kcore_extract__create(&kce)) { 2196 delete_extract = true; 2197 strlcpy(symfs_filename, kce.extract_filename, 2198 sizeof(symfs_filename)); 2199 } 2200 } else if (dso__needs_decompress(dso)) { 2201 char tmp[KMOD_DECOMP_LEN]; 2202 2203 if (dso__decompress_kmodule_path(dso, symfs_filename, 2204 tmp, sizeof(tmp)) < 0) 2205 return -1; 2206 2207 decomp = true; 2208 strcpy(symfs_filename, tmp); 2209 } 2210 2211 err = asprintf(&command, 2212 "%s %s%s --start-address=0x%016" PRIx64 2213 " --stop-address=0x%016" PRIx64 2214 " %s -d %s %s %s %c%s%c %s%s -C \"$1\"", 2215 opts->objdump_path ?: "objdump", 2216 opts->disassembler_style ? "-M " : "", 2217 opts->disassembler_style ?: "", 2218 map__rip_2objdump(map, sym->start), 2219 map__rip_2objdump(map, sym->end), 2220 opts->show_linenr ? "-l" : "", 2221 opts->show_asm_raw ? "" : "--no-show-raw-insn", 2222 opts->annotate_src ? "-S" : "", 2223 opts->prefix ? "--prefix " : "", 2224 opts->prefix ? '"' : ' ', 2225 opts->prefix ?: "", 2226 opts->prefix ? '"' : ' ', 2227 opts->prefix_strip ? 
"--prefix-strip=" : "", 2228 opts->prefix_strip ?: ""); 2229 2230 if (err < 0) { 2231 pr_err("Failure allocating memory for the command to run\n"); 2232 goto out_remove_tmp; 2233 } 2234 2235 pr_debug("Executing: %s\n", command); 2236 2237 objdump_argv[2] = command; 2238 objdump_argv[4] = symfs_filename; 2239 2240 /* Create a pipe to read from for stdout */ 2241 memset(&objdump_process, 0, sizeof(objdump_process)); 2242 objdump_process.argv = objdump_argv; 2243 objdump_process.out = -1; 2244 objdump_process.err = -1; 2245 objdump_process.no_stderr = 1; 2246 if (start_command(&objdump_process)) { 2247 pr_err("Failure starting to run %s\n", command); 2248 err = -1; 2249 goto out_free_command; 2250 } 2251 2252 file = fdopen(objdump_process.out, "r"); 2253 if (!file) { 2254 pr_err("Failure creating FILE stream for %s\n", command); 2255 /* 2256 * If we were using debug info should retry with 2257 * original binary. 2258 */ 2259 err = -1; 2260 goto out_close_stdout; 2261 } 2262 2263 /* Storage for getline. */ 2264 line = NULL; 2265 line_len = 0; 2266 2267 nline = 0; 2268 while (!feof(file)) { 2269 const char *match; 2270 char *expanded_line; 2271 2272 if (getline(&line, &line_len, file) < 0 || !line) 2273 break; 2274 2275 /* Skip lines containing "filename:" */ 2276 match = strstr(line, symfs_filename); 2277 if (match && match[strlen(symfs_filename)] == ':') 2278 continue; 2279 2280 expanded_line = strim(line); 2281 expanded_line = expand_tabs(expanded_line, &line, &line_len); 2282 if (!expanded_line) 2283 break; 2284 2285 /* 2286 * The source code line number (lineno) needs to be kept in 2287 * across calls to symbol__parse_objdump_line(), so that it 2288 * can associate it with the instructions till the next one. 2289 * See disasm_line__new() and struct disasm_line::line_nr. 2290 */ 2291 if (symbol__parse_objdump_line(sym, args, expanded_line, 2292 &lineno, &fileloc) < 0) 2293 break; 2294 nline++; 2295 } 2296 free(line); 2297 free(fileloc); 2298 2299 err = finish_command(&objdump_process); 2300 if (err) 2301 pr_err("Error running %s\n", command); 2302 2303 if (nline == 0) { 2304 err = -1; 2305 pr_err("No output from %s\n", command); 2306 } 2307 2308 /* 2309 * kallsyms does not have symbol sizes so there may a nop at the end. 2310 * Remove it. 
2311 */ 2312 if (dso__is_kcore(dso)) 2313 delete_last_nop(sym); 2314 2315 fclose(file); 2316 2317 out_close_stdout: 2318 close(objdump_process.out); 2319 2320 out_free_command: 2321 free(command); 2322 2323 out_remove_tmp: 2324 if (decomp) 2325 unlink(symfs_filename); 2326 2327 if (delete_extract) 2328 kcore_extract__delete(&kce); 2329 2330 return err; 2331 } 2332 2333 static void calc_percent(struct sym_hist *sym_hist, 2334 struct hists *hists, 2335 struct annotation_data *data, 2336 s64 offset, s64 end) 2337 { 2338 unsigned int hits = 0; 2339 u64 period = 0; 2340 2341 while (offset < end) { 2342 hits += sym_hist->addr[offset].nr_samples; 2343 period += sym_hist->addr[offset].period; 2344 ++offset; 2345 } 2346 2347 if (sym_hist->nr_samples) { 2348 data->he.period = period; 2349 data->he.nr_samples = hits; 2350 data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples; 2351 } 2352 2353 if (hists->stats.nr_non_filtered_samples) 2354 data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples; 2355 2356 if (sym_hist->period) 2357 data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period; 2358 2359 if (hists->stats.total_period) 2360 data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period; 2361 } 2362 2363 static void annotation__calc_percent(struct annotation *notes, 2364 struct evsel *leader, s64 len) 2365 { 2366 struct annotation_line *al, *next; 2367 struct evsel *evsel; 2368 2369 list_for_each_entry(al, ¬es->src->source, node) { 2370 s64 end; 2371 int i = 0; 2372 2373 if (al->offset == -1) 2374 continue; 2375 2376 next = annotation_line__next(al, ¬es->src->source); 2377 end = next ? next->offset : len; 2378 2379 for_each_group_evsel(evsel, leader) { 2380 struct hists *hists = evsel__hists(evsel); 2381 struct annotation_data *data; 2382 struct sym_hist *sym_hist; 2383 2384 BUG_ON(i >= al->data_nr); 2385 2386 sym_hist = annotation__histogram(notes, evsel->core.idx); 2387 data = &al->data[i++]; 2388 2389 calc_percent(sym_hist, hists, data, al->offset, end); 2390 } 2391 } 2392 } 2393 2394 void symbol__calc_percent(struct symbol *sym, struct evsel *evsel) 2395 { 2396 struct annotation *notes = symbol__annotation(sym); 2397 2398 annotation__calc_percent(notes, evsel, symbol__size(sym)); 2399 } 2400 2401 static int evsel__get_arch(struct evsel *evsel, struct arch **parch) 2402 { 2403 struct perf_env *env = evsel__env(evsel); 2404 const char *arch_name = perf_env__arch(env); 2405 struct arch *arch; 2406 int err; 2407 2408 if (!arch_name) 2409 return errno; 2410 2411 *parch = arch = arch__find(arch_name); 2412 if (arch == NULL) { 2413 pr_err("%s: unsupported arch %s\n", __func__, arch_name); 2414 return ENOTSUP; 2415 } 2416 2417 if (arch->init) { 2418 err = arch->init(arch, env ? 
env->cpuid : NULL); 2419 if (err) { 2420 pr_err("%s: failed to initialize %s arch priv area\n", 2421 __func__, arch->name); 2422 return err; 2423 } 2424 } 2425 return 0; 2426 } 2427 2428 int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, 2429 struct arch **parch) 2430 { 2431 struct symbol *sym = ms->sym; 2432 struct annotation *notes = symbol__annotation(sym); 2433 struct annotate_args args = { 2434 .evsel = evsel, 2435 .options = &annotate_opts, 2436 }; 2437 struct arch *arch = NULL; 2438 int err; 2439 2440 err = evsel__get_arch(evsel, &arch); 2441 if (err < 0) 2442 return err; 2443 2444 if (parch) 2445 *parch = arch; 2446 2447 args.arch = arch; 2448 args.ms = *ms; 2449 if (annotate_opts.full_addr) 2450 notes->start = map__objdump_2mem(ms->map, ms->sym->start); 2451 else 2452 notes->start = map__rip_2objdump(ms->map, ms->sym->start); 2453 2454 return symbol__disassemble(sym, &args); 2455 } 2456 2457 static void insert_source_line(struct rb_root *root, struct annotation_line *al) 2458 { 2459 struct annotation_line *iter; 2460 struct rb_node **p = &root->rb_node; 2461 struct rb_node *parent = NULL; 2462 unsigned int percent_type = annotate_opts.percent_type; 2463 int i, ret; 2464 2465 while (*p != NULL) { 2466 parent = *p; 2467 iter = rb_entry(parent, struct annotation_line, rb_node); 2468 2469 ret = strcmp(iter->path, al->path); 2470 if (ret == 0) { 2471 for (i = 0; i < al->data_nr; i++) { 2472 iter->data[i].percent_sum += annotation_data__percent(&al->data[i], 2473 percent_type); 2474 } 2475 return; 2476 } 2477 2478 if (ret < 0) 2479 p = &(*p)->rb_left; 2480 else 2481 p = &(*p)->rb_right; 2482 } 2483 2484 for (i = 0; i < al->data_nr; i++) { 2485 al->data[i].percent_sum = annotation_data__percent(&al->data[i], 2486 percent_type); 2487 } 2488 2489 rb_link_node(&al->rb_node, parent, p); 2490 rb_insert_color(&al->rb_node, root); 2491 } 2492 2493 static int cmp_source_line(struct annotation_line *a, struct annotation_line *b) 2494 { 2495 int i; 2496 2497 for (i = 0; i < a->data_nr; i++) { 2498 if (a->data[i].percent_sum == b->data[i].percent_sum) 2499 continue; 2500 return a->data[i].percent_sum > b->data[i].percent_sum; 2501 } 2502 2503 return 0; 2504 } 2505 2506 static void __resort_source_line(struct rb_root *root, struct annotation_line *al) 2507 { 2508 struct annotation_line *iter; 2509 struct rb_node **p = &root->rb_node; 2510 struct rb_node *parent = NULL; 2511 2512 while (*p != NULL) { 2513 parent = *p; 2514 iter = rb_entry(parent, struct annotation_line, rb_node); 2515 2516 if (cmp_source_line(al, iter)) 2517 p = &(*p)->rb_left; 2518 else 2519 p = &(*p)->rb_right; 2520 } 2521 2522 rb_link_node(&al->rb_node, parent, p); 2523 rb_insert_color(&al->rb_node, root); 2524 } 2525 2526 static void resort_source_line(struct rb_root *dest_root, struct rb_root *src_root) 2527 { 2528 struct annotation_line *al; 2529 struct rb_node *node; 2530 2531 node = rb_first(src_root); 2532 while (node) { 2533 struct rb_node *next; 2534 2535 al = rb_entry(node, struct annotation_line, rb_node); 2536 next = rb_next(node); 2537 rb_erase(node, src_root); 2538 2539 __resort_source_line(dest_root, al); 2540 node = next; 2541 } 2542 } 2543 2544 static void print_summary(struct rb_root *root, const char *filename) 2545 { 2546 struct annotation_line *al; 2547 struct rb_node *node; 2548 2549 printf("\nSorted summary for file %s\n", filename); 2550 printf("----------------------------------------------\n\n"); 2551 2552 if (RB_EMPTY_ROOT(root)) { 2553 printf(" Nothing higher than %1.1f%%\n", MIN_GREEN); 
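		/*
		 * Note: MIN_GREEN (0.5% in util/color.h) mirrors the
		 * percent_max <= 0.5 cut-off applied when the source lines
		 * are collected further below, so an empty tree here simply
		 * means no line crossed that threshold.
		 */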
2554 return; 2555 } 2556 2557 node = rb_first(root); 2558 while (node) { 2559 double percent, percent_max = 0.0; 2560 const char *color; 2561 char *path; 2562 int i; 2563 2564 al = rb_entry(node, struct annotation_line, rb_node); 2565 for (i = 0; i < al->data_nr; i++) { 2566 percent = al->data[i].percent_sum; 2567 color = get_percent_color(percent); 2568 color_fprintf(stdout, color, " %7.2f", percent); 2569 2570 if (percent > percent_max) 2571 percent_max = percent; 2572 } 2573 2574 path = al->path; 2575 color = get_percent_color(percent_max); 2576 color_fprintf(stdout, color, " %s\n", path); 2577 2578 node = rb_next(node); 2579 } 2580 } 2581 2582 static void symbol__annotate_hits(struct symbol *sym, struct evsel *evsel) 2583 { 2584 struct annotation *notes = symbol__annotation(sym); 2585 struct sym_hist *h = annotation__histogram(notes, evsel->core.idx); 2586 u64 len = symbol__size(sym), offset; 2587 2588 for (offset = 0; offset < len; ++offset) 2589 if (h->addr[offset].nr_samples != 0) 2590 printf("%*" PRIx64 ": %" PRIu64 "\n", BITS_PER_LONG / 2, 2591 sym->start + offset, h->addr[offset].nr_samples); 2592 printf("%*s: %" PRIu64 "\n", BITS_PER_LONG / 2, "h->nr_samples", h->nr_samples); 2593 } 2594 2595 static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start) 2596 { 2597 char bf[32]; 2598 struct annotation_line *line; 2599 2600 list_for_each_entry_reverse(line, lines, node) { 2601 if (line->offset != -1) 2602 return scnprintf(bf, sizeof(bf), "%" PRIx64, start + line->offset); 2603 } 2604 2605 return 0; 2606 } 2607 2608 int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel) 2609 { 2610 struct map *map = ms->map; 2611 struct symbol *sym = ms->sym; 2612 struct dso *dso = map__dso(map); 2613 char *filename; 2614 const char *d_filename; 2615 const char *evsel_name = evsel__name(evsel); 2616 struct annotation *notes = symbol__annotation(sym); 2617 struct sym_hist *h = annotation__histogram(notes, evsel->core.idx); 2618 struct annotation_line *pos, *queue = NULL; 2619 struct annotation_options *opts = &annotate_opts; 2620 u64 start = map__rip_2objdump(map, sym->start); 2621 int printed = 2, queue_len = 0, addr_fmt_width; 2622 int more = 0; 2623 bool context = opts->context; 2624 u64 len; 2625 int width = symbol_conf.show_total_period ? 12 : 8; 2626 int graph_dotted_len; 2627 char buf[512]; 2628 2629 filename = strdup(dso->long_name); 2630 if (!filename) 2631 return -ENOMEM; 2632 2633 if (opts->full_path) 2634 d_filename = filename; 2635 else 2636 d_filename = basename(filename); 2637 2638 len = symbol__size(sym); 2639 2640 if (evsel__is_group_event(evsel)) { 2641 width *= evsel->core.nr_members; 2642 evsel__group_desc(evsel, buf, sizeof(buf)); 2643 evsel_name = buf; 2644 } 2645 2646 graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, " 2647 "percent: %s)\n", 2648 width, width, symbol_conf.show_total_period ? "Period" : 2649 symbol_conf.show_nr_samples ? 
"Samples" : "Percent", 2650 d_filename, evsel_name, h->nr_samples, 2651 percent_type_str(opts->percent_type)); 2652 2653 printf("%-*.*s----\n", 2654 graph_dotted_len, graph_dotted_len, graph_dotted_line); 2655 2656 if (verbose > 0) 2657 symbol__annotate_hits(sym, evsel); 2658 2659 addr_fmt_width = annotated_source__addr_fmt_width(¬es->src->source, start); 2660 2661 list_for_each_entry(pos, ¬es->src->source, node) { 2662 int err; 2663 2664 if (context && queue == NULL) { 2665 queue = pos; 2666 queue_len = 0; 2667 } 2668 2669 err = annotation_line__print(pos, sym, start, evsel, len, 2670 opts->min_pcnt, printed, opts->max_lines, 2671 queue, addr_fmt_width, opts->percent_type); 2672 2673 switch (err) { 2674 case 0: 2675 ++printed; 2676 if (context) { 2677 printed += queue_len; 2678 queue = NULL; 2679 queue_len = 0; 2680 } 2681 break; 2682 case 1: 2683 /* filtered by max_lines */ 2684 ++more; 2685 break; 2686 case -1: 2687 default: 2688 /* 2689 * Filtered by min_pcnt or non IP lines when 2690 * context != 0 2691 */ 2692 if (!context) 2693 break; 2694 if (queue_len == context) 2695 queue = list_entry(queue->node.next, typeof(*queue), node); 2696 else 2697 ++queue_len; 2698 break; 2699 } 2700 } 2701 2702 free(filename); 2703 2704 return more; 2705 } 2706 2707 static void FILE__set_percent_color(void *fp __maybe_unused, 2708 double percent __maybe_unused, 2709 bool current __maybe_unused) 2710 { 2711 } 2712 2713 static int FILE__set_jumps_percent_color(void *fp __maybe_unused, 2714 int nr __maybe_unused, bool current __maybe_unused) 2715 { 2716 return 0; 2717 } 2718 2719 static int FILE__set_color(void *fp __maybe_unused, int color __maybe_unused) 2720 { 2721 return 0; 2722 } 2723 2724 static void FILE__printf(void *fp, const char *fmt, ...) 2725 { 2726 va_list args; 2727 2728 va_start(args, fmt); 2729 vfprintf(fp, fmt, args); 2730 va_end(args); 2731 } 2732 2733 static void FILE__write_graph(void *fp, int graph) 2734 { 2735 const char *s; 2736 switch (graph) { 2737 2738 case DARROW_CHAR: s = "↓"; break; 2739 case UARROW_CHAR: s = "↑"; break; 2740 case LARROW_CHAR: s = "←"; break; 2741 case RARROW_CHAR: s = "→"; break; 2742 default: s = "?"; break; 2743 } 2744 2745 fputs(s, fp); 2746 } 2747 2748 static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp) 2749 { 2750 struct annotation *notes = symbol__annotation(sym); 2751 struct annotation_write_ops wops = { 2752 .first_line = true, 2753 .obj = fp, 2754 .set_color = FILE__set_color, 2755 .set_percent_color = FILE__set_percent_color, 2756 .set_jumps_percent_color = FILE__set_jumps_percent_color, 2757 .printf = FILE__printf, 2758 .write_graph = FILE__write_graph, 2759 }; 2760 struct annotation_line *al; 2761 2762 list_for_each_entry(al, ¬es->src->source, node) { 2763 if (annotation_line__filter(al)) 2764 continue; 2765 annotation_line__write(al, notes, &wops); 2766 fputc('\n', fp); 2767 wops.first_line = false; 2768 } 2769 2770 return 0; 2771 } 2772 2773 int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel) 2774 { 2775 const char *ev_name = evsel__name(evsel); 2776 char buf[1024]; 2777 char *filename; 2778 int err = -1; 2779 FILE *fp; 2780 2781 if (asprintf(&filename, "%s.annotation", ms->sym->name) < 0) 2782 return -1; 2783 2784 fp = fopen(filename, "w"); 2785 if (fp == NULL) 2786 goto out_free_filename; 2787 2788 if (evsel__is_group_event(evsel)) { 2789 evsel__group_desc(evsel, buf, sizeof(buf)); 2790 ev_name = buf; 2791 } 2792 2793 fprintf(fp, "%s() %s\nEvent: %s\n\n", 2794 ms->sym->name, map__dso(ms->map)->long_name, 
ev_name); 2795 symbol__annotate_fprintf2(ms->sym, fp); 2796 2797 fclose(fp); 2798 err = 0; 2799 out_free_filename: 2800 free(filename); 2801 return err; 2802 } 2803 2804 void symbol__annotate_zero_histogram(struct symbol *sym, int evidx) 2805 { 2806 struct annotation *notes = symbol__annotation(sym); 2807 struct sym_hist *h = annotation__histogram(notes, evidx); 2808 2809 memset(h, 0, notes->src->sizeof_sym_hist); 2810 } 2811 2812 void symbol__annotate_decay_histogram(struct symbol *sym, int evidx) 2813 { 2814 struct annotation *notes = symbol__annotation(sym); 2815 struct sym_hist *h = annotation__histogram(notes, evidx); 2816 int len = symbol__size(sym), offset; 2817 2818 h->nr_samples = 0; 2819 for (offset = 0; offset < len; ++offset) { 2820 h->addr[offset].nr_samples = h->addr[offset].nr_samples * 7 / 8; 2821 h->nr_samples += h->addr[offset].nr_samples; 2822 } 2823 } 2824 2825 void annotated_source__purge(struct annotated_source *as) 2826 { 2827 struct annotation_line *al, *n; 2828 2829 list_for_each_entry_safe(al, n, &as->source, node) { 2830 list_del_init(&al->node); 2831 disasm_line__free(disasm_line(al)); 2832 } 2833 } 2834 2835 static size_t disasm_line__fprintf(struct disasm_line *dl, FILE *fp) 2836 { 2837 size_t printed; 2838 2839 if (dl->al.offset == -1) 2840 return fprintf(fp, "%s\n", dl->al.line); 2841 2842 printed = fprintf(fp, "%#" PRIx64 " %s", dl->al.offset, dl->ins.name); 2843 2844 if (dl->ops.raw[0] != '\0') { 2845 printed += fprintf(fp, "%.*s %s\n", 6 - (int)printed, " ", 2846 dl->ops.raw); 2847 } 2848 2849 return printed + fprintf(fp, "\n"); 2850 } 2851 2852 size_t disasm__fprintf(struct list_head *head, FILE *fp) 2853 { 2854 struct disasm_line *pos; 2855 size_t printed = 0; 2856 2857 list_for_each_entry(pos, head, al.node) 2858 printed += disasm_line__fprintf(pos, fp); 2859 2860 return printed; 2861 } 2862 2863 bool disasm_line__is_valid_local_jump(struct disasm_line *dl, struct symbol *sym) 2864 { 2865 if (!dl || !dl->ins.ops || !ins__is_jump(&dl->ins) || 2866 !disasm_line__has_local_offset(dl) || dl->ops.target.offset < 0 || 2867 dl->ops.target.offset >= (s64)symbol__size(sym)) 2868 return false; 2869 2870 return true; 2871 } 2872 2873 void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym) 2874 { 2875 u64 offset, size = symbol__size(sym); 2876 2877 /* PLT symbols contain external offsets */ 2878 if (strstr(sym->name, "@plt")) 2879 return; 2880 2881 for (offset = 0; offset < size; ++offset) { 2882 struct annotation_line *al = notes->src->offsets[offset]; 2883 struct disasm_line *dl; 2884 2885 dl = disasm_line(al); 2886 2887 if (!disasm_line__is_valid_local_jump(dl, sym)) 2888 continue; 2889 2890 al = notes->src->offsets[dl->ops.target.offset]; 2891 2892 /* 2893 * FIXME: Oops, no jump target? Buggy disassembler? Or do we 2894 * have to adjust to the previous offset? 
2895 */ 2896 if (al == NULL) 2897 continue; 2898 2899 if (++al->jump_sources > notes->max_jump_sources) 2900 notes->max_jump_sources = al->jump_sources; 2901 } 2902 } 2903 2904 void annotation__set_offsets(struct annotation *notes, s64 size) 2905 { 2906 struct annotation_line *al; 2907 struct annotated_source *src = notes->src; 2908 2909 src->max_line_len = 0; 2910 src->nr_entries = 0; 2911 src->nr_asm_entries = 0; 2912 2913 list_for_each_entry(al, &src->source, node) { 2914 size_t line_len = strlen(al->line); 2915 2916 if (src->max_line_len < line_len) 2917 src->max_line_len = line_len; 2918 al->idx = src->nr_entries++; 2919 if (al->offset != -1) { 2920 al->idx_asm = src->nr_asm_entries++; 2921 /* 2922 * FIXME: short term bandaid to cope with assembly 2923 * routines that comes with labels in the same column 2924 * as the address in objdump, sigh. 2925 * 2926 * E.g. copy_user_generic_unrolled 2927 */ 2928 if (al->offset < size) 2929 notes->src->offsets[al->offset] = al; 2930 } else 2931 al->idx_asm = -1; 2932 } 2933 } 2934 2935 static inline int width_jumps(int n) 2936 { 2937 if (n >= 100) 2938 return 5; 2939 if (n / 10) 2940 return 2; 2941 return 1; 2942 } 2943 2944 static int annotation__max_ins_name(struct annotation *notes) 2945 { 2946 int max_name = 0, len; 2947 struct annotation_line *al; 2948 2949 list_for_each_entry(al, ¬es->src->source, node) { 2950 if (al->offset == -1) 2951 continue; 2952 2953 len = strlen(disasm_line(al)->ins.name); 2954 if (max_name < len) 2955 max_name = len; 2956 } 2957 2958 return max_name; 2959 } 2960 2961 void annotation__init_column_widths(struct annotation *notes, struct symbol *sym) 2962 { 2963 notes->widths.addr = notes->widths.target = 2964 notes->widths.min_addr = hex_width(symbol__size(sym)); 2965 notes->widths.max_addr = hex_width(sym->end); 2966 notes->widths.jumps = width_jumps(notes->max_jump_sources); 2967 notes->widths.max_ins_name = annotation__max_ins_name(notes); 2968 } 2969 2970 void annotation__update_column_widths(struct annotation *notes) 2971 { 2972 if (annotate_opts.use_offset) 2973 notes->widths.target = notes->widths.min_addr; 2974 else if (annotate_opts.full_addr) 2975 notes->widths.target = BITS_PER_LONG / 4; 2976 else 2977 notes->widths.target = notes->widths.max_addr; 2978 2979 notes->widths.addr = notes->widths.target; 2980 2981 if (annotate_opts.show_nr_jumps) 2982 notes->widths.addr += notes->widths.jumps + 1; 2983 } 2984 2985 void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms) 2986 { 2987 annotate_opts.full_addr = !annotate_opts.full_addr; 2988 2989 if (annotate_opts.full_addr) 2990 notes->start = map__objdump_2mem(ms->map, ms->sym->start); 2991 else 2992 notes->start = map__rip_2objdump(ms->map, ms->sym->start); 2993 2994 annotation__update_column_widths(notes); 2995 } 2996 2997 static void annotation__calc_lines(struct annotation *notes, struct map *map, 2998 struct rb_root *root) 2999 { 3000 struct annotation_line *al; 3001 struct rb_root tmp_root = RB_ROOT; 3002 3003 list_for_each_entry(al, ¬es->src->source, node) { 3004 double percent_max = 0.0; 3005 int i; 3006 3007 for (i = 0; i < al->data_nr; i++) { 3008 double percent; 3009 3010 percent = annotation_data__percent(&al->data[i], 3011 annotate_opts.percent_type); 3012 3013 if (percent > percent_max) 3014 percent_max = percent; 3015 } 3016 3017 if (percent_max <= 0.5) 3018 continue; 3019 3020 al->path = get_srcline(map__dso(map), notes->start + al->offset, NULL, 3021 false, true, notes->start + al->offset); 3022 
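		/*
		 * al->path now holds the "file:line" string returned by
		 * get_srcline() (or its "??" fallback when unresolved);
		 * insert_source_line() keys its rb-tree on that string via
		 * strcmp() and sums the percentages of duplicate lines.
		 */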
insert_source_line(&tmp_root, al); 3023 } 3024 3025 resort_source_line(root, &tmp_root); 3026 } 3027 3028 static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root) 3029 { 3030 struct annotation *notes = symbol__annotation(ms->sym); 3031 3032 annotation__calc_lines(notes, ms->map, root); 3033 } 3034 3035 int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel) 3036 { 3037 struct dso *dso = map__dso(ms->map); 3038 struct symbol *sym = ms->sym; 3039 struct rb_root source_line = RB_ROOT; 3040 struct hists *hists = evsel__hists(evsel); 3041 char buf[1024]; 3042 int err; 3043 3044 err = symbol__annotate2(ms, evsel, NULL); 3045 if (err) { 3046 char msg[BUFSIZ]; 3047 3048 dso->annotate_warned = true; 3049 symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); 3050 ui__error("Couldn't annotate %s:\n%s", sym->name, msg); 3051 return -1; 3052 } 3053 3054 if (annotate_opts.print_lines) { 3055 srcline_full_filename = annotate_opts.full_path; 3056 symbol__calc_lines(ms, &source_line); 3057 print_summary(&source_line, dso->long_name); 3058 } 3059 3060 hists__scnprintf_title(hists, buf, sizeof(buf)); 3061 fprintf(stdout, "%s, [percent: %s]\n%s() %s\n", 3062 buf, percent_type_str(annotate_opts.percent_type), sym->name, 3063 dso->long_name); 3064 symbol__annotate_fprintf2(sym, stdout); 3065 3066 annotated_source__purge(symbol__annotation(sym)->src); 3067 3068 return 0; 3069 } 3070 3071 int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel) 3072 { 3073 struct dso *dso = map__dso(ms->map); 3074 struct symbol *sym = ms->sym; 3075 struct rb_root source_line = RB_ROOT; 3076 int err; 3077 3078 err = symbol__annotate(ms, evsel, NULL); 3079 if (err) { 3080 char msg[BUFSIZ]; 3081 3082 dso->annotate_warned = true; 3083 symbol__strerror_disassemble(ms, err, msg, sizeof(msg)); 3084 ui__error("Couldn't annotate %s:\n%s", sym->name, msg); 3085 return -1; 3086 } 3087 3088 symbol__calc_percent(sym, evsel); 3089 3090 if (annotate_opts.print_lines) { 3091 srcline_full_filename = annotate_opts.full_path; 3092 symbol__calc_lines(ms, &source_line); 3093 print_summary(&source_line, dso->long_name); 3094 } 3095 3096 symbol__annotate_printf(ms, evsel); 3097 3098 annotated_source__purge(symbol__annotation(sym)->src); 3099 3100 return 0; 3101 } 3102 3103 bool ui__has_annotation(void) 3104 { 3105 return use_browser == 1 && perf_hpp_list.sym; 3106 } 3107 3108 3109 static double annotation_line__max_percent(struct annotation_line *al, 3110 struct annotation *notes, 3111 unsigned int percent_type) 3112 { 3113 double percent_max = 0.0; 3114 int i; 3115 3116 for (i = 0; i < notes->nr_events; i++) { 3117 double percent; 3118 3119 percent = annotation_data__percent(&al->data[i], 3120 percent_type); 3121 3122 if (percent > percent_max) 3123 percent_max = percent; 3124 } 3125 3126 return percent_max; 3127 } 3128 3129 static void disasm_line__write(struct disasm_line *dl, struct annotation *notes, 3130 void *obj, char *bf, size_t size, 3131 void (*obj__printf)(void *obj, const char *fmt, ...), 3132 void (*obj__write_graph)(void *obj, int graph)) 3133 { 3134 if (dl->ins.ops && dl->ins.ops->scnprintf) { 3135 if (ins__is_jump(&dl->ins)) { 3136 bool fwd; 3137 3138 if (dl->ops.target.outside) 3139 goto call_like; 3140 fwd = dl->ops.target.offset > dl->al.offset; 3141 obj__write_graph(obj, fwd ? 
DARROW_CHAR : UARROW_CHAR); 3142 obj__printf(obj, " "); 3143 } else if (ins__is_call(&dl->ins)) { 3144 call_like: 3145 obj__write_graph(obj, RARROW_CHAR); 3146 obj__printf(obj, " "); 3147 } else if (ins__is_ret(&dl->ins)) { 3148 obj__write_graph(obj, LARROW_CHAR); 3149 obj__printf(obj, " "); 3150 } else { 3151 obj__printf(obj, " "); 3152 } 3153 } else { 3154 obj__printf(obj, " "); 3155 } 3156 3157 disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset, notes->widths.max_ins_name); 3158 } 3159 3160 static void ipc_coverage_string(char *bf, int size, struct annotation *notes) 3161 { 3162 double ipc = 0.0, coverage = 0.0; 3163 struct annotated_branch *branch = annotation__get_branch(notes); 3164 3165 if (branch && branch->hit_cycles) 3166 ipc = branch->hit_insn / ((double)branch->hit_cycles); 3167 3168 if (branch && branch->total_insn) { 3169 coverage = branch->cover_insn * 100.0 / 3170 ((double)branch->total_insn); 3171 } 3172 3173 scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)", 3174 ipc, coverage); 3175 } 3176 3177 static void __annotation_line__write(struct annotation_line *al, struct annotation *notes, 3178 bool first_line, bool current_entry, bool change_color, int width, 3179 void *obj, unsigned int percent_type, 3180 int (*obj__set_color)(void *obj, int color), 3181 void (*obj__set_percent_color)(void *obj, double percent, bool current), 3182 int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current), 3183 void (*obj__printf)(void *obj, const char *fmt, ...), 3184 void (*obj__write_graph)(void *obj, int graph)) 3185 3186 { 3187 double percent_max = annotation_line__max_percent(al, notes, percent_type); 3188 int pcnt_width = annotation__pcnt_width(notes), 3189 cycles_width = annotation__cycles_width(notes); 3190 bool show_title = false; 3191 char bf[256]; 3192 int printed; 3193 3194 if (first_line && (al->offset == -1 || percent_max == 0.0)) { 3195 if (notes->branch && al->cycles) { 3196 if (al->cycles->ipc == 0.0 && al->cycles->avg == 0) 3197 show_title = true; 3198 } else 3199 show_title = true; 3200 } 3201 3202 if (al->offset != -1 && percent_max != 0.0) { 3203 int i; 3204 3205 for (i = 0; i < notes->nr_events; i++) { 3206 double percent; 3207 3208 percent = annotation_data__percent(&al->data[i], percent_type); 3209 3210 obj__set_percent_color(obj, percent, current_entry); 3211 if (symbol_conf.show_total_period) { 3212 obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period); 3213 } else if (symbol_conf.show_nr_samples) { 3214 obj__printf(obj, "%6" PRIu64 " ", 3215 al->data[i].he.nr_samples); 3216 } else { 3217 obj__printf(obj, "%6.2f ", percent); 3218 } 3219 } 3220 } else { 3221 obj__set_percent_color(obj, 0, current_entry); 3222 3223 if (!show_title) 3224 obj__printf(obj, "%-*s", pcnt_width, " "); 3225 else { 3226 obj__printf(obj, "%-*s", pcnt_width, 3227 symbol_conf.show_total_period ? "Period" : 3228 symbol_conf.show_nr_samples ? 
"Samples" : "Percent"); 3229 } 3230 } 3231 3232 if (notes->branch) { 3233 if (al->cycles && al->cycles->ipc) 3234 obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc); 3235 else if (!show_title) 3236 obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " "); 3237 else 3238 obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC"); 3239 3240 if (!annotate_opts.show_minmax_cycle) { 3241 if (al->cycles && al->cycles->avg) 3242 obj__printf(obj, "%*" PRIu64 " ", 3243 ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg); 3244 else if (!show_title) 3245 obj__printf(obj, "%*s", 3246 ANNOTATION__CYCLES_WIDTH, " "); 3247 else 3248 obj__printf(obj, "%*s ", 3249 ANNOTATION__CYCLES_WIDTH - 1, 3250 "Cycle"); 3251 } else { 3252 if (al->cycles) { 3253 char str[32]; 3254 3255 scnprintf(str, sizeof(str), 3256 "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")", 3257 al->cycles->avg, al->cycles->min, 3258 al->cycles->max); 3259 3260 obj__printf(obj, "%*s ", 3261 ANNOTATION__MINMAX_CYCLES_WIDTH - 1, 3262 str); 3263 } else if (!show_title) 3264 obj__printf(obj, "%*s", 3265 ANNOTATION__MINMAX_CYCLES_WIDTH, 3266 " "); 3267 else 3268 obj__printf(obj, "%*s ", 3269 ANNOTATION__MINMAX_CYCLES_WIDTH - 1, 3270 "Cycle(min/max)"); 3271 } 3272 3273 if (show_title && !*al->line) { 3274 ipc_coverage_string(bf, sizeof(bf), notes); 3275 obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf); 3276 } 3277 } 3278 3279 obj__printf(obj, " "); 3280 3281 if (!*al->line) 3282 obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " "); 3283 else if (al->offset == -1) { 3284 if (al->line_nr && annotate_opts.show_linenr) 3285 printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr); 3286 else 3287 printed = scnprintf(bf, sizeof(bf), "%-*s ", notes->widths.addr, " "); 3288 obj__printf(obj, bf); 3289 obj__printf(obj, "%-*s", width - printed - pcnt_width - cycles_width + 1, al->line); 3290 } else { 3291 u64 addr = al->offset; 3292 int color = -1; 3293 3294 if (!annotate_opts.use_offset) 3295 addr += notes->start; 3296 3297 if (!annotate_opts.use_offset) { 3298 printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr); 3299 } else { 3300 if (al->jump_sources && 3301 annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) { 3302 if (annotate_opts.show_nr_jumps) { 3303 int prev; 3304 printed = scnprintf(bf, sizeof(bf), "%*d ", 3305 notes->widths.jumps, 3306 al->jump_sources); 3307 prev = obj__set_jumps_percent_color(obj, al->jump_sources, 3308 current_entry); 3309 obj__printf(obj, bf); 3310 obj__set_color(obj, prev); 3311 } 3312 print_addr: 3313 printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ", 3314 notes->widths.target, addr); 3315 } else if (ins__is_call(&disasm_line(al)->ins) && 3316 annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) { 3317 goto print_addr; 3318 } else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) { 3319 goto print_addr; 3320 } else { 3321 printed = scnprintf(bf, sizeof(bf), "%-*s ", 3322 notes->widths.addr, " "); 3323 } 3324 } 3325 3326 if (change_color) 3327 color = obj__set_color(obj, HE_COLORSET_ADDR); 3328 obj__printf(obj, bf); 3329 if (change_color) 3330 obj__set_color(obj, color); 3331 3332 disasm_line__write(disasm_line(al), notes, obj, bf, sizeof(bf), obj__printf, obj__write_graph); 3333 3334 obj__printf(obj, "%-*s", width - pcnt_width - cycles_width - 3 - printed, bf); 3335 } 3336 3337 } 3338 3339 void annotation_line__write(struct annotation_line *al, struct annotation *notes, 3340 struct annotation_write_ops *wops) 3341 { 3342 
__annotation_line__write(al, notes, wops->first_line, wops->current_entry, 3343 wops->change_color, wops->width, wops->obj, 3344 annotate_opts.percent_type, 3345 wops->set_color, wops->set_percent_color, 3346 wops->set_jumps_percent_color, wops->printf, 3347 wops->write_graph); 3348 } 3349 3350 int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel, 3351 struct arch **parch) 3352 { 3353 struct symbol *sym = ms->sym; 3354 struct annotation *notes = symbol__annotation(sym); 3355 size_t size = symbol__size(sym); 3356 int nr_pcnt = 1, err; 3357 3358 notes->src->offsets = zalloc(size * sizeof(struct annotation_line *)); 3359 if (notes->src->offsets == NULL) 3360 return ENOMEM; 3361 3362 if (evsel__is_group_event(evsel)) 3363 nr_pcnt = evsel->core.nr_members; 3364 3365 err = symbol__annotate(ms, evsel, parch); 3366 if (err) 3367 goto out_free_offsets; 3368 3369 symbol__calc_percent(sym, evsel); 3370 3371 annotation__set_offsets(notes, size); 3372 annotation__mark_jump_targets(notes, sym); 3373 3374 err = annotation__compute_ipc(notes, size); 3375 if (err) 3376 goto out_free_offsets; 3377 3378 annotation__init_column_widths(notes, sym); 3379 notes->nr_events = nr_pcnt; 3380 3381 annotation__update_column_widths(notes); 3382 sym->annotate2 = 1; 3383 3384 return 0; 3385 3386 out_free_offsets: 3387 zfree(¬es->src->offsets); 3388 return err; 3389 } 3390 3391 static int annotation__config(const char *var, const char *value, void *data) 3392 { 3393 struct annotation_options *opt = data; 3394 3395 if (!strstarts(var, "annotate.")) 3396 return 0; 3397 3398 if (!strcmp(var, "annotate.offset_level")) { 3399 perf_config_u8(&opt->offset_level, "offset_level", value); 3400 3401 if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL) 3402 opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL; 3403 else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL) 3404 opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL; 3405 } else if (!strcmp(var, "annotate.hide_src_code")) { 3406 opt->hide_src_code = perf_config_bool("hide_src_code", value); 3407 } else if (!strcmp(var, "annotate.jump_arrows")) { 3408 opt->jump_arrows = perf_config_bool("jump_arrows", value); 3409 } else if (!strcmp(var, "annotate.show_linenr")) { 3410 opt->show_linenr = perf_config_bool("show_linenr", value); 3411 } else if (!strcmp(var, "annotate.show_nr_jumps")) { 3412 opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value); 3413 } else if (!strcmp(var, "annotate.show_nr_samples")) { 3414 symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples", 3415 value); 3416 } else if (!strcmp(var, "annotate.show_total_period")) { 3417 symbol_conf.show_total_period = perf_config_bool("show_total_period", 3418 value); 3419 } else if (!strcmp(var, "annotate.use_offset")) { 3420 opt->use_offset = perf_config_bool("use_offset", value); 3421 } else if (!strcmp(var, "annotate.disassembler_style")) { 3422 opt->disassembler_style = strdup(value); 3423 if (!opt->disassembler_style) { 3424 pr_err("Not enough memory for annotate.disassembler_style\n"); 3425 return -1; 3426 } 3427 } else if (!strcmp(var, "annotate.objdump")) { 3428 opt->objdump_path = strdup(value); 3429 if (!opt->objdump_path) { 3430 pr_err("Not enough memory for annotate.objdump\n"); 3431 return -1; 3432 } 3433 } else if (!strcmp(var, "annotate.addr2line")) { 3434 symbol_conf.addr2line_path = strdup(value); 3435 if (!symbol_conf.addr2line_path) { 3436 pr_err("Not enough memory for annotate.addr2line\n"); 3437 return -1; 3438 } 3439 } else if (!strcmp(var, "annotate.demangle")) { 3440 
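		/*
		 * Illustrative ~/.perfconfig snippet (values assumed, not
		 * defaults taken from this file):
		 *
		 *	[annotate]
		 *		demangle = true
		 *		hide_src_code = false
		 *		show_nr_jumps = true
		 *
		 * perf_config() hands each such key to this callback as
		 * "annotate.<name>".
		 */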
symbol_conf.demangle = perf_config_bool("demangle", value); 3441 } else if (!strcmp(var, "annotate.demangle_kernel")) { 3442 symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value); 3443 } else { 3444 pr_debug("%s variable unknown, ignoring...", var); 3445 } 3446 3447 return 0; 3448 } 3449 3450 void annotation_options__init(void) 3451 { 3452 struct annotation_options *opt = &annotate_opts; 3453 3454 memset(opt, 0, sizeof(*opt)); 3455 3456 /* Default values. */ 3457 opt->use_offset = true; 3458 opt->jump_arrows = true; 3459 opt->annotate_src = true; 3460 opt->offset_level = ANNOTATION__OFFSET_JUMP_TARGETS; 3461 opt->percent_type = PERCENT_PERIOD_LOCAL; 3462 } 3463 3464 void annotation_options__exit(void) 3465 { 3466 zfree(&annotate_opts.disassembler_style); 3467 zfree(&annotate_opts.objdump_path); 3468 } 3469 3470 void annotation_config__init(void) 3471 { 3472 perf_config(annotation__config, &annotate_opts); 3473 } 3474 3475 static unsigned int parse_percent_type(char *str1, char *str2) 3476 { 3477 unsigned int type = (unsigned int) -1; 3478 3479 if (!strcmp("period", str1)) { 3480 if (!strcmp("local", str2)) 3481 type = PERCENT_PERIOD_LOCAL; 3482 else if (!strcmp("global", str2)) 3483 type = PERCENT_PERIOD_GLOBAL; 3484 } 3485 3486 if (!strcmp("hits", str1)) { 3487 if (!strcmp("local", str2)) 3488 type = PERCENT_HITS_LOCAL; 3489 else if (!strcmp("global", str2)) 3490 type = PERCENT_HITS_GLOBAL; 3491 } 3492 3493 return type; 3494 } 3495 3496 int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str, 3497 int unset __maybe_unused) 3498 { 3499 unsigned int type; 3500 char *str1, *str2; 3501 int err = -1; 3502 3503 str1 = strdup(_str); 3504 if (!str1) 3505 return -ENOMEM; 3506 3507 str2 = strchr(str1, '-'); 3508 if (!str2) 3509 goto out; 3510 3511 *str2++ = 0; 3512 3513 type = parse_percent_type(str1, str2); 3514 if (type == (unsigned int) -1) 3515 type = parse_percent_type(str2, str1); 3516 if (type != (unsigned int) -1) { 3517 annotate_opts.percent_type = type; 3518 err = 0; 3519 } 3520 3521 out: 3522 free(str1); 3523 return err; 3524 } 3525 3526 int annotate_check_args(void) 3527 { 3528 struct annotation_options *args = &annotate_opts; 3529 3530 if (args->prefix_strip && !args->prefix) { 3531 pr_err("--prefix-strip requires --prefix\n"); 3532 return -1; 3533 } 3534 return 0; 3535 } 3536 3537 /* 3538 * Get register number and access offset from the given instruction. 3539 * It assumes AT&T x86 asm format like OFFSET(REG). Maybe it needs 3540 * to revisit the format when it handles different architecture. 3541 * Fills @reg and @offset when return 0. 3542 */ 3543 static int extract_reg_offset(struct arch *arch, const char *str, 3544 struct annotated_op_loc *op_loc) 3545 { 3546 char *p; 3547 char *regname; 3548 3549 if (arch->objdump.register_char == 0) 3550 return -1; 3551 3552 /* 3553 * It should start from offset, but it's possible to skip 0 3554 * in the asm. So 0(%rax) should be same as (%rax). 3555 * 3556 * However, it also start with a segment select register like 3557 * %gs:0x18(%rbx). In that case it should skip the part. 
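 *
 * Hedged walk-through (operand string assumed): for "0x18(%rbx,%rcx,4)",
 * strtol() below stores 0x18 in op_loc->offset, the substring starting
 * at the first '%' ("%rbx,...") is handed to get_dwarf_regnum() for
 * reg1, and when multi_regs is set the substring at the second '%'
 * ("%rcx,...") gives reg2.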
3558 */ 3559 if (*str == arch->objdump.register_char) { 3560 while (*str && !isdigit(*str) && 3561 *str != arch->objdump.memory_ref_char) 3562 str++; 3563 } 3564 3565 op_loc->offset = strtol(str, &p, 0); 3566 3567 p = strchr(p, arch->objdump.register_char); 3568 if (p == NULL) 3569 return -1; 3570 3571 regname = strdup(p); 3572 if (regname == NULL) 3573 return -1; 3574 3575 op_loc->reg1 = get_dwarf_regnum(regname, 0); 3576 free(regname); 3577 3578 /* Get the second register */ 3579 if (op_loc->multi_regs) { 3580 p = strchr(p + 1, arch->objdump.register_char); 3581 if (p == NULL) 3582 return -1; 3583 3584 regname = strdup(p); 3585 if (regname == NULL) 3586 return -1; 3587 3588 op_loc->reg2 = get_dwarf_regnum(regname, 0); 3589 free(regname); 3590 } 3591 return 0; 3592 } 3593 3594 /** 3595 * annotate_get_insn_location - Get location of instruction 3596 * @arch: the architecture info 3597 * @dl: the target instruction 3598 * @loc: a buffer to save the data 3599 * 3600 * Get detailed location info (register and offset) in the instruction. 3601 * It needs both source and target operand and whether it accesses a 3602 * memory location. The offset field is meaningful only when the 3603 * corresponding mem flag is set. The reg2 field is meaningful only 3604 * when multi_regs flag is set. 3605 * 3606 * Some examples on x86: 3607 * 3608 * mov (%rax), %rcx # src_reg1 = rax, src_mem = 1, src_offset = 0 3609 * # dst_reg1 = rcx, dst_mem = 0 3610 * 3611 * mov 0x18, %r8 # src_reg1 = -1, src_mem = 0 3612 * # dst_reg1 = r8, dst_mem = 0 3613 * 3614 * mov %rsi, 8(%rbx,%rcx,4) # src_reg1 = rsi, src_mem = 0, dst_multi_regs = 0 3615 * # dst_reg1 = rbx, dst_reg2 = rcx, dst_mem = 1 3616 * # dst_multi_regs = 1, dst_offset = 8 3617 */ 3618 int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl, 3619 struct annotated_insn_loc *loc) 3620 { 3621 struct ins_operands *ops; 3622 struct annotated_op_loc *op_loc; 3623 int i; 3624 3625 if (!strcmp(dl->ins.name, "lock")) 3626 ops = dl->ops.locked.ops; 3627 else 3628 ops = &dl->ops; 3629 3630 if (ops == NULL) 3631 return -1; 3632 3633 memset(loc, 0, sizeof(*loc)); 3634 3635 for_each_insn_op_loc(loc, i, op_loc) { 3636 const char *insn_str = ops->source.raw; 3637 bool multi_regs = ops->source.multi_regs; 3638 3639 if (i == INSN_OP_TARGET) { 3640 insn_str = ops->target.raw; 3641 multi_regs = ops->target.multi_regs; 3642 } 3643 3644 /* Invalidate the register by default */ 3645 op_loc->reg1 = -1; 3646 op_loc->reg2 = -1; 3647 3648 if (insn_str == NULL) 3649 continue; 3650 3651 if (strchr(insn_str, arch->objdump.memory_ref_char)) { 3652 op_loc->mem_ref = true; 3653 op_loc->multi_regs = multi_regs; 3654 extract_reg_offset(arch, insn_str, op_loc); 3655 } else { 3656 char *s = strdup(insn_str); 3657 3658 if (s) { 3659 op_loc->reg1 = get_dwarf_regnum(s, 0); 3660 free(s); 3661 } 3662 } 3663 } 3664 3665 return 0; 3666 } 3667 3668 static void symbol__ensure_annotate(struct map_symbol *ms, struct evsel *evsel) 3669 { 3670 struct disasm_line *dl, *tmp_dl; 3671 struct annotation *notes; 3672 3673 notes = symbol__annotation(ms->sym); 3674 if (!list_empty(¬es->src->source)) 3675 return; 3676 3677 if (symbol__annotate(ms, evsel, NULL) < 0) 3678 return; 3679 3680 /* remove non-insn disasm lines for simplicity */ 3681 list_for_each_entry_safe(dl, tmp_dl, ¬es->src->source, al.node) { 3682 if (dl->al.offset == -1) { 3683 list_del(&dl->al.node); 3684 free(dl); 3685 } 3686 } 3687 } 3688 3689 static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip) 3690 { 3691 struct 
disasm_line *dl; 3692 struct annotation *notes; 3693 3694 notes = symbol__annotation(sym); 3695 3696 list_for_each_entry(dl, ¬es->src->source, al.node) { 3697 if (sym->start + dl->al.offset == ip) { 3698 /* 3699 * llvm-objdump places "lock" in a separate line and 3700 * in that case, we want to get the next line. 3701 */ 3702 if (!strcmp(dl->ins.name, "lock") && *dl->ops.raw == '\0') { 3703 ip++; 3704 continue; 3705 } 3706 return dl; 3707 } 3708 } 3709 return NULL; 3710 } 3711 3712 static struct annotated_item_stat *annotate_data_stat(struct list_head *head, 3713 const char *name) 3714 { 3715 struct annotated_item_stat *istat; 3716 3717 list_for_each_entry(istat, head, list) { 3718 if (!strcmp(istat->name, name)) 3719 return istat; 3720 } 3721 3722 istat = zalloc(sizeof(*istat)); 3723 if (istat == NULL) 3724 return NULL; 3725 3726 istat->name = strdup(name); 3727 if (istat->name == NULL) { 3728 free(istat); 3729 return NULL; 3730 } 3731 3732 list_add_tail(&istat->list, head); 3733 return istat; 3734 } 3735 3736 static bool is_stack_operation(struct arch *arch, struct disasm_line *dl) 3737 { 3738 if (arch__is(arch, "x86")) { 3739 if (!strncmp(dl->ins.name, "push", 4) || 3740 !strncmp(dl->ins.name, "pop", 3) || 3741 !strncmp(dl->ins.name, "ret", 3)) 3742 return true; 3743 } 3744 3745 return false; 3746 } 3747 3748 u64 annotate_calc_pcrel(struct map_symbol *ms, u64 ip, int offset, 3749 struct disasm_line *dl) 3750 { 3751 struct annotation *notes; 3752 struct disasm_line *next; 3753 u64 addr; 3754 3755 notes = symbol__annotation(ms->sym); 3756 /* 3757 * PC-relative addressing starts from the next instruction address 3758 * But the IP is for the current instruction. Since disasm_line 3759 * doesn't have the instruction size, calculate it using the next 3760 * disasm_line. If it's the last one, we can use symbol's end 3761 * address directly. 3762 */ 3763 if (&dl->al.node == notes->src->source.prev) 3764 addr = ms->sym->end + offset; 3765 else { 3766 next = list_next_entry(dl, al.node); 3767 addr = ip + (next->al.offset - dl->al.offset) + offset; 3768 } 3769 return map__rip_2objdump(ms->map, addr); 3770 } 3771 3772 /** 3773 * hist_entry__get_data_type - find data type for given hist entry 3774 * @he: hist entry 3775 * 3776 * This function first annotates the instruction at @he->ip and extracts 3777 * register and offset info from it. Then it searches the DWARF debug 3778 * info to get a variable and type information using the address, register, 3779 * and offset. 3780 */ 3781 struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) 3782 { 3783 struct map_symbol *ms = &he->ms; 3784 struct evsel *evsel = hists_to_evsel(he->hists); 3785 struct arch *arch; 3786 struct disasm_line *dl; 3787 struct annotated_insn_loc loc; 3788 struct annotated_op_loc *op_loc; 3789 struct annotated_data_type *mem_type; 3790 struct annotated_item_stat *istat; 3791 u64 ip = he->ip, addr = 0; 3792 const char *var_name = NULL; 3793 int var_offset; 3794 int i; 3795 3796 ann_data_stat.total++; 3797 3798 if (ms->map == NULL || ms->sym == NULL) { 3799 ann_data_stat.no_sym++; 3800 return NULL; 3801 } 3802 3803 if (!symbol_conf.init_annotation) { 3804 ann_data_stat.no_sym++; 3805 return NULL; 3806 } 3807 3808 if (evsel__get_arch(evsel, &arch) < 0) { 3809 ann_data_stat.no_insn++; 3810 return NULL; 3811 } 3812 3813 /* Make sure it runs objdump to get disasm of the function */ 3814 symbol__ensure_annotate(ms, evsel); 3815 3816 /* 3817 * Get a disasm to extract the location from the insn. 
3818 * This is too slow... 3819 */ 3820 dl = find_disasm_line(ms->sym, ip); 3821 if (dl == NULL) { 3822 ann_data_stat.no_insn++; 3823 return NULL; 3824 } 3825 3826 retry: 3827 istat = annotate_data_stat(&ann_insn_stat, dl->ins.name); 3828 if (istat == NULL) { 3829 ann_data_stat.no_insn++; 3830 return NULL; 3831 } 3832 3833 if (annotate_get_insn_location(arch, dl, &loc) < 0) { 3834 ann_data_stat.no_insn_ops++; 3835 istat->bad++; 3836 return NULL; 3837 } 3838 3839 if (is_stack_operation(arch, dl)) { 3840 istat->good++; 3841 he->mem_type_off = 0; 3842 return &stackop_type; 3843 } 3844 3845 for_each_insn_op_loc(&loc, i, op_loc) { 3846 if (!op_loc->mem_ref) 3847 continue; 3848 3849 /* Recalculate IP because of LOCK prefix or insn fusion */ 3850 ip = ms->sym->start + dl->al.offset; 3851 3852 var_offset = op_loc->offset; 3853 3854 /* PC-relative addressing */ 3855 if (op_loc->reg1 == DWARF_REG_PC) { 3856 struct addr_location al; 3857 struct symbol *var; 3858 u64 map_addr; 3859 3860 addr = annotate_calc_pcrel(ms, ip, op_loc->offset, dl); 3861 /* Kernel symbols might be relocated */ 3862 map_addr = addr + map__reloc(ms->map); 3863 3864 addr_location__init(&al); 3865 var = thread__find_symbol_fb(he->thread, he->cpumode, 3866 map_addr, &al); 3867 if (var) { 3868 var_name = var->name; 3869 /* Calculate type offset from the start of variable */ 3870 var_offset = map_addr - map__unmap_ip(al.map, var->start); 3871 } 3872 addr_location__exit(&al); 3873 } 3874 3875 mem_type = find_data_type(ms, ip, op_loc, addr, var_name); 3876 if (mem_type) 3877 istat->good++; 3878 else 3879 istat->bad++; 3880 3881 if (mem_type && var_name) 3882 op_loc->offset = var_offset; 3883 3884 if (symbol_conf.annotate_data_sample) { 3885 annotated_data_type__update_samples(mem_type, evsel, 3886 op_loc->offset, 3887 he->stat.nr_events, 3888 he->stat.period); 3889 } 3890 he->mem_type_off = op_loc->offset; 3891 return mem_type; 3892 } 3893 3894 /* 3895 * Some instructions can be fused and the actual memory access came 3896 * from the previous instruction. 3897 */ 3898 if (dl->al.offset > 0) { 3899 struct disasm_line *prev_dl; 3900 3901 prev_dl = list_prev_entry(dl, al.node); 3902 if (ins__is_fused(arch, prev_dl->ins.name, dl->ins.name)) { 3903 dl = prev_dl; 3904 goto retry; 3905 } 3906 } 3907 3908 ann_data_stat.no_mem_ops++; 3909 istat->bad++; 3910 return NULL; 3911 } 3912
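
/*
 * Usage sketch (illustrative only, kept in a comment so it is not
 * compiled): a minimal stdio consumer of the annotation API above,
 * closely following symbol__tty_annotate2() and
 * symbol__annotate_fprintf2().  The referenced helpers are the ones
 * defined in this file; the dump_annotation() function itself is
 * hypothetical.
 *
 *	static void dump_annotation(struct map_symbol *ms, struct evsel *evsel)
 *	{
 *		struct annotation *notes = symbol__annotation(ms->sym);
 *		struct annotation_write_ops wops = {
 *			.first_line		 = true,
 *			.obj			 = stdout,
 *			.set_color		 = FILE__set_color,
 *			.set_percent_color	 = FILE__set_percent_color,
 *			.set_jumps_percent_color = FILE__set_jumps_percent_color,
 *			.printf			 = FILE__printf,
 *			.write_graph		 = FILE__write_graph,
 *		};
 *		struct annotation_line *al;
 *
 *		if (symbol__annotate2(ms, evsel, NULL))
 *			return;
 *
 *		list_for_each_entry(al, &notes->src->source, node) {
 *			if (annotation_line__filter(al))
 *				continue;
 *			annotation_line__write(al, notes, &wops);
 *			fputc('\n', stdout);
 *			wops.first_line = false;
 *		}
 *		annotated_source__purge(notes->src);
 *	}
 */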