/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Convert sample address to data type using DWARF debug info.
 *
 * Written by Namhyung Kim <namhyung@kernel.org>
 */

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <linux/zalloc.h>

#include "annotate.h"
#include "annotate-data.h"
#include "debuginfo.h"
#include "debug.h"
#include "dso.h"
#include "dwarf-regs.h"
#include "evsel.h"
#include "evlist.h"
#include "map.h"
#include "map_symbol.h"
#include "sort.h"
#include "strbuf.h"
#include "symbol.h"
#include "symbol_conf.h"
#include "thread.h"

/* register number of the stack pointer */
#define X86_REG_SP 7

static void delete_var_types(struct die_var_type *var_types);

#define pr_debug_dtp(fmt, ...)					\
do {								\
	if (debug_type_profile)					\
		pr_info(fmt, ##__VA_ARGS__);			\
	else							\
		pr_debug3(fmt, ##__VA_ARGS__);			\
} while (0)

void pr_debug_type_name(Dwarf_Die *die, enum type_state_kind kind)
{
	struct strbuf sb;
	char *str;
	Dwarf_Word size = 0;

	if (!debug_type_profile && verbose < 3)
		return;

	switch (kind) {
	case TSR_KIND_INVALID:
		pr_info("\n");
		return;
	case TSR_KIND_PERCPU_BASE:
		pr_info(" percpu base\n");
		return;
	case TSR_KIND_CONST:
		pr_info(" constant\n");
		return;
	case TSR_KIND_POINTER:
		pr_info(" pointer");
		/* it also prints the type info */
		break;
	case TSR_KIND_CANARY:
		pr_info(" stack canary\n");
		return;
	case TSR_KIND_TYPE:
	default:
		break;
	}

	dwarf_aggregate_size(die, &size);

	strbuf_init(&sb, 32);
	die_get_typename_from_type(die, &sb);
	str = strbuf_detach(&sb, NULL);
	pr_info(" type='%s' size=%#lx (die:%#lx)\n",
		str, (long)size, (long)dwarf_dieoffset(die));
	free(str);
}

static void pr_debug_location(Dwarf_Die *die, u64 pc, int reg)
{
	ptrdiff_t off = 0;
	Dwarf_Attribute attr;
	Dwarf_Addr base, start, end;
	Dwarf_Op *ops;
	size_t nops;

	if (!debug_type_profile && verbose < 3)
		return;

	if (dwarf_attr(die, DW_AT_location, &attr) == NULL)
		return;

	while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) {
		if (reg != DWARF_REG_PC && end <= pc)
			continue;
		if (reg != DWARF_REG_PC && start > pc)
			break;

		pr_info(" variable location: ");
		switch (ops->atom) {
		case DW_OP_reg0 ... DW_OP_reg31:
			pr_info("reg%d\n", ops->atom - DW_OP_reg0);
			break;
		case DW_OP_breg0 ... DW_OP_breg31:
			pr_info("base=reg%d, offset=%#lx\n",
				ops->atom - DW_OP_breg0, (long)ops->number);
			break;
		case DW_OP_regx:
			pr_info("reg%ld\n", (long)ops->number);
			break;
		case DW_OP_bregx:
			pr_info("base=reg%ld, offset=%#lx\n",
				(long)ops->number, (long)ops->number2);
			break;
		case DW_OP_fbreg:
			pr_info("use frame base, offset=%#lx\n", (long)ops->number);
			break;
		case DW_OP_addr:
			pr_info("address=%#lx\n", (long)ops->number);
			break;
		default:
			pr_info("unknown: code=%#x, number=%#lx\n",
				ops->atom, (long)ops->number);
			break;
		}
		break;
	}
}

bool has_reg_type(struct type_state *state, int reg)
{
	return (unsigned)reg < ARRAY_SIZE(state->regs);
}

static void init_type_state(struct type_state *state, struct arch *arch)
{
	memset(state, 0, sizeof(*state));
	INIT_LIST_HEAD(&state->stack_vars);

	if (arch__is(arch, "x86")) {
		state->regs[0].caller_saved = true;
		state->regs[1].caller_saved = true;
		state->regs[2].caller_saved = true;
		state->regs[4].caller_saved = true;
		state->regs[5].caller_saved = true;
		state->regs[8].caller_saved = true;
		state->regs[9].caller_saved = true;
		state->regs[10].caller_saved = true;
		state->regs[11].caller_saved = true;
		state->ret_reg = 0;
		state->stack_reg = X86_REG_SP;
	}
}
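/*
 * Note on the indexes used above: they are DWARF register numbers, not
 * instruction encodings.  On x86-64 the mapping is roughly 0=rax, 1=rdx,
 * 2=rcx, 3=rbx, 4=rsi, 5=rdi, 6=rbp, 7=rsp and 8-15=r8-r15, so the entries
 * marked caller_saved correspond to rax, rdx, rcx, rsi, rdi and r8-r11 per
 * the SysV x86-64 calling convention (treat the exact list as a best-effort
 * summary, not an ABI reference).
 */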
static void exit_type_state(struct type_state *state)
{
	struct type_state_stack *stack, *tmp;

	list_for_each_entry_safe(stack, tmp, &state->stack_vars, list) {
		list_del(&stack->list);
		free(stack);
	}
}

/*
 * Compare type name and size to maintain them in a tree.
 * I'm not sure if DWARF would have information of a single type in many
 * different places (compilation units). If not, it could compare the
 * offset of the type entry in the .debug_info section.
 */
static int data_type_cmp(const void *_key, const struct rb_node *node)
{
	const struct annotated_data_type *key = _key;
	struct annotated_data_type *type;

	type = rb_entry(node, struct annotated_data_type, node);

	if (key->self.size != type->self.size)
		return key->self.size - type->self.size;
	return strcmp(key->self.type_name, type->self.type_name);
}

static bool data_type_less(struct rb_node *node_a, const struct rb_node *node_b)
{
	struct annotated_data_type *a, *b;

	a = rb_entry(node_a, struct annotated_data_type, node);
	b = rb_entry(node_b, struct annotated_data_type, node);

	if (a->self.size != b->self.size)
		return a->self.size < b->self.size;
	return strcmp(a->self.type_name, b->self.type_name) < 0;
}

/* Recursively add new members for struct/union */
static int __add_member_cb(Dwarf_Die *die, void *arg)
{
	struct annotated_member *parent = arg;
	struct annotated_member *member;
	Dwarf_Die member_type, die_mem;
	Dwarf_Word size, loc, bit_size = 0;
	Dwarf_Attribute attr;
	struct strbuf sb;
	int tag;

	if (dwarf_tag(die) != DW_TAG_member)
		return DIE_FIND_CB_SIBLING;

	member = zalloc(sizeof(*member));
	if (member == NULL)
		return DIE_FIND_CB_END;

	strbuf_init(&sb, 32);
	die_get_typename(die, &sb);

	__die_get_real_type(die, &member_type);
	if (dwarf_tag(&member_type) == DW_TAG_typedef)
		die_get_real_type(&member_type, &die_mem);
	else
		die_mem = member_type;

	if (dwarf_aggregate_size(&die_mem, &size) < 0)
		size = 0;

	if (dwarf_attr_integrate(die, DW_AT_data_member_location, &attr))
		dwarf_formudata(&attr, &loc);
	else {
		/* bitfield member */
		if (dwarf_attr_integrate(die, DW_AT_data_bit_offset, &attr) &&
		    dwarf_formudata(&attr, &loc) == 0)
			loc /= 8;
		else
			loc = 0;

		if (dwarf_attr_integrate(die, DW_AT_bit_size, &attr) &&
		    dwarf_formudata(&attr, &bit_size) == 0)
			size = (bit_size + 7) / 8;
	}

	member->type_name = strbuf_detach(&sb, NULL);
	/* member->var_name can be NULL */
	if (dwarf_diename(die)) {
		if (bit_size) {
			if (asprintf(&member->var_name, "%s:%ld",
				     dwarf_diename(die), (long)bit_size) < 0)
				member->var_name = NULL;
		} else {
			member->var_name = strdup(dwarf_diename(die));
		}

		if (member->var_name == NULL) {
			free(member);
			return DIE_FIND_CB_END;
		}
	}
	member->size = size;
	member->offset = loc + parent->offset;
	INIT_LIST_HEAD(&member->children);
	list_add_tail(&member->node, &parent->children);

	tag = dwarf_tag(&die_mem);
	switch (tag) {
	case DW_TAG_structure_type:
	case DW_TAG_union_type:
		die_find_child(&die_mem, __add_member_cb, member, &die_mem);
		break;
	default:
		break;
	}
	return DIE_FIND_CB_SIBLING;
}
static void add_member_types(struct annotated_data_type *parent, Dwarf_Die *type)
{
	Dwarf_Die die_mem;

	die_find_child(type, __add_member_cb, &parent->self, &die_mem);
}

static void delete_members(struct annotated_member *member)
{
	struct annotated_member *child, *tmp;

	list_for_each_entry_safe(child, tmp, &member->children, node) {
		list_del(&child->node);
		delete_members(child);
		zfree(&child->type_name);
		zfree(&child->var_name);
		free(child);
	}
}

static struct annotated_data_type *dso__findnew_data_type(struct dso *dso,
							  Dwarf_Die *type_die)
{
	struct annotated_data_type *result = NULL;
	struct annotated_data_type key;
	struct rb_node *node;
	struct strbuf sb;
	char *type_name;
	Dwarf_Word size;

	strbuf_init(&sb, 32);
	if (die_get_typename_from_type(type_die, &sb) < 0)
		strbuf_add(&sb, "(unknown type)", 14);
	type_name = strbuf_detach(&sb, NULL);

	if (dwarf_tag(type_die) == DW_TAG_typedef)
		die_get_real_type(type_die, type_die);

	dwarf_aggregate_size(type_die, &size);

	/* Check existing nodes in dso->data_types tree */
	key.self.type_name = type_name;
	key.self.size = size;
	node = rb_find(&key, dso__data_types(dso), data_type_cmp);
	if (node) {
		result = rb_entry(node, struct annotated_data_type, node);
		free(type_name);
		return result;
	}

	/* If not, add a new one */
	result = zalloc(sizeof(*result));
	if (result == NULL) {
		free(type_name);
		return NULL;
	}

	result->self.type_name = type_name;
	result->self.size = size;
	INIT_LIST_HEAD(&result->self.children);

	if (symbol_conf.annotate_data_member)
		add_member_types(result, type_die);

	rb_add(&result->node, dso__data_types(dso), data_type_less);
	return result;
}

static bool find_cu_die(struct debuginfo *di, u64 pc, Dwarf_Die *cu_die)
{
	Dwarf_Off off, next_off;
	size_t header_size;

	if (dwarf_addrdie(di->dbg, pc, cu_die) != NULL)
		return true;

	/*
	 * Some kernels don't have full aranges and contain only a few
	 * aranges entries.  Fall back to iterating all CU entries in
	 * .debug_info in case the lookup by address fails.
	 */
	off = 0;
	while (dwarf_nextcu(di->dbg, off, &next_off, &header_size,
			    NULL, NULL, NULL) == 0) {
		if (dwarf_offdie(di->dbg, off + header_size, cu_die) &&
		    dwarf_haspc(cu_die, pc))
			return true;

		off = next_off;
	}
	return false;
}
enum type_match_result {
	PERF_TMR_UNKNOWN = 0,
	PERF_TMR_OK,
	PERF_TMR_NO_TYPE,
	PERF_TMR_NO_POINTER,
	PERF_TMR_NO_SIZE,
	PERF_TMR_BAD_OFFSET,
	PERF_TMR_BAIL_OUT,
};

static const char *match_result_str(enum type_match_result tmr)
{
	switch (tmr) {
	case PERF_TMR_OK:
		return "Good!";
	case PERF_TMR_NO_TYPE:
		return "no type information";
	case PERF_TMR_NO_POINTER:
		return "no/void pointer";
	case PERF_TMR_NO_SIZE:
		return "type size is unknown";
	case PERF_TMR_BAD_OFFSET:
		return "offset bigger than size";
	case PERF_TMR_UNKNOWN:
	case PERF_TMR_BAIL_OUT:
	default:
		return "invalid state";
	}
}

static bool is_pointer_type(Dwarf_Die *type_die)
{
	int tag = dwarf_tag(type_die);

	return tag == DW_TAG_pointer_type || tag == DW_TAG_array_type;
}

/* returns true if Type B has better information than Type A */
static bool is_better_type(Dwarf_Die *type_a, Dwarf_Die *type_b)
{
	Dwarf_Word size_a, size_b;
	Dwarf_Die die_a, die_b;

	/* pointer type is preferred */
	if (is_pointer_type(type_a) != is_pointer_type(type_b))
		return is_pointer_type(type_b);

	if (is_pointer_type(type_b)) {
		/*
		 * We want to compare the target type, but 'void *' can fail to
		 * get the target type.
		 */
		if (die_get_real_type(type_a, &die_a) == NULL)
			return true;
		if (die_get_real_type(type_b, &die_b) == NULL)
			return false;

		type_a = &die_a;
		type_b = &die_b;
	}

	/* bigger type is preferred */
	if (dwarf_aggregate_size(type_a, &size_a) < 0 ||
	    dwarf_aggregate_size(type_b, &size_b) < 0)
		return false;

	return size_a < size_b;
}

/* The type info will be saved in @type_die */
static enum type_match_result check_variable(struct data_loc_info *dloc,
					     Dwarf_Die *var_die,
					     Dwarf_Die *type_die, int reg,
					     int offset, bool is_fbreg)
{
	Dwarf_Word size;
	bool needs_pointer = true;
	Dwarf_Die sized_type;

	if (reg == DWARF_REG_PC)
		needs_pointer = false;
	else if (reg == dloc->fbreg || is_fbreg)
		needs_pointer = false;
	else if (arch__is(dloc->arch, "x86") && reg == X86_REG_SP)
		needs_pointer = false;

	/* Get the type of the variable */
	if (__die_get_real_type(var_die, type_die) == NULL)
		return PERF_TMR_NO_TYPE;

	/*
	 * Usually it expects a pointer type for a memory access.
	 * Convert it to the real type it points to.  But global and
	 * local variables are accessed directly without a pointer.
	 */
	if (needs_pointer) {
		if (!is_pointer_type(type_die) ||
		    __die_get_real_type(type_die, type_die) == NULL)
			return PERF_TMR_NO_POINTER;
	}

	if (dwarf_tag(type_die) == DW_TAG_typedef)
		die_get_real_type(type_die, &sized_type);
	else
		sized_type = *type_die;

	/* Get the size of the actual type */
	if (dwarf_aggregate_size(&sized_type, &size) < 0)
		return PERF_TMR_NO_SIZE;

	/* Minimal sanity check */
	if ((unsigned)offset >= size)
		return PERF_TMR_BAD_OFFSET;

	return PERF_TMR_OK;
}
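/*
 * Illustration (made-up instruction, not taken from any binary): for a
 * memory access like
 *
 *	mov 0x10(%rax), %rbx
 *
 * the variable bound to %rax must have a pointer (or array) type and the
 * matched data type is what it points to, at offset 0x10.  For PC-relative,
 * frame-base or stack-pointer relative accesses the variable itself lives at
 * that location, so no pointer dereference is expected (needs_pointer above
 * is false).
 */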
struct type_state_stack *find_stack_state(struct type_state *state,
					  int offset)
{
	struct type_state_stack *stack;

	list_for_each_entry(stack, &state->stack_vars, list) {
		if (offset == stack->offset)
			return stack;

		if (stack->compound && stack->offset < offset &&
		    offset < stack->offset + stack->size)
			return stack;
	}
	return NULL;
}

void set_stack_state(struct type_state_stack *stack, int offset, u8 kind,
		     Dwarf_Die *type_die)
{
	int tag;
	Dwarf_Word size;

	if (dwarf_aggregate_size(type_die, &size) < 0)
		size = 0;

	tag = dwarf_tag(type_die);

	stack->type = *type_die;
	stack->size = size;
	stack->offset = offset;
	stack->kind = kind;

	switch (tag) {
	case DW_TAG_structure_type:
	case DW_TAG_union_type:
		stack->compound = (kind != TSR_KIND_POINTER);
		break;
	default:
		stack->compound = false;
		break;
	}
}

struct type_state_stack *findnew_stack_state(struct type_state *state,
					     int offset, u8 kind,
					     Dwarf_Die *type_die)
{
	struct type_state_stack *stack = find_stack_state(state, offset);

	if (stack) {
		set_stack_state(stack, offset, kind, type_die);
		return stack;
	}

	stack = malloc(sizeof(*stack));
	if (stack) {
		set_stack_state(stack, offset, kind, type_die);
		list_add(&stack->list, &state->stack_vars);
	}
	return stack;
}

/* Maintain a cache for quick global variable lookup */
struct global_var_entry {
	struct rb_node node;
	char *name;
	u64 start;
	u64 end;
	u64 die_offset;
};

static int global_var_cmp(const void *_key, const struct rb_node *node)
{
	const u64 addr = (uintptr_t)_key;
	struct global_var_entry *gvar;

	gvar = rb_entry(node, struct global_var_entry, node);

	if (gvar->start <= addr && addr < gvar->end)
		return 0;
	return gvar->start > addr ? -1 : 1;
}
static bool global_var_less(struct rb_node *node_a, const struct rb_node *node_b)
{
	struct global_var_entry *gvar_a, *gvar_b;

	gvar_a = rb_entry(node_a, struct global_var_entry, node);
	gvar_b = rb_entry(node_b, struct global_var_entry, node);

	return gvar_a->start < gvar_b->start;
}

static struct global_var_entry *global_var__find(struct data_loc_info *dloc, u64 addr)
{
	struct dso *dso = map__dso(dloc->ms->map);
	struct rb_node *node;

	node = rb_find((void *)(uintptr_t)addr, dso__global_vars(dso), global_var_cmp);
	if (node == NULL)
		return NULL;

	return rb_entry(node, struct global_var_entry, node);
}

static bool global_var__add(struct data_loc_info *dloc, u64 addr,
			    const char *name, Dwarf_Die *type_die)
{
	struct dso *dso = map__dso(dloc->ms->map);
	struct global_var_entry *gvar;
	Dwarf_Word size;

	if (dwarf_aggregate_size(type_die, &size) < 0)
		return false;

	gvar = malloc(sizeof(*gvar));
	if (gvar == NULL)
		return false;

	gvar->name = name ? strdup(name) : NULL;
	if (name && gvar->name == NULL) {
		free(gvar);
		return false;
	}

	gvar->start = addr;
	gvar->end = addr + size;
	gvar->die_offset = dwarf_dieoffset(type_die);

	rb_add(&gvar->node, dso__global_vars(dso), global_var_less);
	return true;
}

void global_var_type__tree_delete(struct rb_root *root)
{
	struct global_var_entry *gvar;

	while (!RB_EMPTY_ROOT(root)) {
		struct rb_node *node = rb_first(root);

		rb_erase(node, root);
		gvar = rb_entry(node, struct global_var_entry, node);
		zfree(&gvar->name);
		free(gvar);
	}
}

bool get_global_var_info(struct data_loc_info *dloc, u64 addr,
			 const char **var_name, int *var_offset)
{
	struct addr_location al;
	struct symbol *sym;
	u64 mem_addr;

	/* Kernel symbols might be relocated */
	mem_addr = addr + map__reloc(dloc->ms->map);

	addr_location__init(&al);
	sym = thread__find_symbol_fb(dloc->thread, dloc->cpumode,
				     mem_addr, &al);
	if (sym) {
		*var_name = sym->name;
		/* Calculate type offset from the start of variable */
		*var_offset = mem_addr - map__unmap_ip(al.map, sym->start);
	} else {
		*var_name = NULL;
	}
	addr_location__exit(&al);
	if (*var_name == NULL)
		return false;

	return true;
}
static void global_var__collect(struct data_loc_info *dloc)
{
	Dwarf *dwarf = dloc->di->dbg;
	Dwarf_Off off, next_off;
	Dwarf_Die cu_die, type_die;
	size_t header_size;

	/* Iterate all CUs and collect global variables whose location is not a register */
	off = 0;
	while (dwarf_nextcu(dwarf, off, &next_off, &header_size,
			    NULL, NULL, NULL) == 0) {
		struct die_var_type *var_types = NULL;
		struct die_var_type *pos;

		if (dwarf_offdie(dwarf, off + header_size, &cu_die) == NULL) {
			off = next_off;
			continue;
		}

		die_collect_global_vars(&cu_die, &var_types);

		for (pos = var_types; pos; pos = pos->next) {
			const char *var_name = NULL;
			int var_offset = 0;

			if (pos->reg != -1)
				continue;

			if (!dwarf_offdie(dwarf, pos->die_off, &type_die))
				continue;

			if (!get_global_var_info(dloc, pos->addr, &var_name,
						 &var_offset))
				continue;

			if (var_offset != 0)
				continue;

			global_var__add(dloc, pos->addr, var_name, &type_die);
		}

		delete_var_types(var_types);

		off = next_off;
	}
}

bool get_global_var_type(Dwarf_Die *cu_die, struct data_loc_info *dloc,
			 u64 ip, u64 var_addr, int *var_offset,
			 Dwarf_Die *type_die)
{
	u64 pc;
	int offset;
	const char *var_name = NULL;
	struct global_var_entry *gvar;
	struct dso *dso = map__dso(dloc->ms->map);
	Dwarf_Die var_die;

	if (RB_EMPTY_ROOT(dso__global_vars(dso)))
		global_var__collect(dloc);

	gvar = global_var__find(dloc, var_addr);
	if (gvar) {
		if (!dwarf_offdie(dloc->di->dbg, gvar->die_offset, type_die))
			return false;

		*var_offset = var_addr - gvar->start;
		return true;
	}

	/* Try to get the variable by address first */
	if (die_find_variable_by_addr(cu_die, var_addr, &var_die, &offset) &&
	    check_variable(dloc, &var_die, type_die, DWARF_REG_PC, offset,
			   /*is_fbreg=*/false) == PERF_TMR_OK) {
		var_name = dwarf_diename(&var_die);
		*var_offset = offset;
		goto ok;
	}

	if (!get_global_var_info(dloc, var_addr, &var_name, var_offset))
		return false;

	pc = map__rip_2objdump(dloc->ms->map, ip);

	/* Try to get the name of global variable */
	if (die_find_variable_at(cu_die, var_name, pc, &var_die) &&
	    check_variable(dloc, &var_die, type_die, DWARF_REG_PC, *var_offset,
			   /*is_fbreg=*/false) == PERF_TMR_OK)
		goto ok;

	return false;

ok:
	/* The address should point to the start of the variable */
	global_var__add(dloc, var_addr - *var_offset, var_name, type_die);
	return true;
}
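/*
 * Usage sketch (illustrative only; sample_addr and record_access are
 * hypothetical names, not part of this file): a caller that resolved a
 * sample to an absolute data address could do
 *
 *	Dwarf_Die type_die;
 *	int offset;
 *
 *	if (get_global_var_type(&cu_die, dloc, dloc->ip, sample_addr,
 *				&offset, &type_die))
 *		record_access(&type_die, offset);
 *
 * where offset becomes the byte offset of the access from the start of the
 * matched global variable.
 */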
/**
 * update_var_state - Update type state using given variables
 * @state: type state table
 * @dloc: data location info
 * @addr: instruction address to match with variable
 * @insn_offset: instruction offset (for debug)
 * @var_types: list of variables with type info
 *
 * This function fills the @state table using the @var_types info.  Each
 * variable that matches the given address updates an entry in the table.
 */
static void update_var_state(struct type_state *state, struct data_loc_info *dloc,
			     u64 addr, u64 insn_offset, struct die_var_type *var_types)
{
	Dwarf_Die mem_die;
	struct die_var_type *var;
	int fbreg = dloc->fbreg;
	int fb_offset = 0;

	if (dloc->fb_cfa) {
		if (die_get_cfa(dloc->di->dbg, addr, &fbreg, &fb_offset) < 0)
			fbreg = -1;
	}

	for (var = var_types; var != NULL; var = var->next) {
		if (var->addr != addr)
			continue;
		/* Get the type DIE using the offset */
		if (!dwarf_offdie(dloc->di->dbg, var->die_off, &mem_die))
			continue;

		if (var->reg == DWARF_REG_FB || var->reg == fbreg) {
			int offset = var->offset;
			struct type_state_stack *stack;

			if (var->reg != DWARF_REG_FB)
				offset -= fb_offset;

			stack = find_stack_state(state, offset);
			if (stack && stack->kind == TSR_KIND_TYPE &&
			    !is_better_type(&stack->type, &mem_die))
				continue;

			findnew_stack_state(state, offset, TSR_KIND_TYPE,
					    &mem_die);

			pr_debug_dtp("var [%"PRIx64"] -%#x(stack)",
				     insn_offset, -offset);
			pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
		} else if (has_reg_type(state, var->reg) && var->offset == 0) {
			struct type_state_reg *reg;

			reg = &state->regs[var->reg];

			if (reg->ok && reg->kind == TSR_KIND_TYPE &&
			    !is_better_type(&reg->type, &mem_die))
				continue;

			reg->type = mem_die;
			reg->kind = TSR_KIND_TYPE;
			reg->ok = true;

			pr_debug_dtp("var [%"PRIx64"] reg%d",
				     insn_offset, var->reg);
			pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
		}
	}
}

/**
 * update_insn_state - Update type state for an instruction
 * @state: type state table
 * @dloc: data location info
 * @cu_die: compile unit debug entry
 * @dl: disasm line for the instruction
 *
 * This function updates the @state table for the target operand of the
 * instruction at @dl if it transfers the type like MOV on x86.  Since it
 * tracks the type, it doesn't care about the values produced by arithmetic
 * instructions like ADD/SUB/MUL/DIV and INC/DEC.
 *
 * Note that ops->reg2 is only available when both mem_ref and multi_regs
 * are true.
 */
static void update_insn_state(struct type_state *state, struct data_loc_info *dloc,
			      Dwarf_Die *cu_die, struct disasm_line *dl)
{
	if (dloc->arch->update_insn_state)
		dloc->arch->update_insn_state(state, dloc, cu_die, dl);
}

/*
 * Prepend this_blocks (from the outer scope) to full_blocks, removing the
 * duplicate disasm line.
 */
static void prepend_basic_blocks(struct list_head *this_blocks,
				 struct list_head *full_blocks)
{
	struct annotated_basic_block *first_bb, *last_bb;

	last_bb = list_last_entry(this_blocks, typeof(*last_bb), list);
	first_bb = list_first_entry(full_blocks, typeof(*first_bb), list);

	if (list_empty(full_blocks))
		goto out;

	/* Last insn in this_blocks should be same as first insn in full_blocks */
	if (last_bb->end != first_bb->begin) {
		pr_debug("prepend basic blocks: mismatched disasm line %"PRIx64" -> %"PRIx64"\n",
			 last_bb->end->al.offset, first_bb->begin->al.offset);
		goto out;
	}

	/* Does the basic block have only one disasm_line? */
	if (last_bb->begin == last_bb->end) {
		list_del(&last_bb->list);
		free(last_bb);
		goto out;
	}

	/* Point to the insn before the last when adding this block to full_blocks */
	last_bb->end = list_prev_entry(last_bb->end, al.node);

out:
	list_splice(this_blocks, full_blocks);
}
static void delete_basic_blocks(struct list_head *basic_blocks)
{
	struct annotated_basic_block *bb, *tmp;

	list_for_each_entry_safe(bb, tmp, basic_blocks, list) {
		list_del(&bb->list);
		free(bb);
	}
}

/* Make sure all variables have a valid start address */
static void fixup_var_address(struct die_var_type *var_types, u64 addr)
{
	while (var_types) {
		/*
		 * Some variables have no address range, meaning they are
		 * available in the whole scope.  Adjust the start address
		 * to the start of the scope.
		 */
		if (var_types->addr == 0)
			var_types->addr = addr;

		var_types = var_types->next;
	}
}

static void delete_var_types(struct die_var_type *var_types)
{
	while (var_types) {
		struct die_var_type *next = var_types->next;

		free(var_types);
		var_types = next;
	}
}

/* should match to is_stack_canary() in util/annotate.c */
static void setup_stack_canary(struct data_loc_info *dloc)
{
	if (arch__is(dloc->arch, "x86")) {
		dloc->op->segment = INSN_SEG_X86_GS;
		dloc->op->imm = true;
		dloc->op->offset = 40;
	}
}
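/*
 * For reference: on x86-64 the stack protector canary is typically read as
 * "mov %gs:0x28, %reg" (0x28 == 40), which is why the fake operand above uses
 * the GS segment with an immediate offset of 40.  This mirrors the heuristic
 * in is_stack_canary(); treat the exact offset as an assumption about the
 * current toolchain layout rather than a guarantee.
 */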
/*
 * It's at the target address, check if it has a matching type.
 * It returns PERF_TMR_BAIL_OUT when it looks up per-cpu variables which
 * are similar to global variables and no additional info is needed.
 */
static enum type_match_result check_matching_type(struct type_state *state,
						  struct data_loc_info *dloc,
						  Dwarf_Die *cu_die,
						  Dwarf_Die *type_die)
{
	Dwarf_Word size;
	u32 insn_offset = dloc->ip - dloc->ms->sym->start;
	int reg = dloc->op->reg1;

	pr_debug_dtp("chk [%x] reg%d offset=%#x ok=%d kind=%d ",
		     insn_offset, reg, dloc->op->offset,
		     state->regs[reg].ok, state->regs[reg].kind);

	if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_TYPE) {
		Dwarf_Die sized_type;

		/*
		 * Normal registers should hold a pointer (or array) to
		 * dereference a memory location.
		 */
		if (!is_pointer_type(&state->regs[reg].type)) {
			if (dloc->op->offset < 0 && reg != state->stack_reg)
				goto check_kernel;

			return PERF_TMR_NO_POINTER;
		}

		/* Remove the pointer and get the target type */
		if (__die_get_real_type(&state->regs[reg].type, type_die) == NULL)
			return PERF_TMR_NO_POINTER;

		dloc->type_offset = dloc->op->offset;

		if (dwarf_tag(type_die) == DW_TAG_typedef)
			die_get_real_type(type_die, &sized_type);
		else
			sized_type = *type_die;

		/* Get the size of the actual type */
		if (dwarf_aggregate_size(&sized_type, &size) < 0 ||
		    (unsigned)dloc->type_offset >= size)
			return PERF_TMR_BAD_OFFSET;

		return PERF_TMR_OK;
	}

	if (reg == dloc->fbreg) {
		struct type_state_stack *stack;

		pr_debug_dtp("fbreg");

		stack = find_stack_state(state, dloc->type_offset);
		if (stack == NULL)
			return PERF_TMR_NO_TYPE;

		if (stack->kind == TSR_KIND_CANARY) {
			setup_stack_canary(dloc);
			return PERF_TMR_BAIL_OUT;
		}

		if (stack->kind != TSR_KIND_TYPE)
			return PERF_TMR_NO_TYPE;

		*type_die = stack->type;
		/* Update the type offset from the start of slot */
		dloc->type_offset -= stack->offset;

		return PERF_TMR_OK;
	}

	if (dloc->fb_cfa) {
		struct type_state_stack *stack;
		u64 pc = map__rip_2objdump(dloc->ms->map, dloc->ip);
		int fbreg, fboff;

		pr_debug_dtp("cfa");

		if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0)
			fbreg = -1;

		if (reg != fbreg)
			return PERF_TMR_NO_TYPE;

		stack = find_stack_state(state, dloc->type_offset - fboff);
		if (stack == NULL)
			return PERF_TMR_NO_TYPE;

		if (stack->kind == TSR_KIND_CANARY) {
			setup_stack_canary(dloc);
			return PERF_TMR_BAIL_OUT;
		}

		if (stack->kind != TSR_KIND_TYPE)
			return PERF_TMR_NO_TYPE;

		*type_die = stack->type;
		/* Update the type offset from the start of slot */
		dloc->type_offset -= fboff + stack->offset;

		return PERF_TMR_OK;
	}

	if (state->regs[reg].kind == TSR_KIND_PERCPU_BASE) {
		u64 var_addr = dloc->op->offset;
		int var_offset;

		pr_debug_dtp("percpu var");

		if (dloc->op->multi_regs) {
			int reg2 = dloc->op->reg2;

			if (dloc->op->reg2 == reg)
				reg2 = dloc->op->reg1;

			if (has_reg_type(state, reg2) && state->regs[reg2].ok &&
			    state->regs[reg2].kind == TSR_KIND_CONST)
				var_addr += state->regs[reg2].imm_value;
		}

		if (get_global_var_type(cu_die, dloc, dloc->ip, var_addr,
					&var_offset, type_die)) {
			dloc->type_offset = var_offset;
			return PERF_TMR_OK;
		}
		/* No need to retry per-cpu (global) variables */
		return PERF_TMR_BAIL_OUT;
	}
	if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_POINTER) {
		pr_debug_dtp("percpu ptr");

		/*
		 * It's actually a pointer but the address was calculated using
		 * some arithmetic.  So it points to the actual type already.
		 */
		*type_die = state->regs[reg].type;

		dloc->type_offset = dloc->op->offset;

		/* Get the size of the actual type */
		if (dwarf_aggregate_size(type_die, &size) < 0 ||
		    (unsigned)dloc->type_offset >= size)
			return PERF_TMR_BAIL_OUT;

		return PERF_TMR_OK;
	}

	if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_CANARY) {
		pr_debug_dtp("stack canary");

		/*
		 * This is a saved value of the stack canary which will be handled
		 * in the outer logic when it returns failure here.  Pretend it's
		 * from the stack canary directly.
		 */
		setup_stack_canary(dloc);

		return PERF_TMR_BAIL_OUT;
	}

check_kernel:
	if (dso__kernel(map__dso(dloc->ms->map))) {
		u64 addr;
		int offset;

		/* Direct this-cpu access like "%gs:0x34740" */
		if (dloc->op->segment == INSN_SEG_X86_GS && dloc->op->imm &&
		    arch__is(dloc->arch, "x86")) {
			pr_debug_dtp("this-cpu var");

			addr = dloc->op->offset;

			if (get_global_var_type(cu_die, dloc, dloc->ip, addr,
						&offset, type_die)) {
				dloc->type_offset = offset;
				return PERF_TMR_OK;
			}
			return PERF_TMR_BAIL_OUT;
		}

		/* Access to global variable like "-0x7dcf0500(,%rdx,8)" */
		if (dloc->op->offset < 0 && reg != state->stack_reg) {
			addr = (s64) dloc->op->offset;

			if (get_global_var_type(cu_die, dloc, dloc->ip, addr,
						&offset, type_die)) {
				pr_debug_dtp("global var");

				dloc->type_offset = offset;
				return PERF_TMR_OK;
			}
			return PERF_TMR_BAIL_OUT;
		}
	}

	return PERF_TMR_UNKNOWN;
}

/* Iterate instructions in basic blocks and update type table */
static enum type_match_result find_data_type_insn(struct data_loc_info *dloc,
						  struct list_head *basic_blocks,
						  struct die_var_type *var_types,
						  Dwarf_Die *cu_die,
						  Dwarf_Die *type_die)
{
	struct type_state state;
	struct symbol *sym = dloc->ms->sym;
	struct annotation *notes = symbol__annotation(sym);
	struct annotated_basic_block *bb;
	enum type_match_result ret = PERF_TMR_UNKNOWN;

	init_type_state(&state, dloc->arch);

	list_for_each_entry(bb, basic_blocks, list) {
		struct disasm_line *dl = bb->begin;

		BUG_ON(bb->begin->al.offset == -1 || bb->end->al.offset == -1);

		pr_debug_dtp("bb: [%"PRIx64" - %"PRIx64"]\n",
			     bb->begin->al.offset, bb->end->al.offset);

		list_for_each_entry_from(dl, &notes->src->source, al.node) {
			u64 this_ip = sym->start + dl->al.offset;
			u64 addr = map__rip_2objdump(dloc->ms->map, this_ip);

			/* Skip comment or debug info lines */
			if (dl->al.offset == -1)
				continue;

			/* Update variable type at this address */
			update_var_state(&state, dloc, addr, dl->al.offset, var_types);

			if (this_ip == dloc->ip) {
				ret = check_matching_type(&state, dloc,
							  cu_die, type_die);
				pr_debug_dtp(" : %s\n", match_result_str(ret));
				goto out;
			}

			/* Update type table after processing the instruction */
			update_insn_state(&state, dloc, cu_die, dl);
			if (dl == bb->end)
				break;
		}
	}

out:
	exit_type_state(&state);
	return ret;
}

static int arch_supports_insn_tracking(struct data_loc_info *dloc)
{
	if (arch__is(dloc->arch, "x86") || arch__is(dloc->arch, "powerpc"))
		return 1;
	return 0;
}
/*
 * Construct a list of basic blocks for each scope with variables and try to
 * find the data type by updating a type state table through instructions.
 */
static enum type_match_result find_data_type_block(struct data_loc_info *dloc,
						   Dwarf_Die *cu_die,
						   Dwarf_Die *scopes,
						   int nr_scopes,
						   Dwarf_Die *type_die)
{
	LIST_HEAD(basic_blocks);
	struct die_var_type *var_types = NULL;
	u64 src_ip, dst_ip, prev_dst_ip;
	enum type_match_result ret = PERF_TMR_UNKNOWN;

	/* TODO: other architecture support */
	if (!arch_supports_insn_tracking(dloc))
		return PERF_TMR_BAIL_OUT;

	prev_dst_ip = dst_ip = dloc->ip;
	for (int i = nr_scopes - 1; i >= 0; i--) {
		Dwarf_Addr base, start, end;
		LIST_HEAD(this_blocks);

		if (dwarf_ranges(&scopes[i], 0, &base, &start, &end) < 0)
			break;

		pr_debug_dtp("scope: [%d/%d] (die:%lx)\n",
			     i + 1, nr_scopes, (long)dwarf_dieoffset(&scopes[i]));
		src_ip = map__objdump_2rip(dloc->ms->map, start);

again:
		/* Get basic blocks for this scope */
		if (annotate_get_basic_blocks(dloc->ms->sym, src_ip, dst_ip,
					      &this_blocks) < 0) {
			/* Try previous block if they are not connected */
			if (prev_dst_ip != dst_ip) {
				dst_ip = prev_dst_ip;
				goto again;
			}

			pr_debug_dtp("cannot find a basic block from %"PRIx64" to %"PRIx64"\n",
				     src_ip - dloc->ms->sym->start,
				     dst_ip - dloc->ms->sym->start);
			continue;
		}
		prepend_basic_blocks(&this_blocks, &basic_blocks);

		/* Get variable info for this scope and add to var_types list */
		die_collect_vars(&scopes[i], &var_types);
		fixup_var_address(var_types, start);

		/* Find from start of this scope to the target instruction */
		ret = find_data_type_insn(dloc, &basic_blocks, var_types,
					  cu_die, type_die);
		if (ret == PERF_TMR_OK) {
			char buf[64];

			if (dloc->op->multi_regs)
				snprintf(buf, sizeof(buf), "reg%d, reg%d",
					 dloc->op->reg1, dloc->op->reg2);
			else
				snprintf(buf, sizeof(buf), "reg%d", dloc->op->reg1);

			pr_debug_dtp("found by insn track: %#x(%s) type-offset=%#x\n",
				     dloc->op->offset, buf, dloc->type_offset);
			break;
		}

		if (ret == PERF_TMR_BAIL_OUT)
			break;

		/* Go up to the next scope and find blocks to the start */
		prev_dst_ip = dst_ip;
		dst_ip = src_ip;
	}

	delete_basic_blocks(&basic_blocks);
	delete_var_types(var_types);
	return ret;
}
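/*
 * Rough picture of the walk above (illustrative only): the loop starts at the
 * innermost scope (i == nr_scopes - 1), collects basic blocks from that
 * scope's start address down to the target instruction, and runs the type
 * state machine over them.  If no type is found, it widens to the enclosing
 * scope and only adds the blocks from the outer scope's start to the inner
 * scope's start, reusing the already-collected blocks via
 * prepend_basic_blocks().
 */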
/* The result will be saved in @type_die */
static int find_data_type_die(struct data_loc_info *dloc, Dwarf_Die *type_die)
{
	struct annotated_op_loc *loc = dloc->op;
	Dwarf_Die cu_die, var_die;
	Dwarf_Die *scopes = NULL;
	int reg, offset;
	int ret = -1;
	int i, nr_scopes;
	int fbreg = -1;
	int fb_offset = 0;
	bool is_fbreg = false;
	bool found = false;
	u64 pc;
	char buf[64];
	enum type_match_result result = PERF_TMR_UNKNOWN;

	if (dloc->op->multi_regs)
		snprintf(buf, sizeof(buf), "reg%d, reg%d", dloc->op->reg1, dloc->op->reg2);
	else if (dloc->op->reg1 == DWARF_REG_PC)
		snprintf(buf, sizeof(buf), "PC");
	else
		snprintf(buf, sizeof(buf), "reg%d", dloc->op->reg1);

	pr_debug_dtp("-----------------------------------------------------------\n");
	pr_debug_dtp("find data type for %#x(%s) at %s+%#"PRIx64"\n",
		     dloc->op->offset, buf, dloc->ms->sym->name,
		     dloc->ip - dloc->ms->sym->start);

	/*
	 * IP is a relative instruction address from the start of the map, and
	 * it can be randomized/relocated, so it needs to be translated to a
	 * PC, which is a file address, for DWARF processing.
	 */
	pc = map__rip_2objdump(dloc->ms->map, dloc->ip);

	/* Get a compile_unit for this address */
	if (!find_cu_die(dloc->di, pc, &cu_die)) {
		pr_debug_dtp("cannot find CU for address %"PRIx64"\n", pc);
		ann_data_stat.no_cuinfo++;
		return -1;
	}

	reg = loc->reg1;
	offset = loc->offset;

	pr_debug_dtp("CU for %s (die:%#lx)\n",
		     dwarf_diename(&cu_die), (long)dwarf_dieoffset(&cu_die));

	if (reg == DWARF_REG_PC) {
		if (get_global_var_type(&cu_die, dloc, dloc->ip, dloc->var_addr,
					&offset, type_die)) {
			dloc->type_offset = offset;

			pr_debug_dtp("found by addr=%#"PRIx64" type_offset=%#x\n",
				     dloc->var_addr, offset);
			pr_debug_type_name(type_die, TSR_KIND_TYPE);
			found = true;
			goto out;
		}
	}

	/* Get a list of nested scopes - i.e. (inlined) functions and blocks. */
	nr_scopes = die_get_scopes(&cu_die, pc, &scopes);

	if (reg != DWARF_REG_PC && dwarf_hasattr(&scopes[0], DW_AT_frame_base)) {
		Dwarf_Attribute attr;
		Dwarf_Block block;

		/* Check if the 'reg' is assigned as frame base register */
		if (dwarf_attr(&scopes[0], DW_AT_frame_base, &attr) != NULL &&
		    dwarf_formblock(&attr, &block) == 0 && block.length == 1) {
			switch (*block.data) {
			case DW_OP_reg0 ... DW_OP_reg31:
				fbreg = dloc->fbreg = *block.data - DW_OP_reg0;
				break;
			case DW_OP_call_frame_cfa:
				dloc->fb_cfa = true;
				if (die_get_cfa(dloc->di->dbg, pc, &fbreg,
						&fb_offset) < 0)
					fbreg = -1;
				break;
			default:
				break;
			}

			pr_debug_dtp("frame base: cfa=%d fbreg=%d\n",
				     dloc->fb_cfa, fbreg);
		}
	}

retry:
	is_fbreg = (reg == fbreg);
	if (is_fbreg)
		offset = loc->offset - fb_offset;

	/* Search from the inner-most scope to the outer */
	for (i = nr_scopes - 1; i >= 0; i--) {
		Dwarf_Die mem_die;
		int type_offset = offset;

		if (reg == DWARF_REG_PC) {
			if (!die_find_variable_by_addr(&scopes[i], dloc->var_addr,
						       &var_die, &type_offset))
				continue;
		} else {
			/* Look up variables/parameters in this scope */
			if (!die_find_variable_by_reg(&scopes[i], pc, reg,
						      &type_offset, is_fbreg, &var_die))
				continue;
		}

		pr_debug_dtp("found \"%s\" (die: %#lx) in scope=%d/%d (die: %#lx) ",
			     dwarf_diename(&var_die), (long)dwarf_dieoffset(&var_die),
			     i + 1, nr_scopes, (long)dwarf_dieoffset(&scopes[i]));

		/* Found a variable, see if it's correct */
		result = check_variable(dloc, &var_die, &mem_die, reg, type_offset, is_fbreg);
		if (result == PERF_TMR_OK) {
			if (reg == DWARF_REG_PC) {
				pr_debug_dtp("addr=%#"PRIx64" type_offset=%#x\n",
					     dloc->var_addr, type_offset);
			} else if (reg == DWARF_REG_FB || is_fbreg) {
				pr_debug_dtp("stack_offset=%#x type_offset=%#x\n",
					     fb_offset, type_offset);
			} else {
				pr_debug_dtp("type_offset=%#x\n", type_offset);
			}

			if (!found || is_better_type(type_die, &mem_die)) {
				*type_die = mem_die;
				dloc->type_offset = type_offset;
				found = true;
			}
		} else {
			pr_debug_dtp("failed: %s\n", match_result_str(result));
		}

		pr_debug_location(&var_die, pc, reg);
		pr_debug_type_name(&mem_die, TSR_KIND_TYPE);
	}
	if (!found && loc->multi_regs && reg == loc->reg1 && loc->reg1 != loc->reg2) {
		reg = loc->reg2;
		goto retry;
	}

	if (!found && reg != DWARF_REG_PC) {
		result = find_data_type_block(dloc, &cu_die, scopes,
					      nr_scopes, type_die);
		if (result == PERF_TMR_OK) {
			ann_data_stat.insn_track++;
			found = true;
		}
	}

out:
	if (found) {
		pr_debug_dtp("final type:");
		pr_debug_type_name(type_die, TSR_KIND_TYPE);
		ret = 0;
	} else {
		switch (result) {
		case PERF_TMR_NO_TYPE:
		case PERF_TMR_NO_POINTER:
			pr_debug_dtp("%s\n", match_result_str(result));
			ann_data_stat.no_typeinfo++;
			break;
		case PERF_TMR_NO_SIZE:
			pr_debug_dtp("%s\n", match_result_str(result));
			ann_data_stat.invalid_size++;
			break;
		case PERF_TMR_BAD_OFFSET:
			pr_debug_dtp("%s\n", match_result_str(result));
			ann_data_stat.bad_offset++;
			break;
		case PERF_TMR_UNKNOWN:
		case PERF_TMR_BAIL_OUT:
		case PERF_TMR_OK:	/* should not reach here */
		default:
			pr_debug_dtp("no variable found\n");
			ann_data_stat.no_var++;
			break;
		}
		ret = -1;
	}

	free(scopes);
	return ret;
}

/**
 * find_data_type - Return a data type at the location
 * @dloc: data location
 *
 * This function searches the debug information of the binary to get the data
 * type it accesses.  The exact location is expressed by (ip, reg, offset)
 * for pointer variables or (ip, addr) for global variables.  Note that global
 * variables might update the @dloc->type_offset after finding the start of the
 * variable.  If it cannot find a global variable by address, it tries to find
 * a declaration of the variable using var_name.  In that case, @dloc->offset
 * won't be updated.
 *
 * It returns %NULL if not found.
 */
struct annotated_data_type *find_data_type(struct data_loc_info *dloc)
{
	struct dso *dso = map__dso(dloc->ms->map);
	Dwarf_Die type_die;

	/*
	 * The type offset is the same as instruction offset by default.
	 * But when finding a global variable, the offset won't be valid.
	 */
	dloc->type_offset = dloc->op->offset;

	dloc->fbreg = -1;

	if (find_data_type_die(dloc, &type_die) < 0)
		return NULL;

	return dso__findnew_data_type(dso, &type_die);
}
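/*
 * Usage sketch (illustrative; the fields shown are the ones this file reads
 * from struct data_loc_info, but the surrounding setup is hypothetical):
 *
 *	struct data_loc_info dloc = {
 *		.arch	= arch,
 *		.thread	= thread,
 *		.ms	= &he->ms,
 *		.ip	= sample_ip,
 *		.op	= &op_loc,	   // reg/offset of the memory operand
 *	};
 *	struct annotated_data_type *adt = find_data_type(&dloc);
 *
 *	if (adt)
 *		annotated_data_type__update_samples(adt, evsel,
 *						    dloc.type_offset,
 *						    1, sample_period);
 */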
static int alloc_data_type_histograms(struct annotated_data_type *adt, int nr_entries)
{
	int i;
	size_t sz = sizeof(struct type_hist);

	sz += sizeof(struct type_hist_entry) * adt->self.size;

	/* Allocate a table of pointers for each event */
	adt->histograms = calloc(nr_entries, sizeof(*adt->histograms));
	if (adt->histograms == NULL)
		return -ENOMEM;

	/*
	 * Each histogram is allocated for the whole size of the type.
	 * TODO: Probably we can move the histogram to members.
	 */
	for (i = 0; i < nr_entries; i++) {
		adt->histograms[i] = zalloc(sz);
		if (adt->histograms[i] == NULL)
			goto err;
	}

	adt->nr_histograms = nr_entries;
	return 0;

err:
	while (--i >= 0)
		zfree(&(adt->histograms[i]));
	zfree(&adt->histograms);
	return -ENOMEM;
}

static void delete_data_type_histograms(struct annotated_data_type *adt)
{
	for (int i = 0; i < adt->nr_histograms; i++)
		zfree(&(adt->histograms[i]));

	zfree(&adt->histograms);
	adt->nr_histograms = 0;
}

void annotated_data_type__tree_delete(struct rb_root *root)
{
	struct annotated_data_type *pos;

	while (!RB_EMPTY_ROOT(root)) {
		struct rb_node *node = rb_first(root);

		rb_erase(node, root);
		pos = rb_entry(node, struct annotated_data_type, node);
		delete_members(&pos->self);
		delete_data_type_histograms(pos);
		zfree(&pos->self.type_name);
		free(pos);
	}
}

/**
 * annotated_data_type__update_samples - Update histogram
 * @adt: Data type to update
 * @evsel: Event to update
 * @offset: Offset in the type
 * @nr_samples: Number of samples at this offset
 * @period: Event count at this offset
 *
 * This function updates the type histogram at @offset for @evsel.  Samples
 * are aggregated before calling this function, so it can be called with more
 * than one sample at a certain offset.
 */
int annotated_data_type__update_samples(struct annotated_data_type *adt,
					struct evsel *evsel, int offset,
					int nr_samples, u64 period)
{
	struct type_hist *h;

	if (adt == NULL)
		return 0;

	if (adt->histograms == NULL) {
		int nr = evsel->evlist->core.nr_entries;

		if (alloc_data_type_histograms(adt, nr) < 0)
			return -1;
	}

	if (offset < 0 || offset >= adt->self.size)
		return -1;

	h = adt->histograms[evsel->core.idx];

	h->nr_samples += nr_samples;
	h->addr[offset].nr_samples += nr_samples;
	h->period += period;
	h->addr[offset].period += period;
	return 0;
}
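/*
 * Layout sketch (assuming struct type_hist ends with a flexible
 * "struct type_hist_entry addr[]" array, as the sizing in
 * alloc_data_type_histograms() implies): each per-event histogram has one
 * entry per byte of the type, so a 64-byte struct gets addr[0..63] and a
 * sample at type_offset 0x10 bumps both the histogram totals and addr[0x10].
 */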
static void print_annotated_data_header(struct hist_entry *he, struct evsel *evsel)
{
	struct dso *dso = map__dso(he->ms.map);
	int nr_members = 1;
	int nr_samples = he->stat.nr_events;
	int width = 7;
	const char *val_hdr = "Percent";

	if (evsel__is_group_event(evsel)) {
		struct hist_entry *pair;

		list_for_each_entry(pair, &he->pairs.head, pairs.node)
			nr_samples += pair->stat.nr_events;
	}

	printf("Annotate type: '%s' in %s (%d samples):\n",
	       he->mem_type->self.type_name, dso__name(dso), nr_samples);

	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;
		int i = 0;

		nr_members = 0;
		for_each_group_evsel(pos, evsel) {
			if (symbol_conf.skip_empty &&
			    evsel__hists(pos)->stats.nr_samples == 0)
				continue;

			printf(" event[%d] = %s\n", i++, pos->name);
			nr_members++;
		}
	}

	if (symbol_conf.show_total_period) {
		width = 11;
		val_hdr = "Period";
	} else if (symbol_conf.show_nr_samples) {
		width = 7;
		val_hdr = "Samples";
	}

	printf("============================================================================\n");
	printf("%*s %10s %10s %s\n", (width + 1) * nr_members, val_hdr,
	       "offset", "size", "field");
}

static void print_annotated_data_value(struct type_hist *h, u64 period, int nr_samples)
{
	double percent = h->period ? (100.0 * period / h->period) : 0;
	const char *color = get_percent_color(percent);

	if (symbol_conf.show_total_period)
		color_fprintf(stdout, color, " %11" PRIu64, period);
	else if (symbol_conf.show_nr_samples)
		color_fprintf(stdout, color, " %7d", nr_samples);
	else
		color_fprintf(stdout, color, " %7.2f", percent);
}

static void print_annotated_data_type(struct annotated_data_type *mem_type,
				      struct annotated_member *member,
				      struct evsel *evsel, int indent)
{
	struct annotated_member *child;
	struct type_hist *h = mem_type->histograms[evsel->core.idx];
	int i, nr_events = 0, samples = 0;
	u64 period = 0;
	int width = symbol_conf.show_total_period ? 11 : 7;
	struct evsel *pos;

	for_each_group_evsel(pos, evsel) {
		h = mem_type->histograms[pos->core.idx];

		if (symbol_conf.skip_empty &&
		    evsel__hists(pos)->stats.nr_samples == 0)
			continue;

		samples = 0;
		period = 0;
		for (i = 0; i < member->size; i++) {
			samples += h->addr[member->offset + i].nr_samples;
			period += h->addr[member->offset + i].period;
		}
		print_annotated_data_value(h, period, samples);
		nr_events++;
	}

	printf(" %10d %10d %*s%s\t%s",
	       member->offset, member->size, indent, "", member->type_name,
	       member->var_name ?: "");

	if (!list_empty(&member->children))
		printf(" {\n");

	list_for_each_entry(child, &member->children, node)
		print_annotated_data_type(mem_type, child, evsel, indent + 4);

	if (!list_empty(&member->children))
		printf("%*s}", (width + 1) * nr_events + 24 + indent, "");
	printf(";\n");
}

int hist_entry__annotate_data_tty(struct hist_entry *he, struct evsel *evsel)
{
	print_annotated_data_header(he, evsel);
	print_annotated_data_type(he->mem_type, &he->mem_type->self, evsel, 0);
	printf("\n");

	/* move to the next entry */
	return '>';
}