1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Convert sample address to data type using DWARF debug info. 4 * 5 * Written by Namhyung Kim <namhyung@kernel.org> 6 */ 7 8 #include <stdio.h> 9 #include <stdlib.h> 10 #include <inttypes.h> 11 12 #include "annotate.h" 13 #include "annotate-data.h" 14 #include "debuginfo.h" 15 #include "debug.h" 16 #include "dso.h" 17 #include "dwarf-regs.h" 18 #include "evsel.h" 19 #include "evlist.h" 20 #include "map.h" 21 #include "map_symbol.h" 22 #include "sort.h" 23 #include "strbuf.h" 24 #include "symbol.h" 25 #include "symbol_conf.h" 26 #include "thread.h" 27 28 /* register number of the stack pointer */ 29 #define X86_REG_SP 7 30 31 enum type_state_kind { 32 TSR_KIND_INVALID = 0, 33 TSR_KIND_TYPE, 34 TSR_KIND_PERCPU_BASE, 35 TSR_KIND_CONST, 36 TSR_KIND_POINTER, 37 TSR_KIND_CANARY, 38 }; 39 40 #define pr_debug_dtp(fmt, ...) \ 41 do { \ 42 if (debug_type_profile) \ 43 pr_info(fmt, ##__VA_ARGS__); \ 44 else \ 45 pr_debug3(fmt, ##__VA_ARGS__); \ 46 } while (0) 47 48 static void pr_debug_type_name(Dwarf_Die *die, enum type_state_kind kind) 49 { 50 struct strbuf sb; 51 char *str; 52 Dwarf_Word size = 0; 53 54 if (!debug_type_profile && verbose < 3) 55 return; 56 57 switch (kind) { 58 case TSR_KIND_INVALID: 59 pr_info("\n"); 60 return; 61 case TSR_KIND_PERCPU_BASE: 62 pr_info(" percpu base\n"); 63 return; 64 case TSR_KIND_CONST: 65 pr_info(" constant\n"); 66 return; 67 case TSR_KIND_POINTER: 68 pr_info(" pointer"); 69 /* it also prints the type info */ 70 break; 71 case TSR_KIND_CANARY: 72 pr_info(" stack canary\n"); 73 return; 74 case TSR_KIND_TYPE: 75 default: 76 break; 77 } 78 79 dwarf_aggregate_size(die, &size); 80 81 strbuf_init(&sb, 32); 82 die_get_typename_from_type(die, &sb); 83 str = strbuf_detach(&sb, NULL); 84 pr_info(" type='%s' size=%#lx (die:%#lx)\n", 85 str, (long)size, (long)dwarf_dieoffset(die)); 86 free(str); 87 } 88 89 static void pr_debug_location(Dwarf_Die *die, u64 pc, int reg) 90 { 91 ptrdiff_t off = 0; 92 Dwarf_Attribute attr; 93 Dwarf_Addr base, start, end; 94 Dwarf_Op *ops; 95 size_t nops; 96 97 if (!debug_type_profile && verbose < 3) 98 return; 99 100 if (dwarf_attr(die, DW_AT_location, &attr) == NULL) 101 return; 102 103 while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) { 104 if (reg != DWARF_REG_PC && end < pc) 105 continue; 106 if (reg != DWARF_REG_PC && start > pc) 107 break; 108 109 pr_info(" variable location: "); 110 switch (ops->atom) { 111 case DW_OP_reg0 ...DW_OP_reg31: 112 pr_info("reg%d\n", ops->atom - DW_OP_reg0); 113 break; 114 case DW_OP_breg0 ...DW_OP_breg31: 115 pr_info("base=reg%d, offset=%#lx\n", 116 ops->atom - DW_OP_breg0, (long)ops->number); 117 break; 118 case DW_OP_regx: 119 pr_info("reg%ld\n", (long)ops->number); 120 break; 121 case DW_OP_bregx: 122 pr_info("base=reg%ld, offset=%#lx\n", 123 (long)ops->number, (long)ops->number2); 124 break; 125 case DW_OP_fbreg: 126 pr_info("use frame base, offset=%#lx\n", (long)ops->number); 127 break; 128 case DW_OP_addr: 129 pr_info("address=%#lx\n", (long)ops->number); 130 break; 131 default: 132 pr_info("unknown: code=%#x, number=%#lx\n", 133 ops->atom, (long)ops->number); 134 break; 135 } 136 break; 137 } 138 } 139 140 /* 141 * Type information in a register, valid when @ok is true. 142 * The @caller_saved registers are invalidated after a function call. 143 */ 144 struct type_state_reg { 145 Dwarf_Die type; 146 u32 imm_value; 147 bool ok; 148 bool caller_saved; 149 u8 kind; 150 }; 151 152 /* Type information in a stack location, dynamically allocated */ 153 struct type_state_stack { 154 struct list_head list; 155 Dwarf_Die type; 156 int offset; 157 int size; 158 bool compound; 159 u8 kind; 160 }; 161 162 /* FIXME: This should be arch-dependent */ 163 #define TYPE_STATE_MAX_REGS 16 164 165 /* 166 * State table to maintain type info in each register and stack location. 167 * It'll be updated when new variable is allocated or type info is moved 168 * to a new location (register or stack). As it'd be used with the 169 * shortest path of basic blocks, it only maintains a single table. 170 */ 171 struct type_state { 172 /* state of general purpose registers */ 173 struct type_state_reg regs[TYPE_STATE_MAX_REGS]; 174 /* state of stack location */ 175 struct list_head stack_vars; 176 /* return value register */ 177 int ret_reg; 178 /* stack pointer register */ 179 int stack_reg; 180 }; 181 182 static bool has_reg_type(struct type_state *state, int reg) 183 { 184 return (unsigned)reg < ARRAY_SIZE(state->regs); 185 } 186 187 static void init_type_state(struct type_state *state, struct arch *arch) 188 { 189 memset(state, 0, sizeof(*state)); 190 INIT_LIST_HEAD(&state->stack_vars); 191 192 if (arch__is(arch, "x86")) { 193 state->regs[0].caller_saved = true; 194 state->regs[1].caller_saved = true; 195 state->regs[2].caller_saved = true; 196 state->regs[4].caller_saved = true; 197 state->regs[5].caller_saved = true; 198 state->regs[8].caller_saved = true; 199 state->regs[9].caller_saved = true; 200 state->regs[10].caller_saved = true; 201 state->regs[11].caller_saved = true; 202 state->ret_reg = 0; 203 state->stack_reg = X86_REG_SP; 204 } 205 } 206 207 static void exit_type_state(struct type_state *state) 208 { 209 struct type_state_stack *stack, *tmp; 210 211 list_for_each_entry_safe(stack, tmp, &state->stack_vars, list) { 212 list_del(&stack->list); 213 free(stack); 214 } 215 } 216 217 /* 218 * Compare type name and size to maintain them in a tree. 219 * I'm not sure if DWARF would have information of a single type in many 220 * different places (compilation units). If not, it could compare the 221 * offset of the type entry in the .debug_info section. 222 */ 223 static int data_type_cmp(const void *_key, const struct rb_node *node) 224 { 225 const struct annotated_data_type *key = _key; 226 struct annotated_data_type *type; 227 228 type = rb_entry(node, struct annotated_data_type, node); 229 230 if (key->self.size != type->self.size) 231 return key->self.size - type->self.size; 232 return strcmp(key->self.type_name, type->self.type_name); 233 } 234 235 static bool data_type_less(struct rb_node *node_a, const struct rb_node *node_b) 236 { 237 struct annotated_data_type *a, *b; 238 239 a = rb_entry(node_a, struct annotated_data_type, node); 240 b = rb_entry(node_b, struct annotated_data_type, node); 241 242 if (a->self.size != b->self.size) 243 return a->self.size < b->self.size; 244 return strcmp(a->self.type_name, b->self.type_name) < 0; 245 } 246 247 /* Recursively add new members for struct/union */ 248 static int __add_member_cb(Dwarf_Die *die, void *arg) 249 { 250 struct annotated_member *parent = arg; 251 struct annotated_member *member; 252 Dwarf_Die member_type, die_mem; 253 Dwarf_Word size, loc; 254 Dwarf_Attribute attr; 255 struct strbuf sb; 256 int tag; 257 258 if (dwarf_tag(die) != DW_TAG_member) 259 return DIE_FIND_CB_SIBLING; 260 261 member = zalloc(sizeof(*member)); 262 if (member == NULL) 263 return DIE_FIND_CB_END; 264 265 strbuf_init(&sb, 32); 266 die_get_typename(die, &sb); 267 268 die_get_real_type(die, &member_type); 269 if (dwarf_aggregate_size(&member_type, &size) < 0) 270 size = 0; 271 272 if (!dwarf_attr_integrate(die, DW_AT_data_member_location, &attr)) 273 loc = 0; 274 else 275 dwarf_formudata(&attr, &loc); 276 277 member->type_name = strbuf_detach(&sb, NULL); 278 /* member->var_name can be NULL */ 279 if (dwarf_diename(die)) 280 member->var_name = strdup(dwarf_diename(die)); 281 member->size = size; 282 member->offset = loc + parent->offset; 283 INIT_LIST_HEAD(&member->children); 284 list_add_tail(&member->node, &parent->children); 285 286 tag = dwarf_tag(&member_type); 287 switch (tag) { 288 case DW_TAG_structure_type: 289 case DW_TAG_union_type: 290 die_find_child(&member_type, __add_member_cb, member, &die_mem); 291 break; 292 default: 293 break; 294 } 295 return DIE_FIND_CB_SIBLING; 296 } 297 298 static void add_member_types(struct annotated_data_type *parent, Dwarf_Die *type) 299 { 300 Dwarf_Die die_mem; 301 302 die_find_child(type, __add_member_cb, &parent->self, &die_mem); 303 } 304 305 static void delete_members(struct annotated_member *member) 306 { 307 struct annotated_member *child, *tmp; 308 309 list_for_each_entry_safe(child, tmp, &member->children, node) { 310 list_del(&child->node); 311 delete_members(child); 312 free(child->type_name); 313 free(child->var_name); 314 free(child); 315 } 316 } 317 318 static struct annotated_data_type *dso__findnew_data_type(struct dso *dso, 319 Dwarf_Die *type_die) 320 { 321 struct annotated_data_type *result = NULL; 322 struct annotated_data_type key; 323 struct rb_node *node; 324 struct strbuf sb; 325 char *type_name; 326 Dwarf_Word size; 327 328 strbuf_init(&sb, 32); 329 if (die_get_typename_from_type(type_die, &sb) < 0) 330 strbuf_add(&sb, "(unknown type)", 14); 331 type_name = strbuf_detach(&sb, NULL); 332 dwarf_aggregate_size(type_die, &size); 333 334 /* Check existing nodes in dso->data_types tree */ 335 key.self.type_name = type_name; 336 key.self.size = size; 337 node = rb_find(&key, &dso->data_types, data_type_cmp); 338 if (node) { 339 result = rb_entry(node, struct annotated_data_type, node); 340 free(type_name); 341 return result; 342 } 343 344 /* If not, add a new one */ 345 result = zalloc(sizeof(*result)); 346 if (result == NULL) { 347 free(type_name); 348 return NULL; 349 } 350 351 result->self.type_name = type_name; 352 result->self.size = size; 353 INIT_LIST_HEAD(&result->self.children); 354 355 if (symbol_conf.annotate_data_member) 356 add_member_types(result, type_die); 357 358 rb_add(&result->node, &dso->data_types, data_type_less); 359 return result; 360 } 361 362 static bool find_cu_die(struct debuginfo *di, u64 pc, Dwarf_Die *cu_die) 363 { 364 Dwarf_Off off, next_off; 365 size_t header_size; 366 367 if (dwarf_addrdie(di->dbg, pc, cu_die) != NULL) 368 return cu_die; 369 370 /* 371 * There are some kernels don't have full aranges and contain only a few 372 * aranges entries. Fallback to iterate all CU entries in .debug_info 373 * in case it's missing. 374 */ 375 off = 0; 376 while (dwarf_nextcu(di->dbg, off, &next_off, &header_size, 377 NULL, NULL, NULL) == 0) { 378 if (dwarf_offdie(di->dbg, off + header_size, cu_die) && 379 dwarf_haspc(cu_die, pc)) 380 return true; 381 382 off = next_off; 383 } 384 return false; 385 } 386 387 /* The type info will be saved in @type_die */ 388 static int check_variable(struct data_loc_info *dloc, Dwarf_Die *var_die, 389 Dwarf_Die *type_die, int reg, int offset, bool is_fbreg) 390 { 391 Dwarf_Word size; 392 bool is_pointer = true; 393 394 if (reg == DWARF_REG_PC) 395 is_pointer = false; 396 else if (reg == dloc->fbreg || is_fbreg) 397 is_pointer = false; 398 else if (arch__is(dloc->arch, "x86") && reg == X86_REG_SP) 399 is_pointer = false; 400 401 /* Get the type of the variable */ 402 if (die_get_real_type(var_die, type_die) == NULL) { 403 pr_debug_dtp("variable has no type\n"); 404 ann_data_stat.no_typeinfo++; 405 return -1; 406 } 407 408 /* 409 * Usually it expects a pointer type for a memory access. 410 * Convert to a real type it points to. But global variables 411 * and local variables are accessed directly without a pointer. 412 */ 413 if (is_pointer) { 414 if ((dwarf_tag(type_die) != DW_TAG_pointer_type && 415 dwarf_tag(type_die) != DW_TAG_array_type) || 416 die_get_real_type(type_die, type_die) == NULL) { 417 pr_debug_dtp("no pointer or no type\n"); 418 ann_data_stat.no_typeinfo++; 419 return -1; 420 } 421 } 422 423 /* Get the size of the actual type */ 424 if (dwarf_aggregate_size(type_die, &size) < 0) { 425 pr_debug_dtp("type size is unknown\n"); 426 ann_data_stat.invalid_size++; 427 return -1; 428 } 429 430 /* Minimal sanity check */ 431 if ((unsigned)offset >= size) { 432 pr_debug_dtp("offset: %d is bigger than size: %"PRIu64"\n", 433 offset, size); 434 ann_data_stat.bad_offset++; 435 return -1; 436 } 437 438 return 0; 439 } 440 441 static struct type_state_stack *find_stack_state(struct type_state *state, 442 int offset) 443 { 444 struct type_state_stack *stack; 445 446 list_for_each_entry(stack, &state->stack_vars, list) { 447 if (offset == stack->offset) 448 return stack; 449 450 if (stack->compound && stack->offset < offset && 451 offset < stack->offset + stack->size) 452 return stack; 453 } 454 return NULL; 455 } 456 457 static void set_stack_state(struct type_state_stack *stack, int offset, u8 kind, 458 Dwarf_Die *type_die) 459 { 460 int tag; 461 Dwarf_Word size; 462 463 if (dwarf_aggregate_size(type_die, &size) < 0) 464 size = 0; 465 466 tag = dwarf_tag(type_die); 467 468 stack->type = *type_die; 469 stack->size = size; 470 stack->offset = offset; 471 stack->kind = kind; 472 473 switch (tag) { 474 case DW_TAG_structure_type: 475 case DW_TAG_union_type: 476 stack->compound = (kind != TSR_KIND_POINTER); 477 break; 478 default: 479 stack->compound = false; 480 break; 481 } 482 } 483 484 static struct type_state_stack *findnew_stack_state(struct type_state *state, 485 int offset, u8 kind, 486 Dwarf_Die *type_die) 487 { 488 struct type_state_stack *stack = find_stack_state(state, offset); 489 490 if (stack) { 491 set_stack_state(stack, offset, kind, type_die); 492 return stack; 493 } 494 495 stack = malloc(sizeof(*stack)); 496 if (stack) { 497 set_stack_state(stack, offset, kind, type_die); 498 list_add(&stack->list, &state->stack_vars); 499 } 500 return stack; 501 } 502 503 /* Maintain a cache for quick global variable lookup */ 504 struct global_var_entry { 505 struct rb_node node; 506 char *name; 507 u64 start; 508 u64 end; 509 u64 die_offset; 510 }; 511 512 static int global_var_cmp(const void *_key, const struct rb_node *node) 513 { 514 const u64 addr = (uintptr_t)_key; 515 struct global_var_entry *gvar; 516 517 gvar = rb_entry(node, struct global_var_entry, node); 518 519 if (gvar->start <= addr && addr < gvar->end) 520 return 0; 521 return gvar->start > addr ? -1 : 1; 522 } 523 524 static bool global_var_less(struct rb_node *node_a, const struct rb_node *node_b) 525 { 526 struct global_var_entry *gvar_a, *gvar_b; 527 528 gvar_a = rb_entry(node_a, struct global_var_entry, node); 529 gvar_b = rb_entry(node_b, struct global_var_entry, node); 530 531 return gvar_a->start < gvar_b->start; 532 } 533 534 static struct global_var_entry *global_var__find(struct data_loc_info *dloc, u64 addr) 535 { 536 struct dso *dso = map__dso(dloc->ms->map); 537 struct rb_node *node; 538 539 node = rb_find((void *)(uintptr_t)addr, &dso->global_vars, global_var_cmp); 540 if (node == NULL) 541 return NULL; 542 543 return rb_entry(node, struct global_var_entry, node); 544 } 545 546 static bool global_var__add(struct data_loc_info *dloc, u64 addr, 547 const char *name, Dwarf_Die *type_die) 548 { 549 struct dso *dso = map__dso(dloc->ms->map); 550 struct global_var_entry *gvar; 551 Dwarf_Word size; 552 553 if (dwarf_aggregate_size(type_die, &size) < 0) 554 return false; 555 556 gvar = malloc(sizeof(*gvar)); 557 if (gvar == NULL) 558 return false; 559 560 gvar->name = strdup(name); 561 if (gvar->name == NULL) { 562 free(gvar); 563 return false; 564 } 565 566 gvar->start = addr; 567 gvar->end = addr + size; 568 gvar->die_offset = dwarf_dieoffset(type_die); 569 570 rb_add(&gvar->node, &dso->global_vars, global_var_less); 571 return true; 572 } 573 574 void global_var_type__tree_delete(struct rb_root *root) 575 { 576 struct global_var_entry *gvar; 577 578 while (!RB_EMPTY_ROOT(root)) { 579 struct rb_node *node = rb_first(root); 580 581 rb_erase(node, root); 582 gvar = rb_entry(node, struct global_var_entry, node); 583 free(gvar->name); 584 free(gvar); 585 } 586 } 587 588 static bool get_global_var_info(struct data_loc_info *dloc, u64 addr, 589 const char **var_name, int *var_offset) 590 { 591 struct addr_location al; 592 struct symbol *sym; 593 u64 mem_addr; 594 595 /* Kernel symbols might be relocated */ 596 mem_addr = addr + map__reloc(dloc->ms->map); 597 598 addr_location__init(&al); 599 sym = thread__find_symbol_fb(dloc->thread, dloc->cpumode, 600 mem_addr, &al); 601 if (sym) { 602 *var_name = sym->name; 603 /* Calculate type offset from the start of variable */ 604 *var_offset = mem_addr - map__unmap_ip(al.map, sym->start); 605 } else { 606 *var_name = NULL; 607 } 608 addr_location__exit(&al); 609 if (*var_name == NULL) 610 return false; 611 612 return true; 613 } 614 615 static bool get_global_var_type(Dwarf_Die *cu_die, struct data_loc_info *dloc, 616 u64 ip, u64 var_addr, int *var_offset, 617 Dwarf_Die *type_die) 618 { 619 u64 pc; 620 int offset; 621 const char *var_name = NULL; 622 struct global_var_entry *gvar; 623 Dwarf_Die var_die; 624 625 gvar = global_var__find(dloc, var_addr); 626 if (gvar) { 627 if (!dwarf_offdie(dloc->di->dbg, gvar->die_offset, type_die)) 628 return false; 629 630 *var_offset = var_addr - gvar->start; 631 return true; 632 } 633 634 /* Try to get the variable by address first */ 635 if (die_find_variable_by_addr(cu_die, var_addr, &var_die, &offset) && 636 check_variable(dloc, &var_die, type_die, DWARF_REG_PC, offset, 637 /*is_fbreg=*/false) == 0) { 638 var_name = dwarf_diename(&var_die); 639 *var_offset = offset; 640 goto ok; 641 } 642 643 if (!get_global_var_info(dloc, var_addr, &var_name, var_offset)) 644 return false; 645 646 pc = map__rip_2objdump(dloc->ms->map, ip); 647 648 /* Try to get the name of global variable */ 649 if (die_find_variable_at(cu_die, var_name, pc, &var_die) && 650 check_variable(dloc, &var_die, type_die, DWARF_REG_PC, *var_offset, 651 /*is_fbreg=*/false) == 0) 652 goto ok; 653 654 return false; 655 656 ok: 657 /* The address should point to the start of the variable */ 658 global_var__add(dloc, var_addr - *var_offset, var_name, type_die); 659 return true; 660 } 661 662 /** 663 * update_var_state - Update type state using given variables 664 * @state: type state table 665 * @dloc: data location info 666 * @addr: instruction address to match with variable 667 * @insn_offset: instruction offset (for debug) 668 * @var_types: list of variables with type info 669 * 670 * This function fills the @state table using @var_types info. Each variable 671 * is used only at the given location and updates an entry in the table. 672 */ 673 static void update_var_state(struct type_state *state, struct data_loc_info *dloc, 674 u64 addr, u64 insn_offset, struct die_var_type *var_types) 675 { 676 Dwarf_Die mem_die; 677 struct die_var_type *var; 678 int fbreg = dloc->fbreg; 679 int fb_offset = 0; 680 681 if (dloc->fb_cfa) { 682 if (die_get_cfa(dloc->di->dbg, addr, &fbreg, &fb_offset) < 0) 683 fbreg = -1; 684 } 685 686 for (var = var_types; var != NULL; var = var->next) { 687 if (var->addr != addr) 688 continue; 689 /* Get the type DIE using the offset */ 690 if (!dwarf_offdie(dloc->di->dbg, var->die_off, &mem_die)) 691 continue; 692 693 if (var->reg == DWARF_REG_FB) { 694 findnew_stack_state(state, var->offset, TSR_KIND_TYPE, 695 &mem_die); 696 697 pr_debug_dtp("var [%"PRIx64"] -%#x(stack)", 698 insn_offset, -var->offset); 699 pr_debug_type_name(&mem_die, TSR_KIND_TYPE); 700 } else if (var->reg == fbreg) { 701 findnew_stack_state(state, var->offset - fb_offset, 702 TSR_KIND_TYPE, &mem_die); 703 704 pr_debug_dtp("var [%"PRIx64"] -%#x(stack)", 705 insn_offset, -var->offset + fb_offset); 706 pr_debug_type_name(&mem_die, TSR_KIND_TYPE); 707 } else if (has_reg_type(state, var->reg) && var->offset == 0) { 708 struct type_state_reg *reg; 709 710 reg = &state->regs[var->reg]; 711 reg->type = mem_die; 712 reg->kind = TSR_KIND_TYPE; 713 reg->ok = true; 714 715 pr_debug_dtp("var [%"PRIx64"] reg%d", 716 insn_offset, var->reg); 717 pr_debug_type_name(&mem_die, TSR_KIND_TYPE); 718 } 719 } 720 } 721 722 static void update_insn_state_x86(struct type_state *state, 723 struct data_loc_info *dloc, Dwarf_Die *cu_die, 724 struct disasm_line *dl) 725 { 726 struct annotated_insn_loc loc; 727 struct annotated_op_loc *src = &loc.ops[INSN_OP_SOURCE]; 728 struct annotated_op_loc *dst = &loc.ops[INSN_OP_TARGET]; 729 struct type_state_reg *tsr; 730 Dwarf_Die type_die; 731 u32 insn_offset = dl->al.offset; 732 int fbreg = dloc->fbreg; 733 int fboff = 0; 734 735 if (annotate_get_insn_location(dloc->arch, dl, &loc) < 0) 736 return; 737 738 if (ins__is_call(&dl->ins)) { 739 struct symbol *func = dl->ops.target.sym; 740 741 if (func == NULL) 742 return; 743 744 /* __fentry__ will preserve all registers */ 745 if (!strcmp(func->name, "__fentry__")) 746 return; 747 748 pr_debug_dtp("call [%x] %s\n", insn_offset, func->name); 749 750 /* Otherwise invalidate caller-saved registers after call */ 751 for (unsigned i = 0; i < ARRAY_SIZE(state->regs); i++) { 752 if (state->regs[i].caller_saved) 753 state->regs[i].ok = false; 754 } 755 756 /* Update register with the return type (if any) */ 757 if (die_find_func_rettype(cu_die, func->name, &type_die)) { 758 tsr = &state->regs[state->ret_reg]; 759 tsr->type = type_die; 760 tsr->kind = TSR_KIND_TYPE; 761 tsr->ok = true; 762 763 pr_debug_dtp("call [%x] return -> reg%d", 764 insn_offset, state->ret_reg); 765 pr_debug_type_name(&type_die, tsr->kind); 766 } 767 return; 768 } 769 770 if (!strncmp(dl->ins.name, "add", 3)) { 771 u64 imm_value = -1ULL; 772 int offset; 773 const char *var_name = NULL; 774 struct map_symbol *ms = dloc->ms; 775 u64 ip = ms->sym->start + dl->al.offset; 776 777 if (!has_reg_type(state, dst->reg1)) 778 return; 779 780 tsr = &state->regs[dst->reg1]; 781 782 if (src->imm) 783 imm_value = src->offset; 784 else if (has_reg_type(state, src->reg1) && 785 state->regs[src->reg1].kind == TSR_KIND_CONST) 786 imm_value = state->regs[src->reg1].imm_value; 787 else if (src->reg1 == DWARF_REG_PC) { 788 u64 var_addr = annotate_calc_pcrel(dloc->ms, ip, 789 src->offset, dl); 790 791 if (get_global_var_info(dloc, var_addr, 792 &var_name, &offset) && 793 !strcmp(var_name, "this_cpu_off") && 794 tsr->kind == TSR_KIND_CONST) { 795 tsr->kind = TSR_KIND_PERCPU_BASE; 796 imm_value = tsr->imm_value; 797 } 798 } 799 else 800 return; 801 802 if (tsr->kind != TSR_KIND_PERCPU_BASE) 803 return; 804 805 if (get_global_var_type(cu_die, dloc, ip, imm_value, &offset, 806 &type_die) && offset == 0) { 807 /* 808 * This is not a pointer type, but it should be treated 809 * as a pointer. 810 */ 811 tsr->type = type_die; 812 tsr->kind = TSR_KIND_POINTER; 813 tsr->ok = true; 814 815 pr_debug_dtp("add [%x] percpu %#"PRIx64" -> reg%d", 816 insn_offset, imm_value, dst->reg1); 817 pr_debug_type_name(&tsr->type, tsr->kind); 818 } 819 return; 820 } 821 822 if (strncmp(dl->ins.name, "mov", 3)) 823 return; 824 825 if (dloc->fb_cfa) { 826 u64 ip = dloc->ms->sym->start + dl->al.offset; 827 u64 pc = map__rip_2objdump(dloc->ms->map, ip); 828 829 if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0) 830 fbreg = -1; 831 } 832 833 /* Case 1. register to register or segment:offset to register transfers */ 834 if (!src->mem_ref && !dst->mem_ref) { 835 if (!has_reg_type(state, dst->reg1)) 836 return; 837 838 tsr = &state->regs[dst->reg1]; 839 if (map__dso(dloc->ms->map)->kernel && 840 src->segment == INSN_SEG_X86_GS && src->imm) { 841 u64 ip = dloc->ms->sym->start + dl->al.offset; 842 u64 var_addr; 843 int offset; 844 845 /* 846 * In kernel, %gs points to a per-cpu region for the 847 * current CPU. Access with a constant offset should 848 * be treated as a global variable access. 849 */ 850 var_addr = src->offset; 851 852 if (var_addr == 40) { 853 tsr->kind = TSR_KIND_CANARY; 854 tsr->ok = true; 855 856 pr_debug_dtp("mov [%x] stack canary -> reg%d\n", 857 insn_offset, dst->reg1); 858 return; 859 } 860 861 if (!get_global_var_type(cu_die, dloc, ip, var_addr, 862 &offset, &type_die) || 863 !die_get_member_type(&type_die, offset, &type_die)) { 864 tsr->ok = false; 865 return; 866 } 867 868 tsr->type = type_die; 869 tsr->kind = TSR_KIND_TYPE; 870 tsr->ok = true; 871 872 pr_debug_dtp("mov [%x] this-cpu addr=%#"PRIx64" -> reg%d", 873 insn_offset, var_addr, dst->reg1); 874 pr_debug_type_name(&tsr->type, tsr->kind); 875 return; 876 } 877 878 if (src->imm) { 879 tsr->kind = TSR_KIND_CONST; 880 tsr->imm_value = src->offset; 881 tsr->ok = true; 882 883 pr_debug_dtp("mov [%x] imm=%#x -> reg%d\n", 884 insn_offset, tsr->imm_value, dst->reg1); 885 return; 886 } 887 888 if (!has_reg_type(state, src->reg1) || 889 !state->regs[src->reg1].ok) { 890 tsr->ok = false; 891 return; 892 } 893 894 tsr->type = state->regs[src->reg1].type; 895 tsr->kind = state->regs[src->reg1].kind; 896 tsr->ok = true; 897 898 pr_debug_dtp("mov [%x] reg%d -> reg%d", 899 insn_offset, src->reg1, dst->reg1); 900 pr_debug_type_name(&tsr->type, tsr->kind); 901 } 902 /* Case 2. memory to register transers */ 903 if (src->mem_ref && !dst->mem_ref) { 904 int sreg = src->reg1; 905 906 if (!has_reg_type(state, dst->reg1)) 907 return; 908 909 tsr = &state->regs[dst->reg1]; 910 911 retry: 912 /* Check stack variables with offset */ 913 if (sreg == fbreg) { 914 struct type_state_stack *stack; 915 int offset = src->offset - fboff; 916 917 stack = find_stack_state(state, offset); 918 if (stack == NULL) { 919 tsr->ok = false; 920 return; 921 } else if (!stack->compound) { 922 tsr->type = stack->type; 923 tsr->kind = stack->kind; 924 tsr->ok = true; 925 } else if (die_get_member_type(&stack->type, 926 offset - stack->offset, 927 &type_die)) { 928 tsr->type = type_die; 929 tsr->kind = TSR_KIND_TYPE; 930 tsr->ok = true; 931 } else { 932 tsr->ok = false; 933 return; 934 } 935 936 pr_debug_dtp("mov [%x] -%#x(stack) -> reg%d", 937 insn_offset, -offset, dst->reg1); 938 pr_debug_type_name(&tsr->type, tsr->kind); 939 } 940 /* And then dereference the pointer if it has one */ 941 else if (has_reg_type(state, sreg) && state->regs[sreg].ok && 942 state->regs[sreg].kind == TSR_KIND_TYPE && 943 die_deref_ptr_type(&state->regs[sreg].type, 944 src->offset, &type_die)) { 945 tsr->type = type_die; 946 tsr->kind = TSR_KIND_TYPE; 947 tsr->ok = true; 948 949 pr_debug_dtp("mov [%x] %#x(reg%d) -> reg%d", 950 insn_offset, src->offset, sreg, dst->reg1); 951 pr_debug_type_name(&tsr->type, tsr->kind); 952 } 953 /* Or check if it's a global variable */ 954 else if (sreg == DWARF_REG_PC) { 955 struct map_symbol *ms = dloc->ms; 956 u64 ip = ms->sym->start + dl->al.offset; 957 u64 addr; 958 int offset; 959 960 addr = annotate_calc_pcrel(ms, ip, src->offset, dl); 961 962 if (!get_global_var_type(cu_die, dloc, ip, addr, &offset, 963 &type_die) || 964 !die_get_member_type(&type_die, offset, &type_die)) { 965 tsr->ok = false; 966 return; 967 } 968 969 tsr->type = type_die; 970 tsr->kind = TSR_KIND_TYPE; 971 tsr->ok = true; 972 973 pr_debug_dtp("mov [%x] global addr=%"PRIx64" -> reg%d", 974 insn_offset, addr, dst->reg1); 975 pr_debug_type_name(&type_die, tsr->kind); 976 } 977 /* And check percpu access with base register */ 978 else if (has_reg_type(state, sreg) && 979 state->regs[sreg].kind == TSR_KIND_PERCPU_BASE) { 980 u64 ip = dloc->ms->sym->start + dl->al.offset; 981 int offset; 982 983 /* 984 * In kernel, %gs points to a per-cpu region for the 985 * current CPU. Access with a constant offset should 986 * be treated as a global variable access. 987 */ 988 if (get_global_var_type(cu_die, dloc, ip, src->offset, 989 &offset, &type_die) && 990 die_get_member_type(&type_die, offset, &type_die)) { 991 tsr->type = type_die; 992 tsr->kind = TSR_KIND_TYPE; 993 tsr->ok = true; 994 995 pr_debug_dtp("mov [%x] percpu %#x(reg%d) -> reg%d", 996 insn_offset, src->offset, sreg, dst->reg1); 997 pr_debug_type_name(&tsr->type, tsr->kind); 998 } else { 999 tsr->ok = false; 1000 } 1001 } 1002 /* And then dereference the calculated pointer if it has one */ 1003 else if (has_reg_type(state, sreg) && state->regs[sreg].ok && 1004 state->regs[sreg].kind == TSR_KIND_POINTER && 1005 die_get_member_type(&state->regs[sreg].type, 1006 src->offset, &type_die)) { 1007 tsr->type = type_die; 1008 tsr->kind = TSR_KIND_TYPE; 1009 tsr->ok = true; 1010 1011 pr_debug_dtp("mov [%x] pointer %#x(reg%d) -> reg%d", 1012 insn_offset, src->offset, sreg, dst->reg1); 1013 pr_debug_type_name(&tsr->type, tsr->kind); 1014 } 1015 /* Or try another register if any */ 1016 else if (src->multi_regs && sreg == src->reg1 && 1017 src->reg1 != src->reg2) { 1018 sreg = src->reg2; 1019 goto retry; 1020 } 1021 else { 1022 int offset; 1023 const char *var_name = NULL; 1024 1025 /* it might be per-cpu variable (in kernel) access */ 1026 if (src->offset < 0) { 1027 if (get_global_var_info(dloc, (s64)src->offset, 1028 &var_name, &offset) && 1029 !strcmp(var_name, "__per_cpu_offset")) { 1030 tsr->kind = TSR_KIND_PERCPU_BASE; 1031 1032 pr_debug_dtp("mov [%x] percpu base reg%d\n", 1033 insn_offset, dst->reg1); 1034 } 1035 } 1036 1037 tsr->ok = false; 1038 } 1039 } 1040 /* Case 3. register to memory transfers */ 1041 if (!src->mem_ref && dst->mem_ref) { 1042 if (!has_reg_type(state, src->reg1) || 1043 !state->regs[src->reg1].ok) 1044 return; 1045 1046 /* Check stack variables with offset */ 1047 if (dst->reg1 == fbreg) { 1048 struct type_state_stack *stack; 1049 int offset = dst->offset - fboff; 1050 1051 tsr = &state->regs[src->reg1]; 1052 1053 stack = find_stack_state(state, offset); 1054 if (stack) { 1055 /* 1056 * The source register is likely to hold a type 1057 * of member if it's a compound type. Do not 1058 * update the stack variable type since we can 1059 * get the member type later by using the 1060 * die_get_member_type(). 1061 */ 1062 if (!stack->compound) 1063 set_stack_state(stack, offset, tsr->kind, 1064 &tsr->type); 1065 } else { 1066 findnew_stack_state(state, offset, tsr->kind, 1067 &tsr->type); 1068 } 1069 1070 pr_debug_dtp("mov [%x] reg%d -> -%#x(stack)", 1071 insn_offset, src->reg1, -offset); 1072 pr_debug_type_name(&tsr->type, tsr->kind); 1073 } 1074 /* 1075 * Ignore other transfers since it'd set a value in a struct 1076 * and won't change the type. 1077 */ 1078 } 1079 /* Case 4. memory to memory transfers (not handled for now) */ 1080 } 1081 1082 /** 1083 * update_insn_state - Update type state for an instruction 1084 * @state: type state table 1085 * @dloc: data location info 1086 * @cu_die: compile unit debug entry 1087 * @dl: disasm line for the instruction 1088 * 1089 * This function updates the @state table for the target operand of the 1090 * instruction at @dl if it transfers the type like MOV on x86. Since it 1091 * tracks the type, it won't care about the values like in arithmetic 1092 * instructions like ADD/SUB/MUL/DIV and INC/DEC. 1093 * 1094 * Note that ops->reg2 is only available when both mem_ref and multi_regs 1095 * are true. 1096 */ 1097 static void update_insn_state(struct type_state *state, struct data_loc_info *dloc, 1098 Dwarf_Die *cu_die, struct disasm_line *dl) 1099 { 1100 if (arch__is(dloc->arch, "x86")) 1101 update_insn_state_x86(state, dloc, cu_die, dl); 1102 } 1103 1104 /* 1105 * Prepend this_blocks (from the outer scope) to full_blocks, removing 1106 * duplicate disasm line. 1107 */ 1108 static void prepend_basic_blocks(struct list_head *this_blocks, 1109 struct list_head *full_blocks) 1110 { 1111 struct annotated_basic_block *first_bb, *last_bb; 1112 1113 last_bb = list_last_entry(this_blocks, typeof(*last_bb), list); 1114 first_bb = list_first_entry(full_blocks, typeof(*first_bb), list); 1115 1116 if (list_empty(full_blocks)) 1117 goto out; 1118 1119 /* Last insn in this_blocks should be same as first insn in full_blocks */ 1120 if (last_bb->end != first_bb->begin) { 1121 pr_debug("prepend basic blocks: mismatched disasm line %"PRIx64" -> %"PRIx64"\n", 1122 last_bb->end->al.offset, first_bb->begin->al.offset); 1123 goto out; 1124 } 1125 1126 /* Is the basic block have only one disasm_line? */ 1127 if (last_bb->begin == last_bb->end) { 1128 list_del(&last_bb->list); 1129 free(last_bb); 1130 goto out; 1131 } 1132 1133 /* Point to the insn before the last when adding this block to full_blocks */ 1134 last_bb->end = list_prev_entry(last_bb->end, al.node); 1135 1136 out: 1137 list_splice(this_blocks, full_blocks); 1138 } 1139 1140 static void delete_basic_blocks(struct list_head *basic_blocks) 1141 { 1142 struct annotated_basic_block *bb, *tmp; 1143 1144 list_for_each_entry_safe(bb, tmp, basic_blocks, list) { 1145 list_del(&bb->list); 1146 free(bb); 1147 } 1148 } 1149 1150 /* Make sure all variables have a valid start address */ 1151 static void fixup_var_address(struct die_var_type *var_types, u64 addr) 1152 { 1153 while (var_types) { 1154 /* 1155 * Some variables have no address range meaning it's always 1156 * available in the whole scope. Let's adjust the start 1157 * address to the start of the scope. 1158 */ 1159 if (var_types->addr == 0) 1160 var_types->addr = addr; 1161 1162 var_types = var_types->next; 1163 } 1164 } 1165 1166 static void delete_var_types(struct die_var_type *var_types) 1167 { 1168 while (var_types) { 1169 struct die_var_type *next = var_types->next; 1170 1171 free(var_types); 1172 var_types = next; 1173 } 1174 } 1175 1176 /* should match to is_stack_canary() in util/annotate.c */ 1177 static void setup_stack_canary(struct data_loc_info *dloc) 1178 { 1179 if (arch__is(dloc->arch, "x86")) { 1180 dloc->op->segment = INSN_SEG_X86_GS; 1181 dloc->op->imm = true; 1182 dloc->op->offset = 40; 1183 } 1184 } 1185 1186 /* 1187 * It's at the target address, check if it has a matching type. 1188 * It returns 1 if found, 0 if not or -1 if not found but no need to 1189 * repeat the search. The last case is for per-cpu variables which 1190 * are similar to global variables and no additional info is needed. 1191 */ 1192 static int check_matching_type(struct type_state *state, 1193 struct data_loc_info *dloc, int reg, 1194 Dwarf_Die *cu_die, Dwarf_Die *type_die) 1195 { 1196 Dwarf_Word size; 1197 u32 insn_offset = dloc->ip - dloc->ms->sym->start; 1198 1199 pr_debug_dtp("chk [%x] reg%d offset=%#x ok=%d kind=%d", 1200 insn_offset, reg, dloc->op->offset, 1201 state->regs[reg].ok, state->regs[reg].kind); 1202 1203 if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_TYPE) { 1204 int tag = dwarf_tag(&state->regs[reg].type); 1205 1206 pr_debug_dtp("\n"); 1207 1208 /* 1209 * Normal registers should hold a pointer (or array) to 1210 * dereference a memory location. 1211 */ 1212 if (tag != DW_TAG_pointer_type && tag != DW_TAG_array_type) 1213 return -1; 1214 1215 /* Remove the pointer and get the target type */ 1216 if (die_get_real_type(&state->regs[reg].type, type_die) == NULL) 1217 return -1; 1218 1219 dloc->type_offset = dloc->op->offset; 1220 1221 /* Get the size of the actual type */ 1222 if (dwarf_aggregate_size(type_die, &size) < 0 || 1223 (unsigned)dloc->type_offset >= size) 1224 return -1; 1225 1226 return 1; 1227 } 1228 1229 if (reg == dloc->fbreg) { 1230 struct type_state_stack *stack; 1231 1232 pr_debug_dtp(" fbreg\n"); 1233 1234 stack = find_stack_state(state, dloc->type_offset); 1235 if (stack == NULL) 1236 return 0; 1237 1238 if (stack->kind == TSR_KIND_CANARY) { 1239 setup_stack_canary(dloc); 1240 return -1; 1241 } 1242 1243 *type_die = stack->type; 1244 /* Update the type offset from the start of slot */ 1245 dloc->type_offset -= stack->offset; 1246 1247 return 1; 1248 } 1249 1250 if (dloc->fb_cfa) { 1251 struct type_state_stack *stack; 1252 u64 pc = map__rip_2objdump(dloc->ms->map, dloc->ip); 1253 int fbreg, fboff; 1254 1255 pr_debug_dtp(" cfa\n"); 1256 1257 if (die_get_cfa(dloc->di->dbg, pc, &fbreg, &fboff) < 0) 1258 fbreg = -1; 1259 1260 if (reg != fbreg) 1261 return 0; 1262 1263 stack = find_stack_state(state, dloc->type_offset - fboff); 1264 if (stack == NULL) 1265 return 0; 1266 1267 if (stack->kind == TSR_KIND_CANARY) { 1268 setup_stack_canary(dloc); 1269 return -1; 1270 } 1271 1272 *type_die = stack->type; 1273 /* Update the type offset from the start of slot */ 1274 dloc->type_offset -= fboff + stack->offset; 1275 1276 return 1; 1277 } 1278 1279 if (state->regs[reg].kind == TSR_KIND_PERCPU_BASE) { 1280 u64 var_addr = dloc->op->offset; 1281 int var_offset; 1282 1283 pr_debug_dtp(" percpu var\n"); 1284 1285 if (get_global_var_type(cu_die, dloc, dloc->ip, var_addr, 1286 &var_offset, type_die)) { 1287 dloc->type_offset = var_offset; 1288 return 1; 1289 } 1290 /* No need to retry per-cpu (global) variables */ 1291 return -1; 1292 } 1293 1294 if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_POINTER) { 1295 pr_debug_dtp(" percpu ptr\n"); 1296 1297 /* 1298 * It's actaully pointer but the address was calculated using 1299 * some arithmetic. So it points to the actual type already. 1300 */ 1301 *type_die = state->regs[reg].type; 1302 1303 dloc->type_offset = dloc->op->offset; 1304 1305 /* Get the size of the actual type */ 1306 if (dwarf_aggregate_size(type_die, &size) < 0 || 1307 (unsigned)dloc->type_offset >= size) 1308 return -1; 1309 1310 return 1; 1311 } 1312 1313 if (state->regs[reg].ok && state->regs[reg].kind == TSR_KIND_CANARY) { 1314 pr_debug_dtp(" stack canary\n"); 1315 1316 /* 1317 * This is a saved value of the stack canary which will be handled 1318 * in the outer logic when it returns failure here. Pretend it's 1319 * from the stack canary directly. 1320 */ 1321 setup_stack_canary(dloc); 1322 1323 return -1; 1324 } 1325 1326 if (map__dso(dloc->ms->map)->kernel && arch__is(dloc->arch, "x86")) { 1327 u64 addr; 1328 int offset; 1329 1330 /* Direct this-cpu access like "%gs:0x34740" */ 1331 if (dloc->op->segment == INSN_SEG_X86_GS && dloc->op->imm) { 1332 pr_debug_dtp(" this-cpu var\n"); 1333 1334 addr = dloc->op->offset; 1335 1336 if (get_global_var_type(cu_die, dloc, dloc->ip, addr, 1337 &offset, type_die)) { 1338 dloc->type_offset = offset; 1339 return 1; 1340 } 1341 return -1; 1342 } 1343 1344 /* Access to per-cpu base like "-0x7dcf0500(,%rdx,8)" */ 1345 if (dloc->op->offset < 0 && reg != state->stack_reg) { 1346 const char *var_name = NULL; 1347 1348 addr = (s64) dloc->op->offset; 1349 1350 if (get_global_var_info(dloc, addr, &var_name, &offset) && 1351 !strcmp(var_name, "__per_cpu_offset") && offset == 0 && 1352 get_global_var_type(cu_die, dloc, dloc->ip, addr, 1353 &offset, type_die)) { 1354 pr_debug_dtp(" percpu base\n"); 1355 1356 dloc->type_offset = offset; 1357 return 1; 1358 } 1359 pr_debug_dtp(" negative offset\n"); 1360 return -1; 1361 } 1362 } 1363 1364 pr_debug_dtp("\n"); 1365 return 0; 1366 } 1367 1368 /* Iterate instructions in basic blocks and update type table */ 1369 static int find_data_type_insn(struct data_loc_info *dloc, int reg, 1370 struct list_head *basic_blocks, 1371 struct die_var_type *var_types, 1372 Dwarf_Die *cu_die, Dwarf_Die *type_die) 1373 { 1374 struct type_state state; 1375 struct symbol *sym = dloc->ms->sym; 1376 struct annotation *notes = symbol__annotation(sym); 1377 struct annotated_basic_block *bb; 1378 int ret = 0; 1379 1380 init_type_state(&state, dloc->arch); 1381 1382 list_for_each_entry(bb, basic_blocks, list) { 1383 struct disasm_line *dl = bb->begin; 1384 1385 BUG_ON(bb->begin->al.offset == -1 || bb->end->al.offset == -1); 1386 1387 pr_debug_dtp("bb: [%"PRIx64" - %"PRIx64"]\n", 1388 bb->begin->al.offset, bb->end->al.offset); 1389 1390 list_for_each_entry_from(dl, ¬es->src->source, al.node) { 1391 u64 this_ip = sym->start + dl->al.offset; 1392 u64 addr = map__rip_2objdump(dloc->ms->map, this_ip); 1393 1394 /* Skip comment or debug info lines */ 1395 if (dl->al.offset == -1) 1396 continue; 1397 1398 /* Update variable type at this address */ 1399 update_var_state(&state, dloc, addr, dl->al.offset, var_types); 1400 1401 if (this_ip == dloc->ip) { 1402 ret = check_matching_type(&state, dloc, reg, 1403 cu_die, type_die); 1404 goto out; 1405 } 1406 1407 /* Update type table after processing the instruction */ 1408 update_insn_state(&state, dloc, cu_die, dl); 1409 if (dl == bb->end) 1410 break; 1411 } 1412 } 1413 1414 out: 1415 exit_type_state(&state); 1416 return ret; 1417 } 1418 1419 /* 1420 * Construct a list of basic blocks for each scope with variables and try to find 1421 * the data type by updating a type state table through instructions. 1422 */ 1423 static int find_data_type_block(struct data_loc_info *dloc, int reg, 1424 Dwarf_Die *cu_die, Dwarf_Die *scopes, 1425 int nr_scopes, Dwarf_Die *type_die) 1426 { 1427 LIST_HEAD(basic_blocks); 1428 struct die_var_type *var_types = NULL; 1429 u64 src_ip, dst_ip, prev_dst_ip; 1430 int ret = -1; 1431 1432 /* TODO: other architecture support */ 1433 if (!arch__is(dloc->arch, "x86")) 1434 return -1; 1435 1436 prev_dst_ip = dst_ip = dloc->ip; 1437 for (int i = nr_scopes - 1; i >= 0; i--) { 1438 Dwarf_Addr base, start, end; 1439 LIST_HEAD(this_blocks); 1440 int found; 1441 1442 if (dwarf_ranges(&scopes[i], 0, &base, &start, &end) < 0) 1443 break; 1444 1445 pr_debug_dtp("scope: [%d/%d] (die:%lx)\n", 1446 i + 1, nr_scopes, (long)dwarf_dieoffset(&scopes[i])); 1447 src_ip = map__objdump_2rip(dloc->ms->map, start); 1448 1449 again: 1450 /* Get basic blocks for this scope */ 1451 if (annotate_get_basic_blocks(dloc->ms->sym, src_ip, dst_ip, 1452 &this_blocks) < 0) { 1453 /* Try previous block if they are not connected */ 1454 if (prev_dst_ip != dst_ip) { 1455 dst_ip = prev_dst_ip; 1456 goto again; 1457 } 1458 1459 pr_debug_dtp("cannot find a basic block from %"PRIx64" to %"PRIx64"\n", 1460 src_ip - dloc->ms->sym->start, 1461 dst_ip - dloc->ms->sym->start); 1462 continue; 1463 } 1464 prepend_basic_blocks(&this_blocks, &basic_blocks); 1465 1466 /* Get variable info for this scope and add to var_types list */ 1467 die_collect_vars(&scopes[i], &var_types); 1468 fixup_var_address(var_types, start); 1469 1470 /* Find from start of this scope to the target instruction */ 1471 found = find_data_type_insn(dloc, reg, &basic_blocks, var_types, 1472 cu_die, type_die); 1473 if (found > 0) { 1474 pr_debug_dtp("found by insn track: %#x(reg%d) type-offset=%#x\n", 1475 dloc->op->offset, reg, dloc->type_offset); 1476 pr_debug_type_name(type_die, TSR_KIND_TYPE); 1477 ret = 0; 1478 break; 1479 } 1480 1481 if (found < 0) 1482 break; 1483 1484 /* Go up to the next scope and find blocks to the start */ 1485 prev_dst_ip = dst_ip; 1486 dst_ip = src_ip; 1487 } 1488 1489 delete_basic_blocks(&basic_blocks); 1490 delete_var_types(var_types); 1491 return ret; 1492 } 1493 1494 /* The result will be saved in @type_die */ 1495 static int find_data_type_die(struct data_loc_info *dloc, Dwarf_Die *type_die) 1496 { 1497 struct annotated_op_loc *loc = dloc->op; 1498 Dwarf_Die cu_die, var_die; 1499 Dwarf_Die *scopes = NULL; 1500 int reg, offset; 1501 int ret = -1; 1502 int i, nr_scopes; 1503 int fbreg = -1; 1504 int fb_offset = 0; 1505 bool is_fbreg = false; 1506 u64 pc; 1507 char buf[64]; 1508 1509 if (dloc->op->multi_regs) 1510 snprintf(buf, sizeof(buf), "reg%d, reg%d", dloc->op->reg1, dloc->op->reg2); 1511 else if (dloc->op->reg1 == DWARF_REG_PC) 1512 snprintf(buf, sizeof(buf), "PC"); 1513 else 1514 snprintf(buf, sizeof(buf), "reg%d", dloc->op->reg1); 1515 1516 pr_debug_dtp("-----------------------------------------------------------\n"); 1517 pr_debug_dtp("find data type for %#x(%s) at %s+%#"PRIx64"\n", 1518 dloc->op->offset, buf, dloc->ms->sym->name, 1519 dloc->ip - dloc->ms->sym->start); 1520 1521 /* 1522 * IP is a relative instruction address from the start of the map, as 1523 * it can be randomized/relocated, it needs to translate to PC which is 1524 * a file address for DWARF processing. 1525 */ 1526 pc = map__rip_2objdump(dloc->ms->map, dloc->ip); 1527 1528 /* Get a compile_unit for this address */ 1529 if (!find_cu_die(dloc->di, pc, &cu_die)) { 1530 pr_debug_dtp("cannot find CU for address %"PRIx64"\n", pc); 1531 ann_data_stat.no_cuinfo++; 1532 return -1; 1533 } 1534 1535 reg = loc->reg1; 1536 offset = loc->offset; 1537 1538 pr_debug_dtp("CU for %s (die:%#lx)\n", 1539 dwarf_diename(&cu_die), (long)dwarf_dieoffset(&cu_die)); 1540 1541 if (reg == DWARF_REG_PC) { 1542 if (get_global_var_type(&cu_die, dloc, dloc->ip, dloc->var_addr, 1543 &offset, type_die)) { 1544 dloc->type_offset = offset; 1545 1546 pr_debug_dtp("found by addr=%#"PRIx64" type_offset=%#x\n", 1547 dloc->var_addr, offset); 1548 pr_debug_type_name(type_die, TSR_KIND_TYPE); 1549 ret = 0; 1550 goto out; 1551 } 1552 } 1553 1554 /* Get a list of nested scopes - i.e. (inlined) functions and blocks. */ 1555 nr_scopes = die_get_scopes(&cu_die, pc, &scopes); 1556 1557 if (reg != DWARF_REG_PC && dwarf_hasattr(&scopes[0], DW_AT_frame_base)) { 1558 Dwarf_Attribute attr; 1559 Dwarf_Block block; 1560 1561 /* Check if the 'reg' is assigned as frame base register */ 1562 if (dwarf_attr(&scopes[0], DW_AT_frame_base, &attr) != NULL && 1563 dwarf_formblock(&attr, &block) == 0 && block.length == 1) { 1564 switch (*block.data) { 1565 case DW_OP_reg0 ... DW_OP_reg31: 1566 fbreg = dloc->fbreg = *block.data - DW_OP_reg0; 1567 break; 1568 case DW_OP_call_frame_cfa: 1569 dloc->fb_cfa = true; 1570 if (die_get_cfa(dloc->di->dbg, pc, &fbreg, 1571 &fb_offset) < 0) 1572 fbreg = -1; 1573 break; 1574 default: 1575 break; 1576 } 1577 1578 pr_debug_dtp("frame base: cfa=%d fbreg=%d\n", 1579 dloc->fb_cfa, fbreg); 1580 } 1581 } 1582 1583 retry: 1584 is_fbreg = (reg == fbreg); 1585 if (is_fbreg) 1586 offset = loc->offset - fb_offset; 1587 1588 /* Search from the inner-most scope to the outer */ 1589 for (i = nr_scopes - 1; i >= 0; i--) { 1590 if (reg == DWARF_REG_PC) { 1591 if (!die_find_variable_by_addr(&scopes[i], dloc->var_addr, 1592 &var_die, &offset)) 1593 continue; 1594 } else { 1595 /* Look up variables/parameters in this scope */ 1596 if (!die_find_variable_by_reg(&scopes[i], pc, reg, 1597 &offset, is_fbreg, &var_die)) 1598 continue; 1599 } 1600 1601 /* Found a variable, see if it's correct */ 1602 ret = check_variable(dloc, &var_die, type_die, reg, offset, is_fbreg); 1603 if (ret == 0) { 1604 pr_debug_dtp("found \"%s\" in scope=%d/%d (die: %#lx) ", 1605 dwarf_diename(&var_die), i+1, nr_scopes, 1606 (long)dwarf_dieoffset(&scopes[i])); 1607 if (reg == DWARF_REG_PC) { 1608 pr_debug_dtp("addr=%#"PRIx64" type_offset=%#x\n", 1609 dloc->var_addr, offset); 1610 } else if (reg == DWARF_REG_FB || is_fbreg) { 1611 pr_debug_dtp("stack_offset=%#x type_offset=%#x\n", 1612 fb_offset, offset); 1613 } else { 1614 pr_debug_dtp("type_offset=%#x\n", offset); 1615 } 1616 pr_debug_location(&var_die, pc, reg); 1617 pr_debug_type_name(type_die, TSR_KIND_TYPE); 1618 } else { 1619 pr_debug_dtp("check variable \"%s\" failed (die: %#lx)\n", 1620 dwarf_diename(&var_die), 1621 (long)dwarf_dieoffset(&var_die)); 1622 pr_debug_location(&var_die, pc, reg); 1623 pr_debug_type_name(type_die, TSR_KIND_TYPE); 1624 } 1625 dloc->type_offset = offset; 1626 goto out; 1627 } 1628 1629 if (reg != DWARF_REG_PC) { 1630 ret = find_data_type_block(dloc, reg, &cu_die, scopes, 1631 nr_scopes, type_die); 1632 if (ret == 0) { 1633 ann_data_stat.insn_track++; 1634 goto out; 1635 } 1636 } 1637 1638 if (loc->multi_regs && reg == loc->reg1 && loc->reg1 != loc->reg2) { 1639 reg = loc->reg2; 1640 goto retry; 1641 } 1642 1643 if (ret < 0) { 1644 pr_debug_dtp("no variable found\n"); 1645 ann_data_stat.no_var++; 1646 } 1647 1648 out: 1649 free(scopes); 1650 return ret; 1651 } 1652 1653 /** 1654 * find_data_type - Return a data type at the location 1655 * @dloc: data location 1656 * 1657 * This functions searches the debug information of the binary to get the data 1658 * type it accesses. The exact location is expressed by (ip, reg, offset) 1659 * for pointer variables or (ip, addr) for global variables. Note that global 1660 * variables might update the @dloc->type_offset after finding the start of the 1661 * variable. If it cannot find a global variable by address, it tried to find 1662 * a declaration of the variable using var_name. In that case, @dloc->offset 1663 * won't be updated. 1664 * 1665 * It return %NULL if not found. 1666 */ 1667 struct annotated_data_type *find_data_type(struct data_loc_info *dloc) 1668 { 1669 struct annotated_data_type *result = NULL; 1670 struct dso *dso = map__dso(dloc->ms->map); 1671 Dwarf_Die type_die; 1672 1673 dloc->di = debuginfo__new(dso->long_name); 1674 if (dloc->di == NULL) { 1675 pr_debug_dtp("cannot get the debug info\n"); 1676 return NULL; 1677 } 1678 1679 /* 1680 * The type offset is the same as instruction offset by default. 1681 * But when finding a global variable, the offset won't be valid. 1682 */ 1683 dloc->type_offset = dloc->op->offset; 1684 1685 dloc->fbreg = -1; 1686 1687 if (find_data_type_die(dloc, &type_die) < 0) 1688 goto out; 1689 1690 result = dso__findnew_data_type(dso, &type_die); 1691 1692 out: 1693 debuginfo__delete(dloc->di); 1694 return result; 1695 } 1696 1697 static int alloc_data_type_histograms(struct annotated_data_type *adt, int nr_entries) 1698 { 1699 int i; 1700 size_t sz = sizeof(struct type_hist); 1701 1702 sz += sizeof(struct type_hist_entry) * adt->self.size; 1703 1704 /* Allocate a table of pointers for each event */ 1705 adt->nr_histograms = nr_entries; 1706 adt->histograms = calloc(nr_entries, sizeof(*adt->histograms)); 1707 if (adt->histograms == NULL) 1708 return -ENOMEM; 1709 1710 /* 1711 * Each histogram is allocated for the whole size of the type. 1712 * TODO: Probably we can move the histogram to members. 1713 */ 1714 for (i = 0; i < nr_entries; i++) { 1715 adt->histograms[i] = zalloc(sz); 1716 if (adt->histograms[i] == NULL) 1717 goto err; 1718 } 1719 return 0; 1720 1721 err: 1722 while (--i >= 0) 1723 free(adt->histograms[i]); 1724 free(adt->histograms); 1725 return -ENOMEM; 1726 } 1727 1728 static void delete_data_type_histograms(struct annotated_data_type *adt) 1729 { 1730 for (int i = 0; i < adt->nr_histograms; i++) 1731 free(adt->histograms[i]); 1732 free(adt->histograms); 1733 } 1734 1735 void annotated_data_type__tree_delete(struct rb_root *root) 1736 { 1737 struct annotated_data_type *pos; 1738 1739 while (!RB_EMPTY_ROOT(root)) { 1740 struct rb_node *node = rb_first(root); 1741 1742 rb_erase(node, root); 1743 pos = rb_entry(node, struct annotated_data_type, node); 1744 delete_members(&pos->self); 1745 delete_data_type_histograms(pos); 1746 free(pos->self.type_name); 1747 free(pos); 1748 } 1749 } 1750 1751 /** 1752 * annotated_data_type__update_samples - Update histogram 1753 * @adt: Data type to update 1754 * @evsel: Event to update 1755 * @offset: Offset in the type 1756 * @nr_samples: Number of samples at this offset 1757 * @period: Event count at this offset 1758 * 1759 * This function updates type histogram at @ofs for @evsel. Samples are 1760 * aggregated before calling this function so it can be called with more 1761 * than one samples at a certain offset. 1762 */ 1763 int annotated_data_type__update_samples(struct annotated_data_type *adt, 1764 struct evsel *evsel, int offset, 1765 int nr_samples, u64 period) 1766 { 1767 struct type_hist *h; 1768 1769 if (adt == NULL) 1770 return 0; 1771 1772 if (adt->histograms == NULL) { 1773 int nr = evsel->evlist->core.nr_entries; 1774 1775 if (alloc_data_type_histograms(adt, nr) < 0) 1776 return -1; 1777 } 1778 1779 if (offset < 0 || offset >= adt->self.size) 1780 return -1; 1781 1782 h = adt->histograms[evsel->core.idx]; 1783 1784 h->nr_samples += nr_samples; 1785 h->addr[offset].nr_samples += nr_samples; 1786 h->period += period; 1787 h->addr[offset].period += period; 1788 return 0; 1789 } 1790 1791 static void print_annotated_data_header(struct hist_entry *he, struct evsel *evsel) 1792 { 1793 struct dso *dso = map__dso(he->ms.map); 1794 int nr_members = 1; 1795 int nr_samples = he->stat.nr_events; 1796 int width = 7; 1797 const char *val_hdr = "Percent"; 1798 1799 if (evsel__is_group_event(evsel)) { 1800 struct hist_entry *pair; 1801 1802 list_for_each_entry(pair, &he->pairs.head, pairs.node) 1803 nr_samples += pair->stat.nr_events; 1804 } 1805 1806 printf("Annotate type: '%s' in %s (%d samples):\n", 1807 he->mem_type->self.type_name, dso->name, nr_samples); 1808 1809 if (evsel__is_group_event(evsel)) { 1810 struct evsel *pos; 1811 int i = 0; 1812 1813 for_each_group_evsel(pos, evsel) 1814 printf(" event[%d] = %s\n", i++, pos->name); 1815 1816 nr_members = evsel->core.nr_members; 1817 } 1818 1819 if (symbol_conf.show_total_period) { 1820 width = 11; 1821 val_hdr = "Period"; 1822 } else if (symbol_conf.show_nr_samples) { 1823 width = 7; 1824 val_hdr = "Samples"; 1825 } 1826 1827 printf("============================================================================\n"); 1828 printf("%*s %10s %10s %s\n", (width + 1) * nr_members, val_hdr, 1829 "offset", "size", "field"); 1830 } 1831 1832 static void print_annotated_data_value(struct type_hist *h, u64 period, int nr_samples) 1833 { 1834 double percent = h->period ? (100.0 * period / h->period) : 0; 1835 const char *color = get_percent_color(percent); 1836 1837 if (symbol_conf.show_total_period) 1838 color_fprintf(stdout, color, " %11" PRIu64, period); 1839 else if (symbol_conf.show_nr_samples) 1840 color_fprintf(stdout, color, " %7d", nr_samples); 1841 else 1842 color_fprintf(stdout, color, " %7.2f", percent); 1843 } 1844 1845 static void print_annotated_data_type(struct annotated_data_type *mem_type, 1846 struct annotated_member *member, 1847 struct evsel *evsel, int indent) 1848 { 1849 struct annotated_member *child; 1850 struct type_hist *h = mem_type->histograms[evsel->core.idx]; 1851 int i, nr_events = 1, samples = 0; 1852 u64 period = 0; 1853 int width = symbol_conf.show_total_period ? 11 : 7; 1854 1855 for (i = 0; i < member->size; i++) { 1856 samples += h->addr[member->offset + i].nr_samples; 1857 period += h->addr[member->offset + i].period; 1858 } 1859 print_annotated_data_value(h, period, samples); 1860 1861 if (evsel__is_group_event(evsel)) { 1862 struct evsel *pos; 1863 1864 for_each_group_member(pos, evsel) { 1865 h = mem_type->histograms[pos->core.idx]; 1866 1867 samples = 0; 1868 period = 0; 1869 for (i = 0; i < member->size; i++) { 1870 samples += h->addr[member->offset + i].nr_samples; 1871 period += h->addr[member->offset + i].period; 1872 } 1873 print_annotated_data_value(h, period, samples); 1874 } 1875 nr_events = evsel->core.nr_members; 1876 } 1877 1878 printf(" %10d %10d %*s%s\t%s", 1879 member->offset, member->size, indent, "", member->type_name, 1880 member->var_name ?: ""); 1881 1882 if (!list_empty(&member->children)) 1883 printf(" {\n"); 1884 1885 list_for_each_entry(child, &member->children, node) 1886 print_annotated_data_type(mem_type, child, evsel, indent + 4); 1887 1888 if (!list_empty(&member->children)) 1889 printf("%*s}", (width + 1) * nr_events + 24 + indent, ""); 1890 printf(";\n"); 1891 } 1892 1893 int hist_entry__annotate_data_tty(struct hist_entry *he, struct evsel *evsel) 1894 { 1895 print_annotated_data_header(he, evsel); 1896 print_annotated_data_type(he->mem_type, &he->mem_type->self, evsel, 0); 1897 printf("\n"); 1898 1899 /* move to the next entry */ 1900 return '>'; 1901 } 1902