1 #define _GNU_SOURCE 2 #include <ctype.h> 3 #include <dirent.h> 4 #include <errno.h> 5 #include <libgen.h> 6 #include <stdlib.h> 7 #include <stdio.h> 8 #include <string.h> 9 #include <sys/types.h> 10 #include <sys/stat.h> 11 #include <sys/param.h> 12 #include <fcntl.h> 13 #include <unistd.h> 14 #include "build-id.h" 15 #include "debug.h" 16 #include "symbol.h" 17 #include "strlist.h" 18 19 #include <libelf.h> 20 #include <gelf.h> 21 #include <elf.h> 22 #include <limits.h> 23 #include <sys/utsname.h> 24 25 #ifndef NT_GNU_BUILD_ID 26 #define NT_GNU_BUILD_ID 3 27 #endif 28 29 static bool dso__build_id_equal(const struct dso *self, u8 *build_id); 30 static int elf_read_build_id(Elf *elf, void *bf, size_t size); 31 static void dsos__add(struct list_head *head, struct dso *dso); 32 static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); 33 static int dso__load_kernel_sym(struct dso *self, struct map *map, 34 symbol_filter_t filter); 35 static int dso__load_guest_kernel_sym(struct dso *self, struct map *map, 36 symbol_filter_t filter); 37 static int vmlinux_path__nr_entries; 38 static char **vmlinux_path; 39 40 struct symbol_conf symbol_conf = { 41 .exclude_other = true, 42 .use_modules = true, 43 .try_vmlinux_path = true, 44 }; 45 46 int dso__name_len(const struct dso *self) 47 { 48 if (verbose) 49 return self->long_name_len; 50 51 return self->short_name_len; 52 } 53 54 bool dso__loaded(const struct dso *self, enum map_type type) 55 { 56 return self->loaded & (1 << type); 57 } 58 59 bool dso__sorted_by_name(const struct dso *self, enum map_type type) 60 { 61 return self->sorted_by_name & (1 << type); 62 } 63 64 static void dso__set_sorted_by_name(struct dso *self, enum map_type type) 65 { 66 self->sorted_by_name |= (1 << type); 67 } 68 69 bool symbol_type__is_a(char symbol_type, enum map_type map_type) 70 { 71 switch (map_type) { 72 case MAP__FUNCTION: 73 return symbol_type == 'T' || symbol_type == 'W'; 74 case MAP__VARIABLE: 75 return symbol_type == 'D' || symbol_type == 'd'; 76 default: 77 return false; 78 } 79 } 80 81 static void symbols__fixup_end(struct rb_root *self) 82 { 83 struct rb_node *nd, *prevnd = rb_first(self); 84 struct symbol *curr, *prev; 85 86 if (prevnd == NULL) 87 return; 88 89 curr = rb_entry(prevnd, struct symbol, rb_node); 90 91 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { 92 prev = curr; 93 curr = rb_entry(nd, struct symbol, rb_node); 94 95 if (prev->end == prev->start) 96 prev->end = curr->start - 1; 97 } 98 99 /* Last entry */ 100 if (curr->end == curr->start) 101 curr->end = roundup(curr->start, 4096); 102 } 103 104 static void __map_groups__fixup_end(struct map_groups *self, enum map_type type) 105 { 106 struct map *prev, *curr; 107 struct rb_node *nd, *prevnd = rb_first(&self->maps[type]); 108 109 if (prevnd == NULL) 110 return; 111 112 curr = rb_entry(prevnd, struct map, rb_node); 113 114 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { 115 prev = curr; 116 curr = rb_entry(nd, struct map, rb_node); 117 prev->end = curr->start - 1; 118 } 119 120 /* 121 * We still haven't the actual symbols, so guess the 122 * last map final address. 
123 */ 124 curr->end = ~0UL; 125 } 126 127 static void map_groups__fixup_end(struct map_groups *self) 128 { 129 int i; 130 for (i = 0; i < MAP__NR_TYPES; ++i) 131 __map_groups__fixup_end(self, i); 132 } 133 134 static struct symbol *symbol__new(u64 start, u64 len, const char *name) 135 { 136 size_t namelen = strlen(name) + 1; 137 struct symbol *self = calloc(1, (symbol_conf.priv_size + 138 sizeof(*self) + namelen)); 139 if (self == NULL) 140 return NULL; 141 142 if (symbol_conf.priv_size) 143 self = ((void *)self) + symbol_conf.priv_size; 144 145 self->start = start; 146 self->end = len ? start + len - 1 : start; 147 self->namelen = namelen - 1; 148 149 pr_debug4("%s: %s %#Lx-%#Lx\n", __func__, name, start, self->end); 150 151 memcpy(self->name, name, namelen); 152 153 return self; 154 } 155 156 void symbol__delete(struct symbol *self) 157 { 158 free(((void *)self) - symbol_conf.priv_size); 159 } 160 161 static size_t symbol__fprintf(struct symbol *self, FILE *fp) 162 { 163 return fprintf(fp, " %llx-%llx %s\n", 164 self->start, self->end, self->name); 165 } 166 167 void dso__set_long_name(struct dso *self, char *name) 168 { 169 if (name == NULL) 170 return; 171 self->long_name = name; 172 self->long_name_len = strlen(name); 173 } 174 175 static void dso__set_short_name(struct dso *self, const char *name) 176 { 177 if (name == NULL) 178 return; 179 self->short_name = name; 180 self->short_name_len = strlen(name); 181 } 182 183 static void dso__set_basename(struct dso *self) 184 { 185 dso__set_short_name(self, basename(self->long_name)); 186 } 187 188 struct dso *dso__new(const char *name) 189 { 190 struct dso *self = calloc(1, sizeof(*self) + strlen(name) + 1); 191 192 if (self != NULL) { 193 int i; 194 strcpy(self->name, name); 195 dso__set_long_name(self, self->name); 196 dso__set_short_name(self, self->name); 197 for (i = 0; i < MAP__NR_TYPES; ++i) 198 self->symbols[i] = self->symbol_names[i] = RB_ROOT; 199 self->slen_calculated = 0; 200 self->origin = DSO__ORIG_NOT_FOUND; 201 self->loaded = 0; 202 self->sorted_by_name = 0; 203 self->has_build_id = 0; 204 self->kernel = DSO_TYPE_USER; 205 INIT_LIST_HEAD(&self->node); 206 } 207 208 return self; 209 } 210 211 static void symbols__delete(struct rb_root *self) 212 { 213 struct symbol *pos; 214 struct rb_node *next = rb_first(self); 215 216 while (next) { 217 pos = rb_entry(next, struct symbol, rb_node); 218 next = rb_next(&pos->rb_node); 219 rb_erase(&pos->rb_node, self); 220 symbol__delete(pos); 221 } 222 } 223 224 void dso__delete(struct dso *self) 225 { 226 int i; 227 for (i = 0; i < MAP__NR_TYPES; ++i) 228 symbols__delete(&self->symbols[i]); 229 if (self->sname_alloc) 230 free((char *)self->short_name); 231 if (self->lname_alloc) 232 free(self->long_name); 233 free(self); 234 } 235 236 void dso__set_build_id(struct dso *self, void *build_id) 237 { 238 memcpy(self->build_id, build_id, sizeof(self->build_id)); 239 self->has_build_id = 1; 240 } 241 242 static void symbols__insert(struct rb_root *self, struct symbol *sym) 243 { 244 struct rb_node **p = &self->rb_node; 245 struct rb_node *parent = NULL; 246 const u64 ip = sym->start; 247 struct symbol *s; 248 249 while (*p != NULL) { 250 parent = *p; 251 s = rb_entry(parent, struct symbol, rb_node); 252 if (ip < s->start) 253 p = &(*p)->rb_left; 254 else 255 p = &(*p)->rb_right; 256 } 257 rb_link_node(&sym->rb_node, parent, p); 258 rb_insert_color(&sym->rb_node, self); 259 } 260 261 static struct symbol *symbols__find(struct rb_root *self, u64 ip) 262 { 263 struct rb_node *n; 264 265 if (self 
== NULL) 266 return NULL; 267 268 n = self->rb_node; 269 270 while (n) { 271 struct symbol *s = rb_entry(n, struct symbol, rb_node); 272 273 if (ip < s->start) 274 n = n->rb_left; 275 else if (ip > s->end) 276 n = n->rb_right; 277 else 278 return s; 279 } 280 281 return NULL; 282 } 283 284 struct symbol_name_rb_node { 285 struct rb_node rb_node; 286 struct symbol sym; 287 }; 288 289 static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym) 290 { 291 struct rb_node **p = &self->rb_node; 292 struct rb_node *parent = NULL; 293 struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s; 294 295 while (*p != NULL) { 296 parent = *p; 297 s = rb_entry(parent, struct symbol_name_rb_node, rb_node); 298 if (strcmp(sym->name, s->sym.name) < 0) 299 p = &(*p)->rb_left; 300 else 301 p = &(*p)->rb_right; 302 } 303 rb_link_node(&symn->rb_node, parent, p); 304 rb_insert_color(&symn->rb_node, self); 305 } 306 307 static void symbols__sort_by_name(struct rb_root *self, struct rb_root *source) 308 { 309 struct rb_node *nd; 310 311 for (nd = rb_first(source); nd; nd = rb_next(nd)) { 312 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 313 symbols__insert_by_name(self, pos); 314 } 315 } 316 317 static struct symbol *symbols__find_by_name(struct rb_root *self, const char *name) 318 { 319 struct rb_node *n; 320 321 if (self == NULL) 322 return NULL; 323 324 n = self->rb_node; 325 326 while (n) { 327 struct symbol_name_rb_node *s; 328 int cmp; 329 330 s = rb_entry(n, struct symbol_name_rb_node, rb_node); 331 cmp = strcmp(name, s->sym.name); 332 333 if (cmp < 0) 334 n = n->rb_left; 335 else if (cmp > 0) 336 n = n->rb_right; 337 else 338 return &s->sym; 339 } 340 341 return NULL; 342 } 343 344 struct symbol *dso__find_symbol(struct dso *self, 345 enum map_type type, u64 addr) 346 { 347 return symbols__find(&self->symbols[type], addr); 348 } 349 350 struct symbol *dso__find_symbol_by_name(struct dso *self, enum map_type type, 351 const char *name) 352 { 353 return symbols__find_by_name(&self->symbol_names[type], name); 354 } 355 356 void dso__sort_by_name(struct dso *self, enum map_type type) 357 { 358 dso__set_sorted_by_name(self, type); 359 return symbols__sort_by_name(&self->symbol_names[type], 360 &self->symbols[type]); 361 } 362 363 int build_id__sprintf(const u8 *self, int len, char *bf) 364 { 365 char *bid = bf; 366 const u8 *raw = self; 367 int i; 368 369 for (i = 0; i < len; ++i) { 370 sprintf(bid, "%02x", *raw); 371 ++raw; 372 bid += 2; 373 } 374 375 return raw - self; 376 } 377 378 size_t dso__fprintf_buildid(struct dso *self, FILE *fp) 379 { 380 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; 381 382 build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id); 383 return fprintf(fp, "%s", sbuild_id); 384 } 385 386 size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp) 387 { 388 struct rb_node *nd; 389 size_t ret = fprintf(fp, "dso: %s (", self->short_name); 390 391 if (self->short_name != self->long_name) 392 ret += fprintf(fp, "%s, ", self->long_name); 393 ret += fprintf(fp, "%s, %sloaded, ", map_type__name[type], 394 self->loaded ? 
"" : "NOT "); 395 ret += dso__fprintf_buildid(self, fp); 396 ret += fprintf(fp, ")\n"); 397 for (nd = rb_first(&self->symbols[type]); nd; nd = rb_next(nd)) { 398 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 399 ret += symbol__fprintf(pos, fp); 400 } 401 402 return ret; 403 } 404 405 int kallsyms__parse(const char *filename, void *arg, 406 int (*process_symbol)(void *arg, const char *name, 407 char type, u64 start)) 408 { 409 char *line = NULL; 410 size_t n; 411 int err = 0; 412 FILE *file = fopen(filename, "r"); 413 414 if (file == NULL) 415 goto out_failure; 416 417 while (!feof(file)) { 418 u64 start; 419 int line_len, len; 420 char symbol_type; 421 char *symbol_name; 422 423 line_len = getline(&line, &n, file); 424 if (line_len < 0 || !line) 425 break; 426 427 line[--line_len] = '\0'; /* \n */ 428 429 len = hex2u64(line, &start); 430 431 len++; 432 if (len + 2 >= line_len) 433 continue; 434 435 symbol_type = toupper(line[len]); 436 symbol_name = line + len + 2; 437 438 err = process_symbol(arg, symbol_name, symbol_type, start); 439 if (err) 440 break; 441 } 442 443 free(line); 444 fclose(file); 445 return err; 446 447 out_failure: 448 return -1; 449 } 450 451 struct process_kallsyms_args { 452 struct map *map; 453 struct dso *dso; 454 }; 455 456 static int map__process_kallsym_symbol(void *arg, const char *name, 457 char type, u64 start) 458 { 459 struct symbol *sym; 460 struct process_kallsyms_args *a = arg; 461 struct rb_root *root = &a->dso->symbols[a->map->type]; 462 463 if (!symbol_type__is_a(type, a->map->type)) 464 return 0; 465 466 /* 467 * Will fix up the end later, when we have all symbols sorted. 468 */ 469 sym = symbol__new(start, 0, name); 470 471 if (sym == NULL) 472 return -ENOMEM; 473 /* 474 * We will pass the symbols to the filter later, in 475 * map__split_kallsyms, when we have split the maps per module 476 */ 477 symbols__insert(root, sym); 478 479 return 0; 480 } 481 482 /* 483 * Loads the function entries in /proc/kallsyms into kernel_map->dso, 484 * so that we can in the next step set the symbol ->end address and then 485 * call kernel_maps__split_kallsyms. 486 */ 487 static int dso__load_all_kallsyms(struct dso *self, const char *filename, 488 struct map *map) 489 { 490 struct process_kallsyms_args args = { .map = map, .dso = self, }; 491 return kallsyms__parse(filename, &args, map__process_kallsym_symbol); 492 } 493 494 /* 495 * Split the symbols into maps, making sure there are no overlaps, i.e. the 496 * kernel range is broken in several maps, named [kernel].N, as we don't have 497 * the original ELF section names vmlinux have. 
 */
static int dso__split_kallsyms(struct dso *self, struct map *map,
			       symbol_filter_t filter)
{
	struct map_groups *kmaps = map__kmap(map)->kmaps;
	struct machine *machine = kmaps->machine;
	struct map *curr_map = map;
	struct symbol *pos;
	int count = 0;
	struct rb_root *root = &self->symbols[map->type];
	struct rb_node *next = rb_first(root);
	int kernel_range = 0;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != map &&
				    self->kernel == DSO_TYPE_GUEST_KERNEL &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module
					 * are contiguous in kallsyms, so
					 * curr_map points to a module and all
					 * its symbols are in its kmap. Mark it
					 * as loaded.
					 */
					dso__set_loaded(curr_map->dso,
							curr_map->type);
				}

				curr_map = map_groups__find_by_name(kmaps,
							map->type, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to map->start.
			 */
			pos->start = curr_map->map_ip(curr_map, pos->start);
			pos->end   = curr_map->map_ip(curr_map, pos->end);
		} else if (curr_map != map) {
			char dso_name[PATH_MAX];
			struct dso *dso;

			if (self->kernel == DSO_TYPE_GUEST_KERNEL)
				snprintf(dso_name, sizeof(dso_name),
					 "[guest.kernel].%d",
					 kernel_range++);
			else
				snprintf(dso_name, sizeof(dso_name),
					 "[kernel].%d",
					 kernel_range++);

			dso = dso__new(dso_name);
			if (dso == NULL)
				return -1;

			dso->kernel = self->kernel;

			curr_map = map__new2(pos->start, dso, map->type);
			if (curr_map == NULL) {
				dso__delete(dso);
				return -1;
			}

			curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
			map_groups__insert(kmaps, curr_map);
			++kernel_range;
		}

		if (filter && filter(curr_map, pos)) {
discard_symbol:		rb_erase(&pos->rb_node, root);
			symbol__delete(pos);
		} else {
			if (curr_map != map) {
				rb_erase(&pos->rb_node, root);
				symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
			}
			count++;
		}
	}

	if (curr_map != map &&
	    self->kernel == DSO_TYPE_GUEST_KERNEL &&
	    machine__is_default_guest(kmaps->machine)) {
		dso__set_loaded(curr_map->dso, curr_map->type);
	}

	return count;
}

int dso__load_kallsyms(struct dso *self, const char *filename,
		       struct map *map, symbol_filter_t filter)
{
	if (dso__load_all_kallsyms(self, filename, map) < 0)
		return -1;

	symbols__fixup_end(&self->symbols[map->type]);
	if (self->kernel == DSO_TYPE_GUEST_KERNEL)
		self->origin = DSO__ORIG_GUEST_KERNEL;
	else
		self->origin = DSO__ORIG_KERNEL;

	return dso__split_kallsyms(self, map, filter);
}

static int dso__load_perf_map(struct dso *self, struct map *map,
			      symbol_filter_t filter)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms =
0; 633 634 file = fopen(self->long_name, "r"); 635 if (file == NULL) 636 goto out_failure; 637 638 while (!feof(file)) { 639 u64 start, size; 640 struct symbol *sym; 641 int line_len, len; 642 643 line_len = getline(&line, &n, file); 644 if (line_len < 0) 645 break; 646 647 if (!line) 648 goto out_failure; 649 650 line[--line_len] = '\0'; /* \n */ 651 652 len = hex2u64(line, &start); 653 654 len++; 655 if (len + 2 >= line_len) 656 continue; 657 658 len += hex2u64(line + len, &size); 659 660 len++; 661 if (len + 2 >= line_len) 662 continue; 663 664 sym = symbol__new(start, size, line + len); 665 666 if (sym == NULL) 667 goto out_delete_line; 668 669 if (filter && filter(map, sym)) 670 symbol__delete(sym); 671 else { 672 symbols__insert(&self->symbols[map->type], sym); 673 nr_syms++; 674 } 675 } 676 677 free(line); 678 fclose(file); 679 680 return nr_syms; 681 682 out_delete_line: 683 free(line); 684 out_failure: 685 return -1; 686 } 687 688 /** 689 * elf_symtab__for_each_symbol - iterate thru all the symbols 690 * 691 * @self: struct elf_symtab instance to iterate 692 * @idx: uint32_t idx 693 * @sym: GElf_Sym iterator 694 */ 695 #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \ 696 for (idx = 0, gelf_getsym(syms, idx, &sym);\ 697 idx < nr_syms; \ 698 idx++, gelf_getsym(syms, idx, &sym)) 699 700 static inline uint8_t elf_sym__type(const GElf_Sym *sym) 701 { 702 return GELF_ST_TYPE(sym->st_info); 703 } 704 705 static inline int elf_sym__is_function(const GElf_Sym *sym) 706 { 707 return elf_sym__type(sym) == STT_FUNC && 708 sym->st_name != 0 && 709 sym->st_shndx != SHN_UNDEF; 710 } 711 712 static inline bool elf_sym__is_object(const GElf_Sym *sym) 713 { 714 return elf_sym__type(sym) == STT_OBJECT && 715 sym->st_name != 0 && 716 sym->st_shndx != SHN_UNDEF; 717 } 718 719 static inline int elf_sym__is_label(const GElf_Sym *sym) 720 { 721 return elf_sym__type(sym) == STT_NOTYPE && 722 sym->st_name != 0 && 723 sym->st_shndx != SHN_UNDEF && 724 sym->st_shndx != SHN_ABS; 725 } 726 727 static inline const char *elf_sec__name(const GElf_Shdr *shdr, 728 const Elf_Data *secstrs) 729 { 730 return secstrs->d_buf + shdr->sh_name; 731 } 732 733 static inline int elf_sec__is_text(const GElf_Shdr *shdr, 734 const Elf_Data *secstrs) 735 { 736 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL; 737 } 738 739 static inline bool elf_sec__is_data(const GElf_Shdr *shdr, 740 const Elf_Data *secstrs) 741 { 742 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL; 743 } 744 745 static inline const char *elf_sym__name(const GElf_Sym *sym, 746 const Elf_Data *symstrs) 747 { 748 return symstrs->d_buf + sym->st_name; 749 } 750 751 static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, 752 GElf_Shdr *shp, const char *name, 753 size_t *idx) 754 { 755 Elf_Scn *sec = NULL; 756 size_t cnt = 1; 757 758 while ((sec = elf_nextscn(elf, sec)) != NULL) { 759 char *str; 760 761 gelf_getshdr(sec, shp); 762 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); 763 if (!strcmp(name, str)) { 764 if (idx) 765 *idx = cnt; 766 break; 767 } 768 ++cnt; 769 } 770 771 return sec; 772 } 773 774 #define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ 775 for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \ 776 idx < nr_entries; \ 777 ++idx, pos = gelf_getrel(reldata, idx, &pos_mem)) 778 779 #define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \ 780 for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \ 781 idx < nr_entries; \ 782 ++idx, pos = gelf_getrela(reldata, 
idx, &pos_mem)) 783 784 /* 785 * We need to check if we have a .dynsym, so that we can handle the 786 * .plt, synthesizing its symbols, that aren't on the symtabs (be it 787 * .dynsym or .symtab). 788 * And always look at the original dso, not at debuginfo packages, that 789 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). 790 */ 791 static int dso__synthesize_plt_symbols(struct dso *self, struct map *map, 792 symbol_filter_t filter) 793 { 794 uint32_t nr_rel_entries, idx; 795 GElf_Sym sym; 796 u64 plt_offset; 797 GElf_Shdr shdr_plt; 798 struct symbol *f; 799 GElf_Shdr shdr_rel_plt, shdr_dynsym; 800 Elf_Data *reldata, *syms, *symstrs; 801 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym; 802 size_t dynsym_idx; 803 GElf_Ehdr ehdr; 804 char sympltname[1024]; 805 Elf *elf; 806 int nr = 0, symidx, fd, err = 0; 807 808 fd = open(self->long_name, O_RDONLY); 809 if (fd < 0) 810 goto out; 811 812 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 813 if (elf == NULL) 814 goto out_close; 815 816 if (gelf_getehdr(elf, &ehdr) == NULL) 817 goto out_elf_end; 818 819 scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym, 820 ".dynsym", &dynsym_idx); 821 if (scn_dynsym == NULL) 822 goto out_elf_end; 823 824 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, 825 ".rela.plt", NULL); 826 if (scn_plt_rel == NULL) { 827 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, 828 ".rel.plt", NULL); 829 if (scn_plt_rel == NULL) 830 goto out_elf_end; 831 } 832 833 err = -1; 834 835 if (shdr_rel_plt.sh_link != dynsym_idx) 836 goto out_elf_end; 837 838 if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL) 839 goto out_elf_end; 840 841 /* 842 * Fetch the relocation section to find the idxes to the GOT 843 * and the symbols in the .dynsym they refer to. 
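	 * Each entry's GELF_R_SYM() indexes a .dynsym symbol; the matching
	 * .plt slot is found by stepping plt_offset one sh_entsize per
	 * entry, starting past the first (reserved) PLT entry.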
844 */ 845 reldata = elf_getdata(scn_plt_rel, NULL); 846 if (reldata == NULL) 847 goto out_elf_end; 848 849 syms = elf_getdata(scn_dynsym, NULL); 850 if (syms == NULL) 851 goto out_elf_end; 852 853 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link); 854 if (scn_symstrs == NULL) 855 goto out_elf_end; 856 857 symstrs = elf_getdata(scn_symstrs, NULL); 858 if (symstrs == NULL) 859 goto out_elf_end; 860 861 nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize; 862 plt_offset = shdr_plt.sh_offset; 863 864 if (shdr_rel_plt.sh_type == SHT_RELA) { 865 GElf_Rela pos_mem, *pos; 866 867 elf_section__for_each_rela(reldata, pos, pos_mem, idx, 868 nr_rel_entries) { 869 symidx = GELF_R_SYM(pos->r_info); 870 plt_offset += shdr_plt.sh_entsize; 871 gelf_getsym(syms, symidx, &sym); 872 snprintf(sympltname, sizeof(sympltname), 873 "%s@plt", elf_sym__name(&sym, symstrs)); 874 875 f = symbol__new(plt_offset, shdr_plt.sh_entsize, 876 sympltname); 877 if (!f) 878 goto out_elf_end; 879 880 if (filter && filter(map, f)) 881 symbol__delete(f); 882 else { 883 symbols__insert(&self->symbols[map->type], f); 884 ++nr; 885 } 886 } 887 } else if (shdr_rel_plt.sh_type == SHT_REL) { 888 GElf_Rel pos_mem, *pos; 889 elf_section__for_each_rel(reldata, pos, pos_mem, idx, 890 nr_rel_entries) { 891 symidx = GELF_R_SYM(pos->r_info); 892 plt_offset += shdr_plt.sh_entsize; 893 gelf_getsym(syms, symidx, &sym); 894 snprintf(sympltname, sizeof(sympltname), 895 "%s@plt", elf_sym__name(&sym, symstrs)); 896 897 f = symbol__new(plt_offset, shdr_plt.sh_entsize, 898 sympltname); 899 if (!f) 900 goto out_elf_end; 901 902 if (filter && filter(map, f)) 903 symbol__delete(f); 904 else { 905 symbols__insert(&self->symbols[map->type], f); 906 ++nr; 907 } 908 } 909 } 910 911 err = 0; 912 out_elf_end: 913 elf_end(elf); 914 out_close: 915 close(fd); 916 917 if (err == 0) 918 return nr; 919 out: 920 pr_debug("%s: problems reading %s PLT info.\n", 921 __func__, self->long_name); 922 return 0; 923 } 924 925 static bool elf_sym__is_a(GElf_Sym *self, enum map_type type) 926 { 927 switch (type) { 928 case MAP__FUNCTION: 929 return elf_sym__is_function(self); 930 case MAP__VARIABLE: 931 return elf_sym__is_object(self); 932 default: 933 return false; 934 } 935 } 936 937 static bool elf_sec__is_a(GElf_Shdr *self, Elf_Data *secstrs, enum map_type type) 938 { 939 switch (type) { 940 case MAP__FUNCTION: 941 return elf_sec__is_text(self, secstrs); 942 case MAP__VARIABLE: 943 return elf_sec__is_data(self, secstrs); 944 default: 945 return false; 946 } 947 } 948 949 static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) 950 { 951 Elf_Scn *sec = NULL; 952 GElf_Shdr shdr; 953 size_t cnt = 1; 954 955 while ((sec = elf_nextscn(elf, sec)) != NULL) { 956 gelf_getshdr(sec, &shdr); 957 958 if ((addr >= shdr.sh_addr) && 959 (addr < (shdr.sh_addr + shdr.sh_size))) 960 return cnt; 961 962 ++cnt; 963 } 964 965 return -1; 966 } 967 968 static int dso__load_sym(struct dso *self, struct map *map, const char *name, 969 int fd, symbol_filter_t filter, int kmodule, 970 int want_symtab) 971 { 972 struct kmap *kmap = self->kernel ? 
map__kmap(map) : NULL; 973 struct map *curr_map = map; 974 struct dso *curr_dso = self; 975 Elf_Data *symstrs, *secstrs; 976 uint32_t nr_syms; 977 int err = -1; 978 uint32_t idx; 979 GElf_Ehdr ehdr; 980 GElf_Shdr shdr, opdshdr; 981 Elf_Data *syms, *opddata = NULL; 982 GElf_Sym sym; 983 Elf_Scn *sec, *sec_strndx, *opdsec; 984 Elf *elf; 985 int nr = 0; 986 size_t opdidx = 0; 987 988 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 989 if (elf == NULL) { 990 pr_debug("%s: cannot read %s ELF file.\n", __func__, name); 991 goto out_close; 992 } 993 994 if (gelf_getehdr(elf, &ehdr) == NULL) { 995 pr_debug("%s: cannot get elf header.\n", __func__); 996 goto out_elf_end; 997 } 998 999 /* Always reject images with a mismatched build-id: */ 1000 if (self->has_build_id) { 1001 u8 build_id[BUILD_ID_SIZE]; 1002 1003 if (elf_read_build_id(elf, build_id, 1004 BUILD_ID_SIZE) != BUILD_ID_SIZE) 1005 goto out_elf_end; 1006 1007 if (!dso__build_id_equal(self, build_id)) 1008 goto out_elf_end; 1009 } 1010 1011 sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL); 1012 if (sec == NULL) { 1013 if (want_symtab) 1014 goto out_elf_end; 1015 1016 sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL); 1017 if (sec == NULL) 1018 goto out_elf_end; 1019 } 1020 1021 opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx); 1022 if (opdsec) 1023 opddata = elf_rawdata(opdsec, NULL); 1024 1025 syms = elf_getdata(sec, NULL); 1026 if (syms == NULL) 1027 goto out_elf_end; 1028 1029 sec = elf_getscn(elf, shdr.sh_link); 1030 if (sec == NULL) 1031 goto out_elf_end; 1032 1033 symstrs = elf_getdata(sec, NULL); 1034 if (symstrs == NULL) 1035 goto out_elf_end; 1036 1037 sec_strndx = elf_getscn(elf, ehdr.e_shstrndx); 1038 if (sec_strndx == NULL) 1039 goto out_elf_end; 1040 1041 secstrs = elf_getdata(sec_strndx, NULL); 1042 if (secstrs == NULL) 1043 goto out_elf_end; 1044 1045 nr_syms = shdr.sh_size / shdr.sh_entsize; 1046 1047 memset(&sym, 0, sizeof(sym)); 1048 if (self->kernel == DSO_TYPE_USER) { 1049 self->adjust_symbols = (ehdr.e_type == ET_EXEC || 1050 elf_section_by_name(elf, &ehdr, &shdr, 1051 ".gnu.prelink_undo", 1052 NULL) != NULL); 1053 } else self->adjust_symbols = 0; 1054 1055 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { 1056 struct symbol *f; 1057 const char *elf_name = elf_sym__name(&sym, symstrs); 1058 char *demangled = NULL; 1059 int is_label = elf_sym__is_label(&sym); 1060 const char *section_name; 1061 1062 if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && 1063 strcmp(elf_name, kmap->ref_reloc_sym->name) == 0) 1064 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; 1065 1066 if (!is_label && !elf_sym__is_a(&sym, map->type)) 1067 continue; 1068 1069 if (opdsec && sym.st_shndx == opdidx) { 1070 u32 offset = sym.st_value - opdshdr.sh_addr; 1071 u64 *opd = opddata->d_buf + offset; 1072 sym.st_value = *opd; 1073 sym.st_shndx = elf_addr_to_index(elf, sym.st_value); 1074 } 1075 1076 sec = elf_getscn(elf, sym.st_shndx); 1077 if (!sec) 1078 goto out_elf_end; 1079 1080 gelf_getshdr(sec, &shdr); 1081 1082 if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type)) 1083 continue; 1084 1085 section_name = elf_sec__name(&shdr, secstrs); 1086 1087 if (self->kernel != DSO_TYPE_USER || kmodule) { 1088 char dso_name[PATH_MAX]; 1089 1090 if (strcmp(section_name, 1091 (curr_dso->short_name + 1092 self->short_name_len)) == 0) 1093 goto new_symbol; 1094 1095 if (strcmp(section_name, ".text") == 0) { 1096 curr_map = map; 1097 curr_dso = self; 1098 goto new_symbol; 1099 } 1100 1101 
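			/*
			 * Remaining kernel/module symbols (those outside the
			 * main .text section) get grouped into a per-section
			 * DSO named "<short_name><section_name>", created on
			 * demand just below.
			 */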
snprintf(dso_name, sizeof(dso_name), 1102 "%s%s", self->short_name, section_name); 1103 1104 curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); 1105 if (curr_map == NULL) { 1106 u64 start = sym.st_value; 1107 1108 if (kmodule) 1109 start += map->start + shdr.sh_offset; 1110 1111 curr_dso = dso__new(dso_name); 1112 if (curr_dso == NULL) 1113 goto out_elf_end; 1114 curr_dso->kernel = self->kernel; 1115 curr_map = map__new2(start, curr_dso, 1116 map->type); 1117 if (curr_map == NULL) { 1118 dso__delete(curr_dso); 1119 goto out_elf_end; 1120 } 1121 curr_map->map_ip = identity__map_ip; 1122 curr_map->unmap_ip = identity__map_ip; 1123 curr_dso->origin = self->origin; 1124 map_groups__insert(kmap->kmaps, curr_map); 1125 dsos__add(&self->node, curr_dso); 1126 dso__set_loaded(curr_dso, map->type); 1127 } else 1128 curr_dso = curr_map->dso; 1129 1130 goto new_symbol; 1131 } 1132 1133 if (curr_dso->adjust_symbols) { 1134 pr_debug4("%s: adjusting symbol: st_value: %#Lx " 1135 "sh_addr: %#Lx sh_offset: %#Lx\n", __func__, 1136 (u64)sym.st_value, (u64)shdr.sh_addr, 1137 (u64)shdr.sh_offset); 1138 sym.st_value -= shdr.sh_addr - shdr.sh_offset; 1139 } 1140 /* 1141 * We need to figure out if the object was created from C++ sources 1142 * DWARF DW_compile_unit has this, but we don't always have access 1143 * to it... 1144 */ 1145 demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); 1146 if (demangled != NULL) 1147 elf_name = demangled; 1148 new_symbol: 1149 f = symbol__new(sym.st_value, sym.st_size, elf_name); 1150 free(demangled); 1151 if (!f) 1152 goto out_elf_end; 1153 1154 if (filter && filter(curr_map, f)) 1155 symbol__delete(f); 1156 else { 1157 symbols__insert(&curr_dso->symbols[curr_map->type], f); 1158 nr++; 1159 } 1160 } 1161 1162 /* 1163 * For misannotated, zeroed, ASM function sizes. 1164 */ 1165 if (nr > 0) { 1166 symbols__fixup_end(&self->symbols[map->type]); 1167 if (kmap) { 1168 /* 1169 * We need to fixup this here too because we create new 1170 * maps here, for things like vsyscall sections. 1171 */ 1172 __map_groups__fixup_end(kmap->kmaps, map->type); 1173 } 1174 } 1175 err = nr; 1176 out_elf_end: 1177 elf_end(elf); 1178 out_close: 1179 return err; 1180 } 1181 1182 static bool dso__build_id_equal(const struct dso *self, u8 *build_id) 1183 { 1184 return memcmp(self->build_id, build_id, sizeof(self->build_id)) == 0; 1185 } 1186 1187 bool __dsos__read_build_ids(struct list_head *head, bool with_hits) 1188 { 1189 bool have_build_id = false; 1190 struct dso *pos; 1191 1192 list_for_each_entry(pos, head, node) { 1193 if (with_hits && !pos->hit) 1194 continue; 1195 if (pos->has_build_id) { 1196 have_build_id = true; 1197 continue; 1198 } 1199 if (filename__read_build_id(pos->long_name, pos->build_id, 1200 sizeof(pos->build_id)) > 0) { 1201 have_build_id = true; 1202 pos->has_build_id = true; 1203 } 1204 } 1205 1206 return have_build_id; 1207 } 1208 1209 /* 1210 * Align offset to 4 bytes as needed for note name and descriptor data. 
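 * e.g. NOTE_ALIGN(sizeof("GNU")) == 4 and NOTE_ALIGN(5) == 8.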
1211 */ 1212 #define NOTE_ALIGN(n) (((n) + 3) & -4U) 1213 1214 static int elf_read_build_id(Elf *elf, void *bf, size_t size) 1215 { 1216 int err = -1; 1217 GElf_Ehdr ehdr; 1218 GElf_Shdr shdr; 1219 Elf_Data *data; 1220 Elf_Scn *sec; 1221 Elf_Kind ek; 1222 void *ptr; 1223 1224 if (size < BUILD_ID_SIZE) 1225 goto out; 1226 1227 ek = elf_kind(elf); 1228 if (ek != ELF_K_ELF) 1229 goto out; 1230 1231 if (gelf_getehdr(elf, &ehdr) == NULL) { 1232 pr_err("%s: cannot get elf header.\n", __func__); 1233 goto out; 1234 } 1235 1236 sec = elf_section_by_name(elf, &ehdr, &shdr, 1237 ".note.gnu.build-id", NULL); 1238 if (sec == NULL) { 1239 sec = elf_section_by_name(elf, &ehdr, &shdr, 1240 ".notes", NULL); 1241 if (sec == NULL) 1242 goto out; 1243 } 1244 1245 data = elf_getdata(sec, NULL); 1246 if (data == NULL) 1247 goto out; 1248 1249 ptr = data->d_buf; 1250 while (ptr < (data->d_buf + data->d_size)) { 1251 GElf_Nhdr *nhdr = ptr; 1252 int namesz = NOTE_ALIGN(nhdr->n_namesz), 1253 descsz = NOTE_ALIGN(nhdr->n_descsz); 1254 const char *name; 1255 1256 ptr += sizeof(*nhdr); 1257 name = ptr; 1258 ptr += namesz; 1259 if (nhdr->n_type == NT_GNU_BUILD_ID && 1260 nhdr->n_namesz == sizeof("GNU")) { 1261 if (memcmp(name, "GNU", sizeof("GNU")) == 0) { 1262 memcpy(bf, ptr, BUILD_ID_SIZE); 1263 err = BUILD_ID_SIZE; 1264 break; 1265 } 1266 } 1267 ptr += descsz; 1268 } 1269 1270 out: 1271 return err; 1272 } 1273 1274 int filename__read_build_id(const char *filename, void *bf, size_t size) 1275 { 1276 int fd, err = -1; 1277 Elf *elf; 1278 1279 if (size < BUILD_ID_SIZE) 1280 goto out; 1281 1282 fd = open(filename, O_RDONLY); 1283 if (fd < 0) 1284 goto out; 1285 1286 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 1287 if (elf == NULL) { 1288 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); 1289 goto out_close; 1290 } 1291 1292 err = elf_read_build_id(elf, bf, size); 1293 1294 elf_end(elf); 1295 out_close: 1296 close(fd); 1297 out: 1298 return err; 1299 } 1300 1301 int sysfs__read_build_id(const char *filename, void *build_id, size_t size) 1302 { 1303 int fd, err = -1; 1304 1305 if (size < BUILD_ID_SIZE) 1306 goto out; 1307 1308 fd = open(filename, O_RDONLY); 1309 if (fd < 0) 1310 goto out; 1311 1312 while (1) { 1313 char bf[BUFSIZ]; 1314 GElf_Nhdr nhdr; 1315 int namesz, descsz; 1316 1317 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) 1318 break; 1319 1320 namesz = NOTE_ALIGN(nhdr.n_namesz); 1321 descsz = NOTE_ALIGN(nhdr.n_descsz); 1322 if (nhdr.n_type == NT_GNU_BUILD_ID && 1323 nhdr.n_namesz == sizeof("GNU")) { 1324 if (read(fd, bf, namesz) != namesz) 1325 break; 1326 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { 1327 if (read(fd, build_id, 1328 BUILD_ID_SIZE) == BUILD_ID_SIZE) { 1329 err = 0; 1330 break; 1331 } 1332 } else if (read(fd, bf, descsz) != descsz) 1333 break; 1334 } else { 1335 int n = namesz + descsz; 1336 if (read(fd, bf, n) != n) 1337 break; 1338 } 1339 } 1340 close(fd); 1341 out: 1342 return err; 1343 } 1344 1345 char dso__symtab_origin(const struct dso *self) 1346 { 1347 static const char origin[] = { 1348 [DSO__ORIG_KERNEL] = 'k', 1349 [DSO__ORIG_JAVA_JIT] = 'j', 1350 [DSO__ORIG_BUILD_ID_CACHE] = 'B', 1351 [DSO__ORIG_FEDORA] = 'f', 1352 [DSO__ORIG_UBUNTU] = 'u', 1353 [DSO__ORIG_BUILDID] = 'b', 1354 [DSO__ORIG_DSO] = 'd', 1355 [DSO__ORIG_KMODULE] = 'K', 1356 [DSO__ORIG_GUEST_KERNEL] = 'g', 1357 [DSO__ORIG_GUEST_KMODULE] = 'G', 1358 }; 1359 1360 if (self == NULL || self->origin == DSO__ORIG_NOT_FOUND) 1361 return '!'; 1362 return origin[self->origin]; 1363 } 1364 1365 int 
dso__load(struct dso *self, struct map *map, symbol_filter_t filter)
{
	int size = PATH_MAX;
	char *name;
	int ret = -1;
	int fd;
	struct machine *machine;
	const char *root_dir;
	int want_symtab;

	dso__set_loaded(self, map->type);

	if (self->kernel == DSO_TYPE_KERNEL)
		return dso__load_kernel_sym(self, map, filter);
	else if (self->kernel == DSO_TYPE_GUEST_KERNEL)
		return dso__load_guest_kernel_sym(self, map, filter);

	if (map->groups && map->groups->machine)
		machine = map->groups->machine;
	else
		machine = NULL;

	name = malloc(size);
	if (!name)
		return -1;

	self->adjust_symbols = 0;

	if (strncmp(self->name, "/tmp/perf-", 10) == 0) {
		ret = dso__load_perf_map(self, map, filter);
		self->origin = ret > 0 ? DSO__ORIG_JAVA_JIT :
					 DSO__ORIG_NOT_FOUND;
		free(name);
		return ret;
	}

	/* Iterate over candidate debug images.
	 * On the first pass, only load images if they have a full symtab.
	 * Failing that, do a second pass where we accept .dynsym also.
	 */
	for (self->origin = DSO__ORIG_BUILD_ID_CACHE, want_symtab = 1;
	     self->origin != DSO__ORIG_NOT_FOUND;
	     self->origin++) {
		switch (self->origin) {
		case DSO__ORIG_BUILD_ID_CACHE:
			if (dso__build_id_filename(self, name, size) == NULL)
				continue;
			break;
		case DSO__ORIG_FEDORA:
			snprintf(name, size, "/usr/lib/debug%s.debug",
				 self->long_name);
			break;
		case DSO__ORIG_UBUNTU:
			snprintf(name, size, "/usr/lib/debug%s",
				 self->long_name);
			break;
		case DSO__ORIG_BUILDID: {
			char build_id_hex[BUILD_ID_SIZE * 2 + 1];

			if (!self->has_build_id)
				continue;

			build_id__sprintf(self->build_id,
					  sizeof(self->build_id),
					  build_id_hex);
			snprintf(name, size,
				 "/usr/lib/debug/.build-id/%.2s/%s.debug",
				 build_id_hex, build_id_hex + 2);
			}
			break;
		case DSO__ORIG_DSO:
			snprintf(name, size, "%s", self->long_name);
			break;
		case DSO__ORIG_GUEST_KMODULE:
			if (map->groups && map->groups->machine)
				root_dir = map->groups->machine->root_dir;
			else
				root_dir = "";
			snprintf(name, size, "%s%s", root_dir, self->long_name);
			break;

		default:
			/*
			 * If we wanted a full symtab but no image had one,
			 * relax our requirements and repeat the search.
			 */
			if (want_symtab) {
				want_symtab = 0;
				self->origin = DSO__ORIG_BUILD_ID_CACHE;
			} else
				continue;
		}

		/* Name is now the name of the next image to try */
		fd = open(name, O_RDONLY);
		if (fd < 0)
			continue;

		ret = dso__load_sym(self, map, name, fd, filter, 0,
				    want_symtab);
		close(fd);

		/*
		 * Some people seem to have debuginfo files _WITHOUT_ debug
		 * info!?!?
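		 * (a return of 0 means dso__load_sym found no symbols there,
		 * so we keep trying the remaining candidates)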
1469 */ 1470 if (!ret) 1471 continue; 1472 1473 if (ret > 0) { 1474 int nr_plt = dso__synthesize_plt_symbols(self, map, filter); 1475 if (nr_plt > 0) 1476 ret += nr_plt; 1477 break; 1478 } 1479 } 1480 1481 free(name); 1482 if (ret < 0 && strstr(self->name, " (deleted)") != NULL) 1483 return 0; 1484 return ret; 1485 } 1486 1487 struct map *map_groups__find_by_name(struct map_groups *self, 1488 enum map_type type, const char *name) 1489 { 1490 struct rb_node *nd; 1491 1492 for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { 1493 struct map *map = rb_entry(nd, struct map, rb_node); 1494 1495 if (map->dso && strcmp(map->dso->short_name, name) == 0) 1496 return map; 1497 } 1498 1499 return NULL; 1500 } 1501 1502 static int dso__kernel_module_get_build_id(struct dso *self, 1503 const char *root_dir) 1504 { 1505 char filename[PATH_MAX]; 1506 /* 1507 * kernel module short names are of the form "[module]" and 1508 * we need just "module" here. 1509 */ 1510 const char *name = self->short_name + 1; 1511 1512 snprintf(filename, sizeof(filename), 1513 "%s/sys/module/%.*s/notes/.note.gnu.build-id", 1514 root_dir, (int)strlen(name) - 1, name); 1515 1516 if (sysfs__read_build_id(filename, self->build_id, 1517 sizeof(self->build_id)) == 0) 1518 self->has_build_id = true; 1519 1520 return 0; 1521 } 1522 1523 static int map_groups__set_modules_path_dir(struct map_groups *self, 1524 const char *dir_name) 1525 { 1526 struct dirent *dent; 1527 DIR *dir = opendir(dir_name); 1528 int ret = 0; 1529 1530 if (!dir) { 1531 pr_debug("%s: cannot open %s dir\n", __func__, dir_name); 1532 return -1; 1533 } 1534 1535 while ((dent = readdir(dir)) != NULL) { 1536 char path[PATH_MAX]; 1537 struct stat st; 1538 1539 /*sshfs might return bad dent->d_type, so we have to stat*/ 1540 sprintf(path, "%s/%s", dir_name, dent->d_name); 1541 if (stat(path, &st)) 1542 continue; 1543 1544 if (S_ISDIR(st.st_mode)) { 1545 if (!strcmp(dent->d_name, ".") || 1546 !strcmp(dent->d_name, "..")) 1547 continue; 1548 1549 snprintf(path, sizeof(path), "%s/%s", 1550 dir_name, dent->d_name); 1551 ret = map_groups__set_modules_path_dir(self, path); 1552 if (ret < 0) 1553 goto out; 1554 } else { 1555 char *dot = strrchr(dent->d_name, '.'), 1556 dso_name[PATH_MAX]; 1557 struct map *map; 1558 char *long_name; 1559 1560 if (dot == NULL || strcmp(dot, ".ko")) 1561 continue; 1562 snprintf(dso_name, sizeof(dso_name), "[%.*s]", 1563 (int)(dot - dent->d_name), dent->d_name); 1564 1565 strxfrchar(dso_name, '-', '_'); 1566 map = map_groups__find_by_name(self, MAP__FUNCTION, dso_name); 1567 if (map == NULL) 1568 continue; 1569 1570 snprintf(path, sizeof(path), "%s/%s", 1571 dir_name, dent->d_name); 1572 1573 long_name = strdup(path); 1574 if (long_name == NULL) { 1575 ret = -1; 1576 goto out; 1577 } 1578 dso__set_long_name(map->dso, long_name); 1579 map->dso->lname_alloc = 1; 1580 dso__kernel_module_get_build_id(map->dso, ""); 1581 } 1582 } 1583 1584 out: 1585 closedir(dir); 1586 return ret; 1587 } 1588 1589 static char *get_kernel_version(const char *root_dir) 1590 { 1591 char version[PATH_MAX]; 1592 FILE *file; 1593 char *name, *tmp; 1594 const char *prefix = "Linux version "; 1595 1596 sprintf(version, "%s/proc/version", root_dir); 1597 file = fopen(version, "r"); 1598 if (!file) 1599 return NULL; 1600 1601 version[0] = '\0'; 1602 tmp = fgets(version, sizeof(version), file); 1603 fclose(file); 1604 1605 name = strstr(version, prefix); 1606 if (!name) 1607 return NULL; 1608 name += strlen(prefix); 1609 tmp = strchr(name, ' '); 1610 if (tmp) 1611 *tmp 
= '\0'; 1612 1613 return strdup(name); 1614 } 1615 1616 static int machine__set_modules_path(struct machine *self) 1617 { 1618 char *version; 1619 char modules_path[PATH_MAX]; 1620 1621 version = get_kernel_version(self->root_dir); 1622 if (!version) 1623 return -1; 1624 1625 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel", 1626 self->root_dir, version); 1627 free(version); 1628 1629 return map_groups__set_modules_path_dir(&self->kmaps, modules_path); 1630 } 1631 1632 /* 1633 * Constructor variant for modules (where we know from /proc/modules where 1634 * they are loaded) and for vmlinux, where only after we load all the 1635 * symbols we'll know where it starts and ends. 1636 */ 1637 static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) 1638 { 1639 struct map *self = calloc(1, (sizeof(*self) + 1640 (dso->kernel ? sizeof(struct kmap) : 0))); 1641 if (self != NULL) { 1642 /* 1643 * ->end will be filled after we load all the symbols 1644 */ 1645 map__init(self, type, start, 0, 0, dso); 1646 } 1647 1648 return self; 1649 } 1650 1651 struct map *machine__new_module(struct machine *self, u64 start, 1652 const char *filename) 1653 { 1654 struct map *map; 1655 struct dso *dso = __dsos__findnew(&self->kernel_dsos, filename); 1656 1657 if (dso == NULL) 1658 return NULL; 1659 1660 map = map__new2(start, dso, MAP__FUNCTION); 1661 if (map == NULL) 1662 return NULL; 1663 1664 if (machine__is_host(self)) 1665 dso->origin = DSO__ORIG_KMODULE; 1666 else 1667 dso->origin = DSO__ORIG_GUEST_KMODULE; 1668 map_groups__insert(&self->kmaps, map); 1669 return map; 1670 } 1671 1672 static int machine__create_modules(struct machine *self) 1673 { 1674 char *line = NULL; 1675 size_t n; 1676 FILE *file; 1677 struct map *map; 1678 const char *modules; 1679 char path[PATH_MAX]; 1680 1681 if (machine__is_default_guest(self)) 1682 modules = symbol_conf.default_guest_modules; 1683 else { 1684 sprintf(path, "%s/proc/modules", self->root_dir); 1685 modules = path; 1686 } 1687 1688 file = fopen(modules, "r"); 1689 if (file == NULL) 1690 return -1; 1691 1692 while (!feof(file)) { 1693 char name[PATH_MAX]; 1694 u64 start; 1695 char *sep; 1696 int line_len; 1697 1698 line_len = getline(&line, &n, file); 1699 if (line_len < 0) 1700 break; 1701 1702 if (!line) 1703 goto out_failure; 1704 1705 line[--line_len] = '\0'; /* \n */ 1706 1707 sep = strrchr(line, 'x'); 1708 if (sep == NULL) 1709 continue; 1710 1711 hex2u64(sep + 1, &start); 1712 1713 sep = strchr(line, ' '); 1714 if (sep == NULL) 1715 continue; 1716 1717 *sep = '\0'; 1718 1719 snprintf(name, sizeof(name), "[%s]", line); 1720 map = machine__new_module(self, start, name); 1721 if (map == NULL) 1722 goto out_delete_line; 1723 dso__kernel_module_get_build_id(map->dso, self->root_dir); 1724 } 1725 1726 free(line); 1727 fclose(file); 1728 1729 return machine__set_modules_path(self); 1730 1731 out_delete_line: 1732 free(line); 1733 out_failure: 1734 return -1; 1735 } 1736 1737 static int dso__load_vmlinux(struct dso *self, struct map *map, 1738 const char *vmlinux, symbol_filter_t filter) 1739 { 1740 int err = -1, fd; 1741 1742 fd = open(vmlinux, O_RDONLY); 1743 if (fd < 0) 1744 return -1; 1745 1746 dso__set_loaded(self, map->type); 1747 err = dso__load_sym(self, map, vmlinux, fd, filter, 0, 0); 1748 close(fd); 1749 1750 if (err > 0) 1751 pr_debug("Using %s for symbols\n", vmlinux); 1752 1753 return err; 1754 } 1755 1756 int dso__load_vmlinux_path(struct dso *self, struct map *map, 1757 symbol_filter_t filter) 1758 { 1759 int i, err 
	       = 0;
	char *filename;

	pr_debug("Looking at the vmlinux_path (%d entries long)\n",
		 vmlinux_path__nr_entries + 1);

	filename = dso__build_id_filename(self, NULL, 0);
	if (filename != NULL) {
		err = dso__load_vmlinux(self, map, filename, filter);
		if (err > 0) {
			dso__set_long_name(self, filename);
			goto out;
		}
		free(filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		err = dso__load_vmlinux(self, map, vmlinux_path[i], filter);
		if (err > 0) {
			dso__set_long_name(self, strdup(vmlinux_path[i]));
			break;
		}
	}
out:
	return err;
}

static int dso__load_kernel_sym(struct dso *self, struct map *map,
				symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	char *kallsyms_allocated_filename = NULL;
	/*
	 * Step 1: if the user specified a vmlinux filename, use it and only
	 * it, reporting errors to the user if it cannot be used.
	 *
	 * For instance, try to analyse an ARM perf.data file _without_ a
	 * build-id, or if the user specifies the wrong path to the right
	 * vmlinux file, obviously we can't fall back to another vmlinux (an
	 * x86_64 one, on the machine where the analysis is being performed,
	 * say), or worse, /proc/kallsyms.
	 *
	 * If the specified file _has_ a build-id and there is a build-id
	 * section in the perf.data file, we will still do the expected
	 * validation in dso__load_vmlinux and will bail out if they don't
	 * match.
	 */
	if (symbol_conf.vmlinux_name != NULL) {
		err = dso__load_vmlinux(self, map,
					symbol_conf.vmlinux_name, filter);
		if (err > 0) {
			dso__set_long_name(self,
					   strdup(symbol_conf.vmlinux_name));
			goto out_fixup;
		}
		return err;
	}

	if (vmlinux_path != NULL) {
		err = dso__load_vmlinux_path(self, map, filter);
		if (err > 0)
			goto out_fixup;
	}

	/*
	 * Say the kernel DSO was created when processing the build-id header
	 * table: we then have a build-id, so check whether it matches the
	 * running kernel's, and use /proc/kallsyms if it does.
	 */
	if (self->has_build_id) {
		u8 kallsyms_build_id[BUILD_ID_SIZE];
		char sbuild_id[BUILD_ID_SIZE * 2 + 1];

		if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id,
					 sizeof(kallsyms_build_id)) == 0) {
			if (dso__build_id_equal(self, kallsyms_build_id)) {
				kallsyms_filename = "/proc/kallsyms";
				goto do_kallsyms;
			}
		}
		/*
		 * Now look if we have it on the build-id cache in
		 * $HOME/.debug/[kernel.kallsyms].
		 */
		build_id__sprintf(self->build_id, sizeof(self->build_id),
				  sbuild_id);

		if (asprintf(&kallsyms_allocated_filename,
			     "%s/.debug/[kernel.kallsyms]/%s",
			     getenv("HOME"), sbuild_id) == -1) {
			pr_err("Not enough memory for kallsyms file lookup\n");
			return -1;
		}

		kallsyms_filename = kallsyms_allocated_filename;

		if (access(kallsyms_filename, F_OK)) {
			pr_err("No kallsyms or vmlinux with build-id %s "
			       "was found\n", sbuild_id);
			free(kallsyms_allocated_filename);
			return -1;
		}
	} else {
		/*
		 * Last resort, if we don't have a build-id and couldn't find
		 * any vmlinux file, try the running kernel kallsyms table.
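		 * To summarize the fallback chain implemented in this
		 * function: an explicit vmlinux_name, then the build-id
		 * cache and vmlinux_path candidates, then a build-id
		 * validated kallsyms copy, and finally this unvalidated one.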
		 */
		kallsyms_filename = "/proc/kallsyms";
	}

do_kallsyms:
	err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	free(kallsyms_allocated_filename);

	if (err > 0) {
out_fixup:
		if (kallsyms_filename != NULL)
			dso__set_long_name(self, strdup("[kernel.kallsyms]"));
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static int dso__load_guest_kernel_sym(struct dso *self, struct map *map,
				      symbol_filter_t filter)
{
	int err;
	const char *kallsyms_filename = NULL;
	struct machine *machine;
	char path[PATH_MAX];

	if (!map->groups) {
		pr_debug("Guest kernel map has no groups pointer\n");
		return -1;
	}
	machine = map->groups->machine;

	if (machine__is_default_guest(machine)) {
		/*
		 * If the user specified a vmlinux filename, use it and only
		 * it, reporting errors to the user if it cannot be used.
		 * Or use the guest kallsyms file the user passed on the
		 * command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(self, map,
				symbol_conf.default_guest_vmlinux_name, filter);
			goto out_try_fixup;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(self, kallsyms_filename, map, filter);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);

out_try_fixup:
	if (err > 0) {
		if (kallsyms_filename != NULL) {
			machine__mmap_name(machine, path, sizeof(path));
			dso__set_long_name(self, strdup(path));
		}
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void dsos__add(struct list_head *head, struct dso *dso)
{
	list_add_tail(&dso->node, head);
}

static struct dso *dsos__find(struct list_head *head, const char *name)
{
	struct dso *pos;

	list_for_each_entry(pos, head, node)
		if (strcmp(pos->long_name, name) == 0)
			return pos;
	return NULL;
}

struct dso *__dsos__findnew(struct list_head *head, const char *name)
{
	struct dso *dso = dsos__find(head, name);

	if (!dso) {
		dso = dso__new(name);
		if (dso != NULL) {
			dsos__add(head, dso);
			dso__set_basename(dso);
		}
	}

	return dso;
}

size_t __dsos__fprintf(struct list_head *head, FILE *fp)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		int i;
		for (i = 0; i < MAP__NR_TYPES; ++i)
			ret += dso__fprintf(pos, i, fp);
	}

	return ret;
}

size_t machines__fprintf_dsos(struct rb_root *self, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = 0;

	for (nd = rb_first(self); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos, fp);
		ret += __dsos__fprintf(&pos->user_dsos, fp);
	}

	return ret;
}

static size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
				      bool with_hits)
{
	struct dso *pos;
	size_t ret = 0;

	list_for_each_entry(pos, head, node) {
		if
(with_hits && !pos->hit) 2004 continue; 2005 ret += dso__fprintf_buildid(pos, fp); 2006 ret += fprintf(fp, " %s\n", pos->long_name); 2007 } 2008 return ret; 2009 } 2010 2011 size_t machine__fprintf_dsos_buildid(struct machine *self, FILE *fp, bool with_hits) 2012 { 2013 return __dsos__fprintf_buildid(&self->kernel_dsos, fp, with_hits) + 2014 __dsos__fprintf_buildid(&self->user_dsos, fp, with_hits); 2015 } 2016 2017 size_t machines__fprintf_dsos_buildid(struct rb_root *self, FILE *fp, bool with_hits) 2018 { 2019 struct rb_node *nd; 2020 size_t ret = 0; 2021 2022 for (nd = rb_first(self); nd; nd = rb_next(nd)) { 2023 struct machine *pos = rb_entry(nd, struct machine, rb_node); 2024 ret += machine__fprintf_dsos_buildid(pos, fp, with_hits); 2025 } 2026 return ret; 2027 } 2028 2029 struct dso *dso__new_kernel(const char *name) 2030 { 2031 struct dso *self = dso__new(name ?: "[kernel.kallsyms]"); 2032 2033 if (self != NULL) { 2034 dso__set_short_name(self, "[kernel]"); 2035 self->kernel = DSO_TYPE_KERNEL; 2036 } 2037 2038 return self; 2039 } 2040 2041 static struct dso *dso__new_guest_kernel(struct machine *machine, 2042 const char *name) 2043 { 2044 char bf[PATH_MAX]; 2045 struct dso *self = dso__new(name ?: machine__mmap_name(machine, bf, sizeof(bf))); 2046 2047 if (self != NULL) { 2048 dso__set_short_name(self, "[guest.kernel]"); 2049 self->kernel = DSO_TYPE_GUEST_KERNEL; 2050 } 2051 2052 return self; 2053 } 2054 2055 void dso__read_running_kernel_build_id(struct dso *self, struct machine *machine) 2056 { 2057 char path[PATH_MAX]; 2058 2059 if (machine__is_default_guest(machine)) 2060 return; 2061 sprintf(path, "%s/sys/kernel/notes", machine->root_dir); 2062 if (sysfs__read_build_id(path, self->build_id, 2063 sizeof(self->build_id)) == 0) 2064 self->has_build_id = true; 2065 } 2066 2067 static struct dso *machine__create_kernel(struct machine *self) 2068 { 2069 const char *vmlinux_name = NULL; 2070 struct dso *kernel; 2071 2072 if (machine__is_host(self)) { 2073 vmlinux_name = symbol_conf.vmlinux_name; 2074 kernel = dso__new_kernel(vmlinux_name); 2075 } else { 2076 if (machine__is_default_guest(self)) 2077 vmlinux_name = symbol_conf.default_guest_vmlinux_name; 2078 kernel = dso__new_guest_kernel(self, vmlinux_name); 2079 } 2080 2081 if (kernel != NULL) { 2082 dso__read_running_kernel_build_id(kernel, self); 2083 dsos__add(&self->kernel_dsos, kernel); 2084 } 2085 return kernel; 2086 } 2087 2088 int __machine__create_kernel_maps(struct machine *self, struct dso *kernel) 2089 { 2090 enum map_type type; 2091 2092 for (type = 0; type < MAP__NR_TYPES; ++type) { 2093 struct kmap *kmap; 2094 2095 self->vmlinux_maps[type] = map__new2(0, kernel, type); 2096 if (self->vmlinux_maps[type] == NULL) 2097 return -1; 2098 2099 self->vmlinux_maps[type]->map_ip = 2100 self->vmlinux_maps[type]->unmap_ip = identity__map_ip; 2101 2102 kmap = map__kmap(self->vmlinux_maps[type]); 2103 kmap->kmaps = &self->kmaps; 2104 map_groups__insert(&self->kmaps, self->vmlinux_maps[type]); 2105 } 2106 2107 return 0; 2108 } 2109 2110 void machine__destroy_kernel_maps(struct machine *self) 2111 { 2112 enum map_type type; 2113 2114 for (type = 0; type < MAP__NR_TYPES; ++type) { 2115 struct kmap *kmap; 2116 2117 if (self->vmlinux_maps[type] == NULL) 2118 continue; 2119 2120 kmap = map__kmap(self->vmlinux_maps[type]); 2121 map_groups__remove(&self->kmaps, self->vmlinux_maps[type]); 2122 if (kmap->ref_reloc_sym) { 2123 /* 2124 * ref_reloc_sym is shared among all maps, so free just 2125 * on one of them. 
2126 */ 2127 if (type == MAP__FUNCTION) { 2128 free((char *)kmap->ref_reloc_sym->name); 2129 kmap->ref_reloc_sym->name = NULL; 2130 free(kmap->ref_reloc_sym); 2131 } 2132 kmap->ref_reloc_sym = NULL; 2133 } 2134 2135 map__delete(self->vmlinux_maps[type]); 2136 self->vmlinux_maps[type] = NULL; 2137 } 2138 } 2139 2140 int machine__create_kernel_maps(struct machine *self) 2141 { 2142 struct dso *kernel = machine__create_kernel(self); 2143 2144 if (kernel == NULL || 2145 __machine__create_kernel_maps(self, kernel) < 0) 2146 return -1; 2147 2148 if (symbol_conf.use_modules && machine__create_modules(self) < 0) 2149 pr_debug("Problems creating module maps, continuing anyway...\n"); 2150 /* 2151 * Now that we have all the maps created, just set the ->end of them: 2152 */ 2153 map_groups__fixup_end(&self->kmaps); 2154 return 0; 2155 } 2156 2157 static void vmlinux_path__exit(void) 2158 { 2159 while (--vmlinux_path__nr_entries >= 0) { 2160 free(vmlinux_path[vmlinux_path__nr_entries]); 2161 vmlinux_path[vmlinux_path__nr_entries] = NULL; 2162 } 2163 2164 free(vmlinux_path); 2165 vmlinux_path = NULL; 2166 } 2167 2168 static int vmlinux_path__init(void) 2169 { 2170 struct utsname uts; 2171 char bf[PATH_MAX]; 2172 2173 if (uname(&uts) < 0) 2174 return -1; 2175 2176 vmlinux_path = malloc(sizeof(char *) * 5); 2177 if (vmlinux_path == NULL) 2178 return -1; 2179 2180 vmlinux_path[vmlinux_path__nr_entries] = strdup("vmlinux"); 2181 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 2182 goto out_fail; 2183 ++vmlinux_path__nr_entries; 2184 vmlinux_path[vmlinux_path__nr_entries] = strdup("/boot/vmlinux"); 2185 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 2186 goto out_fail; 2187 ++vmlinux_path__nr_entries; 2188 snprintf(bf, sizeof(bf), "/boot/vmlinux-%s", uts.release); 2189 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); 2190 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 2191 goto out_fail; 2192 ++vmlinux_path__nr_entries; 2193 snprintf(bf, sizeof(bf), "/lib/modules/%s/build/vmlinux", uts.release); 2194 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); 2195 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 2196 goto out_fail; 2197 ++vmlinux_path__nr_entries; 2198 snprintf(bf, sizeof(bf), "/usr/lib/debug/lib/modules/%s/vmlinux", 2199 uts.release); 2200 vmlinux_path[vmlinux_path__nr_entries] = strdup(bf); 2201 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 2202 goto out_fail; 2203 ++vmlinux_path__nr_entries; 2204 2205 return 0; 2206 2207 out_fail: 2208 vmlinux_path__exit(); 2209 return -1; 2210 } 2211 2212 size_t machine__fprintf_vmlinux_path(struct machine *self, FILE *fp) 2213 { 2214 int i; 2215 size_t printed = 0; 2216 struct dso *kdso = self->vmlinux_maps[MAP__FUNCTION]->dso; 2217 2218 if (kdso->has_build_id) { 2219 char filename[PATH_MAX]; 2220 if (dso__build_id_filename(kdso, filename, sizeof(filename))) 2221 printed += fprintf(fp, "[0] %s\n", filename); 2222 } 2223 2224 for (i = 0; i < vmlinux_path__nr_entries; ++i) 2225 printed += fprintf(fp, "[%d] %s\n", 2226 i + kdso->has_build_id, vmlinux_path[i]); 2227 2228 return printed; 2229 } 2230 2231 static int setup_list(struct strlist **list, const char *list_str, 2232 const char *list_name) 2233 { 2234 if (list_str == NULL) 2235 return 0; 2236 2237 *list = strlist__new(true, list_str); 2238 if (!*list) { 2239 pr_err("problems parsing %s list\n", list_name); 2240 return -1; 2241 } 2242 return 0; 2243 } 2244 2245 int symbol__init(void) 2246 { 2247 elf_version(EV_CURRENT); 2248 if (symbol_conf.sort_by_name) 2249 symbol_conf.priv_size 
+= (sizeof(struct symbol_name_rb_node) - 2250 sizeof(struct symbol)); 2251 2252 if (symbol_conf.try_vmlinux_path && vmlinux_path__init() < 0) 2253 return -1; 2254 2255 if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') { 2256 pr_err("'.' is the only non valid --field-separator argument\n"); 2257 return -1; 2258 } 2259 2260 if (setup_list(&symbol_conf.dso_list, 2261 symbol_conf.dso_list_str, "dso") < 0) 2262 return -1; 2263 2264 if (setup_list(&symbol_conf.comm_list, 2265 symbol_conf.comm_list_str, "comm") < 0) 2266 goto out_free_dso_list; 2267 2268 if (setup_list(&symbol_conf.sym_list, 2269 symbol_conf.sym_list_str, "symbol") < 0) 2270 goto out_free_comm_list; 2271 2272 return 0; 2273 2274 out_free_dso_list: 2275 strlist__delete(symbol_conf.dso_list); 2276 out_free_comm_list: 2277 strlist__delete(symbol_conf.comm_list); 2278 return -1; 2279 } 2280 2281 void symbol__exit(void) 2282 { 2283 strlist__delete(symbol_conf.sym_list); 2284 strlist__delete(symbol_conf.dso_list); 2285 strlist__delete(symbol_conf.comm_list); 2286 vmlinux_path__exit(); 2287 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; 2288 } 2289 2290 int machines__create_kernel_maps(struct rb_root *self, pid_t pid) 2291 { 2292 struct machine *machine = machines__findnew(self, pid); 2293 2294 if (machine == NULL) 2295 return -1; 2296 2297 return machine__create_kernel_maps(machine); 2298 } 2299 2300 static int hex(char ch) 2301 { 2302 if ((ch >= '0') && (ch <= '9')) 2303 return ch - '0'; 2304 if ((ch >= 'a') && (ch <= 'f')) 2305 return ch - 'a' + 10; 2306 if ((ch >= 'A') && (ch <= 'F')) 2307 return ch - 'A' + 10; 2308 return -1; 2309 } 2310 2311 /* 2312 * While we find nice hex chars, build a long_val. 2313 * Return number of chars processed. 2314 */ 2315 int hex2u64(const char *ptr, u64 *long_val) 2316 { 2317 const char *p = ptr; 2318 *long_val = 0; 2319 2320 while (*p) { 2321 const int hex_val = hex(*p); 2322 2323 if (hex_val < 0) 2324 break; 2325 2326 *long_val = (*long_val << 4) | hex_val; 2327 p++; 2328 } 2329 2330 return p - ptr; 2331 } 2332 2333 char *strxfrchar(char *s, char from, char to) 2334 { 2335 char *p = s; 2336 2337 while ((p = strchr(p, from)) != NULL) 2338 *p++ = to; 2339 2340 return s; 2341 } 2342 2343 int machines__create_guest_kernel_maps(struct rb_root *self) 2344 { 2345 int ret = 0; 2346 struct dirent **namelist = NULL; 2347 int i, items = 0; 2348 char path[PATH_MAX]; 2349 pid_t pid; 2350 2351 if (symbol_conf.default_guest_vmlinux_name || 2352 symbol_conf.default_guest_modules || 2353 symbol_conf.default_guest_kallsyms) { 2354 machines__create_kernel_maps(self, DEFAULT_GUEST_KERNEL_ID); 2355 } 2356 2357 if (symbol_conf.guestmount) { 2358 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL); 2359 if (items <= 0) 2360 return -ENOENT; 2361 for (i = 0; i < items; i++) { 2362 if (!isdigit(namelist[i]->d_name[0])) { 2363 /* Filter out . and .. 
*/ 2364 continue; 2365 } 2366 pid = atoi(namelist[i]->d_name); 2367 sprintf(path, "%s/%s/proc/kallsyms", 2368 symbol_conf.guestmount, 2369 namelist[i]->d_name); 2370 ret = access(path, R_OK); 2371 if (ret) { 2372 pr_debug("Can't access file %s\n", path); 2373 goto failure; 2374 } 2375 machines__create_kernel_maps(self, pid); 2376 } 2377 failure: 2378 free(namelist); 2379 } 2380 2381 return ret; 2382 } 2383 2384 void machines__destroy_guest_kernel_maps(struct rb_root *self) 2385 { 2386 struct rb_node *next = rb_first(self); 2387 2388 while (next) { 2389 struct machine *pos = rb_entry(next, struct machine, rb_node); 2390 2391 next = rb_next(&pos->rb_node); 2392 rb_erase(&pos->rb_node, self); 2393 machine__delete(pos); 2394 } 2395 } 2396 2397 int machine__load_kallsyms(struct machine *self, const char *filename, 2398 enum map_type type, symbol_filter_t filter) 2399 { 2400 struct map *map = self->vmlinux_maps[type]; 2401 int ret = dso__load_kallsyms(map->dso, filename, map, filter); 2402 2403 if (ret > 0) { 2404 dso__set_loaded(map->dso, type); 2405 /* 2406 * Since /proc/kallsyms will have multiple sessions for the 2407 * kernel, with modules between them, fixup the end of all 2408 * sections. 2409 */ 2410 __map_groups__fixup_end(&self->kmaps, type); 2411 } 2412 2413 return ret; 2414 } 2415 2416 int machine__load_vmlinux_path(struct machine *self, enum map_type type, 2417 symbol_filter_t filter) 2418 { 2419 struct map *map = self->vmlinux_maps[type]; 2420 int ret = dso__load_vmlinux_path(map->dso, map, filter); 2421 2422 if (ret > 0) { 2423 dso__set_loaded(map->dso, type); 2424 map__reloc_vmlinux(map); 2425 } 2426 2427 return ret; 2428 } 2429