1 // SPDX-License-Identifier: GPL-2.0 2 #include <dirent.h> 3 #include <errno.h> 4 #include <stdlib.h> 5 #include <stdio.h> 6 #include <string.h> 7 #include <linux/capability.h> 8 #include <linux/kernel.h> 9 #include <linux/mman.h> 10 #include <linux/string.h> 11 #include <linux/time64.h> 12 #include <sys/types.h> 13 #include <sys/stat.h> 14 #include <sys/param.h> 15 #include <fcntl.h> 16 #include <unistd.h> 17 #include <inttypes.h> 18 #include "annotate.h" 19 #include "build-id.h" 20 #include "cap.h" 21 #include "dso.h" 22 #include "util.h" // lsdir() 23 #include "debug.h" 24 #include "event.h" 25 #include "machine.h" 26 #include "map.h" 27 #include "symbol.h" 28 #include "map_symbol.h" 29 #include "mem-events.h" 30 #include "symsrc.h" 31 #include "strlist.h" 32 #include "intlist.h" 33 #include "namespaces.h" 34 #include "header.h" 35 #include "path.h" 36 #include <linux/ctype.h> 37 #include <linux/zalloc.h> 38 39 #include <elf.h> 40 #include <limits.h> 41 #include <symbol/kallsyms.h> 42 #include <sys/utsname.h> 43 44 static int dso__load_kernel_sym(struct dso *dso, struct map *map); 45 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map); 46 static bool symbol__is_idle(const char *name); 47 48 int vmlinux_path__nr_entries; 49 char **vmlinux_path; 50 51 struct map_list_node { 52 struct list_head node; 53 struct map *map; 54 }; 55 56 struct symbol_conf symbol_conf = { 57 .nanosecs = false, 58 .use_modules = true, 59 .try_vmlinux_path = true, 60 .demangle = true, 61 .demangle_kernel = false, 62 .cumulate_callchain = true, 63 .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */ 64 .show_hist_headers = true, 65 .symfs = "", 66 .event_group = true, 67 .inline_name = true, 68 .res_sample = 0, 69 }; 70 71 static enum dso_binary_type binary_type_symtab[] = { 72 DSO_BINARY_TYPE__KALLSYMS, 73 DSO_BINARY_TYPE__GUEST_KALLSYMS, 74 DSO_BINARY_TYPE__JAVA_JIT, 75 DSO_BINARY_TYPE__DEBUGLINK, 76 DSO_BINARY_TYPE__BUILD_ID_CACHE, 77 DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO, 78 DSO_BINARY_TYPE__FEDORA_DEBUGINFO, 79 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, 80 DSO_BINARY_TYPE__BUILDID_DEBUGINFO, 81 DSO_BINARY_TYPE__SYSTEM_PATH_DSO, 82 DSO_BINARY_TYPE__GUEST_KMODULE, 83 DSO_BINARY_TYPE__GUEST_KMODULE_COMP, 84 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, 85 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, 86 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, 87 DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, 88 DSO_BINARY_TYPE__NOT_FOUND, 89 }; 90 91 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) 92 93 static struct map_list_node *map_list_node__new(void) 94 { 95 return malloc(sizeof(struct map_list_node)); 96 } 97 98 static bool symbol_type__filter(char symbol_type) 99 { 100 symbol_type = toupper(symbol_type); 101 return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B'; 102 } 103 104 static int prefix_underscores_count(const char *str) 105 { 106 const char *tail = str; 107 108 while (*tail == '_') 109 tail++; 110 111 return tail - str; 112 } 113 114 const char * __weak arch__normalize_symbol_name(const char *name) 115 { 116 return name; 117 } 118 119 int __weak arch__compare_symbol_names(const char *namea, const char *nameb) 120 { 121 return strcmp(namea, nameb); 122 } 123 124 int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb, 125 unsigned int n) 126 { 127 return strncmp(namea, nameb, n); 128 } 129 130 int __weak arch__choose_best_symbol(struct symbol *syma, 131 struct symbol *symb __maybe_unused) 132 { 133 /* Avoid "SyS" kernel syscall aliases */ 134 if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3)) 135 return SYMBOL_B; 136 if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10)) 137 return SYMBOL_B; 138 139 return SYMBOL_A; 140 } 141 142 static int choose_best_symbol(struct symbol *syma, struct symbol *symb) 143 { 144 s64 a; 145 s64 b; 146 size_t na, nb; 147 148 /* Prefer a symbol with non zero length */ 149 a = syma->end - syma->start; 150 b = symb->end - symb->start; 151 if ((b == 0) && (a > 0)) 152 return SYMBOL_A; 153 else if ((a == 0) && (b > 0)) 154 return SYMBOL_B; 155 156 /* Prefer a non weak symbol over a weak one */ 157 a = syma->binding == STB_WEAK; 158 b = symb->binding == STB_WEAK; 159 if (b && !a) 160 return SYMBOL_A; 161 if (a && !b) 162 return SYMBOL_B; 163 164 /* Prefer a global symbol over a non global one */ 165 a = syma->binding == STB_GLOBAL; 166 b = symb->binding == STB_GLOBAL; 167 if (a && !b) 168 return SYMBOL_A; 169 if (b && !a) 170 return SYMBOL_B; 171 172 /* Prefer a symbol with less underscores */ 173 a = prefix_underscores_count(syma->name); 174 b = prefix_underscores_count(symb->name); 175 if (b > a) 176 return SYMBOL_A; 177 else if (a > b) 178 return SYMBOL_B; 179 180 /* Choose the symbol with the longest name */ 181 na = strlen(syma->name); 182 nb = strlen(symb->name); 183 if (na > nb) 184 return SYMBOL_A; 185 else if (na < nb) 186 return SYMBOL_B; 187 188 return arch__choose_best_symbol(syma, symb); 189 } 190 191 void symbols__fixup_duplicate(struct rb_root_cached *symbols) 192 { 193 struct rb_node *nd; 194 struct symbol *curr, *next; 195 196 if (symbol_conf.allow_aliases) 197 return; 198 199 nd = rb_first_cached(symbols); 200 201 while (nd) { 202 curr = rb_entry(nd, struct symbol, rb_node); 203 again: 204 nd = rb_next(&curr->rb_node); 205 next = rb_entry(nd, struct symbol, rb_node); 206 207 if (!nd) 208 break; 209 210 if (curr->start != next->start) 211 continue; 212 213 if (choose_best_symbol(curr, next) == SYMBOL_A) { 214 if (next->type == STT_GNU_IFUNC) 215 curr->ifunc_alias = true; 216 rb_erase_cached(&next->rb_node, symbols); 217 symbol__delete(next); 218 goto again; 219 } else { 220 if (curr->type == STT_GNU_IFUNC) 221 next->ifunc_alias = true; 222 nd = rb_next(&curr->rb_node); 223 rb_erase_cached(&curr->rb_node, symbols); 224 symbol__delete(curr); 225 } 226 } 227 } 228 229 /* Update zero-sized symbols using the address of the next symbol */ 230 void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms) 231 { 232 struct rb_node *nd, *prevnd = rb_first_cached(symbols); 233 struct symbol *curr, *prev; 234 235 if (prevnd == NULL) 236 return; 237 238 curr = rb_entry(prevnd, struct symbol, rb_node); 239 240 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { 241 prev = curr; 242 curr = rb_entry(nd, struct symbol, rb_node); 243 244 /* 245 * On some architecture kernel text segment start is located at 246 * some low memory address, while modules are located at high 247 * memory addresses (or vice versa). The gap between end of 248 * kernel text segment and beginning of first module's text 249 * segment is very big. Therefore do not fill this gap and do 250 * not assign it to the kernel dso map (kallsyms). 251 * 252 * In kallsyms, it determines module symbols using '[' character 253 * like in: 254 * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi] 255 */ 256 if (prev->end == prev->start) { 257 /* Last kernel/module symbol mapped to end of page */ 258 if (is_kallsyms && (!strchr(prev->name, '[') != 259 !strchr(curr->name, '['))) 260 prev->end = roundup(prev->end + 4096, 4096); 261 else 262 prev->end = curr->start; 263 264 pr_debug4("%s sym:%s end:%#" PRIx64 "\n", 265 __func__, prev->name, prev->end); 266 } 267 } 268 269 /* Last entry */ 270 if (curr->end == curr->start) 271 curr->end = roundup(curr->start, 4096) + 4096; 272 } 273 274 void maps__fixup_end(struct maps *maps) 275 { 276 struct map_rb_node *prev = NULL, *curr; 277 278 down_write(maps__lock(maps)); 279 280 maps__for_each_entry(maps, curr) { 281 if (prev != NULL && !prev->map->end) 282 prev->map->end = curr->map->start; 283 284 prev = curr; 285 } 286 287 /* 288 * We still haven't the actual symbols, so guess the 289 * last map final address. 290 */ 291 if (curr && !curr->map->end) 292 curr->map->end = ~0ULL; 293 294 up_write(maps__lock(maps)); 295 } 296 297 struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name) 298 { 299 size_t namelen = strlen(name) + 1; 300 struct symbol *sym = calloc(1, (symbol_conf.priv_size + 301 sizeof(*sym) + namelen)); 302 if (sym == NULL) 303 return NULL; 304 305 if (symbol_conf.priv_size) { 306 if (symbol_conf.init_annotation) { 307 struct annotation *notes = (void *)sym; 308 annotation__init(notes); 309 } 310 sym = ((void *)sym) + symbol_conf.priv_size; 311 } 312 313 sym->start = start; 314 sym->end = len ? start + len : start; 315 sym->type = type; 316 sym->binding = binding; 317 sym->namelen = namelen - 1; 318 319 pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", 320 __func__, name, start, sym->end); 321 memcpy(sym->name, name, namelen); 322 323 return sym; 324 } 325 326 void symbol__delete(struct symbol *sym) 327 { 328 if (symbol_conf.priv_size) { 329 if (symbol_conf.init_annotation) { 330 struct annotation *notes = symbol__annotation(sym); 331 332 annotation__exit(notes); 333 } 334 } 335 free(((void *)sym) - symbol_conf.priv_size); 336 } 337 338 void symbols__delete(struct rb_root_cached *symbols) 339 { 340 struct symbol *pos; 341 struct rb_node *next = rb_first_cached(symbols); 342 343 while (next) { 344 pos = rb_entry(next, struct symbol, rb_node); 345 next = rb_next(&pos->rb_node); 346 rb_erase_cached(&pos->rb_node, symbols); 347 symbol__delete(pos); 348 } 349 } 350 351 void __symbols__insert(struct rb_root_cached *symbols, 352 struct symbol *sym, bool kernel) 353 { 354 struct rb_node **p = &symbols->rb_root.rb_node; 355 struct rb_node *parent = NULL; 356 const u64 ip = sym->start; 357 struct symbol *s; 358 bool leftmost = true; 359 360 if (kernel) { 361 const char *name = sym->name; 362 /* 363 * ppc64 uses function descriptors and appends a '.' to the 364 * start of every instruction address. Remove it. 365 */ 366 if (name[0] == '.') 367 name++; 368 sym->idle = symbol__is_idle(name); 369 } 370 371 while (*p != NULL) { 372 parent = *p; 373 s = rb_entry(parent, struct symbol, rb_node); 374 if (ip < s->start) 375 p = &(*p)->rb_left; 376 else { 377 p = &(*p)->rb_right; 378 leftmost = false; 379 } 380 } 381 rb_link_node(&sym->rb_node, parent, p); 382 rb_insert_color_cached(&sym->rb_node, symbols, leftmost); 383 } 384 385 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym) 386 { 387 __symbols__insert(symbols, sym, false); 388 } 389 390 static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip) 391 { 392 struct rb_node *n; 393 394 if (symbols == NULL) 395 return NULL; 396 397 n = symbols->rb_root.rb_node; 398 399 while (n) { 400 struct symbol *s = rb_entry(n, struct symbol, rb_node); 401 402 if (ip < s->start) 403 n = n->rb_left; 404 else if (ip > s->end || (ip == s->end && ip != s->start)) 405 n = n->rb_right; 406 else 407 return s; 408 } 409 410 return NULL; 411 } 412 413 static struct symbol *symbols__first(struct rb_root_cached *symbols) 414 { 415 struct rb_node *n = rb_first_cached(symbols); 416 417 if (n) 418 return rb_entry(n, struct symbol, rb_node); 419 420 return NULL; 421 } 422 423 static struct symbol *symbols__last(struct rb_root_cached *symbols) 424 { 425 struct rb_node *n = rb_last(&symbols->rb_root); 426 427 if (n) 428 return rb_entry(n, struct symbol, rb_node); 429 430 return NULL; 431 } 432 433 static struct symbol *symbols__next(struct symbol *sym) 434 { 435 struct rb_node *n = rb_next(&sym->rb_node); 436 437 if (n) 438 return rb_entry(n, struct symbol, rb_node); 439 440 return NULL; 441 } 442 443 static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym) 444 { 445 struct rb_node **p = &symbols->rb_root.rb_node; 446 struct rb_node *parent = NULL; 447 struct symbol_name_rb_node *symn, *s; 448 bool leftmost = true; 449 450 symn = container_of(sym, struct symbol_name_rb_node, sym); 451 452 while (*p != NULL) { 453 parent = *p; 454 s = rb_entry(parent, struct symbol_name_rb_node, rb_node); 455 if (strcmp(sym->name, s->sym.name) < 0) 456 p = &(*p)->rb_left; 457 else { 458 p = &(*p)->rb_right; 459 leftmost = false; 460 } 461 } 462 rb_link_node(&symn->rb_node, parent, p); 463 rb_insert_color_cached(&symn->rb_node, symbols, leftmost); 464 } 465 466 static void symbols__sort_by_name(struct rb_root_cached *symbols, 467 struct rb_root_cached *source) 468 { 469 struct rb_node *nd; 470 471 for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) { 472 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 473 symbols__insert_by_name(symbols, pos); 474 } 475 } 476 477 int symbol__match_symbol_name(const char *name, const char *str, 478 enum symbol_tag_include includes) 479 { 480 const char *versioning; 481 482 if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY && 483 (versioning = strstr(name, "@@"))) { 484 int len = strlen(str); 485 486 if (len < versioning - name) 487 len = versioning - name; 488 489 return arch__compare_symbol_names_n(name, str, len); 490 } else 491 return arch__compare_symbol_names(name, str); 492 } 493 494 static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols, 495 const char *name, 496 enum symbol_tag_include includes) 497 { 498 struct rb_node *n; 499 struct symbol_name_rb_node *s = NULL; 500 501 if (symbols == NULL) 502 return NULL; 503 504 n = symbols->rb_root.rb_node; 505 506 while (n) { 507 int cmp; 508 509 s = rb_entry(n, struct symbol_name_rb_node, rb_node); 510 cmp = symbol__match_symbol_name(s->sym.name, name, includes); 511 512 if (cmp > 0) 513 n = n->rb_left; 514 else if (cmp < 0) 515 n = n->rb_right; 516 else 517 break; 518 } 519 520 if (n == NULL) 521 return NULL; 522 523 if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) 524 /* return first symbol that has same name (if any) */ 525 for (n = rb_prev(n); n; n = rb_prev(n)) { 526 struct symbol_name_rb_node *tmp; 527 528 tmp = rb_entry(n, struct symbol_name_rb_node, rb_node); 529 if (arch__compare_symbol_names(tmp->sym.name, s->sym.name)) 530 break; 531 532 s = tmp; 533 } 534 535 return &s->sym; 536 } 537 538 void dso__reset_find_symbol_cache(struct dso *dso) 539 { 540 dso->last_find_result.addr = 0; 541 dso->last_find_result.symbol = NULL; 542 } 543 544 void dso__insert_symbol(struct dso *dso, struct symbol *sym) 545 { 546 __symbols__insert(&dso->symbols, sym, dso->kernel); 547 548 /* update the symbol cache if necessary */ 549 if (dso->last_find_result.addr >= sym->start && 550 (dso->last_find_result.addr < sym->end || 551 sym->start == sym->end)) { 552 dso->last_find_result.symbol = sym; 553 } 554 } 555 556 void dso__delete_symbol(struct dso *dso, struct symbol *sym) 557 { 558 rb_erase_cached(&sym->rb_node, &dso->symbols); 559 symbol__delete(sym); 560 dso__reset_find_symbol_cache(dso); 561 } 562 563 struct symbol *dso__find_symbol(struct dso *dso, u64 addr) 564 { 565 if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) { 566 dso->last_find_result.addr = addr; 567 dso->last_find_result.symbol = symbols__find(&dso->symbols, addr); 568 } 569 570 return dso->last_find_result.symbol; 571 } 572 573 struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr) 574 { 575 return symbols__find(&dso->symbols, addr); 576 } 577 578 struct symbol *dso__first_symbol(struct dso *dso) 579 { 580 return symbols__first(&dso->symbols); 581 } 582 583 struct symbol *dso__last_symbol(struct dso *dso) 584 { 585 return symbols__last(&dso->symbols); 586 } 587 588 struct symbol *dso__next_symbol(struct symbol *sym) 589 { 590 return symbols__next(sym); 591 } 592 593 struct symbol *symbol__next_by_name(struct symbol *sym) 594 { 595 struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym); 596 struct rb_node *n = rb_next(&s->rb_node); 597 598 return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL; 599 } 600 601 /* 602 * Returns first symbol that matched with @name. 603 */ 604 struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name) 605 { 606 struct symbol *s = symbols__find_by_name(&dso->symbol_names, name, 607 SYMBOL_TAG_INCLUDE__NONE); 608 if (!s) 609 s = symbols__find_by_name(&dso->symbol_names, name, 610 SYMBOL_TAG_INCLUDE__DEFAULT_ONLY); 611 return s; 612 } 613 614 void dso__sort_by_name(struct dso *dso) 615 { 616 dso__set_sorted_by_name(dso); 617 return symbols__sort_by_name(&dso->symbol_names, &dso->symbols); 618 } 619 620 /* 621 * While we find nice hex chars, build a long_val. 622 * Return number of chars processed. 623 */ 624 static int hex2u64(const char *ptr, u64 *long_val) 625 { 626 char *p; 627 628 *long_val = strtoull(ptr, &p, 16); 629 630 return p - ptr; 631 } 632 633 634 int modules__parse(const char *filename, void *arg, 635 int (*process_module)(void *arg, const char *name, 636 u64 start, u64 size)) 637 { 638 char *line = NULL; 639 size_t n; 640 FILE *file; 641 int err = 0; 642 643 file = fopen(filename, "r"); 644 if (file == NULL) 645 return -1; 646 647 while (1) { 648 char name[PATH_MAX]; 649 u64 start, size; 650 char *sep, *endptr; 651 ssize_t line_len; 652 653 line_len = getline(&line, &n, file); 654 if (line_len < 0) { 655 if (feof(file)) 656 break; 657 err = -1; 658 goto out; 659 } 660 661 if (!line) { 662 err = -1; 663 goto out; 664 } 665 666 line[--line_len] = '\0'; /* \n */ 667 668 sep = strrchr(line, 'x'); 669 if (sep == NULL) 670 continue; 671 672 hex2u64(sep + 1, &start); 673 674 sep = strchr(line, ' '); 675 if (sep == NULL) 676 continue; 677 678 *sep = '\0'; 679 680 scnprintf(name, sizeof(name), "[%s]", line); 681 682 size = strtoul(sep + 1, &endptr, 0); 683 if (*endptr != ' ' && *endptr != '\t') 684 continue; 685 686 err = process_module(arg, name, start, size); 687 if (err) 688 break; 689 } 690 out: 691 free(line); 692 fclose(file); 693 return err; 694 } 695 696 /* 697 * These are symbols in the kernel image, so make sure that 698 * sym is from a kernel DSO. 699 */ 700 static bool symbol__is_idle(const char *name) 701 { 702 const char * const idle_symbols[] = { 703 "acpi_idle_do_entry", 704 "acpi_processor_ffh_cstate_enter", 705 "arch_cpu_idle", 706 "cpu_idle", 707 "cpu_startup_entry", 708 "idle_cpu", 709 "intel_idle", 710 "default_idle", 711 "native_safe_halt", 712 "enter_idle", 713 "exit_idle", 714 "mwait_idle", 715 "mwait_idle_with_hints", 716 "mwait_idle_with_hints.constprop.0", 717 "poll_idle", 718 "ppc64_runlatch_off", 719 "pseries_dedicated_idle_sleep", 720 "psw_idle", 721 "psw_idle_exit", 722 NULL 723 }; 724 int i; 725 static struct strlist *idle_symbols_list; 726 727 if (idle_symbols_list) 728 return strlist__has_entry(idle_symbols_list, name); 729 730 idle_symbols_list = strlist__new(NULL, NULL); 731 732 for (i = 0; idle_symbols[i]; i++) 733 strlist__add(idle_symbols_list, idle_symbols[i]); 734 735 return strlist__has_entry(idle_symbols_list, name); 736 } 737 738 static int map__process_kallsym_symbol(void *arg, const char *name, 739 char type, u64 start) 740 { 741 struct symbol *sym; 742 struct dso *dso = arg; 743 struct rb_root_cached *root = &dso->symbols; 744 745 if (!symbol_type__filter(type)) 746 return 0; 747 748 /* Ignore local symbols for ARM modules */ 749 if (name[0] == '$') 750 return 0; 751 752 /* 753 * module symbols are not sorted so we add all 754 * symbols, setting length to 0, and rely on 755 * symbols__fixup_end() to fix it up. 756 */ 757 sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name); 758 if (sym == NULL) 759 return -ENOMEM; 760 /* 761 * We will pass the symbols to the filter later, in 762 * map__split_kallsyms, when we have split the maps per module 763 */ 764 __symbols__insert(root, sym, !strchr(name, '[')); 765 766 return 0; 767 } 768 769 /* 770 * Loads the function entries in /proc/kallsyms into kernel_map->dso, 771 * so that we can in the next step set the symbol ->end address and then 772 * call kernel_maps__split_kallsyms. 773 */ 774 static int dso__load_all_kallsyms(struct dso *dso, const char *filename) 775 { 776 return kallsyms__parse(filename, dso, map__process_kallsym_symbol); 777 } 778 779 static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso) 780 { 781 struct map *curr_map; 782 struct symbol *pos; 783 int count = 0; 784 struct rb_root_cached old_root = dso->symbols; 785 struct rb_root_cached *root = &dso->symbols; 786 struct rb_node *next = rb_first_cached(root); 787 788 if (!kmaps) 789 return -1; 790 791 *root = RB_ROOT_CACHED; 792 793 while (next) { 794 struct dso *curr_map_dso; 795 char *module; 796 797 pos = rb_entry(next, struct symbol, rb_node); 798 next = rb_next(&pos->rb_node); 799 800 rb_erase_cached(&pos->rb_node, &old_root); 801 RB_CLEAR_NODE(&pos->rb_node); 802 module = strchr(pos->name, '\t'); 803 if (module) 804 *module = '\0'; 805 806 curr_map = maps__find(kmaps, pos->start); 807 808 if (!curr_map) { 809 symbol__delete(pos); 810 continue; 811 } 812 curr_map_dso = map__dso(curr_map); 813 pos->start -= curr_map->start - curr_map->pgoff; 814 if (pos->end > curr_map->end) 815 pos->end = curr_map->end; 816 if (pos->end) 817 pos->end -= curr_map->start - curr_map->pgoff; 818 symbols__insert(&curr_map_dso->symbols, pos); 819 ++count; 820 } 821 822 /* Symbols have been adjusted */ 823 dso->adjust_symbols = 1; 824 825 return count; 826 } 827 828 /* 829 * Split the symbols into maps, making sure there are no overlaps, i.e. the 830 * kernel range is broken in several maps, named [kernel].N, as we don't have 831 * the original ELF section names vmlinux have. 832 */ 833 static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, 834 struct map *initial_map) 835 { 836 struct machine *machine; 837 struct map *curr_map = initial_map; 838 struct symbol *pos; 839 int count = 0, moved = 0; 840 struct rb_root_cached *root = &dso->symbols; 841 struct rb_node *next = rb_first_cached(root); 842 int kernel_range = 0; 843 bool x86_64; 844 845 if (!kmaps) 846 return -1; 847 848 machine = maps__machine(kmaps); 849 850 x86_64 = machine__is(machine, "x86_64"); 851 852 while (next) { 853 char *module; 854 855 pos = rb_entry(next, struct symbol, rb_node); 856 next = rb_next(&pos->rb_node); 857 858 module = strchr(pos->name, '\t'); 859 if (module) { 860 struct dso *curr_map_dso; 861 862 if (!symbol_conf.use_modules) 863 goto discard_symbol; 864 865 *module++ = '\0'; 866 curr_map_dso = map__dso(curr_map); 867 if (strcmp(curr_map_dso->short_name, module)) { 868 if (curr_map != initial_map && 869 dso->kernel == DSO_SPACE__KERNEL_GUEST && 870 machine__is_default_guest(machine)) { 871 /* 872 * We assume all symbols of a module are 873 * continuous in * kallsyms, so curr_map 874 * points to a module and all its 875 * symbols are in its kmap. Mark it as 876 * loaded. 877 */ 878 dso__set_loaded(curr_map_dso); 879 } 880 881 curr_map = maps__find_by_name(kmaps, module); 882 if (curr_map == NULL) { 883 pr_debug("%s/proc/{kallsyms,modules} " 884 "inconsistency while looking " 885 "for \"%s\" module!\n", 886 machine->root_dir, module); 887 curr_map = initial_map; 888 goto discard_symbol; 889 } 890 curr_map_dso = map__dso(curr_map); 891 if (curr_map_dso->loaded && 892 !machine__is_default_guest(machine)) 893 goto discard_symbol; 894 } 895 /* 896 * So that we look just like we get from .ko files, 897 * i.e. not prelinked, relative to initial_map->start. 898 */ 899 pos->start = curr_map->map_ip(curr_map, pos->start); 900 pos->end = curr_map->map_ip(curr_map, pos->end); 901 } else if (x86_64 && is_entry_trampoline(pos->name)) { 902 /* 903 * These symbols are not needed anymore since the 904 * trampoline maps refer to the text section and it's 905 * symbols instead. Avoid having to deal with 906 * relocations, and the assumption that the first symbol 907 * is the start of kernel text, by simply removing the 908 * symbols at this point. 909 */ 910 goto discard_symbol; 911 } else if (curr_map != initial_map) { 912 char dso_name[PATH_MAX]; 913 struct dso *ndso; 914 915 if (delta) { 916 /* Kernel was relocated at boot time */ 917 pos->start -= delta; 918 pos->end -= delta; 919 } 920 921 if (count == 0) { 922 curr_map = initial_map; 923 goto add_symbol; 924 } 925 926 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 927 snprintf(dso_name, sizeof(dso_name), 928 "[guest.kernel].%d", 929 kernel_range++); 930 else 931 snprintf(dso_name, sizeof(dso_name), 932 "[kernel].%d", 933 kernel_range++); 934 935 ndso = dso__new(dso_name); 936 if (ndso == NULL) 937 return -1; 938 939 ndso->kernel = dso->kernel; 940 941 curr_map = map__new2(pos->start, ndso); 942 if (curr_map == NULL) { 943 dso__put(ndso); 944 return -1; 945 } 946 947 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; 948 if (maps__insert(kmaps, curr_map)) { 949 dso__put(ndso); 950 return -1; 951 } 952 ++kernel_range; 953 } else if (delta) { 954 /* Kernel was relocated at boot time */ 955 pos->start -= delta; 956 pos->end -= delta; 957 } 958 add_symbol: 959 if (curr_map != initial_map) { 960 struct dso *curr_map_dso = map__dso(curr_map); 961 962 rb_erase_cached(&pos->rb_node, root); 963 symbols__insert(&curr_map_dso->symbols, pos); 964 ++moved; 965 } else 966 ++count; 967 968 continue; 969 discard_symbol: 970 rb_erase_cached(&pos->rb_node, root); 971 symbol__delete(pos); 972 } 973 974 if (curr_map != initial_map && 975 dso->kernel == DSO_SPACE__KERNEL_GUEST && 976 machine__is_default_guest(maps__machine(kmaps))) { 977 dso__set_loaded(map__dso(curr_map)); 978 } 979 980 return count + moved; 981 } 982 983 bool symbol__restricted_filename(const char *filename, 984 const char *restricted_filename) 985 { 986 bool restricted = false; 987 988 if (symbol_conf.kptr_restrict) { 989 char *r = realpath(filename, NULL); 990 991 if (r != NULL) { 992 restricted = strcmp(r, restricted_filename) == 0; 993 free(r); 994 return restricted; 995 } 996 } 997 998 return restricted; 999 } 1000 1001 struct module_info { 1002 struct rb_node rb_node; 1003 char *name; 1004 u64 start; 1005 }; 1006 1007 static void add_module(struct module_info *mi, struct rb_root *modules) 1008 { 1009 struct rb_node **p = &modules->rb_node; 1010 struct rb_node *parent = NULL; 1011 struct module_info *m; 1012 1013 while (*p != NULL) { 1014 parent = *p; 1015 m = rb_entry(parent, struct module_info, rb_node); 1016 if (strcmp(mi->name, m->name) < 0) 1017 p = &(*p)->rb_left; 1018 else 1019 p = &(*p)->rb_right; 1020 } 1021 rb_link_node(&mi->rb_node, parent, p); 1022 rb_insert_color(&mi->rb_node, modules); 1023 } 1024 1025 static void delete_modules(struct rb_root *modules) 1026 { 1027 struct module_info *mi; 1028 struct rb_node *next = rb_first(modules); 1029 1030 while (next) { 1031 mi = rb_entry(next, struct module_info, rb_node); 1032 next = rb_next(&mi->rb_node); 1033 rb_erase(&mi->rb_node, modules); 1034 zfree(&mi->name); 1035 free(mi); 1036 } 1037 } 1038 1039 static struct module_info *find_module(const char *name, 1040 struct rb_root *modules) 1041 { 1042 struct rb_node *n = modules->rb_node; 1043 1044 while (n) { 1045 struct module_info *m; 1046 int cmp; 1047 1048 m = rb_entry(n, struct module_info, rb_node); 1049 cmp = strcmp(name, m->name); 1050 if (cmp < 0) 1051 n = n->rb_left; 1052 else if (cmp > 0) 1053 n = n->rb_right; 1054 else 1055 return m; 1056 } 1057 1058 return NULL; 1059 } 1060 1061 static int __read_proc_modules(void *arg, const char *name, u64 start, 1062 u64 size __maybe_unused) 1063 { 1064 struct rb_root *modules = arg; 1065 struct module_info *mi; 1066 1067 mi = zalloc(sizeof(struct module_info)); 1068 if (!mi) 1069 return -ENOMEM; 1070 1071 mi->name = strdup(name); 1072 mi->start = start; 1073 1074 if (!mi->name) { 1075 free(mi); 1076 return -ENOMEM; 1077 } 1078 1079 add_module(mi, modules); 1080 1081 return 0; 1082 } 1083 1084 static int read_proc_modules(const char *filename, struct rb_root *modules) 1085 { 1086 if (symbol__restricted_filename(filename, "/proc/modules")) 1087 return -1; 1088 1089 if (modules__parse(filename, modules, __read_proc_modules)) { 1090 delete_modules(modules); 1091 return -1; 1092 } 1093 1094 return 0; 1095 } 1096 1097 int compare_proc_modules(const char *from, const char *to) 1098 { 1099 struct rb_root from_modules = RB_ROOT; 1100 struct rb_root to_modules = RB_ROOT; 1101 struct rb_node *from_node, *to_node; 1102 struct module_info *from_m, *to_m; 1103 int ret = -1; 1104 1105 if (read_proc_modules(from, &from_modules)) 1106 return -1; 1107 1108 if (read_proc_modules(to, &to_modules)) 1109 goto out_delete_from; 1110 1111 from_node = rb_first(&from_modules); 1112 to_node = rb_first(&to_modules); 1113 while (from_node) { 1114 if (!to_node) 1115 break; 1116 1117 from_m = rb_entry(from_node, struct module_info, rb_node); 1118 to_m = rb_entry(to_node, struct module_info, rb_node); 1119 1120 if (from_m->start != to_m->start || 1121 strcmp(from_m->name, to_m->name)) 1122 break; 1123 1124 from_node = rb_next(from_node); 1125 to_node = rb_next(to_node); 1126 } 1127 1128 if (!from_node && !to_node) 1129 ret = 0; 1130 1131 delete_modules(&to_modules); 1132 out_delete_from: 1133 delete_modules(&from_modules); 1134 1135 return ret; 1136 } 1137 1138 static int do_validate_kcore_modules(const char *filename, struct maps *kmaps) 1139 { 1140 struct rb_root modules = RB_ROOT; 1141 struct map_rb_node *old_node; 1142 int err; 1143 1144 err = read_proc_modules(filename, &modules); 1145 if (err) 1146 return err; 1147 1148 maps__for_each_entry(kmaps, old_node) { 1149 struct map *old_map = old_node->map; 1150 struct module_info *mi; 1151 struct dso *dso; 1152 1153 if (!__map__is_kmodule(old_map)) { 1154 continue; 1155 } 1156 dso = map__dso(old_map); 1157 /* Module must be in memory at the same address */ 1158 mi = find_module(dso->short_name, &modules); 1159 if (!mi || mi->start != old_map->start) { 1160 err = -EINVAL; 1161 goto out; 1162 } 1163 } 1164 out: 1165 delete_modules(&modules); 1166 return err; 1167 } 1168 1169 /* 1170 * If kallsyms is referenced by name then we look for filename in the same 1171 * directory. 1172 */ 1173 static bool filename_from_kallsyms_filename(char *filename, 1174 const char *base_name, 1175 const char *kallsyms_filename) 1176 { 1177 char *name; 1178 1179 strcpy(filename, kallsyms_filename); 1180 name = strrchr(filename, '/'); 1181 if (!name) 1182 return false; 1183 1184 name += 1; 1185 1186 if (!strcmp(name, "kallsyms")) { 1187 strcpy(name, base_name); 1188 return true; 1189 } 1190 1191 return false; 1192 } 1193 1194 static int validate_kcore_modules(const char *kallsyms_filename, 1195 struct map *map) 1196 { 1197 struct maps *kmaps = map__kmaps(map); 1198 char modules_filename[PATH_MAX]; 1199 1200 if (!kmaps) 1201 return -EINVAL; 1202 1203 if (!filename_from_kallsyms_filename(modules_filename, "modules", 1204 kallsyms_filename)) 1205 return -EINVAL; 1206 1207 if (do_validate_kcore_modules(modules_filename, kmaps)) 1208 return -EINVAL; 1209 1210 return 0; 1211 } 1212 1213 static int validate_kcore_addresses(const char *kallsyms_filename, 1214 struct map *map) 1215 { 1216 struct kmap *kmap = map__kmap(map); 1217 1218 if (!kmap) 1219 return -EINVAL; 1220 1221 if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) { 1222 u64 start; 1223 1224 if (kallsyms__get_function_start(kallsyms_filename, 1225 kmap->ref_reloc_sym->name, &start)) 1226 return -ENOENT; 1227 if (start != kmap->ref_reloc_sym->addr) 1228 return -EINVAL; 1229 } 1230 1231 return validate_kcore_modules(kallsyms_filename, map); 1232 } 1233 1234 struct kcore_mapfn_data { 1235 struct dso *dso; 1236 struct list_head maps; 1237 }; 1238 1239 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) 1240 { 1241 struct kcore_mapfn_data *md = data; 1242 struct map_list_node *list_node = map_list_node__new(); 1243 1244 if (!list_node) 1245 return -ENOMEM; 1246 1247 list_node->map = map__new2(start, md->dso); 1248 if (!list_node->map) { 1249 free(list_node); 1250 return -ENOMEM; 1251 } 1252 1253 list_node->map->end = list_node->map->start + len; 1254 list_node->map->pgoff = pgoff; 1255 1256 list_add(&list_node->node, &md->maps); 1257 1258 return 0; 1259 } 1260 1261 /* 1262 * Merges map into maps by splitting the new map within the existing map 1263 * regions. 1264 */ 1265 int maps__merge_in(struct maps *kmaps, struct map *new_map) 1266 { 1267 struct map_rb_node *rb_node; 1268 LIST_HEAD(merged); 1269 int err = 0; 1270 1271 maps__for_each_entry(kmaps, rb_node) { 1272 struct map *old_map = rb_node->map; 1273 1274 /* no overload with this one */ 1275 if (new_map->end < old_map->start || 1276 new_map->start >= old_map->end) 1277 continue; 1278 1279 if (new_map->start < old_map->start) { 1280 /* 1281 * |new...... 1282 * |old.... 1283 */ 1284 if (new_map->end < old_map->end) { 1285 /* 1286 * |new......| -> |new..| 1287 * |old....| -> |old....| 1288 */ 1289 new_map->end = old_map->start; 1290 } else { 1291 /* 1292 * |new.............| -> |new..| |new..| 1293 * |old....| -> |old....| 1294 */ 1295 struct map_list_node *m = map_list_node__new(); 1296 1297 if (!m) { 1298 err = -ENOMEM; 1299 goto out; 1300 } 1301 1302 m->map = map__clone(new_map); 1303 if (!m->map) { 1304 free(m); 1305 err = -ENOMEM; 1306 goto out; 1307 } 1308 1309 m->map->end = old_map->start; 1310 list_add_tail(&m->node, &merged); 1311 new_map->pgoff += old_map->end - new_map->start; 1312 new_map->start = old_map->end; 1313 } 1314 } else { 1315 /* 1316 * |new...... 1317 * |old.... 1318 */ 1319 if (new_map->end < old_map->end) { 1320 /* 1321 * |new..| -> x 1322 * |old.........| -> |old.........| 1323 */ 1324 map__put(new_map); 1325 new_map = NULL; 1326 break; 1327 } else { 1328 /* 1329 * |new......| -> |new...| 1330 * |old....| -> |old....| 1331 */ 1332 new_map->pgoff += old_map->end - new_map->start; 1333 new_map->start = old_map->end; 1334 } 1335 } 1336 } 1337 1338 out: 1339 while (!list_empty(&merged)) { 1340 struct map_list_node *old_node; 1341 1342 old_node = list_entry(merged.next, struct map_list_node, node); 1343 list_del_init(&old_node->node); 1344 if (!err) 1345 err = maps__insert(kmaps, old_node->map); 1346 map__put(old_node->map); 1347 free(old_node); 1348 } 1349 1350 if (new_map) { 1351 if (!err) 1352 err = maps__insert(kmaps, new_map); 1353 map__put(new_map); 1354 } 1355 return err; 1356 } 1357 1358 static int dso__load_kcore(struct dso *dso, struct map *map, 1359 const char *kallsyms_filename) 1360 { 1361 struct maps *kmaps = map__kmaps(map); 1362 struct kcore_mapfn_data md; 1363 struct map *replacement_map = NULL; 1364 struct map_rb_node *old_node, *next; 1365 struct machine *machine; 1366 bool is_64_bit; 1367 int err, fd; 1368 char kcore_filename[PATH_MAX]; 1369 u64 stext; 1370 1371 if (!kmaps) 1372 return -EINVAL; 1373 1374 machine = maps__machine(kmaps); 1375 1376 /* This function requires that the map is the kernel map */ 1377 if (!__map__is_kernel(map)) 1378 return -EINVAL; 1379 1380 if (!filename_from_kallsyms_filename(kcore_filename, "kcore", 1381 kallsyms_filename)) 1382 return -EINVAL; 1383 1384 /* Modules and kernel must be present at their original addresses */ 1385 if (validate_kcore_addresses(kallsyms_filename, map)) 1386 return -EINVAL; 1387 1388 md.dso = dso; 1389 INIT_LIST_HEAD(&md.maps); 1390 1391 fd = open(kcore_filename, O_RDONLY); 1392 if (fd < 0) { 1393 pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n", 1394 kcore_filename); 1395 return -EINVAL; 1396 } 1397 1398 /* Read new maps into temporary lists */ 1399 err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md, 1400 &is_64_bit); 1401 if (err) 1402 goto out_err; 1403 dso->is_64_bit = is_64_bit; 1404 1405 if (list_empty(&md.maps)) { 1406 err = -EINVAL; 1407 goto out_err; 1408 } 1409 1410 /* Remove old maps */ 1411 maps__for_each_entry_safe(kmaps, old_node, next) { 1412 struct map *old_map = old_node->map; 1413 1414 /* 1415 * We need to preserve eBPF maps even if they are 1416 * covered by kcore, because we need to access 1417 * eBPF dso for source data. 1418 */ 1419 if (old_map != map && !__map__is_bpf_prog(old_map)) 1420 maps__remove(kmaps, old_map); 1421 } 1422 machine->trampolines_mapped = false; 1423 1424 /* Find the kernel map using the '_stext' symbol */ 1425 if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) { 1426 u64 replacement_size = 0; 1427 struct map_list_node *new_node; 1428 1429 list_for_each_entry(new_node, &md.maps, node) { 1430 struct map *new_map = new_node->map; 1431 u64 new_size = new_map->end - new_map->start; 1432 1433 if (!(stext >= new_map->start && stext < new_map->end)) 1434 continue; 1435 1436 /* 1437 * On some architectures, ARM64 for example, the kernel 1438 * text can get allocated inside of the vmalloc segment. 1439 * Select the smallest matching segment, in case stext 1440 * falls within more than one in the list. 1441 */ 1442 if (!replacement_map || new_size < replacement_size) { 1443 replacement_map = new_map; 1444 replacement_size = new_size; 1445 } 1446 } 1447 } 1448 1449 if (!replacement_map) 1450 replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map; 1451 1452 /* Add new maps */ 1453 while (!list_empty(&md.maps)) { 1454 struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node); 1455 struct map *new_map = new_node->map; 1456 1457 list_del_init(&new_node->node); 1458 1459 if (new_map == replacement_map) { 1460 map->start = new_map->start; 1461 map->end = new_map->end; 1462 map->pgoff = new_map->pgoff; 1463 map->map_ip = new_map->map_ip; 1464 map->unmap_ip = new_map->unmap_ip; 1465 /* Ensure maps are correctly ordered */ 1466 map__get(map); 1467 maps__remove(kmaps, map); 1468 err = maps__insert(kmaps, map); 1469 map__put(map); 1470 map__put(new_map); 1471 if (err) 1472 goto out_err; 1473 } else { 1474 /* 1475 * Merge kcore map into existing maps, 1476 * and ensure that current maps (eBPF) 1477 * stay intact. 1478 */ 1479 if (maps__merge_in(kmaps, new_map)) { 1480 err = -EINVAL; 1481 goto out_err; 1482 } 1483 } 1484 free(new_node); 1485 } 1486 1487 if (machine__is(machine, "x86_64")) { 1488 u64 addr; 1489 1490 /* 1491 * If one of the corresponding symbols is there, assume the 1492 * entry trampoline maps are too. 1493 */ 1494 if (!kallsyms__get_function_start(kallsyms_filename, 1495 ENTRY_TRAMPOLINE_NAME, 1496 &addr)) 1497 machine->trampolines_mapped = true; 1498 } 1499 1500 /* 1501 * Set the data type and long name so that kcore can be read via 1502 * dso__data_read_addr(). 1503 */ 1504 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 1505 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE; 1506 else 1507 dso->binary_type = DSO_BINARY_TYPE__KCORE; 1508 dso__set_long_name(dso, strdup(kcore_filename), true); 1509 1510 close(fd); 1511 1512 if (map->prot & PROT_EXEC) 1513 pr_debug("Using %s for kernel object code\n", kcore_filename); 1514 else 1515 pr_debug("Using %s for kernel data\n", kcore_filename); 1516 1517 return 0; 1518 1519 out_err: 1520 while (!list_empty(&md.maps)) { 1521 struct map_list_node *list_node; 1522 1523 list_node = list_entry(md.maps.next, struct map_list_node, node); 1524 list_del_init(&list_node->node); 1525 map__zput(list_node->map); 1526 free(list_node); 1527 } 1528 close(fd); 1529 return err; 1530 } 1531 1532 /* 1533 * If the kernel is relocated at boot time, kallsyms won't match. Compute the 1534 * delta based on the relocation reference symbol. 1535 */ 1536 static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta) 1537 { 1538 u64 addr; 1539 1540 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) 1541 return 0; 1542 1543 if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr)) 1544 return -1; 1545 1546 *delta = addr - kmap->ref_reloc_sym->addr; 1547 return 0; 1548 } 1549 1550 int __dso__load_kallsyms(struct dso *dso, const char *filename, 1551 struct map *map, bool no_kcore) 1552 { 1553 struct kmap *kmap = map__kmap(map); 1554 u64 delta = 0; 1555 1556 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 1557 return -1; 1558 1559 if (!kmap || !kmap->kmaps) 1560 return -1; 1561 1562 if (dso__load_all_kallsyms(dso, filename) < 0) 1563 return -1; 1564 1565 if (kallsyms__delta(kmap, filename, &delta)) 1566 return -1; 1567 1568 symbols__fixup_end(&dso->symbols, true); 1569 symbols__fixup_duplicate(&dso->symbols); 1570 1571 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 1572 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; 1573 else 1574 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; 1575 1576 if (!no_kcore && !dso__load_kcore(dso, map, filename)) 1577 return maps__split_kallsyms_for_kcore(kmap->kmaps, dso); 1578 else 1579 return maps__split_kallsyms(kmap->kmaps, dso, delta, map); 1580 } 1581 1582 int dso__load_kallsyms(struct dso *dso, const char *filename, 1583 struct map *map) 1584 { 1585 return __dso__load_kallsyms(dso, filename, map, false); 1586 } 1587 1588 static int dso__load_perf_map(const char *map_path, struct dso *dso) 1589 { 1590 char *line = NULL; 1591 size_t n; 1592 FILE *file; 1593 int nr_syms = 0; 1594 1595 file = fopen(map_path, "r"); 1596 if (file == NULL) 1597 goto out_failure; 1598 1599 while (!feof(file)) { 1600 u64 start, size; 1601 struct symbol *sym; 1602 int line_len, len; 1603 1604 line_len = getline(&line, &n, file); 1605 if (line_len < 0) 1606 break; 1607 1608 if (!line) 1609 goto out_failure; 1610 1611 line[--line_len] = '\0'; /* \n */ 1612 1613 len = hex2u64(line, &start); 1614 1615 len++; 1616 if (len + 2 >= line_len) 1617 continue; 1618 1619 len += hex2u64(line + len, &size); 1620 1621 len++; 1622 if (len + 2 >= line_len) 1623 continue; 1624 1625 sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len); 1626 1627 if (sym == NULL) 1628 goto out_delete_line; 1629 1630 symbols__insert(&dso->symbols, sym); 1631 nr_syms++; 1632 } 1633 1634 free(line); 1635 fclose(file); 1636 1637 return nr_syms; 1638 1639 out_delete_line: 1640 free(line); 1641 out_failure: 1642 return -1; 1643 } 1644 1645 #ifdef HAVE_LIBBFD_SUPPORT 1646 #define PACKAGE 'perf' 1647 #include <bfd.h> 1648 1649 static int bfd_symbols__cmpvalue(const void *a, const void *b) 1650 { 1651 const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b; 1652 1653 if (bfd_asymbol_value(as) != bfd_asymbol_value(bs)) 1654 return bfd_asymbol_value(as) - bfd_asymbol_value(bs); 1655 1656 return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0]; 1657 } 1658 1659 static int bfd2elf_binding(asymbol *symbol) 1660 { 1661 if (symbol->flags & BSF_WEAK) 1662 return STB_WEAK; 1663 if (symbol->flags & BSF_GLOBAL) 1664 return STB_GLOBAL; 1665 if (symbol->flags & BSF_LOCAL) 1666 return STB_LOCAL; 1667 return -1; 1668 } 1669 1670 int dso__load_bfd_symbols(struct dso *dso, const char *debugfile) 1671 { 1672 int err = -1; 1673 long symbols_size, symbols_count, i; 1674 asection *section; 1675 asymbol **symbols, *sym; 1676 struct symbol *symbol; 1677 bfd *abfd; 1678 u64 start, len; 1679 1680 abfd = bfd_openr(debugfile, NULL); 1681 if (!abfd) 1682 return -1; 1683 1684 if (!bfd_check_format(abfd, bfd_object)) { 1685 pr_debug2("%s: cannot read %s bfd file.\n", __func__, 1686 dso->long_name); 1687 goto out_close; 1688 } 1689 1690 if (bfd_get_flavour(abfd) == bfd_target_elf_flavour) 1691 goto out_close; 1692 1693 symbols_size = bfd_get_symtab_upper_bound(abfd); 1694 if (symbols_size == 0) { 1695 bfd_close(abfd); 1696 return 0; 1697 } 1698 1699 if (symbols_size < 0) 1700 goto out_close; 1701 1702 symbols = malloc(symbols_size); 1703 if (!symbols) 1704 goto out_close; 1705 1706 symbols_count = bfd_canonicalize_symtab(abfd, symbols); 1707 if (symbols_count < 0) 1708 goto out_free; 1709 1710 section = bfd_get_section_by_name(abfd, ".text"); 1711 if (section) { 1712 for (i = 0; i < symbols_count; ++i) { 1713 if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") || 1714 !strcmp(bfd_asymbol_name(symbols[i]), "__image_base__")) 1715 break; 1716 } 1717 if (i < symbols_count) { 1718 /* PE symbols can only have 4 bytes, so use .text high bits */ 1719 dso->text_offset = section->vma - (u32)section->vma; 1720 dso->text_offset += (u32)bfd_asymbol_value(symbols[i]); 1721 } else { 1722 dso->text_offset = section->vma - section->filepos; 1723 } 1724 } 1725 1726 qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue); 1727 1728 #ifdef bfd_get_section 1729 #define bfd_asymbol_section bfd_get_section 1730 #endif 1731 for (i = 0; i < symbols_count; ++i) { 1732 sym = symbols[i]; 1733 section = bfd_asymbol_section(sym); 1734 if (bfd2elf_binding(sym) < 0) 1735 continue; 1736 1737 while (i + 1 < symbols_count && 1738 bfd_asymbol_section(symbols[i + 1]) == section && 1739 bfd2elf_binding(symbols[i + 1]) < 0) 1740 i++; 1741 1742 if (i + 1 < symbols_count && 1743 bfd_asymbol_section(symbols[i + 1]) == section) 1744 len = symbols[i + 1]->value - sym->value; 1745 else 1746 len = section->size - sym->value; 1747 1748 start = bfd_asymbol_value(sym) - dso->text_offset; 1749 symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC, 1750 bfd_asymbol_name(sym)); 1751 if (!symbol) 1752 goto out_free; 1753 1754 symbols__insert(&dso->symbols, symbol); 1755 } 1756 #ifdef bfd_get_section 1757 #undef bfd_asymbol_section 1758 #endif 1759 1760 symbols__fixup_end(&dso->symbols, false); 1761 symbols__fixup_duplicate(&dso->symbols); 1762 dso->adjust_symbols = 1; 1763 1764 err = 0; 1765 out_free: 1766 free(symbols); 1767 out_close: 1768 bfd_close(abfd); 1769 return err; 1770 } 1771 #endif 1772 1773 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, 1774 enum dso_binary_type type) 1775 { 1776 switch (type) { 1777 case DSO_BINARY_TYPE__JAVA_JIT: 1778 case DSO_BINARY_TYPE__DEBUGLINK: 1779 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: 1780 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: 1781 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: 1782 case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: 1783 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: 1784 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: 1785 return !kmod && dso->kernel == DSO_SPACE__USER; 1786 1787 case DSO_BINARY_TYPE__KALLSYMS: 1788 case DSO_BINARY_TYPE__VMLINUX: 1789 case DSO_BINARY_TYPE__KCORE: 1790 return dso->kernel == DSO_SPACE__KERNEL; 1791 1792 case DSO_BINARY_TYPE__GUEST_KALLSYMS: 1793 case DSO_BINARY_TYPE__GUEST_VMLINUX: 1794 case DSO_BINARY_TYPE__GUEST_KCORE: 1795 return dso->kernel == DSO_SPACE__KERNEL_GUEST; 1796 1797 case DSO_BINARY_TYPE__GUEST_KMODULE: 1798 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: 1799 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: 1800 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: 1801 /* 1802 * kernel modules know their symtab type - it's set when 1803 * creating a module dso in machine__addnew_module_map(). 1804 */ 1805 return kmod && dso->symtab_type == type; 1806 1807 case DSO_BINARY_TYPE__BUILD_ID_CACHE: 1808 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: 1809 return true; 1810 1811 case DSO_BINARY_TYPE__BPF_PROG_INFO: 1812 case DSO_BINARY_TYPE__BPF_IMAGE: 1813 case DSO_BINARY_TYPE__OOL: 1814 case DSO_BINARY_TYPE__NOT_FOUND: 1815 default: 1816 return false; 1817 } 1818 } 1819 1820 /* Checks for the existence of the perf-<pid>.map file in two different 1821 * locations. First, if the process is a separate mount namespace, check in 1822 * that namespace using the pid of the innermost pid namespace. If's not in a 1823 * namespace, or the file can't be found there, try in the mount namespace of 1824 * the tracing process using our view of its pid. 1825 */ 1826 static int dso__find_perf_map(char *filebuf, size_t bufsz, 1827 struct nsinfo **nsip) 1828 { 1829 struct nscookie nsc; 1830 struct nsinfo *nsi; 1831 struct nsinfo *nnsi; 1832 int rc = -1; 1833 1834 nsi = *nsip; 1835 1836 if (nsinfo__need_setns(nsi)) { 1837 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__nstgid(nsi)); 1838 nsinfo__mountns_enter(nsi, &nsc); 1839 rc = access(filebuf, R_OK); 1840 nsinfo__mountns_exit(&nsc); 1841 if (rc == 0) 1842 return rc; 1843 } 1844 1845 nnsi = nsinfo__copy(nsi); 1846 if (nnsi) { 1847 nsinfo__put(nsi); 1848 1849 nsinfo__clear_need_setns(nnsi); 1850 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__tgid(nnsi)); 1851 *nsip = nnsi; 1852 rc = 0; 1853 } 1854 1855 return rc; 1856 } 1857 1858 int dso__load(struct dso *dso, struct map *map) 1859 { 1860 char *name; 1861 int ret = -1; 1862 u_int i; 1863 struct machine *machine = NULL; 1864 char *root_dir = (char *) ""; 1865 int ss_pos = 0; 1866 struct symsrc ss_[2]; 1867 struct symsrc *syms_ss = NULL, *runtime_ss = NULL; 1868 bool kmod; 1869 bool perfmap; 1870 struct build_id bid; 1871 struct nscookie nsc; 1872 char newmapname[PATH_MAX]; 1873 const char *map_path = dso->long_name; 1874 1875 mutex_lock(&dso->lock); 1876 perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0; 1877 if (perfmap) { 1878 if (dso->nsinfo && (dso__find_perf_map(newmapname, 1879 sizeof(newmapname), &dso->nsinfo) == 0)) { 1880 map_path = newmapname; 1881 } 1882 } 1883 1884 nsinfo__mountns_enter(dso->nsinfo, &nsc); 1885 1886 /* check again under the dso->lock */ 1887 if (dso__loaded(dso)) { 1888 ret = 1; 1889 goto out; 1890 } 1891 1892 kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || 1893 dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || 1894 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE || 1895 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; 1896 1897 if (dso->kernel && !kmod) { 1898 if (dso->kernel == DSO_SPACE__KERNEL) 1899 ret = dso__load_kernel_sym(dso, map); 1900 else if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 1901 ret = dso__load_guest_kernel_sym(dso, map); 1902 1903 machine = maps__machine(map__kmaps(map)); 1904 if (machine__is(machine, "x86_64")) 1905 machine__map_x86_64_entry_trampolines(machine, dso); 1906 goto out; 1907 } 1908 1909 dso->adjust_symbols = 0; 1910 1911 if (perfmap) { 1912 ret = dso__load_perf_map(map_path, dso); 1913 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT : 1914 DSO_BINARY_TYPE__NOT_FOUND; 1915 goto out; 1916 } 1917 1918 if (machine) 1919 root_dir = machine->root_dir; 1920 1921 name = malloc(PATH_MAX); 1922 if (!name) 1923 goto out; 1924 1925 /* 1926 * Read the build id if possible. This is required for 1927 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work 1928 */ 1929 if (!dso->has_build_id && 1930 is_regular_file(dso->long_name)) { 1931 __symbol__join_symfs(name, PATH_MAX, dso->long_name); 1932 if (filename__read_build_id(name, &bid) > 0) 1933 dso__set_build_id(dso, &bid); 1934 } 1935 1936 /* 1937 * Iterate over candidate debug images. 1938 * Keep track of "interesting" ones (those which have a symtab, dynsym, 1939 * and/or opd section) for processing. 1940 */ 1941 for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) { 1942 struct symsrc *ss = &ss_[ss_pos]; 1943 bool next_slot = false; 1944 bool is_reg; 1945 bool nsexit; 1946 int bfdrc = -1; 1947 int sirc = -1; 1948 1949 enum dso_binary_type symtab_type = binary_type_symtab[i]; 1950 1951 nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE || 1952 symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO); 1953 1954 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type)) 1955 continue; 1956 1957 if (dso__read_binary_type_filename(dso, symtab_type, 1958 root_dir, name, PATH_MAX)) 1959 continue; 1960 1961 if (nsexit) 1962 nsinfo__mountns_exit(&nsc); 1963 1964 is_reg = is_regular_file(name); 1965 if (!is_reg && errno == ENOENT && dso->nsinfo) { 1966 char *new_name = filename_with_chroot(dso->nsinfo->pid, 1967 name); 1968 if (new_name) { 1969 is_reg = is_regular_file(new_name); 1970 strlcpy(name, new_name, PATH_MAX); 1971 free(new_name); 1972 } 1973 } 1974 1975 #ifdef HAVE_LIBBFD_SUPPORT 1976 if (is_reg) 1977 bfdrc = dso__load_bfd_symbols(dso, name); 1978 #endif 1979 if (is_reg && bfdrc < 0) 1980 sirc = symsrc__init(ss, dso, name, symtab_type); 1981 1982 if (nsexit) 1983 nsinfo__mountns_enter(dso->nsinfo, &nsc); 1984 1985 if (bfdrc == 0) { 1986 ret = 0; 1987 break; 1988 } 1989 1990 if (!is_reg || sirc < 0) 1991 continue; 1992 1993 if (!syms_ss && symsrc__has_symtab(ss)) { 1994 syms_ss = ss; 1995 next_slot = true; 1996 if (!dso->symsrc_filename) 1997 dso->symsrc_filename = strdup(name); 1998 } 1999 2000 if (!runtime_ss && symsrc__possibly_runtime(ss)) { 2001 runtime_ss = ss; 2002 next_slot = true; 2003 } 2004 2005 if (next_slot) { 2006 ss_pos++; 2007 2008 if (syms_ss && runtime_ss) 2009 break; 2010 } else { 2011 symsrc__destroy(ss); 2012 } 2013 2014 } 2015 2016 if (!runtime_ss && !syms_ss) 2017 goto out_free; 2018 2019 if (runtime_ss && !syms_ss) { 2020 syms_ss = runtime_ss; 2021 } 2022 2023 /* We'll have to hope for the best */ 2024 if (!runtime_ss && syms_ss) 2025 runtime_ss = syms_ss; 2026 2027 if (syms_ss) 2028 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod); 2029 else 2030 ret = -1; 2031 2032 if (ret > 0) { 2033 int nr_plt; 2034 2035 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss); 2036 if (nr_plt > 0) 2037 ret += nr_plt; 2038 } 2039 2040 for (; ss_pos > 0; ss_pos--) 2041 symsrc__destroy(&ss_[ss_pos - 1]); 2042 out_free: 2043 free(name); 2044 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) 2045 ret = 0; 2046 out: 2047 dso__set_loaded(dso); 2048 mutex_unlock(&dso->lock); 2049 nsinfo__mountns_exit(&nsc); 2050 2051 return ret; 2052 } 2053 2054 static int map__strcmp(const void *a, const void *b) 2055 { 2056 const struct dso *dso_a = map__dso(*(const struct map **)a); 2057 const struct dso *dso_b = map__dso(*(const struct map **)b); 2058 2059 return strcmp(dso_a->short_name, dso_b->short_name); 2060 } 2061 2062 static int map__strcmp_name(const void *name, const void *b) 2063 { 2064 const struct dso *dso = map__dso(*(const struct map **)b); 2065 2066 return strcmp(name, dso->short_name); 2067 } 2068 2069 void __maps__sort_by_name(struct maps *maps) 2070 { 2071 qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp); 2072 } 2073 2074 static int map__groups__sort_by_name_from_rbtree(struct maps *maps) 2075 { 2076 struct map_rb_node *rb_node; 2077 struct map **maps_by_name = realloc(maps__maps_by_name(maps), 2078 maps__nr_maps(maps) * sizeof(struct map *)); 2079 int i = 0; 2080 2081 if (maps_by_name == NULL) 2082 return -1; 2083 2084 up_read(maps__lock(maps)); 2085 down_write(maps__lock(maps)); 2086 2087 maps->maps_by_name = maps_by_name; 2088 maps->nr_maps_allocated = maps__nr_maps(maps); 2089 2090 maps__for_each_entry(maps, rb_node) 2091 maps_by_name[i++] = rb_node->map; 2092 2093 __maps__sort_by_name(maps); 2094 2095 up_write(maps__lock(maps)); 2096 down_read(maps__lock(maps)); 2097 2098 return 0; 2099 } 2100 2101 static struct map *__maps__find_by_name(struct maps *maps, const char *name) 2102 { 2103 struct map **mapp; 2104 2105 if (maps__maps_by_name(maps) == NULL && 2106 map__groups__sort_by_name_from_rbtree(maps)) 2107 return NULL; 2108 2109 mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps), 2110 sizeof(*mapp), map__strcmp_name); 2111 if (mapp) 2112 return *mapp; 2113 return NULL; 2114 } 2115 2116 struct map *maps__find_by_name(struct maps *maps, const char *name) 2117 { 2118 struct map_rb_node *rb_node; 2119 struct map *map; 2120 2121 down_read(maps__lock(maps)); 2122 2123 if (maps->last_search_by_name) { 2124 const struct dso *dso = map__dso(maps->last_search_by_name); 2125 2126 if (strcmp(dso->short_name, name) == 0) { 2127 map = maps->last_search_by_name; 2128 goto out_unlock; 2129 } 2130 } 2131 /* 2132 * If we have maps->maps_by_name, then the name isn't in the rbtree, 2133 * as maps->maps_by_name mirrors the rbtree when lookups by name are 2134 * made. 2135 */ 2136 map = __maps__find_by_name(maps, name); 2137 if (map || maps__maps_by_name(maps) != NULL) 2138 goto out_unlock; 2139 2140 /* Fallback to traversing the rbtree... */ 2141 maps__for_each_entry(maps, rb_node) { 2142 struct dso *dso; 2143 2144 map = rb_node->map; 2145 dso = map__dso(map); 2146 if (strcmp(dso->short_name, name) == 0) { 2147 maps->last_search_by_name = map; 2148 goto out_unlock; 2149 } 2150 } 2151 map = NULL; 2152 2153 out_unlock: 2154 up_read(maps__lock(maps)); 2155 return map; 2156 } 2157 2158 int dso__load_vmlinux(struct dso *dso, struct map *map, 2159 const char *vmlinux, bool vmlinux_allocated) 2160 { 2161 int err = -1; 2162 struct symsrc ss; 2163 char symfs_vmlinux[PATH_MAX]; 2164 enum dso_binary_type symtab_type; 2165 2166 if (vmlinux[0] == '/') 2167 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux); 2168 else 2169 symbol__join_symfs(symfs_vmlinux, vmlinux); 2170 2171 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 2172 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; 2173 else 2174 symtab_type = DSO_BINARY_TYPE__VMLINUX; 2175 2176 if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) 2177 return -1; 2178 2179 err = dso__load_sym(dso, map, &ss, &ss, 0); 2180 symsrc__destroy(&ss); 2181 2182 if (err > 0) { 2183 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 2184 dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX; 2185 else 2186 dso->binary_type = DSO_BINARY_TYPE__VMLINUX; 2187 dso__set_long_name(dso, vmlinux, vmlinux_allocated); 2188 dso__set_loaded(dso); 2189 pr_debug("Using %s for symbols\n", symfs_vmlinux); 2190 } 2191 2192 return err; 2193 } 2194 2195 int dso__load_vmlinux_path(struct dso *dso, struct map *map) 2196 { 2197 int i, err = 0; 2198 char *filename = NULL; 2199 2200 pr_debug("Looking at the vmlinux_path (%d entries long)\n", 2201 vmlinux_path__nr_entries + 1); 2202 2203 for (i = 0; i < vmlinux_path__nr_entries; ++i) { 2204 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false); 2205 if (err > 0) 2206 goto out; 2207 } 2208 2209 if (!symbol_conf.ignore_vmlinux_buildid) 2210 filename = dso__build_id_filename(dso, NULL, 0, false); 2211 if (filename != NULL) { 2212 err = dso__load_vmlinux(dso, map, filename, true); 2213 if (err > 0) 2214 goto out; 2215 free(filename); 2216 } 2217 out: 2218 return err; 2219 } 2220 2221 static bool visible_dir_filter(const char *name, struct dirent *d) 2222 { 2223 if (d->d_type != DT_DIR) 2224 return false; 2225 return lsdir_no_dot_filter(name, d); 2226 } 2227 2228 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz) 2229 { 2230 char kallsyms_filename[PATH_MAX]; 2231 int ret = -1; 2232 struct strlist *dirs; 2233 struct str_node *nd; 2234 2235 dirs = lsdir(dir, visible_dir_filter); 2236 if (!dirs) 2237 return -1; 2238 2239 strlist__for_each_entry(nd, dirs) { 2240 scnprintf(kallsyms_filename, sizeof(kallsyms_filename), 2241 "%s/%s/kallsyms", dir, nd->s); 2242 if (!validate_kcore_addresses(kallsyms_filename, map)) { 2243 strlcpy(dir, kallsyms_filename, dir_sz); 2244 ret = 0; 2245 break; 2246 } 2247 } 2248 2249 strlist__delete(dirs); 2250 2251 return ret; 2252 } 2253 2254 /* 2255 * Use open(O_RDONLY) to check readability directly instead of access(R_OK) 2256 * since access(R_OK) only checks with real UID/GID but open() use effective 2257 * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO). 2258 */ 2259 static bool filename__readable(const char *file) 2260 { 2261 int fd = open(file, O_RDONLY); 2262 if (fd < 0) 2263 return false; 2264 close(fd); 2265 return true; 2266 } 2267 2268 static char *dso__find_kallsyms(struct dso *dso, struct map *map) 2269 { 2270 struct build_id bid; 2271 char sbuild_id[SBUILD_ID_SIZE]; 2272 bool is_host = false; 2273 char path[PATH_MAX]; 2274 2275 if (!dso->has_build_id) { 2276 /* 2277 * Last resort, if we don't have a build-id and couldn't find 2278 * any vmlinux file, try the running kernel kallsyms table. 2279 */ 2280 goto proc_kallsyms; 2281 } 2282 2283 if (sysfs__read_build_id("/sys/kernel/notes", &bid) == 0) 2284 is_host = dso__build_id_equal(dso, &bid); 2285 2286 /* Try a fast path for /proc/kallsyms if possible */ 2287 if (is_host) { 2288 /* 2289 * Do not check the build-id cache, unless we know we cannot use 2290 * /proc/kcore or module maps don't match to /proc/kallsyms. 2291 * To check readability of /proc/kcore, do not use access(R_OK) 2292 * since /proc/kcore requires CAP_SYS_RAWIO to read and access 2293 * can't check it. 2294 */ 2295 if (filename__readable("/proc/kcore") && 2296 !validate_kcore_addresses("/proc/kallsyms", map)) 2297 goto proc_kallsyms; 2298 } 2299 2300 build_id__sprintf(&dso->bid, sbuild_id); 2301 2302 /* Find kallsyms in build-id cache with kcore */ 2303 scnprintf(path, sizeof(path), "%s/%s/%s", 2304 buildid_dir, DSO__NAME_KCORE, sbuild_id); 2305 2306 if (!find_matching_kcore(map, path, sizeof(path))) 2307 return strdup(path); 2308 2309 /* Use current /proc/kallsyms if possible */ 2310 if (is_host) { 2311 proc_kallsyms: 2312 return strdup("/proc/kallsyms"); 2313 } 2314 2315 /* Finally, find a cache of kallsyms */ 2316 if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) { 2317 pr_err("No kallsyms or vmlinux with build-id %s was found\n", 2318 sbuild_id); 2319 return NULL; 2320 } 2321 2322 return strdup(path); 2323 } 2324 2325 static int dso__load_kernel_sym(struct dso *dso, struct map *map) 2326 { 2327 int err; 2328 const char *kallsyms_filename = NULL; 2329 char *kallsyms_allocated_filename = NULL; 2330 char *filename = NULL; 2331 2332 /* 2333 * Step 1: if the user specified a kallsyms or vmlinux filename, use 2334 * it and only it, reporting errors to the user if it cannot be used. 2335 * 2336 * For instance, try to analyse an ARM perf.data file _without_ a 2337 * build-id, or if the user specifies the wrong path to the right 2338 * vmlinux file, obviously we can't fallback to another vmlinux (a 2339 * x86_86 one, on the machine where analysis is being performed, say), 2340 * or worse, /proc/kallsyms. 2341 * 2342 * If the specified file _has_ a build-id and there is a build-id 2343 * section in the perf.data file, we will still do the expected 2344 * validation in dso__load_vmlinux and will bail out if they don't 2345 * match. 2346 */ 2347 if (symbol_conf.kallsyms_name != NULL) { 2348 kallsyms_filename = symbol_conf.kallsyms_name; 2349 goto do_kallsyms; 2350 } 2351 2352 if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) { 2353 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false); 2354 } 2355 2356 /* 2357 * Before checking on common vmlinux locations, check if it's 2358 * stored as standard build id binary (not kallsyms) under 2359 * .debug cache. 2360 */ 2361 if (!symbol_conf.ignore_vmlinux_buildid) 2362 filename = __dso__build_id_filename(dso, NULL, 0, false, false); 2363 if (filename != NULL) { 2364 err = dso__load_vmlinux(dso, map, filename, true); 2365 if (err > 0) 2366 return err; 2367 free(filename); 2368 } 2369 2370 if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) { 2371 err = dso__load_vmlinux_path(dso, map); 2372 if (err > 0) 2373 return err; 2374 } 2375 2376 /* do not try local files if a symfs was given */ 2377 if (symbol_conf.symfs[0] != 0) 2378 return -1; 2379 2380 kallsyms_allocated_filename = dso__find_kallsyms(dso, map); 2381 if (!kallsyms_allocated_filename) 2382 return -1; 2383 2384 kallsyms_filename = kallsyms_allocated_filename; 2385 2386 do_kallsyms: 2387 err = dso__load_kallsyms(dso, kallsyms_filename, map); 2388 if (err > 0) 2389 pr_debug("Using %s for symbols\n", kallsyms_filename); 2390 free(kallsyms_allocated_filename); 2391 2392 if (err > 0 && !dso__is_kcore(dso)) { 2393 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS; 2394 dso__set_long_name(dso, DSO__NAME_KALLSYMS, false); 2395 map__fixup_start(map); 2396 map__fixup_end(map); 2397 } 2398 2399 return err; 2400 } 2401 2402 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map) 2403 { 2404 int err; 2405 const char *kallsyms_filename; 2406 struct machine *machine = maps__machine(map__kmaps(map)); 2407 char path[PATH_MAX]; 2408 2409 if (machine->kallsyms_filename) { 2410 kallsyms_filename = machine->kallsyms_filename; 2411 } else if (machine__is_default_guest(machine)) { 2412 /* 2413 * if the user specified a vmlinux filename, use it and only 2414 * it, reporting errors to the user if it cannot be used. 2415 * Or use file guest_kallsyms inputted by user on commandline 2416 */ 2417 if (symbol_conf.default_guest_vmlinux_name != NULL) { 2418 err = dso__load_vmlinux(dso, map, 2419 symbol_conf.default_guest_vmlinux_name, 2420 false); 2421 return err; 2422 } 2423 2424 kallsyms_filename = symbol_conf.default_guest_kallsyms; 2425 if (!kallsyms_filename) 2426 return -1; 2427 } else { 2428 sprintf(path, "%s/proc/kallsyms", machine->root_dir); 2429 kallsyms_filename = path; 2430 } 2431 2432 err = dso__load_kallsyms(dso, kallsyms_filename, map); 2433 if (err > 0) 2434 pr_debug("Using %s for symbols\n", kallsyms_filename); 2435 if (err > 0 && !dso__is_kcore(dso)) { 2436 dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; 2437 dso__set_long_name(dso, machine->mmap_name, false); 2438 map__fixup_start(map); 2439 map__fixup_end(map); 2440 } 2441 2442 return err; 2443 } 2444 2445 static void vmlinux_path__exit(void) 2446 { 2447 while (--vmlinux_path__nr_entries >= 0) 2448 zfree(&vmlinux_path[vmlinux_path__nr_entries]); 2449 vmlinux_path__nr_entries = 0; 2450 2451 zfree(&vmlinux_path); 2452 } 2453 2454 static const char * const vmlinux_paths[] = { 2455 "vmlinux", 2456 "/boot/vmlinux" 2457 }; 2458 2459 static const char * const vmlinux_paths_upd[] = { 2460 "/boot/vmlinux-%s", 2461 "/usr/lib/debug/boot/vmlinux-%s", 2462 "/lib/modules/%s/build/vmlinux", 2463 "/usr/lib/debug/lib/modules/%s/vmlinux", 2464 "/usr/lib/debug/boot/vmlinux-%s.debug" 2465 }; 2466 2467 static int vmlinux_path__add(const char *new_entry) 2468 { 2469 vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry); 2470 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 2471 return -1; 2472 ++vmlinux_path__nr_entries; 2473 2474 return 0; 2475 } 2476 2477 static int vmlinux_path__init(struct perf_env *env) 2478 { 2479 struct utsname uts; 2480 char bf[PATH_MAX]; 2481 char *kernel_version; 2482 unsigned int i; 2483 2484 vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) + 2485 ARRAY_SIZE(vmlinux_paths_upd))); 2486 if (vmlinux_path == NULL) 2487 return -1; 2488 2489 for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++) 2490 if (vmlinux_path__add(vmlinux_paths[i]) < 0) 2491 goto out_fail; 2492 2493 /* only try kernel version if no symfs was given */ 2494 if (symbol_conf.symfs[0] != 0) 2495 return 0; 2496 2497 if (env) { 2498 kernel_version = env->os_release; 2499 } else { 2500 if (uname(&uts) < 0) 2501 goto out_fail; 2502 2503 kernel_version = uts.release; 2504 } 2505 2506 for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) { 2507 snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version); 2508 if (vmlinux_path__add(bf) < 0) 2509 goto out_fail; 2510 } 2511 2512 return 0; 2513 2514 out_fail: 2515 vmlinux_path__exit(); 2516 return -1; 2517 } 2518 2519 int setup_list(struct strlist **list, const char *list_str, 2520 const char *list_name) 2521 { 2522 if (list_str == NULL) 2523 return 0; 2524 2525 *list = strlist__new(list_str, NULL); 2526 if (!*list) { 2527 pr_err("problems parsing %s list\n", list_name); 2528 return -1; 2529 } 2530 2531 symbol_conf.has_filter = true; 2532 return 0; 2533 } 2534 2535 int setup_intlist(struct intlist **list, const char *list_str, 2536 const char *list_name) 2537 { 2538 if (list_str == NULL) 2539 return 0; 2540 2541 *list = intlist__new(list_str); 2542 if (!*list) { 2543 pr_err("problems parsing %s list\n", list_name); 2544 return -1; 2545 } 2546 return 0; 2547 } 2548 2549 static int setup_addrlist(struct intlist **addr_list, struct strlist *sym_list) 2550 { 2551 struct str_node *pos, *tmp; 2552 unsigned long val; 2553 char *sep; 2554 const char *end; 2555 int i = 0, err; 2556 2557 *addr_list = intlist__new(NULL); 2558 if (!*addr_list) 2559 return -1; 2560 2561 strlist__for_each_entry_safe(pos, tmp, sym_list) { 2562 errno = 0; 2563 val = strtoul(pos->s, &sep, 16); 2564 if (errno || (sep == pos->s)) 2565 continue; 2566 2567 if (*sep != '\0') { 2568 end = pos->s + strlen(pos->s) - 1; 2569 while (end >= sep && isspace(*end)) 2570 end--; 2571 2572 if (end >= sep) 2573 continue; 2574 } 2575 2576 err = intlist__add(*addr_list, val); 2577 if (err) 2578 break; 2579 2580 strlist__remove(sym_list, pos); 2581 i++; 2582 } 2583 2584 if (i == 0) { 2585 intlist__delete(*addr_list); 2586 *addr_list = NULL; 2587 } 2588 2589 return 0; 2590 } 2591 2592 static bool symbol__read_kptr_restrict(void) 2593 { 2594 bool value = false; 2595 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r"); 2596 2597 if (fp != NULL) { 2598 char line[8]; 2599 2600 if (fgets(line, sizeof(line), fp) != NULL) 2601 value = perf_cap__capable(CAP_SYSLOG) ? 2602 (atoi(line) >= 2) : 2603 (atoi(line) != 0); 2604 2605 fclose(fp); 2606 } 2607 2608 /* Per kernel/kallsyms.c: 2609 * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG 2610 */ 2611 if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG)) 2612 value = true; 2613 2614 return value; 2615 } 2616 2617 int symbol__annotation_init(void) 2618 { 2619 if (symbol_conf.init_annotation) 2620 return 0; 2621 2622 if (symbol_conf.initialized) { 2623 pr_err("Annotation needs to be init before symbol__init()\n"); 2624 return -1; 2625 } 2626 2627 symbol_conf.priv_size += sizeof(struct annotation); 2628 symbol_conf.init_annotation = true; 2629 return 0; 2630 } 2631 2632 int symbol__init(struct perf_env *env) 2633 { 2634 const char *symfs; 2635 2636 if (symbol_conf.initialized) 2637 return 0; 2638 2639 symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64)); 2640 2641 symbol__elf_init(); 2642 2643 if (symbol_conf.sort_by_name) 2644 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - 2645 sizeof(struct symbol)); 2646 2647 if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0) 2648 return -1; 2649 2650 if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') { 2651 pr_err("'.' is the only non valid --field-separator argument\n"); 2652 return -1; 2653 } 2654 2655 if (setup_list(&symbol_conf.dso_list, 2656 symbol_conf.dso_list_str, "dso") < 0) 2657 return -1; 2658 2659 if (setup_list(&symbol_conf.comm_list, 2660 symbol_conf.comm_list_str, "comm") < 0) 2661 goto out_free_dso_list; 2662 2663 if (setup_intlist(&symbol_conf.pid_list, 2664 symbol_conf.pid_list_str, "pid") < 0) 2665 goto out_free_comm_list; 2666 2667 if (setup_intlist(&symbol_conf.tid_list, 2668 symbol_conf.tid_list_str, "tid") < 0) 2669 goto out_free_pid_list; 2670 2671 if (setup_list(&symbol_conf.sym_list, 2672 symbol_conf.sym_list_str, "symbol") < 0) 2673 goto out_free_tid_list; 2674 2675 if (symbol_conf.sym_list && 2676 setup_addrlist(&symbol_conf.addr_list, symbol_conf.sym_list) < 0) 2677 goto out_free_sym_list; 2678 2679 if (setup_list(&symbol_conf.bt_stop_list, 2680 symbol_conf.bt_stop_list_str, "symbol") < 0) 2681 goto out_free_sym_list; 2682 2683 /* 2684 * A path to symbols of "/" is identical to "" 2685 * reset here for simplicity. 2686 */ 2687 symfs = realpath(symbol_conf.symfs, NULL); 2688 if (symfs == NULL) 2689 symfs = symbol_conf.symfs; 2690 if (strcmp(symfs, "/") == 0) 2691 symbol_conf.symfs = ""; 2692 if (symfs != symbol_conf.symfs) 2693 free((void *)symfs); 2694 2695 symbol_conf.kptr_restrict = symbol__read_kptr_restrict(); 2696 2697 symbol_conf.initialized = true; 2698 return 0; 2699 2700 out_free_sym_list: 2701 strlist__delete(symbol_conf.sym_list); 2702 intlist__delete(symbol_conf.addr_list); 2703 out_free_tid_list: 2704 intlist__delete(symbol_conf.tid_list); 2705 out_free_pid_list: 2706 intlist__delete(symbol_conf.pid_list); 2707 out_free_comm_list: 2708 strlist__delete(symbol_conf.comm_list); 2709 out_free_dso_list: 2710 strlist__delete(symbol_conf.dso_list); 2711 return -1; 2712 } 2713 2714 void symbol__exit(void) 2715 { 2716 if (!symbol_conf.initialized) 2717 return; 2718 strlist__delete(symbol_conf.bt_stop_list); 2719 strlist__delete(symbol_conf.sym_list); 2720 strlist__delete(symbol_conf.dso_list); 2721 strlist__delete(symbol_conf.comm_list); 2722 intlist__delete(symbol_conf.tid_list); 2723 intlist__delete(symbol_conf.pid_list); 2724 intlist__delete(symbol_conf.addr_list); 2725 vmlinux_path__exit(); 2726 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; 2727 symbol_conf.bt_stop_list = NULL; 2728 symbol_conf.initialized = false; 2729 } 2730 2731 int symbol__config_symfs(const struct option *opt __maybe_unused, 2732 const char *dir, int unset __maybe_unused) 2733 { 2734 char *bf = NULL; 2735 int ret; 2736 2737 symbol_conf.symfs = strdup(dir); 2738 if (symbol_conf.symfs == NULL) 2739 return -ENOMEM; 2740 2741 /* skip the locally configured cache if a symfs is given, and 2742 * config buildid dir to symfs/.debug 2743 */ 2744 ret = asprintf(&bf, "%s/%s", dir, ".debug"); 2745 if (ret < 0) 2746 return -ENOMEM; 2747 2748 set_buildid_dir(bf); 2749 2750 free(bf); 2751 return 0; 2752 } 2753 2754 struct mem_info *mem_info__get(struct mem_info *mi) 2755 { 2756 if (mi) 2757 refcount_inc(&mi->refcnt); 2758 return mi; 2759 } 2760 2761 void mem_info__put(struct mem_info *mi) 2762 { 2763 if (mi && refcount_dec_and_test(&mi->refcnt)) 2764 free(mi); 2765 } 2766 2767 struct mem_info *mem_info__new(void) 2768 { 2769 struct mem_info *mi = zalloc(sizeof(*mi)); 2770 2771 if (mi) 2772 refcount_set(&mi->refcnt, 1); 2773 return mi; 2774 } 2775 2776 /* 2777 * Checks that user supplied symbol kernel files are accessible because 2778 * the default mechanism for accessing elf files fails silently. i.e. if 2779 * debug syms for a build ID aren't found perf carries on normally. When 2780 * they are user supplied we should assume that the user doesn't want to 2781 * silently fail. 2782 */ 2783 int symbol__validate_sym_arguments(void) 2784 { 2785 if (symbol_conf.vmlinux_name && 2786 access(symbol_conf.vmlinux_name, R_OK)) { 2787 pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name); 2788 return -EINVAL; 2789 } 2790 if (symbol_conf.kallsyms_name && 2791 access(symbol_conf.kallsyms_name, R_OK)) { 2792 pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name); 2793 return -EINVAL; 2794 } 2795 return 0; 2796 } 2797