1 // SPDX-License-Identifier: GPL-2.0 2 #include <dirent.h> 3 #include <errno.h> 4 #include <stdlib.h> 5 #include <stdio.h> 6 #include <string.h> 7 #include <linux/capability.h> 8 #include <linux/kernel.h> 9 #include <linux/mman.h> 10 #include <linux/string.h> 11 #include <linux/time64.h> 12 #include <sys/types.h> 13 #include <sys/stat.h> 14 #include <sys/param.h> 15 #include <fcntl.h> 16 #include <unistd.h> 17 #include <inttypes.h> 18 #include "annotate.h" 19 #include "build-id.h" 20 #include "cap.h" 21 #include "dso.h" 22 #include "util.h" // lsdir() 23 #include "debug.h" 24 #include "event.h" 25 #include "machine.h" 26 #include "map.h" 27 #include "symbol.h" 28 #include "map_symbol.h" 29 #include "mem-events.h" 30 #include "symsrc.h" 31 #include "strlist.h" 32 #include "intlist.h" 33 #include "namespaces.h" 34 #include "header.h" 35 #include "path.h" 36 #include <linux/ctype.h> 37 #include <linux/zalloc.h> 38 39 #include <elf.h> 40 #include <limits.h> 41 #include <symbol/kallsyms.h> 42 #include <sys/utsname.h> 43 44 static int dso__load_kernel_sym(struct dso *dso, struct map *map); 45 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map); 46 static bool symbol__is_idle(const char *name); 47 48 int vmlinux_path__nr_entries; 49 char **vmlinux_path; 50 51 struct map_list_node { 52 struct list_head node; 53 struct map *map; 54 }; 55 56 struct symbol_conf symbol_conf = { 57 .nanosecs = false, 58 .use_modules = true, 59 .try_vmlinux_path = true, 60 .demangle = true, 61 .demangle_kernel = false, 62 .cumulate_callchain = true, 63 .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */ 64 .show_hist_headers = true, 65 .symfs = "", 66 .event_group = true, 67 .inline_name = true, 68 .res_sample = 0, 69 }; 70 71 static enum dso_binary_type binary_type_symtab[] = { 72 DSO_BINARY_TYPE__KALLSYMS, 73 DSO_BINARY_TYPE__GUEST_KALLSYMS, 74 DSO_BINARY_TYPE__JAVA_JIT, 75 DSO_BINARY_TYPE__DEBUGLINK, 76 DSO_BINARY_TYPE__BUILD_ID_CACHE, 77 DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO, 78 DSO_BINARY_TYPE__FEDORA_DEBUGINFO, 79 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, 80 DSO_BINARY_TYPE__BUILDID_DEBUGINFO, 81 DSO_BINARY_TYPE__SYSTEM_PATH_DSO, 82 DSO_BINARY_TYPE__GUEST_KMODULE, 83 DSO_BINARY_TYPE__GUEST_KMODULE_COMP, 84 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, 85 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, 86 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, 87 DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, 88 DSO_BINARY_TYPE__NOT_FOUND, 89 }; 90 91 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) 92 93 static struct map_list_node *map_list_node__new(void) 94 { 95 return malloc(sizeof(struct map_list_node)); 96 } 97 98 static bool symbol_type__filter(char symbol_type) 99 { 100 symbol_type = toupper(symbol_type); 101 return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B'; 102 } 103 104 static int prefix_underscores_count(const char *str) 105 { 106 const char *tail = str; 107 108 while (*tail == '_') 109 tail++; 110 111 return tail - str; 112 } 113 114 const char * __weak arch__normalize_symbol_name(const char *name) 115 { 116 return name; 117 } 118 119 int __weak arch__compare_symbol_names(const char *namea, const char *nameb) 120 { 121 return strcmp(namea, nameb); 122 } 123 124 int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb, 125 unsigned int n) 126 { 127 return strncmp(namea, nameb, n); 128 } 129 130 int __weak arch__choose_best_symbol(struct symbol *syma, 131 struct symbol *symb __maybe_unused) 132 { 133 /* Avoid "SyS" kernel syscall aliases */ 134 if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3)) 135 return SYMBOL_B; 136 if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10)) 137 return SYMBOL_B; 138 139 return SYMBOL_A; 140 } 141 142 static int choose_best_symbol(struct symbol *syma, struct symbol *symb) 143 { 144 s64 a; 145 s64 b; 146 size_t na, nb; 147 148 /* Prefer a symbol with non zero length */ 149 a = syma->end - syma->start; 150 b = symb->end - symb->start; 151 if ((b == 0) && (a > 0)) 152 return SYMBOL_A; 153 else if ((a == 0) && (b > 0)) 154 return SYMBOL_B; 155 156 /* Prefer a non weak symbol over a weak one */ 157 a = syma->binding == STB_WEAK; 158 b = symb->binding == STB_WEAK; 159 if (b && !a) 160 return SYMBOL_A; 161 if (a && !b) 162 return SYMBOL_B; 163 164 /* Prefer a global symbol over a non global one */ 165 a = syma->binding == STB_GLOBAL; 166 b = symb->binding == STB_GLOBAL; 167 if (a && !b) 168 return SYMBOL_A; 169 if (b && !a) 170 return SYMBOL_B; 171 172 /* Prefer a symbol with less underscores */ 173 a = prefix_underscores_count(syma->name); 174 b = prefix_underscores_count(symb->name); 175 if (b > a) 176 return SYMBOL_A; 177 else if (a > b) 178 return SYMBOL_B; 179 180 /* Choose the symbol with the longest name */ 181 na = strlen(syma->name); 182 nb = strlen(symb->name); 183 if (na > nb) 184 return SYMBOL_A; 185 else if (na < nb) 186 return SYMBOL_B; 187 188 return arch__choose_best_symbol(syma, symb); 189 } 190 191 void symbols__fixup_duplicate(struct rb_root_cached *symbols) 192 { 193 struct rb_node *nd; 194 struct symbol *curr, *next; 195 196 if (symbol_conf.allow_aliases) 197 return; 198 199 nd = rb_first_cached(symbols); 200 201 while (nd) { 202 curr = rb_entry(nd, struct symbol, rb_node); 203 again: 204 nd = rb_next(&curr->rb_node); 205 next = rb_entry(nd, struct symbol, rb_node); 206 207 if (!nd) 208 break; 209 210 if (curr->start != next->start) 211 continue; 212 213 if (choose_best_symbol(curr, next) == SYMBOL_A) { 214 if (next->type == STT_GNU_IFUNC) 215 curr->ifunc_alias = true; 216 rb_erase_cached(&next->rb_node, symbols); 217 symbol__delete(next); 218 goto again; 219 } else { 220 if (curr->type == STT_GNU_IFUNC) 221 next->ifunc_alias = true; 222 nd = rb_next(&curr->rb_node); 223 rb_erase_cached(&curr->rb_node, symbols); 224 symbol__delete(curr); 225 } 226 } 227 } 228 229 /* Update zero-sized symbols using the address of the next symbol */ 230 void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms) 231 { 232 struct rb_node *nd, *prevnd = rb_first_cached(symbols); 233 struct symbol *curr, *prev; 234 235 if (prevnd == NULL) 236 return; 237 238 curr = rb_entry(prevnd, struct symbol, rb_node); 239 240 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { 241 prev = curr; 242 curr = rb_entry(nd, struct symbol, rb_node); 243 244 /* 245 * On some architecture kernel text segment start is located at 246 * some low memory address, while modules are located at high 247 * memory addresses (or vice versa). The gap between end of 248 * kernel text segment and beginning of first module's text 249 * segment is very big. Therefore do not fill this gap and do 250 * not assign it to the kernel dso map (kallsyms). 251 * 252 * In kallsyms, it determines module symbols using '[' character 253 * like in: 254 * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi] 255 */ 256 if (prev->end == prev->start) { 257 /* Last kernel/module symbol mapped to end of page */ 258 if (is_kallsyms && (!strchr(prev->name, '[') != 259 !strchr(curr->name, '['))) 260 prev->end = roundup(prev->end + 4096, 4096); 261 else 262 prev->end = curr->start; 263 264 pr_debug4("%s sym:%s end:%#" PRIx64 "\n", 265 __func__, prev->name, prev->end); 266 } 267 } 268 269 /* Last entry */ 270 if (curr->end == curr->start) 271 curr->end = roundup(curr->start, 4096) + 4096; 272 } 273 274 void maps__fixup_end(struct maps *maps) 275 { 276 struct map_rb_node *prev = NULL, *curr; 277 278 down_write(maps__lock(maps)); 279 280 maps__for_each_entry(maps, curr) { 281 if (prev != NULL && !map__end(prev->map)) 282 map__set_end(prev->map, map__start(curr->map)); 283 284 prev = curr; 285 } 286 287 /* 288 * We still haven't the actual symbols, so guess the 289 * last map final address. 290 */ 291 if (curr && !map__end(curr->map)) 292 map__set_end(curr->map, ~0ULL); 293 294 up_write(maps__lock(maps)); 295 } 296 297 struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name) 298 { 299 size_t namelen = strlen(name) + 1; 300 struct symbol *sym = calloc(1, (symbol_conf.priv_size + 301 sizeof(*sym) + namelen)); 302 if (sym == NULL) 303 return NULL; 304 305 if (symbol_conf.priv_size) { 306 if (symbol_conf.init_annotation) { 307 struct annotation *notes = (void *)sym; 308 annotation__init(notes); 309 } 310 sym = ((void *)sym) + symbol_conf.priv_size; 311 } 312 313 sym->start = start; 314 sym->end = len ? start + len : start; 315 sym->type = type; 316 sym->binding = binding; 317 sym->namelen = namelen - 1; 318 319 pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n", 320 __func__, name, start, sym->end); 321 memcpy(sym->name, name, namelen); 322 323 return sym; 324 } 325 326 void symbol__delete(struct symbol *sym) 327 { 328 if (symbol_conf.priv_size) { 329 if (symbol_conf.init_annotation) { 330 struct annotation *notes = symbol__annotation(sym); 331 332 annotation__exit(notes); 333 } 334 } 335 free(((void *)sym) - symbol_conf.priv_size); 336 } 337 338 void symbols__delete(struct rb_root_cached *symbols) 339 { 340 struct symbol *pos; 341 struct rb_node *next = rb_first_cached(symbols); 342 343 while (next) { 344 pos = rb_entry(next, struct symbol, rb_node); 345 next = rb_next(&pos->rb_node); 346 rb_erase_cached(&pos->rb_node, symbols); 347 symbol__delete(pos); 348 } 349 } 350 351 void __symbols__insert(struct rb_root_cached *symbols, 352 struct symbol *sym, bool kernel) 353 { 354 struct rb_node **p = &symbols->rb_root.rb_node; 355 struct rb_node *parent = NULL; 356 const u64 ip = sym->start; 357 struct symbol *s; 358 bool leftmost = true; 359 360 if (kernel) { 361 const char *name = sym->name; 362 /* 363 * ppc64 uses function descriptors and appends a '.' to the 364 * start of every instruction address. Remove it. 365 */ 366 if (name[0] == '.') 367 name++; 368 sym->idle = symbol__is_idle(name); 369 } 370 371 while (*p != NULL) { 372 parent = *p; 373 s = rb_entry(parent, struct symbol, rb_node); 374 if (ip < s->start) 375 p = &(*p)->rb_left; 376 else { 377 p = &(*p)->rb_right; 378 leftmost = false; 379 } 380 } 381 rb_link_node(&sym->rb_node, parent, p); 382 rb_insert_color_cached(&sym->rb_node, symbols, leftmost); 383 } 384 385 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym) 386 { 387 __symbols__insert(symbols, sym, false); 388 } 389 390 static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip) 391 { 392 struct rb_node *n; 393 394 if (symbols == NULL) 395 return NULL; 396 397 n = symbols->rb_root.rb_node; 398 399 while (n) { 400 struct symbol *s = rb_entry(n, struct symbol, rb_node); 401 402 if (ip < s->start) 403 n = n->rb_left; 404 else if (ip > s->end || (ip == s->end && ip != s->start)) 405 n = n->rb_right; 406 else 407 return s; 408 } 409 410 return NULL; 411 } 412 413 static struct symbol *symbols__first(struct rb_root_cached *symbols) 414 { 415 struct rb_node *n = rb_first_cached(symbols); 416 417 if (n) 418 return rb_entry(n, struct symbol, rb_node); 419 420 return NULL; 421 } 422 423 static struct symbol *symbols__last(struct rb_root_cached *symbols) 424 { 425 struct rb_node *n = rb_last(&symbols->rb_root); 426 427 if (n) 428 return rb_entry(n, struct symbol, rb_node); 429 430 return NULL; 431 } 432 433 static struct symbol *symbols__next(struct symbol *sym) 434 { 435 struct rb_node *n = rb_next(&sym->rb_node); 436 437 if (n) 438 return rb_entry(n, struct symbol, rb_node); 439 440 return NULL; 441 } 442 443 static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym) 444 { 445 struct rb_node **p = &symbols->rb_root.rb_node; 446 struct rb_node *parent = NULL; 447 struct symbol_name_rb_node *symn, *s; 448 bool leftmost = true; 449 450 symn = container_of(sym, struct symbol_name_rb_node, sym); 451 452 while (*p != NULL) { 453 parent = *p; 454 s = rb_entry(parent, struct symbol_name_rb_node, rb_node); 455 if (strcmp(sym->name, s->sym.name) < 0) 456 p = &(*p)->rb_left; 457 else { 458 p = &(*p)->rb_right; 459 leftmost = false; 460 } 461 } 462 rb_link_node(&symn->rb_node, parent, p); 463 rb_insert_color_cached(&symn->rb_node, symbols, leftmost); 464 } 465 466 static void symbols__sort_by_name(struct rb_root_cached *symbols, 467 struct rb_root_cached *source) 468 { 469 struct rb_node *nd; 470 471 for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) { 472 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 473 symbols__insert_by_name(symbols, pos); 474 } 475 } 476 477 int symbol__match_symbol_name(const char *name, const char *str, 478 enum symbol_tag_include includes) 479 { 480 const char *versioning; 481 482 if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY && 483 (versioning = strstr(name, "@@"))) { 484 int len = strlen(str); 485 486 if (len < versioning - name) 487 len = versioning - name; 488 489 return arch__compare_symbol_names_n(name, str, len); 490 } else 491 return arch__compare_symbol_names(name, str); 492 } 493 494 static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols, 495 const char *name, 496 enum symbol_tag_include includes) 497 { 498 struct rb_node *n; 499 struct symbol_name_rb_node *s = NULL; 500 501 if (symbols == NULL) 502 return NULL; 503 504 n = symbols->rb_root.rb_node; 505 506 while (n) { 507 int cmp; 508 509 s = rb_entry(n, struct symbol_name_rb_node, rb_node); 510 cmp = symbol__match_symbol_name(s->sym.name, name, includes); 511 512 if (cmp > 0) 513 n = n->rb_left; 514 else if (cmp < 0) 515 n = n->rb_right; 516 else 517 break; 518 } 519 520 if (n == NULL) 521 return NULL; 522 523 if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) 524 /* return first symbol that has same name (if any) */ 525 for (n = rb_prev(n); n; n = rb_prev(n)) { 526 struct symbol_name_rb_node *tmp; 527 528 tmp = rb_entry(n, struct symbol_name_rb_node, rb_node); 529 if (arch__compare_symbol_names(tmp->sym.name, s->sym.name)) 530 break; 531 532 s = tmp; 533 } 534 535 return &s->sym; 536 } 537 538 void dso__reset_find_symbol_cache(struct dso *dso) 539 { 540 dso->last_find_result.addr = 0; 541 dso->last_find_result.symbol = NULL; 542 } 543 544 void dso__insert_symbol(struct dso *dso, struct symbol *sym) 545 { 546 __symbols__insert(&dso->symbols, sym, dso->kernel); 547 548 /* update the symbol cache if necessary */ 549 if (dso->last_find_result.addr >= sym->start && 550 (dso->last_find_result.addr < sym->end || 551 sym->start == sym->end)) { 552 dso->last_find_result.symbol = sym; 553 } 554 } 555 556 void dso__delete_symbol(struct dso *dso, struct symbol *sym) 557 { 558 rb_erase_cached(&sym->rb_node, &dso->symbols); 559 symbol__delete(sym); 560 dso__reset_find_symbol_cache(dso); 561 } 562 563 struct symbol *dso__find_symbol(struct dso *dso, u64 addr) 564 { 565 if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) { 566 dso->last_find_result.addr = addr; 567 dso->last_find_result.symbol = symbols__find(&dso->symbols, addr); 568 } 569 570 return dso->last_find_result.symbol; 571 } 572 573 struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr) 574 { 575 return symbols__find(&dso->symbols, addr); 576 } 577 578 struct symbol *dso__first_symbol(struct dso *dso) 579 { 580 return symbols__first(&dso->symbols); 581 } 582 583 struct symbol *dso__last_symbol(struct dso *dso) 584 { 585 return symbols__last(&dso->symbols); 586 } 587 588 struct symbol *dso__next_symbol(struct symbol *sym) 589 { 590 return symbols__next(sym); 591 } 592 593 struct symbol *symbol__next_by_name(struct symbol *sym) 594 { 595 struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym); 596 struct rb_node *n = rb_next(&s->rb_node); 597 598 return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL; 599 } 600 601 /* 602 * Returns first symbol that matched with @name. 603 */ 604 struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name) 605 { 606 struct symbol *s = symbols__find_by_name(&dso->symbol_names, name, 607 SYMBOL_TAG_INCLUDE__NONE); 608 if (!s) 609 s = symbols__find_by_name(&dso->symbol_names, name, 610 SYMBOL_TAG_INCLUDE__DEFAULT_ONLY); 611 return s; 612 } 613 614 void dso__sort_by_name(struct dso *dso) 615 { 616 dso__set_sorted_by_name(dso); 617 return symbols__sort_by_name(&dso->symbol_names, &dso->symbols); 618 } 619 620 /* 621 * While we find nice hex chars, build a long_val. 622 * Return number of chars processed. 623 */ 624 static int hex2u64(const char *ptr, u64 *long_val) 625 { 626 char *p; 627 628 *long_val = strtoull(ptr, &p, 16); 629 630 return p - ptr; 631 } 632 633 634 int modules__parse(const char *filename, void *arg, 635 int (*process_module)(void *arg, const char *name, 636 u64 start, u64 size)) 637 { 638 char *line = NULL; 639 size_t n; 640 FILE *file; 641 int err = 0; 642 643 file = fopen(filename, "r"); 644 if (file == NULL) 645 return -1; 646 647 while (1) { 648 char name[PATH_MAX]; 649 u64 start, size; 650 char *sep, *endptr; 651 ssize_t line_len; 652 653 line_len = getline(&line, &n, file); 654 if (line_len < 0) { 655 if (feof(file)) 656 break; 657 err = -1; 658 goto out; 659 } 660 661 if (!line) { 662 err = -1; 663 goto out; 664 } 665 666 line[--line_len] = '\0'; /* \n */ 667 668 sep = strrchr(line, 'x'); 669 if (sep == NULL) 670 continue; 671 672 hex2u64(sep + 1, &start); 673 674 sep = strchr(line, ' '); 675 if (sep == NULL) 676 continue; 677 678 *sep = '\0'; 679 680 scnprintf(name, sizeof(name), "[%s]", line); 681 682 size = strtoul(sep + 1, &endptr, 0); 683 if (*endptr != ' ' && *endptr != '\t') 684 continue; 685 686 err = process_module(arg, name, start, size); 687 if (err) 688 break; 689 } 690 out: 691 free(line); 692 fclose(file); 693 return err; 694 } 695 696 /* 697 * These are symbols in the kernel image, so make sure that 698 * sym is from a kernel DSO. 699 */ 700 static bool symbol__is_idle(const char *name) 701 { 702 const char * const idle_symbols[] = { 703 "acpi_idle_do_entry", 704 "acpi_processor_ffh_cstate_enter", 705 "arch_cpu_idle", 706 "cpu_idle", 707 "cpu_startup_entry", 708 "idle_cpu", 709 "intel_idle", 710 "default_idle", 711 "native_safe_halt", 712 "enter_idle", 713 "exit_idle", 714 "mwait_idle", 715 "mwait_idle_with_hints", 716 "mwait_idle_with_hints.constprop.0", 717 "poll_idle", 718 "ppc64_runlatch_off", 719 "pseries_dedicated_idle_sleep", 720 "psw_idle", 721 "psw_idle_exit", 722 NULL 723 }; 724 int i; 725 static struct strlist *idle_symbols_list; 726 727 if (idle_symbols_list) 728 return strlist__has_entry(idle_symbols_list, name); 729 730 idle_symbols_list = strlist__new(NULL, NULL); 731 732 for (i = 0; idle_symbols[i]; i++) 733 strlist__add(idle_symbols_list, idle_symbols[i]); 734 735 return strlist__has_entry(idle_symbols_list, name); 736 } 737 738 static int map__process_kallsym_symbol(void *arg, const char *name, 739 char type, u64 start) 740 { 741 struct symbol *sym; 742 struct dso *dso = arg; 743 struct rb_root_cached *root = &dso->symbols; 744 745 if (!symbol_type__filter(type)) 746 return 0; 747 748 /* Ignore local symbols for ARM modules */ 749 if (name[0] == '$') 750 return 0; 751 752 /* 753 * module symbols are not sorted so we add all 754 * symbols, setting length to 0, and rely on 755 * symbols__fixup_end() to fix it up. 756 */ 757 sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name); 758 if (sym == NULL) 759 return -ENOMEM; 760 /* 761 * We will pass the symbols to the filter later, in 762 * map__split_kallsyms, when we have split the maps per module 763 */ 764 __symbols__insert(root, sym, !strchr(name, '[')); 765 766 return 0; 767 } 768 769 /* 770 * Loads the function entries in /proc/kallsyms into kernel_map->dso, 771 * so that we can in the next step set the symbol ->end address and then 772 * call kernel_maps__split_kallsyms. 773 */ 774 static int dso__load_all_kallsyms(struct dso *dso, const char *filename) 775 { 776 return kallsyms__parse(filename, dso, map__process_kallsym_symbol); 777 } 778 779 static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso) 780 { 781 struct map *curr_map; 782 struct symbol *pos; 783 int count = 0; 784 struct rb_root_cached old_root = dso->symbols; 785 struct rb_root_cached *root = &dso->symbols; 786 struct rb_node *next = rb_first_cached(root); 787 788 if (!kmaps) 789 return -1; 790 791 *root = RB_ROOT_CACHED; 792 793 while (next) { 794 struct dso *curr_map_dso; 795 char *module; 796 797 pos = rb_entry(next, struct symbol, rb_node); 798 next = rb_next(&pos->rb_node); 799 800 rb_erase_cached(&pos->rb_node, &old_root); 801 RB_CLEAR_NODE(&pos->rb_node); 802 module = strchr(pos->name, '\t'); 803 if (module) 804 *module = '\0'; 805 806 curr_map = maps__find(kmaps, pos->start); 807 808 if (!curr_map) { 809 symbol__delete(pos); 810 continue; 811 } 812 curr_map_dso = map__dso(curr_map); 813 pos->start -= map__start(curr_map) - map__pgoff(curr_map); 814 if (pos->end > map__end(curr_map)) 815 pos->end = map__end(curr_map); 816 if (pos->end) 817 pos->end -= map__start(curr_map) - map__pgoff(curr_map); 818 symbols__insert(&curr_map_dso->symbols, pos); 819 ++count; 820 } 821 822 /* Symbols have been adjusted */ 823 dso->adjust_symbols = 1; 824 825 return count; 826 } 827 828 /* 829 * Split the symbols into maps, making sure there are no overlaps, i.e. the 830 * kernel range is broken in several maps, named [kernel].N, as we don't have 831 * the original ELF section names vmlinux have. 832 */ 833 static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, 834 struct map *initial_map) 835 { 836 struct machine *machine; 837 struct map *curr_map = initial_map; 838 struct symbol *pos; 839 int count = 0, moved = 0; 840 struct rb_root_cached *root = &dso->symbols; 841 struct rb_node *next = rb_first_cached(root); 842 int kernel_range = 0; 843 bool x86_64; 844 845 if (!kmaps) 846 return -1; 847 848 machine = maps__machine(kmaps); 849 850 x86_64 = machine__is(machine, "x86_64"); 851 852 while (next) { 853 char *module; 854 855 pos = rb_entry(next, struct symbol, rb_node); 856 next = rb_next(&pos->rb_node); 857 858 module = strchr(pos->name, '\t'); 859 if (module) { 860 struct dso *curr_map_dso; 861 862 if (!symbol_conf.use_modules) 863 goto discard_symbol; 864 865 *module++ = '\0'; 866 curr_map_dso = map__dso(curr_map); 867 if (strcmp(curr_map_dso->short_name, module)) { 868 if (RC_CHK_ACCESS(curr_map) != RC_CHK_ACCESS(initial_map) && 869 dso->kernel == DSO_SPACE__KERNEL_GUEST && 870 machine__is_default_guest(machine)) { 871 /* 872 * We assume all symbols of a module are 873 * continuous in * kallsyms, so curr_map 874 * points to a module and all its 875 * symbols are in its kmap. Mark it as 876 * loaded. 877 */ 878 dso__set_loaded(curr_map_dso); 879 } 880 881 curr_map = maps__find_by_name(kmaps, module); 882 if (curr_map == NULL) { 883 pr_debug("%s/proc/{kallsyms,modules} " 884 "inconsistency while looking " 885 "for \"%s\" module!\n", 886 machine->root_dir, module); 887 curr_map = initial_map; 888 goto discard_symbol; 889 } 890 curr_map_dso = map__dso(curr_map); 891 if (curr_map_dso->loaded && 892 !machine__is_default_guest(machine)) 893 goto discard_symbol; 894 } 895 /* 896 * So that we look just like we get from .ko files, 897 * i.e. not prelinked, relative to initial_map->start. 898 */ 899 pos->start = map__map_ip(curr_map, pos->start); 900 pos->end = map__map_ip(curr_map, pos->end); 901 } else if (x86_64 && is_entry_trampoline(pos->name)) { 902 /* 903 * These symbols are not needed anymore since the 904 * trampoline maps refer to the text section and it's 905 * symbols instead. Avoid having to deal with 906 * relocations, and the assumption that the first symbol 907 * is the start of kernel text, by simply removing the 908 * symbols at this point. 909 */ 910 goto discard_symbol; 911 } else if (curr_map != initial_map) { 912 char dso_name[PATH_MAX]; 913 struct dso *ndso; 914 915 if (delta) { 916 /* Kernel was relocated at boot time */ 917 pos->start -= delta; 918 pos->end -= delta; 919 } 920 921 if (count == 0) { 922 curr_map = initial_map; 923 goto add_symbol; 924 } 925 926 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 927 snprintf(dso_name, sizeof(dso_name), 928 "[guest.kernel].%d", 929 kernel_range++); 930 else 931 snprintf(dso_name, sizeof(dso_name), 932 "[kernel].%d", 933 kernel_range++); 934 935 ndso = dso__new(dso_name); 936 if (ndso == NULL) 937 return -1; 938 939 ndso->kernel = dso->kernel; 940 941 curr_map = map__new2(pos->start, ndso); 942 if (curr_map == NULL) { 943 dso__put(ndso); 944 return -1; 945 } 946 947 map__set_map_ip(curr_map, identity__map_ip); 948 map__set_unmap_ip(curr_map, identity__map_ip); 949 if (maps__insert(kmaps, curr_map)) { 950 dso__put(ndso); 951 return -1; 952 } 953 ++kernel_range; 954 } else if (delta) { 955 /* Kernel was relocated at boot time */ 956 pos->start -= delta; 957 pos->end -= delta; 958 } 959 add_symbol: 960 if (curr_map != initial_map) { 961 struct dso *curr_map_dso = map__dso(curr_map); 962 963 rb_erase_cached(&pos->rb_node, root); 964 symbols__insert(&curr_map_dso->symbols, pos); 965 ++moved; 966 } else 967 ++count; 968 969 continue; 970 discard_symbol: 971 rb_erase_cached(&pos->rb_node, root); 972 symbol__delete(pos); 973 } 974 975 if (curr_map != initial_map && 976 dso->kernel == DSO_SPACE__KERNEL_GUEST && 977 machine__is_default_guest(maps__machine(kmaps))) { 978 dso__set_loaded(map__dso(curr_map)); 979 } 980 981 return count + moved; 982 } 983 984 bool symbol__restricted_filename(const char *filename, 985 const char *restricted_filename) 986 { 987 bool restricted = false; 988 989 if (symbol_conf.kptr_restrict) { 990 char *r = realpath(filename, NULL); 991 992 if (r != NULL) { 993 restricted = strcmp(r, restricted_filename) == 0; 994 free(r); 995 return restricted; 996 } 997 } 998 999 return restricted; 1000 } 1001 1002 struct module_info { 1003 struct rb_node rb_node; 1004 char *name; 1005 u64 start; 1006 }; 1007 1008 static void add_module(struct module_info *mi, struct rb_root *modules) 1009 { 1010 struct rb_node **p = &modules->rb_node; 1011 struct rb_node *parent = NULL; 1012 struct module_info *m; 1013 1014 while (*p != NULL) { 1015 parent = *p; 1016 m = rb_entry(parent, struct module_info, rb_node); 1017 if (strcmp(mi->name, m->name) < 0) 1018 p = &(*p)->rb_left; 1019 else 1020 p = &(*p)->rb_right; 1021 } 1022 rb_link_node(&mi->rb_node, parent, p); 1023 rb_insert_color(&mi->rb_node, modules); 1024 } 1025 1026 static void delete_modules(struct rb_root *modules) 1027 { 1028 struct module_info *mi; 1029 struct rb_node *next = rb_first(modules); 1030 1031 while (next) { 1032 mi = rb_entry(next, struct module_info, rb_node); 1033 next = rb_next(&mi->rb_node); 1034 rb_erase(&mi->rb_node, modules); 1035 zfree(&mi->name); 1036 free(mi); 1037 } 1038 } 1039 1040 static struct module_info *find_module(const char *name, 1041 struct rb_root *modules) 1042 { 1043 struct rb_node *n = modules->rb_node; 1044 1045 while (n) { 1046 struct module_info *m; 1047 int cmp; 1048 1049 m = rb_entry(n, struct module_info, rb_node); 1050 cmp = strcmp(name, m->name); 1051 if (cmp < 0) 1052 n = n->rb_left; 1053 else if (cmp > 0) 1054 n = n->rb_right; 1055 else 1056 return m; 1057 } 1058 1059 return NULL; 1060 } 1061 1062 static int __read_proc_modules(void *arg, const char *name, u64 start, 1063 u64 size __maybe_unused) 1064 { 1065 struct rb_root *modules = arg; 1066 struct module_info *mi; 1067 1068 mi = zalloc(sizeof(struct module_info)); 1069 if (!mi) 1070 return -ENOMEM; 1071 1072 mi->name = strdup(name); 1073 mi->start = start; 1074 1075 if (!mi->name) { 1076 free(mi); 1077 return -ENOMEM; 1078 } 1079 1080 add_module(mi, modules); 1081 1082 return 0; 1083 } 1084 1085 static int read_proc_modules(const char *filename, struct rb_root *modules) 1086 { 1087 if (symbol__restricted_filename(filename, "/proc/modules")) 1088 return -1; 1089 1090 if (modules__parse(filename, modules, __read_proc_modules)) { 1091 delete_modules(modules); 1092 return -1; 1093 } 1094 1095 return 0; 1096 } 1097 1098 int compare_proc_modules(const char *from, const char *to) 1099 { 1100 struct rb_root from_modules = RB_ROOT; 1101 struct rb_root to_modules = RB_ROOT; 1102 struct rb_node *from_node, *to_node; 1103 struct module_info *from_m, *to_m; 1104 int ret = -1; 1105 1106 if (read_proc_modules(from, &from_modules)) 1107 return -1; 1108 1109 if (read_proc_modules(to, &to_modules)) 1110 goto out_delete_from; 1111 1112 from_node = rb_first(&from_modules); 1113 to_node = rb_first(&to_modules); 1114 while (from_node) { 1115 if (!to_node) 1116 break; 1117 1118 from_m = rb_entry(from_node, struct module_info, rb_node); 1119 to_m = rb_entry(to_node, struct module_info, rb_node); 1120 1121 if (from_m->start != to_m->start || 1122 strcmp(from_m->name, to_m->name)) 1123 break; 1124 1125 from_node = rb_next(from_node); 1126 to_node = rb_next(to_node); 1127 } 1128 1129 if (!from_node && !to_node) 1130 ret = 0; 1131 1132 delete_modules(&to_modules); 1133 out_delete_from: 1134 delete_modules(&from_modules); 1135 1136 return ret; 1137 } 1138 1139 static int do_validate_kcore_modules(const char *filename, struct maps *kmaps) 1140 { 1141 struct rb_root modules = RB_ROOT; 1142 struct map_rb_node *old_node; 1143 int err; 1144 1145 err = read_proc_modules(filename, &modules); 1146 if (err) 1147 return err; 1148 1149 maps__for_each_entry(kmaps, old_node) { 1150 struct map *old_map = old_node->map; 1151 struct module_info *mi; 1152 struct dso *dso; 1153 1154 if (!__map__is_kmodule(old_map)) { 1155 continue; 1156 } 1157 dso = map__dso(old_map); 1158 /* Module must be in memory at the same address */ 1159 mi = find_module(dso->short_name, &modules); 1160 if (!mi || mi->start != map__start(old_map)) { 1161 err = -EINVAL; 1162 goto out; 1163 } 1164 } 1165 out: 1166 delete_modules(&modules); 1167 return err; 1168 } 1169 1170 /* 1171 * If kallsyms is referenced by name then we look for filename in the same 1172 * directory. 1173 */ 1174 static bool filename_from_kallsyms_filename(char *filename, 1175 const char *base_name, 1176 const char *kallsyms_filename) 1177 { 1178 char *name; 1179 1180 strcpy(filename, kallsyms_filename); 1181 name = strrchr(filename, '/'); 1182 if (!name) 1183 return false; 1184 1185 name += 1; 1186 1187 if (!strcmp(name, "kallsyms")) { 1188 strcpy(name, base_name); 1189 return true; 1190 } 1191 1192 return false; 1193 } 1194 1195 static int validate_kcore_modules(const char *kallsyms_filename, 1196 struct map *map) 1197 { 1198 struct maps *kmaps = map__kmaps(map); 1199 char modules_filename[PATH_MAX]; 1200 1201 if (!kmaps) 1202 return -EINVAL; 1203 1204 if (!filename_from_kallsyms_filename(modules_filename, "modules", 1205 kallsyms_filename)) 1206 return -EINVAL; 1207 1208 if (do_validate_kcore_modules(modules_filename, kmaps)) 1209 return -EINVAL; 1210 1211 return 0; 1212 } 1213 1214 static int validate_kcore_addresses(const char *kallsyms_filename, 1215 struct map *map) 1216 { 1217 struct kmap *kmap = map__kmap(map); 1218 1219 if (!kmap) 1220 return -EINVAL; 1221 1222 if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) { 1223 u64 start; 1224 1225 if (kallsyms__get_function_start(kallsyms_filename, 1226 kmap->ref_reloc_sym->name, &start)) 1227 return -ENOENT; 1228 if (start != kmap->ref_reloc_sym->addr) 1229 return -EINVAL; 1230 } 1231 1232 return validate_kcore_modules(kallsyms_filename, map); 1233 } 1234 1235 struct kcore_mapfn_data { 1236 struct dso *dso; 1237 struct list_head maps; 1238 }; 1239 1240 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) 1241 { 1242 struct kcore_mapfn_data *md = data; 1243 struct map_list_node *list_node = map_list_node__new(); 1244 1245 if (!list_node) 1246 return -ENOMEM; 1247 1248 list_node->map = map__new2(start, md->dso); 1249 if (!list_node->map) { 1250 free(list_node); 1251 return -ENOMEM; 1252 } 1253 1254 map__set_end(list_node->map, map__start(list_node->map) + len); 1255 map__set_pgoff(list_node->map, pgoff); 1256 1257 list_add(&list_node->node, &md->maps); 1258 1259 return 0; 1260 } 1261 1262 /* 1263 * Merges map into maps by splitting the new map within the existing map 1264 * regions. 1265 */ 1266 int maps__merge_in(struct maps *kmaps, struct map *new_map) 1267 { 1268 struct map_rb_node *rb_node; 1269 LIST_HEAD(merged); 1270 int err = 0; 1271 1272 maps__for_each_entry(kmaps, rb_node) { 1273 struct map *old_map = rb_node->map; 1274 1275 /* no overload with this one */ 1276 if (map__end(new_map) < map__start(old_map) || 1277 map__start(new_map) >= map__end(old_map)) 1278 continue; 1279 1280 if (map__start(new_map) < map__start(old_map)) { 1281 /* 1282 * |new...... 1283 * |old.... 1284 */ 1285 if (map__end(new_map) < map__end(old_map)) { 1286 /* 1287 * |new......| -> |new..| 1288 * |old....| -> |old....| 1289 */ 1290 map__set_end(new_map, map__start(old_map)); 1291 } else { 1292 /* 1293 * |new.............| -> |new..| |new..| 1294 * |old....| -> |old....| 1295 */ 1296 struct map_list_node *m = map_list_node__new(); 1297 1298 if (!m) { 1299 err = -ENOMEM; 1300 goto out; 1301 } 1302 1303 m->map = map__clone(new_map); 1304 if (!m->map) { 1305 free(m); 1306 err = -ENOMEM; 1307 goto out; 1308 } 1309 1310 map__set_end(m->map, map__start(old_map)); 1311 list_add_tail(&m->node, &merged); 1312 map__add_pgoff(new_map, map__end(old_map) - map__start(new_map)); 1313 map__set_start(new_map, map__end(old_map)); 1314 } 1315 } else { 1316 /* 1317 * |new...... 1318 * |old.... 1319 */ 1320 if (map__end(new_map) < map__end(old_map)) { 1321 /* 1322 * |new..| -> x 1323 * |old.........| -> |old.........| 1324 */ 1325 map__put(new_map); 1326 new_map = NULL; 1327 break; 1328 } else { 1329 /* 1330 * |new......| -> |new...| 1331 * |old....| -> |old....| 1332 */ 1333 map__add_pgoff(new_map, map__end(old_map) - map__start(new_map)); 1334 map__set_start(new_map, map__end(old_map)); 1335 } 1336 } 1337 } 1338 1339 out: 1340 while (!list_empty(&merged)) { 1341 struct map_list_node *old_node; 1342 1343 old_node = list_entry(merged.next, struct map_list_node, node); 1344 list_del_init(&old_node->node); 1345 if (!err) 1346 err = maps__insert(kmaps, old_node->map); 1347 map__put(old_node->map); 1348 free(old_node); 1349 } 1350 1351 if (new_map) { 1352 if (!err) 1353 err = maps__insert(kmaps, new_map); 1354 map__put(new_map); 1355 } 1356 return err; 1357 } 1358 1359 static int dso__load_kcore(struct dso *dso, struct map *map, 1360 const char *kallsyms_filename) 1361 { 1362 struct maps *kmaps = map__kmaps(map); 1363 struct kcore_mapfn_data md; 1364 struct map *replacement_map = NULL; 1365 struct map_rb_node *old_node, *next; 1366 struct machine *machine; 1367 bool is_64_bit; 1368 int err, fd; 1369 char kcore_filename[PATH_MAX]; 1370 u64 stext; 1371 1372 if (!kmaps) 1373 return -EINVAL; 1374 1375 machine = maps__machine(kmaps); 1376 1377 /* This function requires that the map is the kernel map */ 1378 if (!__map__is_kernel(map)) 1379 return -EINVAL; 1380 1381 if (!filename_from_kallsyms_filename(kcore_filename, "kcore", 1382 kallsyms_filename)) 1383 return -EINVAL; 1384 1385 /* Modules and kernel must be present at their original addresses */ 1386 if (validate_kcore_addresses(kallsyms_filename, map)) 1387 return -EINVAL; 1388 1389 md.dso = dso; 1390 INIT_LIST_HEAD(&md.maps); 1391 1392 fd = open(kcore_filename, O_RDONLY); 1393 if (fd < 0) { 1394 pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n", 1395 kcore_filename); 1396 return -EINVAL; 1397 } 1398 1399 /* Read new maps into temporary lists */ 1400 err = file__read_maps(fd, map__prot(map) & PROT_EXEC, kcore_mapfn, &md, 1401 &is_64_bit); 1402 if (err) 1403 goto out_err; 1404 dso->is_64_bit = is_64_bit; 1405 1406 if (list_empty(&md.maps)) { 1407 err = -EINVAL; 1408 goto out_err; 1409 } 1410 1411 /* Remove old maps */ 1412 maps__for_each_entry_safe(kmaps, old_node, next) { 1413 struct map *old_map = old_node->map; 1414 1415 /* 1416 * We need to preserve eBPF maps even if they are 1417 * covered by kcore, because we need to access 1418 * eBPF dso for source data. 1419 */ 1420 if (old_map != map && !__map__is_bpf_prog(old_map)) 1421 maps__remove(kmaps, old_map); 1422 } 1423 machine->trampolines_mapped = false; 1424 1425 /* Find the kernel map using the '_stext' symbol */ 1426 if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) { 1427 u64 replacement_size = 0; 1428 struct map_list_node *new_node; 1429 1430 list_for_each_entry(new_node, &md.maps, node) { 1431 struct map *new_map = new_node->map; 1432 u64 new_size = map__size(new_map); 1433 1434 if (!(stext >= map__start(new_map) && stext < map__end(new_map))) 1435 continue; 1436 1437 /* 1438 * On some architectures, ARM64 for example, the kernel 1439 * text can get allocated inside of the vmalloc segment. 1440 * Select the smallest matching segment, in case stext 1441 * falls within more than one in the list. 1442 */ 1443 if (!replacement_map || new_size < replacement_size) { 1444 replacement_map = new_map; 1445 replacement_size = new_size; 1446 } 1447 } 1448 } 1449 1450 if (!replacement_map) 1451 replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map; 1452 1453 /* Add new maps */ 1454 while (!list_empty(&md.maps)) { 1455 struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node); 1456 struct map *new_map = new_node->map; 1457 1458 list_del_init(&new_node->node); 1459 1460 if (RC_CHK_ACCESS(new_map) == RC_CHK_ACCESS(replacement_map)) { 1461 map__set_start(map, map__start(new_map)); 1462 map__set_end(map, map__end(new_map)); 1463 map__set_pgoff(map, map__pgoff(new_map)); 1464 map__set_map_ip(map, map__map_ip_ptr(new_map)); 1465 map__set_unmap_ip(map, map__unmap_ip_ptr(new_map)); 1466 /* Ensure maps are correctly ordered */ 1467 map__get(map); 1468 maps__remove(kmaps, map); 1469 err = maps__insert(kmaps, map); 1470 map__put(map); 1471 map__put(new_map); 1472 if (err) 1473 goto out_err; 1474 } else { 1475 /* 1476 * Merge kcore map into existing maps, 1477 * and ensure that current maps (eBPF) 1478 * stay intact. 1479 */ 1480 if (maps__merge_in(kmaps, new_map)) { 1481 err = -EINVAL; 1482 goto out_err; 1483 } 1484 } 1485 free(new_node); 1486 } 1487 1488 if (machine__is(machine, "x86_64")) { 1489 u64 addr; 1490 1491 /* 1492 * If one of the corresponding symbols is there, assume the 1493 * entry trampoline maps are too. 1494 */ 1495 if (!kallsyms__get_function_start(kallsyms_filename, 1496 ENTRY_TRAMPOLINE_NAME, 1497 &addr)) 1498 machine->trampolines_mapped = true; 1499 } 1500 1501 /* 1502 * Set the data type and long name so that kcore can be read via 1503 * dso__data_read_addr(). 1504 */ 1505 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 1506 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE; 1507 else 1508 dso->binary_type = DSO_BINARY_TYPE__KCORE; 1509 dso__set_long_name(dso, strdup(kcore_filename), true); 1510 1511 close(fd); 1512 1513 if (map__prot(map) & PROT_EXEC) 1514 pr_debug("Using %s for kernel object code\n", kcore_filename); 1515 else 1516 pr_debug("Using %s for kernel data\n", kcore_filename); 1517 1518 return 0; 1519 1520 out_err: 1521 while (!list_empty(&md.maps)) { 1522 struct map_list_node *list_node; 1523 1524 list_node = list_entry(md.maps.next, struct map_list_node, node); 1525 list_del_init(&list_node->node); 1526 map__zput(list_node->map); 1527 free(list_node); 1528 } 1529 close(fd); 1530 return err; 1531 } 1532 1533 /* 1534 * If the kernel is relocated at boot time, kallsyms won't match. Compute the 1535 * delta based on the relocation reference symbol. 1536 */ 1537 static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta) 1538 { 1539 u64 addr; 1540 1541 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name) 1542 return 0; 1543 1544 if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr)) 1545 return -1; 1546 1547 *delta = addr - kmap->ref_reloc_sym->addr; 1548 return 0; 1549 } 1550 1551 int __dso__load_kallsyms(struct dso *dso, const char *filename, 1552 struct map *map, bool no_kcore) 1553 { 1554 struct kmap *kmap = map__kmap(map); 1555 u64 delta = 0; 1556 1557 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 1558 return -1; 1559 1560 if (!kmap || !kmap->kmaps) 1561 return -1; 1562 1563 if (dso__load_all_kallsyms(dso, filename) < 0) 1564 return -1; 1565 1566 if (kallsyms__delta(kmap, filename, &delta)) 1567 return -1; 1568 1569 symbols__fixup_end(&dso->symbols, true); 1570 symbols__fixup_duplicate(&dso->symbols); 1571 1572 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 1573 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; 1574 else 1575 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS; 1576 1577 if (!no_kcore && !dso__load_kcore(dso, map, filename)) 1578 return maps__split_kallsyms_for_kcore(kmap->kmaps, dso); 1579 else 1580 return maps__split_kallsyms(kmap->kmaps, dso, delta, map); 1581 } 1582 1583 int dso__load_kallsyms(struct dso *dso, const char *filename, 1584 struct map *map) 1585 { 1586 return __dso__load_kallsyms(dso, filename, map, false); 1587 } 1588 1589 static int dso__load_perf_map(const char *map_path, struct dso *dso) 1590 { 1591 char *line = NULL; 1592 size_t n; 1593 FILE *file; 1594 int nr_syms = 0; 1595 1596 file = fopen(map_path, "r"); 1597 if (file == NULL) 1598 goto out_failure; 1599 1600 while (!feof(file)) { 1601 u64 start, size; 1602 struct symbol *sym; 1603 int line_len, len; 1604 1605 line_len = getline(&line, &n, file); 1606 if (line_len < 0) 1607 break; 1608 1609 if (!line) 1610 goto out_failure; 1611 1612 line[--line_len] = '\0'; /* \n */ 1613 1614 len = hex2u64(line, &start); 1615 1616 len++; 1617 if (len + 2 >= line_len) 1618 continue; 1619 1620 len += hex2u64(line + len, &size); 1621 1622 len++; 1623 if (len + 2 >= line_len) 1624 continue; 1625 1626 sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len); 1627 1628 if (sym == NULL) 1629 goto out_delete_line; 1630 1631 symbols__insert(&dso->symbols, sym); 1632 nr_syms++; 1633 } 1634 1635 free(line); 1636 fclose(file); 1637 1638 return nr_syms; 1639 1640 out_delete_line: 1641 free(line); 1642 out_failure: 1643 return -1; 1644 } 1645 1646 #ifdef HAVE_LIBBFD_SUPPORT 1647 #define PACKAGE 'perf' 1648 #include <bfd.h> 1649 1650 static int bfd_symbols__cmpvalue(const void *a, const void *b) 1651 { 1652 const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b; 1653 1654 if (bfd_asymbol_value(as) != bfd_asymbol_value(bs)) 1655 return bfd_asymbol_value(as) - bfd_asymbol_value(bs); 1656 1657 return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0]; 1658 } 1659 1660 static int bfd2elf_binding(asymbol *symbol) 1661 { 1662 if (symbol->flags & BSF_WEAK) 1663 return STB_WEAK; 1664 if (symbol->flags & BSF_GLOBAL) 1665 return STB_GLOBAL; 1666 if (symbol->flags & BSF_LOCAL) 1667 return STB_LOCAL; 1668 return -1; 1669 } 1670 1671 int dso__load_bfd_symbols(struct dso *dso, const char *debugfile) 1672 { 1673 int err = -1; 1674 long symbols_size, symbols_count, i; 1675 asection *section; 1676 asymbol **symbols, *sym; 1677 struct symbol *symbol; 1678 bfd *abfd; 1679 u64 start, len; 1680 1681 abfd = bfd_openr(debugfile, NULL); 1682 if (!abfd) 1683 return -1; 1684 1685 if (!bfd_check_format(abfd, bfd_object)) { 1686 pr_debug2("%s: cannot read %s bfd file.\n", __func__, 1687 dso->long_name); 1688 goto out_close; 1689 } 1690 1691 if (bfd_get_flavour(abfd) == bfd_target_elf_flavour) 1692 goto out_close; 1693 1694 symbols_size = bfd_get_symtab_upper_bound(abfd); 1695 if (symbols_size == 0) { 1696 bfd_close(abfd); 1697 return 0; 1698 } 1699 1700 if (symbols_size < 0) 1701 goto out_close; 1702 1703 symbols = malloc(symbols_size); 1704 if (!symbols) 1705 goto out_close; 1706 1707 symbols_count = bfd_canonicalize_symtab(abfd, symbols); 1708 if (symbols_count < 0) 1709 goto out_free; 1710 1711 section = bfd_get_section_by_name(abfd, ".text"); 1712 if (section) { 1713 for (i = 0; i < symbols_count; ++i) { 1714 if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") || 1715 !strcmp(bfd_asymbol_name(symbols[i]), "__image_base__")) 1716 break; 1717 } 1718 if (i < symbols_count) { 1719 /* PE symbols can only have 4 bytes, so use .text high bits */ 1720 dso->text_offset = section->vma - (u32)section->vma; 1721 dso->text_offset += (u32)bfd_asymbol_value(symbols[i]); 1722 } else { 1723 dso->text_offset = section->vma - section->filepos; 1724 } 1725 } 1726 1727 qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue); 1728 1729 #ifdef bfd_get_section 1730 #define bfd_asymbol_section bfd_get_section 1731 #endif 1732 for (i = 0; i < symbols_count; ++i) { 1733 sym = symbols[i]; 1734 section = bfd_asymbol_section(sym); 1735 if (bfd2elf_binding(sym) < 0) 1736 continue; 1737 1738 while (i + 1 < symbols_count && 1739 bfd_asymbol_section(symbols[i + 1]) == section && 1740 bfd2elf_binding(symbols[i + 1]) < 0) 1741 i++; 1742 1743 if (i + 1 < symbols_count && 1744 bfd_asymbol_section(symbols[i + 1]) == section) 1745 len = symbols[i + 1]->value - sym->value; 1746 else 1747 len = section->size - sym->value; 1748 1749 start = bfd_asymbol_value(sym) - dso->text_offset; 1750 symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC, 1751 bfd_asymbol_name(sym)); 1752 if (!symbol) 1753 goto out_free; 1754 1755 symbols__insert(&dso->symbols, symbol); 1756 } 1757 #ifdef bfd_get_section 1758 #undef bfd_asymbol_section 1759 #endif 1760 1761 symbols__fixup_end(&dso->symbols, false); 1762 symbols__fixup_duplicate(&dso->symbols); 1763 dso->adjust_symbols = 1; 1764 1765 err = 0; 1766 out_free: 1767 free(symbols); 1768 out_close: 1769 bfd_close(abfd); 1770 return err; 1771 } 1772 #endif 1773 1774 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod, 1775 enum dso_binary_type type) 1776 { 1777 switch (type) { 1778 case DSO_BINARY_TYPE__JAVA_JIT: 1779 case DSO_BINARY_TYPE__DEBUGLINK: 1780 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: 1781 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: 1782 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: 1783 case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: 1784 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: 1785 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: 1786 return !kmod && dso->kernel == DSO_SPACE__USER; 1787 1788 case DSO_BINARY_TYPE__KALLSYMS: 1789 case DSO_BINARY_TYPE__VMLINUX: 1790 case DSO_BINARY_TYPE__KCORE: 1791 return dso->kernel == DSO_SPACE__KERNEL; 1792 1793 case DSO_BINARY_TYPE__GUEST_KALLSYMS: 1794 case DSO_BINARY_TYPE__GUEST_VMLINUX: 1795 case DSO_BINARY_TYPE__GUEST_KCORE: 1796 return dso->kernel == DSO_SPACE__KERNEL_GUEST; 1797 1798 case DSO_BINARY_TYPE__GUEST_KMODULE: 1799 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: 1800 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: 1801 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: 1802 /* 1803 * kernel modules know their symtab type - it's set when 1804 * creating a module dso in machine__addnew_module_map(). 1805 */ 1806 return kmod && dso->symtab_type == type; 1807 1808 case DSO_BINARY_TYPE__BUILD_ID_CACHE: 1809 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: 1810 return true; 1811 1812 case DSO_BINARY_TYPE__BPF_PROG_INFO: 1813 case DSO_BINARY_TYPE__BPF_IMAGE: 1814 case DSO_BINARY_TYPE__OOL: 1815 case DSO_BINARY_TYPE__NOT_FOUND: 1816 default: 1817 return false; 1818 } 1819 } 1820 1821 /* Checks for the existence of the perf-<pid>.map file in two different 1822 * locations. First, if the process is a separate mount namespace, check in 1823 * that namespace using the pid of the innermost pid namespace. If's not in a 1824 * namespace, or the file can't be found there, try in the mount namespace of 1825 * the tracing process using our view of its pid. 1826 */ 1827 static int dso__find_perf_map(char *filebuf, size_t bufsz, 1828 struct nsinfo **nsip) 1829 { 1830 struct nscookie nsc; 1831 struct nsinfo *nsi; 1832 struct nsinfo *nnsi; 1833 int rc = -1; 1834 1835 nsi = *nsip; 1836 1837 if (nsinfo__need_setns(nsi)) { 1838 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__nstgid(nsi)); 1839 nsinfo__mountns_enter(nsi, &nsc); 1840 rc = access(filebuf, R_OK); 1841 nsinfo__mountns_exit(&nsc); 1842 if (rc == 0) 1843 return rc; 1844 } 1845 1846 nnsi = nsinfo__copy(nsi); 1847 if (nnsi) { 1848 nsinfo__put(nsi); 1849 1850 nsinfo__clear_need_setns(nnsi); 1851 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__tgid(nnsi)); 1852 *nsip = nnsi; 1853 rc = 0; 1854 } 1855 1856 return rc; 1857 } 1858 1859 int dso__load(struct dso *dso, struct map *map) 1860 { 1861 char *name; 1862 int ret = -1; 1863 u_int i; 1864 struct machine *machine = NULL; 1865 char *root_dir = (char *) ""; 1866 int ss_pos = 0; 1867 struct symsrc ss_[2]; 1868 struct symsrc *syms_ss = NULL, *runtime_ss = NULL; 1869 bool kmod; 1870 bool perfmap; 1871 struct build_id bid; 1872 struct nscookie nsc; 1873 char newmapname[PATH_MAX]; 1874 const char *map_path = dso->long_name; 1875 1876 mutex_lock(&dso->lock); 1877 perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0; 1878 if (perfmap) { 1879 if (dso->nsinfo && (dso__find_perf_map(newmapname, 1880 sizeof(newmapname), &dso->nsinfo) == 0)) { 1881 map_path = newmapname; 1882 } 1883 } 1884 1885 nsinfo__mountns_enter(dso->nsinfo, &nsc); 1886 1887 /* check again under the dso->lock */ 1888 if (dso__loaded(dso)) { 1889 ret = 1; 1890 goto out; 1891 } 1892 1893 kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE || 1894 dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || 1895 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE || 1896 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; 1897 1898 if (dso->kernel && !kmod) { 1899 if (dso->kernel == DSO_SPACE__KERNEL) 1900 ret = dso__load_kernel_sym(dso, map); 1901 else if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 1902 ret = dso__load_guest_kernel_sym(dso, map); 1903 1904 machine = maps__machine(map__kmaps(map)); 1905 if (machine__is(machine, "x86_64")) 1906 machine__map_x86_64_entry_trampolines(machine, dso); 1907 goto out; 1908 } 1909 1910 dso->adjust_symbols = 0; 1911 1912 if (perfmap) { 1913 ret = dso__load_perf_map(map_path, dso); 1914 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT : 1915 DSO_BINARY_TYPE__NOT_FOUND; 1916 goto out; 1917 } 1918 1919 if (machine) 1920 root_dir = machine->root_dir; 1921 1922 name = malloc(PATH_MAX); 1923 if (!name) 1924 goto out; 1925 1926 /* 1927 * Read the build id if possible. This is required for 1928 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work 1929 */ 1930 if (!dso->has_build_id && 1931 is_regular_file(dso->long_name)) { 1932 __symbol__join_symfs(name, PATH_MAX, dso->long_name); 1933 if (filename__read_build_id(name, &bid) > 0) 1934 dso__set_build_id(dso, &bid); 1935 } 1936 1937 /* 1938 * Iterate over candidate debug images. 1939 * Keep track of "interesting" ones (those which have a symtab, dynsym, 1940 * and/or opd section) for processing. 1941 */ 1942 for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) { 1943 struct symsrc *ss = &ss_[ss_pos]; 1944 bool next_slot = false; 1945 bool is_reg; 1946 bool nsexit; 1947 int bfdrc = -1; 1948 int sirc = -1; 1949 1950 enum dso_binary_type symtab_type = binary_type_symtab[i]; 1951 1952 nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE || 1953 symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO); 1954 1955 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type)) 1956 continue; 1957 1958 if (dso__read_binary_type_filename(dso, symtab_type, 1959 root_dir, name, PATH_MAX)) 1960 continue; 1961 1962 if (nsexit) 1963 nsinfo__mountns_exit(&nsc); 1964 1965 is_reg = is_regular_file(name); 1966 if (!is_reg && errno == ENOENT && dso->nsinfo) { 1967 char *new_name = dso__filename_with_chroot(dso, name); 1968 if (new_name) { 1969 is_reg = is_regular_file(new_name); 1970 strlcpy(name, new_name, PATH_MAX); 1971 free(new_name); 1972 } 1973 } 1974 1975 #ifdef HAVE_LIBBFD_SUPPORT 1976 if (is_reg) 1977 bfdrc = dso__load_bfd_symbols(dso, name); 1978 #endif 1979 if (is_reg && bfdrc < 0) 1980 sirc = symsrc__init(ss, dso, name, symtab_type); 1981 1982 if (nsexit) 1983 nsinfo__mountns_enter(dso->nsinfo, &nsc); 1984 1985 if (bfdrc == 0) { 1986 ret = 0; 1987 break; 1988 } 1989 1990 if (!is_reg || sirc < 0) 1991 continue; 1992 1993 if (!syms_ss && symsrc__has_symtab(ss)) { 1994 syms_ss = ss; 1995 next_slot = true; 1996 if (!dso->symsrc_filename) 1997 dso->symsrc_filename = strdup(name); 1998 } 1999 2000 if (!runtime_ss && symsrc__possibly_runtime(ss)) { 2001 runtime_ss = ss; 2002 next_slot = true; 2003 } 2004 2005 if (next_slot) { 2006 ss_pos++; 2007 2008 if (syms_ss && runtime_ss) 2009 break; 2010 } else { 2011 symsrc__destroy(ss); 2012 } 2013 2014 } 2015 2016 if (!runtime_ss && !syms_ss) 2017 goto out_free; 2018 2019 if (runtime_ss && !syms_ss) { 2020 syms_ss = runtime_ss; 2021 } 2022 2023 /* We'll have to hope for the best */ 2024 if (!runtime_ss && syms_ss) 2025 runtime_ss = syms_ss; 2026 2027 if (syms_ss) 2028 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod); 2029 else 2030 ret = -1; 2031 2032 if (ret > 0) { 2033 int nr_plt; 2034 2035 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss); 2036 if (nr_plt > 0) 2037 ret += nr_plt; 2038 } 2039 2040 for (; ss_pos > 0; ss_pos--) 2041 symsrc__destroy(&ss_[ss_pos - 1]); 2042 out_free: 2043 free(name); 2044 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) 2045 ret = 0; 2046 out: 2047 dso__set_loaded(dso); 2048 mutex_unlock(&dso->lock); 2049 nsinfo__mountns_exit(&nsc); 2050 2051 return ret; 2052 } 2053 2054 static int map__strcmp(const void *a, const void *b) 2055 { 2056 const struct map *map_a = *(const struct map **)a; 2057 const struct map *map_b = *(const struct map **)b; 2058 const struct dso *dso_a = map__dso(map_a); 2059 const struct dso *dso_b = map__dso(map_b); 2060 int ret = strcmp(dso_a->short_name, dso_b->short_name); 2061 2062 if (ret == 0 && map_a != map_b) { 2063 /* 2064 * Ensure distinct but name equal maps have an order in part to 2065 * aid reference counting. 2066 */ 2067 ret = (int)map__start(map_a) - (int)map__start(map_b); 2068 if (ret == 0) 2069 ret = (int)((intptr_t)map_a - (intptr_t)map_b); 2070 } 2071 2072 return ret; 2073 } 2074 2075 static int map__strcmp_name(const void *name, const void *b) 2076 { 2077 const struct dso *dso = map__dso(*(const struct map **)b); 2078 2079 return strcmp(name, dso->short_name); 2080 } 2081 2082 void __maps__sort_by_name(struct maps *maps) 2083 { 2084 qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp); 2085 } 2086 2087 static int map__groups__sort_by_name_from_rbtree(struct maps *maps) 2088 { 2089 struct map_rb_node *rb_node; 2090 struct map **maps_by_name = realloc(maps__maps_by_name(maps), 2091 maps__nr_maps(maps) * sizeof(struct map *)); 2092 int i = 0; 2093 2094 if (maps_by_name == NULL) 2095 return -1; 2096 2097 up_read(maps__lock(maps)); 2098 down_write(maps__lock(maps)); 2099 2100 RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name; 2101 RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps); 2102 2103 maps__for_each_entry(maps, rb_node) 2104 maps_by_name[i++] = map__get(rb_node->map); 2105 2106 __maps__sort_by_name(maps); 2107 2108 up_write(maps__lock(maps)); 2109 down_read(maps__lock(maps)); 2110 2111 return 0; 2112 } 2113 2114 static struct map *__maps__find_by_name(struct maps *maps, const char *name) 2115 { 2116 struct map **mapp; 2117 2118 if (maps__maps_by_name(maps) == NULL && 2119 map__groups__sort_by_name_from_rbtree(maps)) 2120 return NULL; 2121 2122 mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps), 2123 sizeof(*mapp), map__strcmp_name); 2124 if (mapp) 2125 return *mapp; 2126 return NULL; 2127 } 2128 2129 struct map *maps__find_by_name(struct maps *maps, const char *name) 2130 { 2131 struct map_rb_node *rb_node; 2132 struct map *map; 2133 2134 down_read(maps__lock(maps)); 2135 2136 2137 if (RC_CHK_ACCESS(maps)->last_search_by_name) { 2138 const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name); 2139 2140 if (strcmp(dso->short_name, name) == 0) { 2141 map = RC_CHK_ACCESS(maps)->last_search_by_name; 2142 goto out_unlock; 2143 } 2144 } 2145 /* 2146 * If we have maps->maps_by_name, then the name isn't in the rbtree, 2147 * as maps->maps_by_name mirrors the rbtree when lookups by name are 2148 * made. 2149 */ 2150 map = __maps__find_by_name(maps, name); 2151 if (map || maps__maps_by_name(maps) != NULL) 2152 goto out_unlock; 2153 2154 /* Fallback to traversing the rbtree... */ 2155 maps__for_each_entry(maps, rb_node) { 2156 struct dso *dso; 2157 2158 map = rb_node->map; 2159 dso = map__dso(map); 2160 if (strcmp(dso->short_name, name) == 0) { 2161 RC_CHK_ACCESS(maps)->last_search_by_name = map; 2162 goto out_unlock; 2163 } 2164 } 2165 map = NULL; 2166 2167 out_unlock: 2168 up_read(maps__lock(maps)); 2169 return map; 2170 } 2171 2172 int dso__load_vmlinux(struct dso *dso, struct map *map, 2173 const char *vmlinux, bool vmlinux_allocated) 2174 { 2175 int err = -1; 2176 struct symsrc ss; 2177 char symfs_vmlinux[PATH_MAX]; 2178 enum dso_binary_type symtab_type; 2179 2180 if (vmlinux[0] == '/') 2181 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux); 2182 else 2183 symbol__join_symfs(symfs_vmlinux, vmlinux); 2184 2185 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 2186 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; 2187 else 2188 symtab_type = DSO_BINARY_TYPE__VMLINUX; 2189 2190 if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) 2191 return -1; 2192 2193 err = dso__load_sym(dso, map, &ss, &ss, 0); 2194 symsrc__destroy(&ss); 2195 2196 if (err > 0) { 2197 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 2198 dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX; 2199 else 2200 dso->binary_type = DSO_BINARY_TYPE__VMLINUX; 2201 dso__set_long_name(dso, vmlinux, vmlinux_allocated); 2202 dso__set_loaded(dso); 2203 pr_debug("Using %s for symbols\n", symfs_vmlinux); 2204 } 2205 2206 return err; 2207 } 2208 2209 int dso__load_vmlinux_path(struct dso *dso, struct map *map) 2210 { 2211 int i, err = 0; 2212 char *filename = NULL; 2213 2214 pr_debug("Looking at the vmlinux_path (%d entries long)\n", 2215 vmlinux_path__nr_entries + 1); 2216 2217 for (i = 0; i < vmlinux_path__nr_entries; ++i) { 2218 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false); 2219 if (err > 0) 2220 goto out; 2221 } 2222 2223 if (!symbol_conf.ignore_vmlinux_buildid) 2224 filename = dso__build_id_filename(dso, NULL, 0, false); 2225 if (filename != NULL) { 2226 err = dso__load_vmlinux(dso, map, filename, true); 2227 if (err > 0) 2228 goto out; 2229 free(filename); 2230 } 2231 out: 2232 return err; 2233 } 2234 2235 static bool visible_dir_filter(const char *name, struct dirent *d) 2236 { 2237 if (d->d_type != DT_DIR) 2238 return false; 2239 return lsdir_no_dot_filter(name, d); 2240 } 2241 2242 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz) 2243 { 2244 char kallsyms_filename[PATH_MAX]; 2245 int ret = -1; 2246 struct strlist *dirs; 2247 struct str_node *nd; 2248 2249 dirs = lsdir(dir, visible_dir_filter); 2250 if (!dirs) 2251 return -1; 2252 2253 strlist__for_each_entry(nd, dirs) { 2254 scnprintf(kallsyms_filename, sizeof(kallsyms_filename), 2255 "%s/%s/kallsyms", dir, nd->s); 2256 if (!validate_kcore_addresses(kallsyms_filename, map)) { 2257 strlcpy(dir, kallsyms_filename, dir_sz); 2258 ret = 0; 2259 break; 2260 } 2261 } 2262 2263 strlist__delete(dirs); 2264 2265 return ret; 2266 } 2267 2268 /* 2269 * Use open(O_RDONLY) to check readability directly instead of access(R_OK) 2270 * since access(R_OK) only checks with real UID/GID but open() use effective 2271 * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO). 2272 */ 2273 static bool filename__readable(const char *file) 2274 { 2275 int fd = open(file, O_RDONLY); 2276 if (fd < 0) 2277 return false; 2278 close(fd); 2279 return true; 2280 } 2281 2282 static char *dso__find_kallsyms(struct dso *dso, struct map *map) 2283 { 2284 struct build_id bid; 2285 char sbuild_id[SBUILD_ID_SIZE]; 2286 bool is_host = false; 2287 char path[PATH_MAX]; 2288 2289 if (!dso->has_build_id) { 2290 /* 2291 * Last resort, if we don't have a build-id and couldn't find 2292 * any vmlinux file, try the running kernel kallsyms table. 2293 */ 2294 goto proc_kallsyms; 2295 } 2296 2297 if (sysfs__read_build_id("/sys/kernel/notes", &bid) == 0) 2298 is_host = dso__build_id_equal(dso, &bid); 2299 2300 /* Try a fast path for /proc/kallsyms if possible */ 2301 if (is_host) { 2302 /* 2303 * Do not check the build-id cache, unless we know we cannot use 2304 * /proc/kcore or module maps don't match to /proc/kallsyms. 2305 * To check readability of /proc/kcore, do not use access(R_OK) 2306 * since /proc/kcore requires CAP_SYS_RAWIO to read and access 2307 * can't check it. 2308 */ 2309 if (filename__readable("/proc/kcore") && 2310 !validate_kcore_addresses("/proc/kallsyms", map)) 2311 goto proc_kallsyms; 2312 } 2313 2314 build_id__sprintf(&dso->bid, sbuild_id); 2315 2316 /* Find kallsyms in build-id cache with kcore */ 2317 scnprintf(path, sizeof(path), "%s/%s/%s", 2318 buildid_dir, DSO__NAME_KCORE, sbuild_id); 2319 2320 if (!find_matching_kcore(map, path, sizeof(path))) 2321 return strdup(path); 2322 2323 /* Use current /proc/kallsyms if possible */ 2324 if (is_host) { 2325 proc_kallsyms: 2326 return strdup("/proc/kallsyms"); 2327 } 2328 2329 /* Finally, find a cache of kallsyms */ 2330 if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) { 2331 pr_err("No kallsyms or vmlinux with build-id %s was found\n", 2332 sbuild_id); 2333 return NULL; 2334 } 2335 2336 return strdup(path); 2337 } 2338 2339 static int dso__load_kernel_sym(struct dso *dso, struct map *map) 2340 { 2341 int err; 2342 const char *kallsyms_filename = NULL; 2343 char *kallsyms_allocated_filename = NULL; 2344 char *filename = NULL; 2345 2346 /* 2347 * Step 1: if the user specified a kallsyms or vmlinux filename, use 2348 * it and only it, reporting errors to the user if it cannot be used. 2349 * 2350 * For instance, try to analyse an ARM perf.data file _without_ a 2351 * build-id, or if the user specifies the wrong path to the right 2352 * vmlinux file, obviously we can't fallback to another vmlinux (a 2353 * x86_86 one, on the machine where analysis is being performed, say), 2354 * or worse, /proc/kallsyms. 2355 * 2356 * If the specified file _has_ a build-id and there is a build-id 2357 * section in the perf.data file, we will still do the expected 2358 * validation in dso__load_vmlinux and will bail out if they don't 2359 * match. 2360 */ 2361 if (symbol_conf.kallsyms_name != NULL) { 2362 kallsyms_filename = symbol_conf.kallsyms_name; 2363 goto do_kallsyms; 2364 } 2365 2366 if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) { 2367 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false); 2368 } 2369 2370 /* 2371 * Before checking on common vmlinux locations, check if it's 2372 * stored as standard build id binary (not kallsyms) under 2373 * .debug cache. 2374 */ 2375 if (!symbol_conf.ignore_vmlinux_buildid) 2376 filename = __dso__build_id_filename(dso, NULL, 0, false, false); 2377 if (filename != NULL) { 2378 err = dso__load_vmlinux(dso, map, filename, true); 2379 if (err > 0) 2380 return err; 2381 free(filename); 2382 } 2383 2384 if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) { 2385 err = dso__load_vmlinux_path(dso, map); 2386 if (err > 0) 2387 return err; 2388 } 2389 2390 /* do not try local files if a symfs was given */ 2391 if (symbol_conf.symfs[0] != 0) 2392 return -1; 2393 2394 kallsyms_allocated_filename = dso__find_kallsyms(dso, map); 2395 if (!kallsyms_allocated_filename) 2396 return -1; 2397 2398 kallsyms_filename = kallsyms_allocated_filename; 2399 2400 do_kallsyms: 2401 err = dso__load_kallsyms(dso, kallsyms_filename, map); 2402 if (err > 0) 2403 pr_debug("Using %s for symbols\n", kallsyms_filename); 2404 free(kallsyms_allocated_filename); 2405 2406 if (err > 0 && !dso__is_kcore(dso)) { 2407 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS; 2408 dso__set_long_name(dso, DSO__NAME_KALLSYMS, false); 2409 map__fixup_start(map); 2410 map__fixup_end(map); 2411 } 2412 2413 return err; 2414 } 2415 2416 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map) 2417 { 2418 int err; 2419 const char *kallsyms_filename; 2420 struct machine *machine = maps__machine(map__kmaps(map)); 2421 char path[PATH_MAX]; 2422 2423 if (machine->kallsyms_filename) { 2424 kallsyms_filename = machine->kallsyms_filename; 2425 } else if (machine__is_default_guest(machine)) { 2426 /* 2427 * if the user specified a vmlinux filename, use it and only 2428 * it, reporting errors to the user if it cannot be used. 2429 * Or use file guest_kallsyms inputted by user on commandline 2430 */ 2431 if (symbol_conf.default_guest_vmlinux_name != NULL) { 2432 err = dso__load_vmlinux(dso, map, 2433 symbol_conf.default_guest_vmlinux_name, 2434 false); 2435 return err; 2436 } 2437 2438 kallsyms_filename = symbol_conf.default_guest_kallsyms; 2439 if (!kallsyms_filename) 2440 return -1; 2441 } else { 2442 sprintf(path, "%s/proc/kallsyms", machine->root_dir); 2443 kallsyms_filename = path; 2444 } 2445 2446 err = dso__load_kallsyms(dso, kallsyms_filename, map); 2447 if (err > 0) 2448 pr_debug("Using %s for symbols\n", kallsyms_filename); 2449 if (err > 0 && !dso__is_kcore(dso)) { 2450 dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; 2451 dso__set_long_name(dso, machine->mmap_name, false); 2452 map__fixup_start(map); 2453 map__fixup_end(map); 2454 } 2455 2456 return err; 2457 } 2458 2459 static void vmlinux_path__exit(void) 2460 { 2461 while (--vmlinux_path__nr_entries >= 0) 2462 zfree(&vmlinux_path[vmlinux_path__nr_entries]); 2463 vmlinux_path__nr_entries = 0; 2464 2465 zfree(&vmlinux_path); 2466 } 2467 2468 static const char * const vmlinux_paths[] = { 2469 "vmlinux", 2470 "/boot/vmlinux" 2471 }; 2472 2473 static const char * const vmlinux_paths_upd[] = { 2474 "/boot/vmlinux-%s", 2475 "/usr/lib/debug/boot/vmlinux-%s", 2476 "/lib/modules/%s/build/vmlinux", 2477 "/usr/lib/debug/lib/modules/%s/vmlinux", 2478 "/usr/lib/debug/boot/vmlinux-%s.debug" 2479 }; 2480 2481 static int vmlinux_path__add(const char *new_entry) 2482 { 2483 vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry); 2484 if (vmlinux_path[vmlinux_path__nr_entries] == NULL) 2485 return -1; 2486 ++vmlinux_path__nr_entries; 2487 2488 return 0; 2489 } 2490 2491 static int vmlinux_path__init(struct perf_env *env) 2492 { 2493 struct utsname uts; 2494 char bf[PATH_MAX]; 2495 char *kernel_version; 2496 unsigned int i; 2497 2498 vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) + 2499 ARRAY_SIZE(vmlinux_paths_upd))); 2500 if (vmlinux_path == NULL) 2501 return -1; 2502 2503 for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++) 2504 if (vmlinux_path__add(vmlinux_paths[i]) < 0) 2505 goto out_fail; 2506 2507 /* only try kernel version if no symfs was given */ 2508 if (symbol_conf.symfs[0] != 0) 2509 return 0; 2510 2511 if (env) { 2512 kernel_version = env->os_release; 2513 } else { 2514 if (uname(&uts) < 0) 2515 goto out_fail; 2516 2517 kernel_version = uts.release; 2518 } 2519 2520 for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) { 2521 snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version); 2522 if (vmlinux_path__add(bf) < 0) 2523 goto out_fail; 2524 } 2525 2526 return 0; 2527 2528 out_fail: 2529 vmlinux_path__exit(); 2530 return -1; 2531 } 2532 2533 int setup_list(struct strlist **list, const char *list_str, 2534 const char *list_name) 2535 { 2536 if (list_str == NULL) 2537 return 0; 2538 2539 *list = strlist__new(list_str, NULL); 2540 if (!*list) { 2541 pr_err("problems parsing %s list\n", list_name); 2542 return -1; 2543 } 2544 2545 symbol_conf.has_filter = true; 2546 return 0; 2547 } 2548 2549 int setup_intlist(struct intlist **list, const char *list_str, 2550 const char *list_name) 2551 { 2552 if (list_str == NULL) 2553 return 0; 2554 2555 *list = intlist__new(list_str); 2556 if (!*list) { 2557 pr_err("problems parsing %s list\n", list_name); 2558 return -1; 2559 } 2560 return 0; 2561 } 2562 2563 static int setup_addrlist(struct intlist **addr_list, struct strlist *sym_list) 2564 { 2565 struct str_node *pos, *tmp; 2566 unsigned long val; 2567 char *sep; 2568 const char *end; 2569 int i = 0, err; 2570 2571 *addr_list = intlist__new(NULL); 2572 if (!*addr_list) 2573 return -1; 2574 2575 strlist__for_each_entry_safe(pos, tmp, sym_list) { 2576 errno = 0; 2577 val = strtoul(pos->s, &sep, 16); 2578 if (errno || (sep == pos->s)) 2579 continue; 2580 2581 if (*sep != '\0') { 2582 end = pos->s + strlen(pos->s) - 1; 2583 while (end >= sep && isspace(*end)) 2584 end--; 2585 2586 if (end >= sep) 2587 continue; 2588 } 2589 2590 err = intlist__add(*addr_list, val); 2591 if (err) 2592 break; 2593 2594 strlist__remove(sym_list, pos); 2595 i++; 2596 } 2597 2598 if (i == 0) { 2599 intlist__delete(*addr_list); 2600 *addr_list = NULL; 2601 } 2602 2603 return 0; 2604 } 2605 2606 static bool symbol__read_kptr_restrict(void) 2607 { 2608 bool value = false; 2609 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r"); 2610 2611 if (fp != NULL) { 2612 char line[8]; 2613 2614 if (fgets(line, sizeof(line), fp) != NULL) 2615 value = perf_cap__capable(CAP_SYSLOG) ? 2616 (atoi(line) >= 2) : 2617 (atoi(line) != 0); 2618 2619 fclose(fp); 2620 } 2621 2622 /* Per kernel/kallsyms.c: 2623 * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG 2624 */ 2625 if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG)) 2626 value = true; 2627 2628 return value; 2629 } 2630 2631 int symbol__annotation_init(void) 2632 { 2633 if (symbol_conf.init_annotation) 2634 return 0; 2635 2636 if (symbol_conf.initialized) { 2637 pr_err("Annotation needs to be init before symbol__init()\n"); 2638 return -1; 2639 } 2640 2641 symbol_conf.priv_size += sizeof(struct annotation); 2642 symbol_conf.init_annotation = true; 2643 return 0; 2644 } 2645 2646 int symbol__init(struct perf_env *env) 2647 { 2648 const char *symfs; 2649 2650 if (symbol_conf.initialized) 2651 return 0; 2652 2653 symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64)); 2654 2655 symbol__elf_init(); 2656 2657 if (symbol_conf.sort_by_name) 2658 symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - 2659 sizeof(struct symbol)); 2660 2661 if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0) 2662 return -1; 2663 2664 if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') { 2665 pr_err("'.' is the only non valid --field-separator argument\n"); 2666 return -1; 2667 } 2668 2669 if (setup_list(&symbol_conf.dso_list, 2670 symbol_conf.dso_list_str, "dso") < 0) 2671 return -1; 2672 2673 if (setup_list(&symbol_conf.comm_list, 2674 symbol_conf.comm_list_str, "comm") < 0) 2675 goto out_free_dso_list; 2676 2677 if (setup_intlist(&symbol_conf.pid_list, 2678 symbol_conf.pid_list_str, "pid") < 0) 2679 goto out_free_comm_list; 2680 2681 if (setup_intlist(&symbol_conf.tid_list, 2682 symbol_conf.tid_list_str, "tid") < 0) 2683 goto out_free_pid_list; 2684 2685 if (setup_list(&symbol_conf.sym_list, 2686 symbol_conf.sym_list_str, "symbol") < 0) 2687 goto out_free_tid_list; 2688 2689 if (symbol_conf.sym_list && 2690 setup_addrlist(&symbol_conf.addr_list, symbol_conf.sym_list) < 0) 2691 goto out_free_sym_list; 2692 2693 if (setup_list(&symbol_conf.bt_stop_list, 2694 symbol_conf.bt_stop_list_str, "symbol") < 0) 2695 goto out_free_sym_list; 2696 2697 /* 2698 * A path to symbols of "/" is identical to "" 2699 * reset here for simplicity. 2700 */ 2701 symfs = realpath(symbol_conf.symfs, NULL); 2702 if (symfs == NULL) 2703 symfs = symbol_conf.symfs; 2704 if (strcmp(symfs, "/") == 0) 2705 symbol_conf.symfs = ""; 2706 if (symfs != symbol_conf.symfs) 2707 free((void *)symfs); 2708 2709 symbol_conf.kptr_restrict = symbol__read_kptr_restrict(); 2710 2711 symbol_conf.initialized = true; 2712 return 0; 2713 2714 out_free_sym_list: 2715 strlist__delete(symbol_conf.sym_list); 2716 intlist__delete(symbol_conf.addr_list); 2717 out_free_tid_list: 2718 intlist__delete(symbol_conf.tid_list); 2719 out_free_pid_list: 2720 intlist__delete(symbol_conf.pid_list); 2721 out_free_comm_list: 2722 strlist__delete(symbol_conf.comm_list); 2723 out_free_dso_list: 2724 strlist__delete(symbol_conf.dso_list); 2725 return -1; 2726 } 2727 2728 void symbol__exit(void) 2729 { 2730 if (!symbol_conf.initialized) 2731 return; 2732 strlist__delete(symbol_conf.bt_stop_list); 2733 strlist__delete(symbol_conf.sym_list); 2734 strlist__delete(symbol_conf.dso_list); 2735 strlist__delete(symbol_conf.comm_list); 2736 intlist__delete(symbol_conf.tid_list); 2737 intlist__delete(symbol_conf.pid_list); 2738 intlist__delete(symbol_conf.addr_list); 2739 vmlinux_path__exit(); 2740 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL; 2741 symbol_conf.bt_stop_list = NULL; 2742 symbol_conf.initialized = false; 2743 } 2744 2745 int symbol__config_symfs(const struct option *opt __maybe_unused, 2746 const char *dir, int unset __maybe_unused) 2747 { 2748 char *bf = NULL; 2749 int ret; 2750 2751 symbol_conf.symfs = strdup(dir); 2752 if (symbol_conf.symfs == NULL) 2753 return -ENOMEM; 2754 2755 /* skip the locally configured cache if a symfs is given, and 2756 * config buildid dir to symfs/.debug 2757 */ 2758 ret = asprintf(&bf, "%s/%s", dir, ".debug"); 2759 if (ret < 0) 2760 return -ENOMEM; 2761 2762 set_buildid_dir(bf); 2763 2764 free(bf); 2765 return 0; 2766 } 2767 2768 struct mem_info *mem_info__get(struct mem_info *mi) 2769 { 2770 if (mi) 2771 refcount_inc(&mi->refcnt); 2772 return mi; 2773 } 2774 2775 void mem_info__put(struct mem_info *mi) 2776 { 2777 if (mi && refcount_dec_and_test(&mi->refcnt)) 2778 free(mi); 2779 } 2780 2781 struct mem_info *mem_info__new(void) 2782 { 2783 struct mem_info *mi = zalloc(sizeof(*mi)); 2784 2785 if (mi) 2786 refcount_set(&mi->refcnt, 1); 2787 return mi; 2788 } 2789 2790 /* 2791 * Checks that user supplied symbol kernel files are accessible because 2792 * the default mechanism for accessing elf files fails silently. i.e. if 2793 * debug syms for a build ID aren't found perf carries on normally. When 2794 * they are user supplied we should assume that the user doesn't want to 2795 * silently fail. 2796 */ 2797 int symbol__validate_sym_arguments(void) 2798 { 2799 if (symbol_conf.vmlinux_name && 2800 access(symbol_conf.vmlinux_name, R_OK)) { 2801 pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name); 2802 return -EINVAL; 2803 } 2804 if (symbol_conf.kallsyms_name && 2805 access(symbol_conf.kallsyms_name, R_OK)) { 2806 pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name); 2807 return -EINVAL; 2808 } 2809 return 0; 2810 } 2811