1 // SPDX-License-Identifier: GPL-2.0 2 #include <dirent.h> 3 #include <errno.h> 4 #include <stdlib.h> 5 #include <stdio.h> 6 #include <string.h> 7 #include <linux/capability.h> 8 #include <linux/kernel.h> 9 #include <linux/mman.h> 10 #include <linux/string.h> 11 #include <linux/time64.h> 12 #include <sys/types.h> 13 #include <sys/stat.h> 14 #include <sys/param.h> 15 #include <fcntl.h> 16 #include <unistd.h> 17 #include <inttypes.h> 18 #include "annotate.h" 19 #include "build-id.h" 20 #include "cap.h" 21 #include "dso.h" 22 #include "util.h" // lsdir() 23 #include "debug.h" 24 #include "event.h" 25 #include "machine.h" 26 #include "map.h" 27 #include "symbol.h" 28 #include "map_symbol.h" 29 #include "mem-events.h" 30 #include "symsrc.h" 31 #include "strlist.h" 32 #include "intlist.h" 33 #include "namespaces.h" 34 #include "header.h" 35 #include "path.h" 36 #include <linux/ctype.h> 37 #include <linux/zalloc.h> 38 39 #include <elf.h> 40 #include <limits.h> 41 #include <symbol/kallsyms.h> 42 #include <sys/utsname.h> 43 44 static int dso__load_kernel_sym(struct dso *dso, struct map *map); 45 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map); 46 static bool symbol__is_idle(const char *name); 47 48 int vmlinux_path__nr_entries; 49 char **vmlinux_path; 50 51 struct symbol_conf symbol_conf = { 52 .nanosecs = false, 53 .use_modules = true, 54 .try_vmlinux_path = true, 55 .demangle = true, 56 .demangle_kernel = false, 57 .cumulate_callchain = true, 58 .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */ 59 .show_hist_headers = true, 60 .symfs = "", 61 .event_group = true, 62 .inline_name = true, 63 .res_sample = 0, 64 }; 65 66 static enum dso_binary_type binary_type_symtab[] = { 67 DSO_BINARY_TYPE__KALLSYMS, 68 DSO_BINARY_TYPE__GUEST_KALLSYMS, 69 DSO_BINARY_TYPE__JAVA_JIT, 70 DSO_BINARY_TYPE__DEBUGLINK, 71 DSO_BINARY_TYPE__BUILD_ID_CACHE, 72 DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO, 73 DSO_BINARY_TYPE__FEDORA_DEBUGINFO, 74 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, 75 DSO_BINARY_TYPE__BUILDID_DEBUGINFO, 76 DSO_BINARY_TYPE__SYSTEM_PATH_DSO, 77 DSO_BINARY_TYPE__GUEST_KMODULE, 78 DSO_BINARY_TYPE__GUEST_KMODULE_COMP, 79 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE, 80 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP, 81 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, 82 DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, 83 DSO_BINARY_TYPE__NOT_FOUND, 84 }; 85 86 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) 87 88 static bool symbol_type__filter(char symbol_type) 89 { 90 symbol_type = toupper(symbol_type); 91 return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B'; 92 } 93 94 static int prefix_underscores_count(const char *str) 95 { 96 const char *tail = str; 97 98 while (*tail == '_') 99 tail++; 100 101 return tail - str; 102 } 103 104 void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c) 105 { 106 p->end = c->start; 107 } 108 109 const char * __weak arch__normalize_symbol_name(const char *name) 110 { 111 return name; 112 } 113 114 int __weak arch__compare_symbol_names(const char *namea, const char *nameb) 115 { 116 return strcmp(namea, nameb); 117 } 118 119 int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb, 120 unsigned int n) 121 { 122 return strncmp(namea, nameb, n); 123 } 124 125 int __weak arch__choose_best_symbol(struct symbol *syma, 126 struct symbol *symb __maybe_unused) 127 { 128 /* Avoid "SyS" kernel syscall aliases */ 129 if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3)) 130 return SYMBOL_B; 131 
if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10)) 132 return SYMBOL_B; 133 134 return SYMBOL_A; 135 } 136 137 static int choose_best_symbol(struct symbol *syma, struct symbol *symb) 138 { 139 s64 a; 140 s64 b; 141 size_t na, nb; 142 143 /* Prefer a symbol with non zero length */ 144 a = syma->end - syma->start; 145 b = symb->end - symb->start; 146 if ((b == 0) && (a > 0)) 147 return SYMBOL_A; 148 else if ((a == 0) && (b > 0)) 149 return SYMBOL_B; 150 151 /* Prefer a non weak symbol over a weak one */ 152 a = syma->binding == STB_WEAK; 153 b = symb->binding == STB_WEAK; 154 if (b && !a) 155 return SYMBOL_A; 156 if (a && !b) 157 return SYMBOL_B; 158 159 /* Prefer a global symbol over a non global one */ 160 a = syma->binding == STB_GLOBAL; 161 b = symb->binding == STB_GLOBAL; 162 if (a && !b) 163 return SYMBOL_A; 164 if (b && !a) 165 return SYMBOL_B; 166 167 /* Prefer a symbol with less underscores */ 168 a = prefix_underscores_count(syma->name); 169 b = prefix_underscores_count(symb->name); 170 if (b > a) 171 return SYMBOL_A; 172 else if (a > b) 173 return SYMBOL_B; 174 175 /* Choose the symbol with the longest name */ 176 na = strlen(syma->name); 177 nb = strlen(symb->name); 178 if (na > nb) 179 return SYMBOL_A; 180 else if (na < nb) 181 return SYMBOL_B; 182 183 return arch__choose_best_symbol(syma, symb); 184 } 185 186 void symbols__fixup_duplicate(struct rb_root_cached *symbols) 187 { 188 struct rb_node *nd; 189 struct symbol *curr, *next; 190 191 if (symbol_conf.allow_aliases) 192 return; 193 194 nd = rb_first_cached(symbols); 195 196 while (nd) { 197 curr = rb_entry(nd, struct symbol, rb_node); 198 again: 199 nd = rb_next(&curr->rb_node); 200 next = rb_entry(nd, struct symbol, rb_node); 201 202 if (!nd) 203 break; 204 205 if (curr->start != next->start) 206 continue; 207 208 if (choose_best_symbol(curr, next) == SYMBOL_A) { 209 rb_erase_cached(&next->rb_node, symbols); 210 symbol__delete(next); 211 goto again; 212 } else { 213 nd = rb_next(&curr->rb_node); 214 rb_erase_cached(&curr->rb_node, symbols); 215 symbol__delete(curr); 216 } 217 } 218 } 219 220 void symbols__fixup_end(struct rb_root_cached *symbols) 221 { 222 struct rb_node *nd, *prevnd = rb_first_cached(symbols); 223 struct symbol *curr, *prev; 224 225 if (prevnd == NULL) 226 return; 227 228 curr = rb_entry(prevnd, struct symbol, rb_node); 229 230 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) { 231 prev = curr; 232 curr = rb_entry(nd, struct symbol, rb_node); 233 234 if (prev->end == prev->start && prev->end != curr->start) 235 arch__symbols__fixup_end(prev, curr); 236 } 237 238 /* Last entry */ 239 if (curr->end == curr->start) 240 curr->end = roundup(curr->start, 4096) + 4096; 241 } 242 243 void maps__fixup_end(struct maps *maps) 244 { 245 struct map *prev = NULL, *curr; 246 247 down_write(&maps->lock); 248 249 maps__for_each_entry(maps, curr) { 250 if (prev != NULL && !prev->end) 251 prev->end = curr->start; 252 253 prev = curr; 254 } 255 256 /* 257 * We still haven't the actual symbols, so guess the 258 * last map final address. 
	 */
	if (curr && !curr->end)
		curr->end = ~0ULL;

	up_write(&maps->lock);
}

struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
{
	size_t namelen = strlen(name) + 1;
	struct symbol *sym = calloc(1, (symbol_conf.priv_size +
					sizeof(*sym) + namelen));
	if (sym == NULL)
		return NULL;

	if (symbol_conf.priv_size) {
		if (symbol_conf.init_annotation) {
			struct annotation *notes = (void *)sym;
			pthread_mutex_init(&notes->lock, NULL);
		}
		sym = ((void *)sym) + symbol_conf.priv_size;
	}

	sym->start = start;
	sym->end = len ? start + len : start;
	sym->type = type;
	sym->binding = binding;
	sym->namelen = namelen - 1;

	pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
		  __func__, name, start, sym->end);
	memcpy(sym->name, name, namelen);

	return sym;
}

void symbol__delete(struct symbol *sym)
{
	free(((void *)sym) - symbol_conf.priv_size);
}

void symbols__delete(struct rb_root_cached *symbols)
{
	struct symbol *pos;
	struct rb_node *next = rb_first_cached(symbols);

	while (next) {
		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, symbols);
		symbol__delete(pos);
	}
}

void __symbols__insert(struct rb_root_cached *symbols,
		       struct symbol *sym, bool kernel)
{
	struct rb_node **p = &symbols->rb_root.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = sym->start;
	struct symbol *s;
	bool leftmost = true;

	if (kernel) {
		const char *name = sym->name;
		/*
		 * ppc64 uses function descriptors and prepends a '.' to the
		 * start of every instruction address. Remove it.
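		 * e.g. a ppc64 kallsyms entry named ".cpu_idle" is checked as
		 * "cpu_idle" by symbol__is_idle() below (illustrative name,
		 * not taken from a specific kernel).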
327 */ 328 if (name[0] == '.') 329 name++; 330 sym->idle = symbol__is_idle(name); 331 } 332 333 while (*p != NULL) { 334 parent = *p; 335 s = rb_entry(parent, struct symbol, rb_node); 336 if (ip < s->start) 337 p = &(*p)->rb_left; 338 else { 339 p = &(*p)->rb_right; 340 leftmost = false; 341 } 342 } 343 rb_link_node(&sym->rb_node, parent, p); 344 rb_insert_color_cached(&sym->rb_node, symbols, leftmost); 345 } 346 347 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym) 348 { 349 __symbols__insert(symbols, sym, false); 350 } 351 352 static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip) 353 { 354 struct rb_node *n; 355 356 if (symbols == NULL) 357 return NULL; 358 359 n = symbols->rb_root.rb_node; 360 361 while (n) { 362 struct symbol *s = rb_entry(n, struct symbol, rb_node); 363 364 if (ip < s->start) 365 n = n->rb_left; 366 else if (ip > s->end || (ip == s->end && ip != s->start)) 367 n = n->rb_right; 368 else 369 return s; 370 } 371 372 return NULL; 373 } 374 375 static struct symbol *symbols__first(struct rb_root_cached *symbols) 376 { 377 struct rb_node *n = rb_first_cached(symbols); 378 379 if (n) 380 return rb_entry(n, struct symbol, rb_node); 381 382 return NULL; 383 } 384 385 static struct symbol *symbols__last(struct rb_root_cached *symbols) 386 { 387 struct rb_node *n = rb_last(&symbols->rb_root); 388 389 if (n) 390 return rb_entry(n, struct symbol, rb_node); 391 392 return NULL; 393 } 394 395 static struct symbol *symbols__next(struct symbol *sym) 396 { 397 struct rb_node *n = rb_next(&sym->rb_node); 398 399 if (n) 400 return rb_entry(n, struct symbol, rb_node); 401 402 return NULL; 403 } 404 405 static void symbols__insert_by_name(struct rb_root_cached *symbols, struct symbol *sym) 406 { 407 struct rb_node **p = &symbols->rb_root.rb_node; 408 struct rb_node *parent = NULL; 409 struct symbol_name_rb_node *symn, *s; 410 bool leftmost = true; 411 412 symn = container_of(sym, struct symbol_name_rb_node, sym); 413 414 while (*p != NULL) { 415 parent = *p; 416 s = rb_entry(parent, struct symbol_name_rb_node, rb_node); 417 if (strcmp(sym->name, s->sym.name) < 0) 418 p = &(*p)->rb_left; 419 else { 420 p = &(*p)->rb_right; 421 leftmost = false; 422 } 423 } 424 rb_link_node(&symn->rb_node, parent, p); 425 rb_insert_color_cached(&symn->rb_node, symbols, leftmost); 426 } 427 428 static void symbols__sort_by_name(struct rb_root_cached *symbols, 429 struct rb_root_cached *source) 430 { 431 struct rb_node *nd; 432 433 for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) { 434 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 435 symbols__insert_by_name(symbols, pos); 436 } 437 } 438 439 int symbol__match_symbol_name(const char *name, const char *str, 440 enum symbol_tag_include includes) 441 { 442 const char *versioning; 443 444 if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY && 445 (versioning = strstr(name, "@@"))) { 446 int len = strlen(str); 447 448 if (len < versioning - name) 449 len = versioning - name; 450 451 return arch__compare_symbol_names_n(name, str, len); 452 } else 453 return arch__compare_symbol_names(name, str); 454 } 455 456 static struct symbol *symbols__find_by_name(struct rb_root_cached *symbols, 457 const char *name, 458 enum symbol_tag_include includes) 459 { 460 struct rb_node *n; 461 struct symbol_name_rb_node *s = NULL; 462 463 if (symbols == NULL) 464 return NULL; 465 466 n = symbols->rb_root.rb_node; 467 468 while (n) { 469 int cmp; 470 471 s = rb_entry(n, struct symbol_name_rb_node, rb_node); 472 cmp = 
symbol__match_symbol_name(s->sym.name, name, includes); 473 474 if (cmp > 0) 475 n = n->rb_left; 476 else if (cmp < 0) 477 n = n->rb_right; 478 else 479 break; 480 } 481 482 if (n == NULL) 483 return NULL; 484 485 if (includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) 486 /* return first symbol that has same name (if any) */ 487 for (n = rb_prev(n); n; n = rb_prev(n)) { 488 struct symbol_name_rb_node *tmp; 489 490 tmp = rb_entry(n, struct symbol_name_rb_node, rb_node); 491 if (arch__compare_symbol_names(tmp->sym.name, s->sym.name)) 492 break; 493 494 s = tmp; 495 } 496 497 return &s->sym; 498 } 499 500 void dso__reset_find_symbol_cache(struct dso *dso) 501 { 502 dso->last_find_result.addr = 0; 503 dso->last_find_result.symbol = NULL; 504 } 505 506 void dso__insert_symbol(struct dso *dso, struct symbol *sym) 507 { 508 __symbols__insert(&dso->symbols, sym, dso->kernel); 509 510 /* update the symbol cache if necessary */ 511 if (dso->last_find_result.addr >= sym->start && 512 (dso->last_find_result.addr < sym->end || 513 sym->start == sym->end)) { 514 dso->last_find_result.symbol = sym; 515 } 516 } 517 518 struct symbol *dso__find_symbol(struct dso *dso, u64 addr) 519 { 520 if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) { 521 dso->last_find_result.addr = addr; 522 dso->last_find_result.symbol = symbols__find(&dso->symbols, addr); 523 } 524 525 return dso->last_find_result.symbol; 526 } 527 528 struct symbol *dso__first_symbol(struct dso *dso) 529 { 530 return symbols__first(&dso->symbols); 531 } 532 533 struct symbol *dso__last_symbol(struct dso *dso) 534 { 535 return symbols__last(&dso->symbols); 536 } 537 538 struct symbol *dso__next_symbol(struct symbol *sym) 539 { 540 return symbols__next(sym); 541 } 542 543 struct symbol *symbol__next_by_name(struct symbol *sym) 544 { 545 struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym); 546 struct rb_node *n = rb_next(&s->rb_node); 547 548 return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL; 549 } 550 551 /* 552 * Returns first symbol that matched with @name. 553 */ 554 struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name) 555 { 556 struct symbol *s = symbols__find_by_name(&dso->symbol_names, name, 557 SYMBOL_TAG_INCLUDE__NONE); 558 if (!s) 559 s = symbols__find_by_name(&dso->symbol_names, name, 560 SYMBOL_TAG_INCLUDE__DEFAULT_ONLY); 561 return s; 562 } 563 564 void dso__sort_by_name(struct dso *dso) 565 { 566 dso__set_sorted_by_name(dso); 567 return symbols__sort_by_name(&dso->symbol_names, &dso->symbols); 568 } 569 570 /* 571 * While we find nice hex chars, build a long_val. 572 * Return number of chars processed. 
 */
static int hex2u64(const char *ptr, u64 *long_val)
{
	char *p;

	*long_val = strtoull(ptr, &p, 16);

	return p - ptr;
}


int modules__parse(const char *filename, void *arg,
		   int (*process_module)(void *arg, const char *name,
					 u64 start, u64 size))
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int err = 0;

	file = fopen(filename, "r");
	if (file == NULL)
		return -1;

	while (1) {
		char name[PATH_MAX];
		u64 start, size;
		char *sep, *endptr;
		ssize_t line_len;

		line_len = getline(&line, &n, file);
		if (line_len < 0) {
			if (feof(file))
				break;
			err = -1;
			goto out;
		}

		if (!line) {
			err = -1;
			goto out;
		}

		line[--line_len] = '\0'; /* \n */

		sep = strrchr(line, 'x');
		if (sep == NULL)
			continue;

		hex2u64(sep + 1, &start);

		sep = strchr(line, ' ');
		if (sep == NULL)
			continue;

		*sep = '\0';

		scnprintf(name, sizeof(name), "[%s]", line);

		size = strtoul(sep + 1, &endptr, 0);
		if (*endptr != ' ' && *endptr != '\t')
			continue;

		err = process_module(arg, name, start, size);
		if (err)
			break;
	}
out:
	free(line);
	fclose(file);
	return err;
}
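
/*
 * For reference, a /proc/modules line has the form (illustrative example,
 * not taken from a real system):
 *
 *   nf_nat 49152 2 nf_nat_ipv4,nf_nat_ipv6, Live 0xffffffffc09f2000
 *
 * modules__parse() above takes the module size from the field after the
 * first space, the load address from the hex digits after the last 'x',
 * and reports the module to the callback with its name bracketed, here
 * as "[nf_nat]".
 */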

/*
 * These are symbols in the kernel image, so make sure that
 * sym is from a kernel DSO.
 */
static bool symbol__is_idle(const char *name)
{
	const char * const idle_symbols[] = {
		"acpi_idle_do_entry",
		"acpi_processor_ffh_cstate_enter",
		"arch_cpu_idle",
		"cpu_idle",
		"cpu_startup_entry",
		"idle_cpu",
		"intel_idle",
		"default_idle",
		"native_safe_halt",
		"enter_idle",
		"exit_idle",
		"mwait_idle",
		"mwait_idle_with_hints",
		"poll_idle",
		"ppc64_runlatch_off",
		"pseries_dedicated_idle_sleep",
		"psw_idle",
		"psw_idle_exit",
		NULL
	};
	int i;
	static struct strlist *idle_symbols_list;

	if (idle_symbols_list)
		return strlist__has_entry(idle_symbols_list, name);

	idle_symbols_list = strlist__new(NULL, NULL);

	for (i = 0; idle_symbols[i]; i++)
		strlist__add(idle_symbols_list, idle_symbols[i]);

	return strlist__has_entry(idle_symbols_list, name);
}

static int map__process_kallsym_symbol(void *arg, const char *name,
				       char type, u64 start)
{
	struct symbol *sym;
	struct dso *dso = arg;
	struct rb_root_cached *root = &dso->symbols;

	if (!symbol_type__filter(type))
		return 0;

	/*
	 * module symbols are not sorted so we add all
	 * symbols, setting length to 0, and rely on
	 * symbols__fixup_end() to fix it up.
	 */
	sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
	if (sym == NULL)
		return -ENOMEM;
	/*
	 * We will pass the symbols to the filter later, in
	 * map__split_kallsyms, when we have split the maps per module
	 */
	__symbols__insert(root, sym, !strchr(name, '['));

	return 0;
}

/*
 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
 * so that we can in the next step set the symbol ->end address and then
 * call kernel_maps__split_kallsyms.
 */
static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
{
	return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
}

static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
{
	struct map *curr_map;
	struct symbol *pos;
	int count = 0;
	struct rb_root_cached old_root = dso->symbols;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);

	if (!kmaps)
		return -1;

	*root = RB_ROOT_CACHED;

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		rb_erase_cached(&pos->rb_node, &old_root);
		RB_CLEAR_NODE(&pos->rb_node);
		module = strchr(pos->name, '\t');
		if (module)
			*module = '\0';

		curr_map = maps__find(kmaps, pos->start);

		if (!curr_map) {
			symbol__delete(pos);
			continue;
		}

		pos->start -= curr_map->start - curr_map->pgoff;
		if (pos->end > curr_map->end)
			pos->end = curr_map->end;
		if (pos->end)
			pos->end -= curr_map->start - curr_map->pgoff;
		symbols__insert(&curr_map->dso->symbols, pos);
		++count;
	}

	/* Symbols have been adjusted */
	dso->adjust_symbols = 1;

	return count;
}

/*
 * Split the symbols into maps, making sure there are no overlaps, i.e. the
 * kernel range is broken into several maps, named [kernel].N, as we don't
 * have the original ELF section names that vmlinux has.
 */
static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
				struct map *initial_map)
{
	struct machine *machine;
	struct map *curr_map = initial_map;
	struct symbol *pos;
	int count = 0, moved = 0;
	struct rb_root_cached *root = &dso->symbols;
	struct rb_node *next = rb_first_cached(root);
	int kernel_range = 0;
	bool x86_64;

	if (!kmaps)
		return -1;

	machine = kmaps->machine;

	x86_64 = machine__is(machine, "x86_64");

	while (next) {
		char *module;

		pos = rb_entry(next, struct symbol, rb_node);
		next = rb_next(&pos->rb_node);

		module = strchr(pos->name, '\t');
		if (module) {
			if (!symbol_conf.use_modules)
				goto discard_symbol;

			*module++ = '\0';

			if (strcmp(curr_map->dso->short_name, module)) {
				if (curr_map != initial_map &&
				    dso->kernel == DSO_SPACE__KERNEL_GUEST &&
				    machine__is_default_guest(machine)) {
					/*
					 * We assume all symbols of a module
					 * are contiguous in kallsyms, so
					 * curr_map points to a module and all
					 * its symbols are in its kmap. Mark it
					 * as loaded.
					 */
					dso__set_loaded(curr_map->dso);
				}

				curr_map = maps__find_by_name(kmaps, module);
				if (curr_map == NULL) {
					pr_debug("%s/proc/{kallsyms,modules} "
						 "inconsistency while looking "
						 "for \"%s\" module!\n",
						 machine->root_dir, module);
					curr_map = initial_map;
					goto discard_symbol;
				}

				if (curr_map->dso->loaded &&
				    !machine__is_default_guest(machine))
					goto discard_symbol;
			}
			/*
			 * So that we look just like we get from .ko files,
			 * i.e. not prelinked, relative to initial_map->start.
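			 * e.g. with a module map starting at
			 * 0xffffffffc0a00000 and pgoff 0, a kallsyms address
			 * of 0xffffffffc0a00010 becomes the module-relative
			 * address 0x10 (illustrative numbers, assuming the
			 * usual map__map_ip() conversion).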
840 */ 841 pos->start = curr_map->map_ip(curr_map, pos->start); 842 pos->end = curr_map->map_ip(curr_map, pos->end); 843 } else if (x86_64 && is_entry_trampoline(pos->name)) { 844 /* 845 * These symbols are not needed anymore since the 846 * trampoline maps refer to the text section and it's 847 * symbols instead. Avoid having to deal with 848 * relocations, and the assumption that the first symbol 849 * is the start of kernel text, by simply removing the 850 * symbols at this point. 851 */ 852 goto discard_symbol; 853 } else if (curr_map != initial_map) { 854 char dso_name[PATH_MAX]; 855 struct dso *ndso; 856 857 if (delta) { 858 /* Kernel was relocated at boot time */ 859 pos->start -= delta; 860 pos->end -= delta; 861 } 862 863 if (count == 0) { 864 curr_map = initial_map; 865 goto add_symbol; 866 } 867 868 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 869 snprintf(dso_name, sizeof(dso_name), 870 "[guest.kernel].%d", 871 kernel_range++); 872 else 873 snprintf(dso_name, sizeof(dso_name), 874 "[kernel].%d", 875 kernel_range++); 876 877 ndso = dso__new(dso_name); 878 if (ndso == NULL) 879 return -1; 880 881 ndso->kernel = dso->kernel; 882 883 curr_map = map__new2(pos->start, ndso); 884 if (curr_map == NULL) { 885 dso__put(ndso); 886 return -1; 887 } 888 889 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; 890 maps__insert(kmaps, curr_map); 891 ++kernel_range; 892 } else if (delta) { 893 /* Kernel was relocated at boot time */ 894 pos->start -= delta; 895 pos->end -= delta; 896 } 897 add_symbol: 898 if (curr_map != initial_map) { 899 rb_erase_cached(&pos->rb_node, root); 900 symbols__insert(&curr_map->dso->symbols, pos); 901 ++moved; 902 } else 903 ++count; 904 905 continue; 906 discard_symbol: 907 rb_erase_cached(&pos->rb_node, root); 908 symbol__delete(pos); 909 } 910 911 if (curr_map != initial_map && 912 dso->kernel == DSO_SPACE__KERNEL_GUEST && 913 machine__is_default_guest(kmaps->machine)) { 914 dso__set_loaded(curr_map->dso); 915 } 916 917 return count + moved; 918 } 919 920 bool symbol__restricted_filename(const char *filename, 921 const char *restricted_filename) 922 { 923 bool restricted = false; 924 925 if (symbol_conf.kptr_restrict) { 926 char *r = realpath(filename, NULL); 927 928 if (r != NULL) { 929 restricted = strcmp(r, restricted_filename) == 0; 930 free(r); 931 return restricted; 932 } 933 } 934 935 return restricted; 936 } 937 938 struct module_info { 939 struct rb_node rb_node; 940 char *name; 941 u64 start; 942 }; 943 944 static void add_module(struct module_info *mi, struct rb_root *modules) 945 { 946 struct rb_node **p = &modules->rb_node; 947 struct rb_node *parent = NULL; 948 struct module_info *m; 949 950 while (*p != NULL) { 951 parent = *p; 952 m = rb_entry(parent, struct module_info, rb_node); 953 if (strcmp(mi->name, m->name) < 0) 954 p = &(*p)->rb_left; 955 else 956 p = &(*p)->rb_right; 957 } 958 rb_link_node(&mi->rb_node, parent, p); 959 rb_insert_color(&mi->rb_node, modules); 960 } 961 962 static void delete_modules(struct rb_root *modules) 963 { 964 struct module_info *mi; 965 struct rb_node *next = rb_first(modules); 966 967 while (next) { 968 mi = rb_entry(next, struct module_info, rb_node); 969 next = rb_next(&mi->rb_node); 970 rb_erase(&mi->rb_node, modules); 971 zfree(&mi->name); 972 free(mi); 973 } 974 } 975 976 static struct module_info *find_module(const char *name, 977 struct rb_root *modules) 978 { 979 struct rb_node *n = modules->rb_node; 980 981 while (n) { 982 struct module_info *m; 983 int cmp; 984 985 m = rb_entry(n, struct 
module_info, rb_node); 986 cmp = strcmp(name, m->name); 987 if (cmp < 0) 988 n = n->rb_left; 989 else if (cmp > 0) 990 n = n->rb_right; 991 else 992 return m; 993 } 994 995 return NULL; 996 } 997 998 static int __read_proc_modules(void *arg, const char *name, u64 start, 999 u64 size __maybe_unused) 1000 { 1001 struct rb_root *modules = arg; 1002 struct module_info *mi; 1003 1004 mi = zalloc(sizeof(struct module_info)); 1005 if (!mi) 1006 return -ENOMEM; 1007 1008 mi->name = strdup(name); 1009 mi->start = start; 1010 1011 if (!mi->name) { 1012 free(mi); 1013 return -ENOMEM; 1014 } 1015 1016 add_module(mi, modules); 1017 1018 return 0; 1019 } 1020 1021 static int read_proc_modules(const char *filename, struct rb_root *modules) 1022 { 1023 if (symbol__restricted_filename(filename, "/proc/modules")) 1024 return -1; 1025 1026 if (modules__parse(filename, modules, __read_proc_modules)) { 1027 delete_modules(modules); 1028 return -1; 1029 } 1030 1031 return 0; 1032 } 1033 1034 int compare_proc_modules(const char *from, const char *to) 1035 { 1036 struct rb_root from_modules = RB_ROOT; 1037 struct rb_root to_modules = RB_ROOT; 1038 struct rb_node *from_node, *to_node; 1039 struct module_info *from_m, *to_m; 1040 int ret = -1; 1041 1042 if (read_proc_modules(from, &from_modules)) 1043 return -1; 1044 1045 if (read_proc_modules(to, &to_modules)) 1046 goto out_delete_from; 1047 1048 from_node = rb_first(&from_modules); 1049 to_node = rb_first(&to_modules); 1050 while (from_node) { 1051 if (!to_node) 1052 break; 1053 1054 from_m = rb_entry(from_node, struct module_info, rb_node); 1055 to_m = rb_entry(to_node, struct module_info, rb_node); 1056 1057 if (from_m->start != to_m->start || 1058 strcmp(from_m->name, to_m->name)) 1059 break; 1060 1061 from_node = rb_next(from_node); 1062 to_node = rb_next(to_node); 1063 } 1064 1065 if (!from_node && !to_node) 1066 ret = 0; 1067 1068 delete_modules(&to_modules); 1069 out_delete_from: 1070 delete_modules(&from_modules); 1071 1072 return ret; 1073 } 1074 1075 static int do_validate_kcore_modules(const char *filename, struct maps *kmaps) 1076 { 1077 struct rb_root modules = RB_ROOT; 1078 struct map *old_map; 1079 int err; 1080 1081 err = read_proc_modules(filename, &modules); 1082 if (err) 1083 return err; 1084 1085 maps__for_each_entry(kmaps, old_map) { 1086 struct module_info *mi; 1087 1088 if (!__map__is_kmodule(old_map)) { 1089 continue; 1090 } 1091 1092 /* Module must be in memory at the same address */ 1093 mi = find_module(old_map->dso->short_name, &modules); 1094 if (!mi || mi->start != old_map->start) { 1095 err = -EINVAL; 1096 goto out; 1097 } 1098 } 1099 out: 1100 delete_modules(&modules); 1101 return err; 1102 } 1103 1104 /* 1105 * If kallsyms is referenced by name then we look for filename in the same 1106 * directory. 
 */
static bool filename_from_kallsyms_filename(char *filename,
					    const char *base_name,
					    const char *kallsyms_filename)
{
	char *name;

	strcpy(filename, kallsyms_filename);
	name = strrchr(filename, '/');
	if (!name)
		return false;

	name += 1;

	if (!strcmp(name, "kallsyms")) {
		strcpy(name, base_name);
		return true;
	}

	return false;
}

static int validate_kcore_modules(const char *kallsyms_filename,
				  struct map *map)
{
	struct maps *kmaps = map__kmaps(map);
	char modules_filename[PATH_MAX];

	if (!kmaps)
		return -EINVAL;

	if (!filename_from_kallsyms_filename(modules_filename, "modules",
					     kallsyms_filename))
		return -EINVAL;

	if (do_validate_kcore_modules(modules_filename, kmaps))
		return -EINVAL;

	return 0;
}

static int validate_kcore_addresses(const char *kallsyms_filename,
				    struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap)
		return -EINVAL;

	if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
		u64 start;

		if (kallsyms__get_function_start(kallsyms_filename,
						 kmap->ref_reloc_sym->name, &start))
			return -ENOENT;
		if (start != kmap->ref_reloc_sym->addr)
			return -EINVAL;
	}

	return validate_kcore_modules(kallsyms_filename, map);
}

struct kcore_mapfn_data {
	struct dso *dso;
	struct list_head maps;
};

static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_mapfn_data *md = data;
	struct map *map;

	map = map__new2(start, md->dso);
	if (map == NULL)
		return -ENOMEM;

	map->end = map->start + len;
	map->pgoff = pgoff;

	list_add(&map->node, &md->maps);

	return 0;
}

/*
 * Merges map into maps by splitting the new map within the existing map
 * regions.
 */
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	struct map *old_map;
	LIST_HEAD(merged);

	maps__for_each_entry(kmaps, old_map) {
		/* no overlap with this one */
		if (new_map->end < old_map->start ||
		    new_map->start >= old_map->end)
			continue;

		if (new_map->start < old_map->start) {
			/*
			 * |new......
			 *       |old....
			 */
			if (new_map->end < old_map->end) {
				/*
				 * |new......|     -> |new..|
				 *       |old....| ->       |old....|
				 */
				new_map->end = old_map->start;
			} else {
				/*
				 * |new.............| -> |new..|       |new..|
				 *       |old....|    ->       |old....|
				 */
				struct map *m = map__clone(new_map);

				if (!m)
					return -ENOMEM;

				m->end = old_map->start;
				list_add_tail(&m->node, &merged);
				new_map->pgoff += old_map->end - new_map->start;
				new_map->start = old_map->end;
			}
		} else {
			/*
			 *      |new......
			 * |old....
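			 * (new_map starts at or after old_map here; the two
			 * sub-cases below either drop new_map entirely or
			 * advance its start past old_map's end)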
1236 */ 1237 if (new_map->end < old_map->end) { 1238 /* 1239 * |new..| -> x 1240 * |old.........| -> |old.........| 1241 */ 1242 map__put(new_map); 1243 new_map = NULL; 1244 break; 1245 } else { 1246 /* 1247 * |new......| -> |new...| 1248 * |old....| -> |old....| 1249 */ 1250 new_map->pgoff += old_map->end - new_map->start; 1251 new_map->start = old_map->end; 1252 } 1253 } 1254 } 1255 1256 while (!list_empty(&merged)) { 1257 old_map = list_entry(merged.next, struct map, node); 1258 list_del_init(&old_map->node); 1259 maps__insert(kmaps, old_map); 1260 map__put(old_map); 1261 } 1262 1263 if (new_map) { 1264 maps__insert(kmaps, new_map); 1265 map__put(new_map); 1266 } 1267 return 0; 1268 } 1269 1270 static int dso__load_kcore(struct dso *dso, struct map *map, 1271 const char *kallsyms_filename) 1272 { 1273 struct maps *kmaps = map__kmaps(map); 1274 struct kcore_mapfn_data md; 1275 struct map *old_map, *new_map, *replacement_map = NULL, *next; 1276 struct machine *machine; 1277 bool is_64_bit; 1278 int err, fd; 1279 char kcore_filename[PATH_MAX]; 1280 u64 stext; 1281 1282 if (!kmaps) 1283 return -EINVAL; 1284 1285 machine = kmaps->machine; 1286 1287 /* This function requires that the map is the kernel map */ 1288 if (!__map__is_kernel(map)) 1289 return -EINVAL; 1290 1291 if (!filename_from_kallsyms_filename(kcore_filename, "kcore", 1292 kallsyms_filename)) 1293 return -EINVAL; 1294 1295 /* Modules and kernel must be present at their original addresses */ 1296 if (validate_kcore_addresses(kallsyms_filename, map)) 1297 return -EINVAL; 1298 1299 md.dso = dso; 1300 INIT_LIST_HEAD(&md.maps); 1301 1302 fd = open(kcore_filename, O_RDONLY); 1303 if (fd < 0) { 1304 pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n", 1305 kcore_filename); 1306 return -EINVAL; 1307 } 1308 1309 /* Read new maps into temporary lists */ 1310 err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md, 1311 &is_64_bit); 1312 if (err) 1313 goto out_err; 1314 dso->is_64_bit = is_64_bit; 1315 1316 if (list_empty(&md.maps)) { 1317 err = -EINVAL; 1318 goto out_err; 1319 } 1320 1321 /* Remove old maps */ 1322 maps__for_each_entry_safe(kmaps, old_map, next) { 1323 /* 1324 * We need to preserve eBPF maps even if they are 1325 * covered by kcore, because we need to access 1326 * eBPF dso for source data. 
1327 */ 1328 if (old_map != map && !__map__is_bpf_prog(old_map)) 1329 maps__remove(kmaps, old_map); 1330 } 1331 machine->trampolines_mapped = false; 1332 1333 /* Find the kernel map using the '_stext' symbol */ 1334 if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) { 1335 list_for_each_entry(new_map, &md.maps, node) { 1336 if (stext >= new_map->start && stext < new_map->end) { 1337 replacement_map = new_map; 1338 break; 1339 } 1340 } 1341 } 1342 1343 if (!replacement_map) 1344 replacement_map = list_entry(md.maps.next, struct map, node); 1345 1346 /* Add new maps */ 1347 while (!list_empty(&md.maps)) { 1348 new_map = list_entry(md.maps.next, struct map, node); 1349 list_del_init(&new_map->node); 1350 if (new_map == replacement_map) { 1351 map->start = new_map->start; 1352 map->end = new_map->end; 1353 map->pgoff = new_map->pgoff; 1354 map->map_ip = new_map->map_ip; 1355 map->unmap_ip = new_map->unmap_ip; 1356 /* Ensure maps are correctly ordered */ 1357 map__get(map); 1358 maps__remove(kmaps, map); 1359 maps__insert(kmaps, map); 1360 map__put(map); 1361 map__put(new_map); 1362 } else { 1363 /* 1364 * Merge kcore map into existing maps, 1365 * and ensure that current maps (eBPF) 1366 * stay intact. 1367 */ 1368 if (maps__merge_in(kmaps, new_map)) 1369 goto out_err; 1370 } 1371 } 1372 1373 if (machine__is(machine, "x86_64")) { 1374 u64 addr; 1375 1376 /* 1377 * If one of the corresponding symbols is there, assume the 1378 * entry trampoline maps are too. 1379 */ 1380 if (!kallsyms__get_function_start(kallsyms_filename, 1381 ENTRY_TRAMPOLINE_NAME, 1382 &addr)) 1383 machine->trampolines_mapped = true; 1384 } 1385 1386 /* 1387 * Set the data type and long name so that kcore can be read via 1388 * dso__data_read_addr(). 1389 */ 1390 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 1391 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE; 1392 else 1393 dso->binary_type = DSO_BINARY_TYPE__KCORE; 1394 dso__set_long_name(dso, strdup(kcore_filename), true); 1395 1396 close(fd); 1397 1398 if (map->prot & PROT_EXEC) 1399 pr_debug("Using %s for kernel object code\n", kcore_filename); 1400 else 1401 pr_debug("Using %s for kernel data\n", kcore_filename); 1402 1403 return 0; 1404 1405 out_err: 1406 while (!list_empty(&md.maps)) { 1407 map = list_entry(md.maps.next, struct map, node); 1408 list_del_init(&map->node); 1409 map__put(map); 1410 } 1411 close(fd); 1412 return -EINVAL; 1413 } 1414 1415 /* 1416 * If the kernel is relocated at boot time, kallsyms won't match. Compute the 1417 * delta based on the relocation reference symbol. 
 */
static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
{
	u64 addr;

	if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
		return 0;

	if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
		return -1;

	*delta = addr - kmap->ref_reloc_sym->addr;
	return 0;
}

int __dso__load_kallsyms(struct dso *dso, const char *filename,
			 struct map *map, bool no_kcore)
{
	struct kmap *kmap = map__kmap(map);
	u64 delta = 0;

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return -1;

	if (!kmap || !kmap->kmaps)
		return -1;

	if (dso__load_all_kallsyms(dso, filename) < 0)
		return -1;

	if (kallsyms__delta(kmap, filename, &delta))
		return -1;

	symbols__fixup_end(&dso->symbols);
	symbols__fixup_duplicate(&dso->symbols);

	if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
	else
		dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;

	if (!no_kcore && !dso__load_kcore(dso, map, filename))
		return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
	else
		return maps__split_kallsyms(kmap->kmaps, dso, delta, map);
}

int dso__load_kallsyms(struct dso *dso, const char *filename,
		       struct map *map)
{
	return __dso__load_kallsyms(dso, filename, map, false);
}

static int dso__load_perf_map(const char *map_path, struct dso *dso)
{
	char *line = NULL;
	size_t n;
	FILE *file;
	int nr_syms = 0;

	file = fopen(map_path, "r");
	if (file == NULL)
		goto out_failure;

	while (!feof(file)) {
		u64 start, size;
		struct symbol *sym;
		int line_len, len;

		line_len = getline(&line, &n, file);
		if (line_len < 0)
			break;

		if (!line)
			goto out_failure;

		line[--line_len] = '\0'; /* \n */

		len = hex2u64(line, &start);

		len++;
		if (len + 2 >= line_len)
			continue;

		len += hex2u64(line + len, &size);

		len++;
		if (len + 2 >= line_len)
			continue;

		sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);

		if (sym == NULL)
			goto out_delete_line;

		symbols__insert(&dso->symbols, sym);
		nr_syms++;
	}

	free(line);
	fclose(file);

	return nr_syms;

out_delete_line:
	free(line);
out_failure:
	return -1;
}
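
/*
 * For reference, each line of such a perf map file is expected to be
 * "<start> <size> <name>" with start and size in hex, e.g. (illustrative
 * line, not from a real JIT):
 *
 *   40e8f0 12 jitted_function
 *
 * which dso__load_perf_map() above turns into an STB_GLOBAL/STT_FUNC
 * symbol covering 0x12 bytes starting at 0x40e8f0.
 */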

static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
					   enum dso_binary_type type)
{
	switch (type) {
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
		return !kmod && dso->kernel == DSO_SPACE__USER;

	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__KCORE:
		return dso->kernel == DSO_SPACE__KERNEL;

	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		return dso->kernel == DSO_SPACE__KERNEL_GUEST;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		/*
		 * kernel modules know their symtab type - it's set when
		 * creating a module dso in machine__addnew_module_map().
		 */
		return kmod && dso->symtab_type == type;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		return true;

	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return false;
	}
}

/* Checks for the existence of the perf-<pid>.map file in two different
 * locations. First, if the process is a separate mount namespace, check in
 * that namespace using the pid of the innermost pid namespace. If it's not
 * in a namespace, or the file can't be found there, try in the mount
 * namespace of the tracing process using our view of its pid.
 */
static int dso__find_perf_map(char *filebuf, size_t bufsz,
			      struct nsinfo **nsip)
{
	struct nscookie nsc;
	struct nsinfo *nsi;
	struct nsinfo *nnsi;
	int rc = -1;

	nsi = *nsip;

	if (nsi->need_setns) {
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsi->nstgid);
		nsinfo__mountns_enter(nsi, &nsc);
		rc = access(filebuf, R_OK);
		nsinfo__mountns_exit(&nsc);
		if (rc == 0)
			return rc;
	}

	nnsi = nsinfo__copy(nsi);
	if (nnsi) {
		nsinfo__put(nsi);

		nnsi->need_setns = false;
		snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nnsi->tgid);
		*nsip = nnsi;
		rc = 0;
	}

	return rc;
}

int dso__load(struct dso *dso, struct map *map)
{
	char *name;
	int ret = -1;
	u_int i;
	struct machine *machine = NULL;
	char *root_dir = (char *) "";
	int ss_pos = 0;
	struct symsrc ss_[2];
	struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
	bool kmod;
	bool perfmap;
	unsigned char build_id[BUILD_ID_SIZE];
	struct nscookie nsc;
	char newmapname[PATH_MAX];
	const char *map_path = dso->long_name;

	perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
	if (perfmap) {
		if (dso->nsinfo && (dso__find_perf_map(newmapname,
		    sizeof(newmapname), &dso->nsinfo) == 0)) {
			map_path = newmapname;
		}
	}

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	pthread_mutex_lock(&dso->lock);

	/* check again under the dso->lock */
	if (dso__loaded(dso)) {
		ret = 1;
		goto out;
	}

	kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;

	if (dso->kernel && !kmod) {
		if (dso->kernel == DSO_SPACE__KERNEL)
			ret = dso__load_kernel_sym(dso, map);
		else if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
			ret = dso__load_guest_kernel_sym(dso, map);

		machine = map__kmaps(map)->machine;
		if (machine__is(machine, "x86_64"))
			machine__map_x86_64_entry_trampolines(machine, dso);
		goto out;
	}

	dso->adjust_symbols = 0;

	if (perfmap) {
		ret = dso__load_perf_map(map_path, dso);
		dso->symtab_type = ret > 0 ?
DSO_BINARY_TYPE__JAVA_JIT : 1669 DSO_BINARY_TYPE__NOT_FOUND; 1670 goto out; 1671 } 1672 1673 if (machine) 1674 root_dir = machine->root_dir; 1675 1676 name = malloc(PATH_MAX); 1677 if (!name) 1678 goto out; 1679 1680 /* 1681 * Read the build id if possible. This is required for 1682 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work 1683 */ 1684 if (!dso->has_build_id && 1685 is_regular_file(dso->long_name)) { 1686 __symbol__join_symfs(name, PATH_MAX, dso->long_name); 1687 if (filename__read_build_id(name, build_id, BUILD_ID_SIZE) > 0) 1688 dso__set_build_id(dso, build_id); 1689 } 1690 1691 /* 1692 * Iterate over candidate debug images. 1693 * Keep track of "interesting" ones (those which have a symtab, dynsym, 1694 * and/or opd section) for processing. 1695 */ 1696 for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) { 1697 struct symsrc *ss = &ss_[ss_pos]; 1698 bool next_slot = false; 1699 bool is_reg; 1700 bool nsexit; 1701 int sirc = -1; 1702 1703 enum dso_binary_type symtab_type = binary_type_symtab[i]; 1704 1705 nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE || 1706 symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO); 1707 1708 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type)) 1709 continue; 1710 1711 if (dso__read_binary_type_filename(dso, symtab_type, 1712 root_dir, name, PATH_MAX)) 1713 continue; 1714 1715 if (nsexit) 1716 nsinfo__mountns_exit(&nsc); 1717 1718 is_reg = is_regular_file(name); 1719 if (is_reg) 1720 sirc = symsrc__init(ss, dso, name, symtab_type); 1721 1722 if (nsexit) 1723 nsinfo__mountns_enter(dso->nsinfo, &nsc); 1724 1725 if (!is_reg || sirc < 0) 1726 continue; 1727 1728 if (!syms_ss && symsrc__has_symtab(ss)) { 1729 syms_ss = ss; 1730 next_slot = true; 1731 if (!dso->symsrc_filename) 1732 dso->symsrc_filename = strdup(name); 1733 } 1734 1735 if (!runtime_ss && symsrc__possibly_runtime(ss)) { 1736 runtime_ss = ss; 1737 next_slot = true; 1738 } 1739 1740 if (next_slot) { 1741 ss_pos++; 1742 1743 if (syms_ss && runtime_ss) 1744 break; 1745 } else { 1746 symsrc__destroy(ss); 1747 } 1748 1749 } 1750 1751 if (!runtime_ss && !syms_ss) 1752 goto out_free; 1753 1754 if (runtime_ss && !syms_ss) { 1755 syms_ss = runtime_ss; 1756 } 1757 1758 /* We'll have to hope for the best */ 1759 if (!runtime_ss && syms_ss) 1760 runtime_ss = syms_ss; 1761 1762 if (syms_ss) 1763 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod); 1764 else 1765 ret = -1; 1766 1767 if (ret > 0) { 1768 int nr_plt; 1769 1770 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss); 1771 if (nr_plt > 0) 1772 ret += nr_plt; 1773 } 1774 1775 for (; ss_pos > 0; ss_pos--) 1776 symsrc__destroy(&ss_[ss_pos - 1]); 1777 out_free: 1778 free(name); 1779 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) 1780 ret = 0; 1781 out: 1782 dso__set_loaded(dso); 1783 pthread_mutex_unlock(&dso->lock); 1784 nsinfo__mountns_exit(&nsc); 1785 1786 return ret; 1787 } 1788 1789 static int map__strcmp(const void *a, const void *b) 1790 { 1791 const struct map *ma = *(const struct map **)a, *mb = *(const struct map **)b; 1792 return strcmp(ma->dso->short_name, mb->dso->short_name); 1793 } 1794 1795 static int map__strcmp_name(const void *name, const void *b) 1796 { 1797 const struct map *map = *(const struct map **)b; 1798 return strcmp(name, map->dso->short_name); 1799 } 1800 1801 void __maps__sort_by_name(struct maps *maps) 1802 { 1803 qsort(maps->maps_by_name, maps->nr_maps, sizeof(struct map *), map__strcmp); 1804 } 1805 1806 static int map__groups__sort_by_name_from_rbtree(struct maps *maps) 1807 { 1808 
struct map *map; 1809 struct map **maps_by_name = realloc(maps->maps_by_name, maps->nr_maps * sizeof(map)); 1810 int i = 0; 1811 1812 if (maps_by_name == NULL) 1813 return -1; 1814 1815 maps->maps_by_name = maps_by_name; 1816 maps->nr_maps_allocated = maps->nr_maps; 1817 1818 maps__for_each_entry(maps, map) 1819 maps_by_name[i++] = map; 1820 1821 __maps__sort_by_name(maps); 1822 return 0; 1823 } 1824 1825 static struct map *__maps__find_by_name(struct maps *maps, const char *name) 1826 { 1827 struct map **mapp; 1828 1829 if (maps->maps_by_name == NULL && 1830 map__groups__sort_by_name_from_rbtree(maps)) 1831 return NULL; 1832 1833 mapp = bsearch(name, maps->maps_by_name, maps->nr_maps, sizeof(*mapp), map__strcmp_name); 1834 if (mapp) 1835 return *mapp; 1836 return NULL; 1837 } 1838 1839 struct map *maps__find_by_name(struct maps *maps, const char *name) 1840 { 1841 struct map *map; 1842 1843 down_read(&maps->lock); 1844 1845 if (maps->last_search_by_name && strcmp(maps->last_search_by_name->dso->short_name, name) == 0) { 1846 map = maps->last_search_by_name; 1847 goto out_unlock; 1848 } 1849 /* 1850 * If we have maps->maps_by_name, then the name isn't in the rbtree, 1851 * as maps->maps_by_name mirrors the rbtree when lookups by name are 1852 * made. 1853 */ 1854 map = __maps__find_by_name(maps, name); 1855 if (map || maps->maps_by_name != NULL) 1856 goto out_unlock; 1857 1858 /* Fallback to traversing the rbtree... */ 1859 maps__for_each_entry(maps, map) 1860 if (strcmp(map->dso->short_name, name) == 0) { 1861 maps->last_search_by_name = map; 1862 goto out_unlock; 1863 } 1864 1865 map = NULL; 1866 1867 out_unlock: 1868 up_read(&maps->lock); 1869 return map; 1870 } 1871 1872 int dso__load_vmlinux(struct dso *dso, struct map *map, 1873 const char *vmlinux, bool vmlinux_allocated) 1874 { 1875 int err = -1; 1876 struct symsrc ss; 1877 char symfs_vmlinux[PATH_MAX]; 1878 enum dso_binary_type symtab_type; 1879 1880 if (vmlinux[0] == '/') 1881 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux); 1882 else 1883 symbol__join_symfs(symfs_vmlinux, vmlinux); 1884 1885 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 1886 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; 1887 else 1888 symtab_type = DSO_BINARY_TYPE__VMLINUX; 1889 1890 if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) 1891 return -1; 1892 1893 err = dso__load_sym(dso, map, &ss, &ss, 0); 1894 symsrc__destroy(&ss); 1895 1896 if (err > 0) { 1897 if (dso->kernel == DSO_SPACE__KERNEL_GUEST) 1898 dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX; 1899 else 1900 dso->binary_type = DSO_BINARY_TYPE__VMLINUX; 1901 dso__set_long_name(dso, vmlinux, vmlinux_allocated); 1902 dso__set_loaded(dso); 1903 pr_debug("Using %s for symbols\n", symfs_vmlinux); 1904 } 1905 1906 return err; 1907 } 1908 1909 int dso__load_vmlinux_path(struct dso *dso, struct map *map) 1910 { 1911 int i, err = 0; 1912 char *filename = NULL; 1913 1914 pr_debug("Looking at the vmlinux_path (%d entries long)\n", 1915 vmlinux_path__nr_entries + 1); 1916 1917 for (i = 0; i < vmlinux_path__nr_entries; ++i) { 1918 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false); 1919 if (err > 0) 1920 goto out; 1921 } 1922 1923 if (!symbol_conf.ignore_vmlinux_buildid) 1924 filename = dso__build_id_filename(dso, NULL, 0, false); 1925 if (filename != NULL) { 1926 err = dso__load_vmlinux(dso, map, filename, true); 1927 if (err > 0) 1928 goto out; 1929 free(filename); 1930 } 1931 out: 1932 return err; 1933 } 1934 1935 static bool visible_dir_filter(const char *name, struct dirent *d) 1936 { 
1937 if (d->d_type != DT_DIR) 1938 return false; 1939 return lsdir_no_dot_filter(name, d); 1940 } 1941 1942 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz) 1943 { 1944 char kallsyms_filename[PATH_MAX]; 1945 int ret = -1; 1946 struct strlist *dirs; 1947 struct str_node *nd; 1948 1949 dirs = lsdir(dir, visible_dir_filter); 1950 if (!dirs) 1951 return -1; 1952 1953 strlist__for_each_entry(nd, dirs) { 1954 scnprintf(kallsyms_filename, sizeof(kallsyms_filename), 1955 "%s/%s/kallsyms", dir, nd->s); 1956 if (!validate_kcore_addresses(kallsyms_filename, map)) { 1957 strlcpy(dir, kallsyms_filename, dir_sz); 1958 ret = 0; 1959 break; 1960 } 1961 } 1962 1963 strlist__delete(dirs); 1964 1965 return ret; 1966 } 1967 1968 /* 1969 * Use open(O_RDONLY) to check readability directly instead of access(R_OK) 1970 * since access(R_OK) only checks with real UID/GID but open() use effective 1971 * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO). 1972 */ 1973 static bool filename__readable(const char *file) 1974 { 1975 int fd = open(file, O_RDONLY); 1976 if (fd < 0) 1977 return false; 1978 close(fd); 1979 return true; 1980 } 1981 1982 static char *dso__find_kallsyms(struct dso *dso, struct map *map) 1983 { 1984 u8 host_build_id[BUILD_ID_SIZE]; 1985 char sbuild_id[SBUILD_ID_SIZE]; 1986 bool is_host = false; 1987 char path[PATH_MAX]; 1988 1989 if (!dso->has_build_id) { 1990 /* 1991 * Last resort, if we don't have a build-id and couldn't find 1992 * any vmlinux file, try the running kernel kallsyms table. 1993 */ 1994 goto proc_kallsyms; 1995 } 1996 1997 if (sysfs__read_build_id("/sys/kernel/notes", host_build_id, 1998 sizeof(host_build_id)) == 0) 1999 is_host = dso__build_id_equal(dso, host_build_id); 2000 2001 /* Try a fast path for /proc/kallsyms if possible */ 2002 if (is_host) { 2003 /* 2004 * Do not check the build-id cache, unless we know we cannot use 2005 * /proc/kcore or module maps don't match to /proc/kallsyms. 2006 * To check readability of /proc/kcore, do not use access(R_OK) 2007 * since /proc/kcore requires CAP_SYS_RAWIO to read and access 2008 * can't check it. 2009 */ 2010 if (filename__readable("/proc/kcore") && 2011 !validate_kcore_addresses("/proc/kallsyms", map)) 2012 goto proc_kallsyms; 2013 } 2014 2015 build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id); 2016 2017 /* Find kallsyms in build-id cache with kcore */ 2018 scnprintf(path, sizeof(path), "%s/%s/%s", 2019 buildid_dir, DSO__NAME_KCORE, sbuild_id); 2020 2021 if (!find_matching_kcore(map, path, sizeof(path))) 2022 return strdup(path); 2023 2024 /* Use current /proc/kallsyms if possible */ 2025 if (is_host) { 2026 proc_kallsyms: 2027 return strdup("/proc/kallsyms"); 2028 } 2029 2030 /* Finally, find a cache of kallsyms */ 2031 if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) { 2032 pr_err("No kallsyms or vmlinux with build-id %s was found\n", 2033 sbuild_id); 2034 return NULL; 2035 } 2036 2037 return strdup(path); 2038 } 2039 2040 static int dso__load_kernel_sym(struct dso *dso, struct map *map) 2041 { 2042 int err; 2043 const char *kallsyms_filename = NULL; 2044 char *kallsyms_allocated_filename = NULL; 2045 /* 2046 * Step 1: if the user specified a kallsyms or vmlinux filename, use 2047 * it and only it, reporting errors to the user if it cannot be used. 
2048 * 2049 * For instance, try to analyse an ARM perf.data file _without_ a 2050 * build-id, or if the user specifies the wrong path to the right 2051 * vmlinux file, obviously we can't fallback to another vmlinux (a 2052 * x86_86 one, on the machine where analysis is being performed, say), 2053 * or worse, /proc/kallsyms. 2054 * 2055 * If the specified file _has_ a build-id and there is a build-id 2056 * section in the perf.data file, we will still do the expected 2057 * validation in dso__load_vmlinux and will bail out if they don't 2058 * match. 2059 */ 2060 if (symbol_conf.kallsyms_name != NULL) { 2061 kallsyms_filename = symbol_conf.kallsyms_name; 2062 goto do_kallsyms; 2063 } 2064 2065 if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) { 2066 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false); 2067 } 2068 2069 if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) { 2070 err = dso__load_vmlinux_path(dso, map); 2071 if (err > 0) 2072 return err; 2073 } 2074 2075 /* do not try local files if a symfs was given */ 2076 if (symbol_conf.symfs[0] != 0) 2077 return -1; 2078 2079 kallsyms_allocated_filename = dso__find_kallsyms(dso, map); 2080 if (!kallsyms_allocated_filename) 2081 return -1; 2082 2083 kallsyms_filename = kallsyms_allocated_filename; 2084 2085 do_kallsyms: 2086 err = dso__load_kallsyms(dso, kallsyms_filename, map); 2087 if (err > 0) 2088 pr_debug("Using %s for symbols\n", kallsyms_filename); 2089 free(kallsyms_allocated_filename); 2090 2091 if (err > 0 && !dso__is_kcore(dso)) { 2092 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS; 2093 dso__set_long_name(dso, DSO__NAME_KALLSYMS, false); 2094 map__fixup_start(map); 2095 map__fixup_end(map); 2096 } 2097 2098 return err; 2099 } 2100 2101 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map) 2102 { 2103 int err; 2104 const char *kallsyms_filename = NULL; 2105 struct machine *machine = map__kmaps(map)->machine; 2106 char path[PATH_MAX]; 2107 2108 if (machine__is_default_guest(machine)) { 2109 /* 2110 * if the user specified a vmlinux filename, use it and only 2111 * it, reporting errors to the user if it cannot be used. 
		 * Or use the guest kallsyms file the user passed on the
		 * command line.
		 */
		if (symbol_conf.default_guest_vmlinux_name != NULL) {
			err = dso__load_vmlinux(dso, map,
						symbol_conf.default_guest_vmlinux_name,
						false);
			return err;
		}

		kallsyms_filename = symbol_conf.default_guest_kallsyms;
		if (!kallsyms_filename)
			return -1;
	} else {
		sprintf(path, "%s/proc/kallsyms", machine->root_dir);
		kallsyms_filename = path;
	}

	err = dso__load_kallsyms(dso, kallsyms_filename, map);
	if (err > 0)
		pr_debug("Using %s for symbols\n", kallsyms_filename);
	if (err > 0 && !dso__is_kcore(dso)) {
		dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
		dso__set_long_name(dso, machine->mmap_name, false);
		map__fixup_start(map);
		map__fixup_end(map);
	}

	return err;
}

static void vmlinux_path__exit(void)
{
	while (--vmlinux_path__nr_entries >= 0)
		zfree(&vmlinux_path[vmlinux_path__nr_entries]);
	vmlinux_path__nr_entries = 0;

	zfree(&vmlinux_path);
}

static const char * const vmlinux_paths[] = {
	"vmlinux",
	"/boot/vmlinux"
};

static const char * const vmlinux_paths_upd[] = {
	"/boot/vmlinux-%s",
	"/usr/lib/debug/boot/vmlinux-%s",
	"/lib/modules/%s/build/vmlinux",
	"/usr/lib/debug/lib/modules/%s/vmlinux",
	"/usr/lib/debug/boot/vmlinux-%s.debug"
};

static int vmlinux_path__add(const char *new_entry)
{
	vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
	if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
		return -1;
	++vmlinux_path__nr_entries;

	return 0;
}

static int vmlinux_path__init(struct perf_env *env)
{
	struct utsname uts;
	char bf[PATH_MAX];
	char *kernel_version;
	unsigned int i;

	vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
			      ARRAY_SIZE(vmlinux_paths_upd)));
	if (vmlinux_path == NULL)
		return -1;

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
		if (vmlinux_path__add(vmlinux_paths[i]) < 0)
			goto out_fail;

	/* only try kernel version if no symfs was given */
	if (symbol_conf.symfs[0] != 0)
		return 0;

	if (env) {
		kernel_version = env->os_release;
	} else {
		if (uname(&uts) < 0)
			goto out_fail;

		kernel_version = uts.release;
	}

	for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
		snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
		if (vmlinux_path__add(bf) < 0)
			goto out_fail;
	}

	return 0;

out_fail:
	vmlinux_path__exit();
	return -1;
}

int setup_list(struct strlist **list, const char *list_str,
	       const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = strlist__new(list_str, NULL);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}

	symbol_conf.has_filter = true;
	return 0;
}

int setup_intlist(struct intlist **list, const char *list_str,
		  const char *list_name)
{
	if (list_str == NULL)
		return 0;

	*list = intlist__new(list_str);
	if (!*list) {
		pr_err("problems parsing %s list\n", list_name);
		return -1;
	}
	return 0;
}

static bool symbol__read_kptr_restrict(void)
{
	bool value = false;
	FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");

	if (fp != NULL) {
		char line[8];

		if (fgets(line, sizeof(line), fp) != NULL)
			value = perf_cap__capable(CAP_SYSLOG) ?
					(atoi(line) >= 2) :
					(atoi(line) != 0);

		fclose(fp);
	}

	/* Per kernel/kallsyms.c:
	 * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
	 */
	if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
		value = true;

	return value;
}

int symbol__annotation_init(void)
{
	if (symbol_conf.init_annotation)
		return 0;

	if (symbol_conf.initialized) {
		pr_err("Annotation needs to be initialized before symbol__init()\n");
		return -1;
	}

	symbol_conf.priv_size += sizeof(struct annotation);
	symbol_conf.init_annotation = true;
	return 0;
}

int symbol__init(struct perf_env *env)
{
	const char *symfs;

	if (symbol_conf.initialized)
		return 0;

	symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));

	symbol__elf_init();

	if (symbol_conf.sort_by_name)
		symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) -
					  sizeof(struct symbol));

	if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
		return -1;

	if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
		pr_err("'.' is the only invalid --field-separator argument\n");
		return -1;
	}

	if (setup_list(&symbol_conf.dso_list,
		       symbol_conf.dso_list_str, "dso") < 0)
		return -1;

	if (setup_list(&symbol_conf.comm_list,
		       symbol_conf.comm_list_str, "comm") < 0)
		goto out_free_dso_list;

	if (setup_intlist(&symbol_conf.pid_list,
			  symbol_conf.pid_list_str, "pid") < 0)
		goto out_free_comm_list;

	if (setup_intlist(&symbol_conf.tid_list,
			  symbol_conf.tid_list_str, "tid") < 0)
		goto out_free_pid_list;

	if (setup_list(&symbol_conf.sym_list,
		       symbol_conf.sym_list_str, "symbol") < 0)
		goto out_free_tid_list;

	if (setup_list(&symbol_conf.bt_stop_list,
		       symbol_conf.bt_stop_list_str, "symbol") < 0)
		goto out_free_sym_list;

	/*
	 * A path to symbols of "/" is identical to "",
	 * reset it here for simplicity.
	 */
	symfs = realpath(symbol_conf.symfs, NULL);
	if (symfs == NULL)
		symfs = symbol_conf.symfs;
	if (strcmp(symfs, "/") == 0)
		symbol_conf.symfs = "";
	if (symfs != symbol_conf.symfs)
		free((void *)symfs);

	symbol_conf.kptr_restrict = symbol__read_kptr_restrict();

	symbol_conf.initialized = true;
	return 0;

out_free_sym_list:
	strlist__delete(symbol_conf.sym_list);
out_free_tid_list:
	intlist__delete(symbol_conf.tid_list);
out_free_pid_list:
	intlist__delete(symbol_conf.pid_list);
out_free_comm_list:
	strlist__delete(symbol_conf.comm_list);
out_free_dso_list:
	strlist__delete(symbol_conf.dso_list);
	return -1;
}

void symbol__exit(void)
{
	if (!symbol_conf.initialized)
		return;
	strlist__delete(symbol_conf.bt_stop_list);
	strlist__delete(symbol_conf.sym_list);
	strlist__delete(symbol_conf.dso_list);
	strlist__delete(symbol_conf.comm_list);
	intlist__delete(symbol_conf.tid_list);
	intlist__delete(symbol_conf.pid_list);
	vmlinux_path__exit();
	symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
	symbol_conf.bt_stop_list = NULL;
	symbol_conf.initialized = false;
}

int symbol__config_symfs(const struct option *opt __maybe_unused,
			 const char *dir, int unset __maybe_unused)
{
	char *bf = NULL;
	int ret;

	symbol_conf.symfs = strdup(dir);
	if (symbol_conf.symfs == NULL)
		return -ENOMEM;

	/* skip the locally configured cache if a symfs is given, and
	 * config buildid dir to symfs/.debug
	 */
	ret = asprintf(&bf, "%s/%s", dir, ".debug");
	if (ret < 0)
		return -ENOMEM;

	set_buildid_dir(bf);

	free(bf);
	return 0;
}

struct mem_info *mem_info__get(struct mem_info *mi)
{
	if (mi)
		refcount_inc(&mi->refcnt);
	return mi;
}

void mem_info__put(struct mem_info *mi)
{
	if (mi && refcount_dec_and_test(&mi->refcnt))
		free(mi);
}

struct mem_info *mem_info__new(void)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (mi)
		refcount_set(&mi->refcnt, 1);
	return mi;
}