1 // SPDX-License-Identifier: GPL-2.0 2 #include <asm/bug.h> 3 #include <linux/kernel.h> 4 #include <linux/string.h> 5 #include <linux/zalloc.h> 6 #include <sys/time.h> 7 #include <sys/resource.h> 8 #include <sys/types.h> 9 #include <sys/stat.h> 10 #include <unistd.h> 11 #include <errno.h> 12 #include <fcntl.h> 13 #include <stdlib.h> 14 #ifdef HAVE_LIBBPF_SUPPORT 15 #include <bpf/libbpf.h> 16 #include "bpf-event.h" 17 #include "bpf-utils.h" 18 #endif 19 #include "compress.h" 20 #include "env.h" 21 #include "namespaces.h" 22 #include "path.h" 23 #include "map.h" 24 #include "symbol.h" 25 #include "srcline.h" 26 #include "dso.h" 27 #include "dsos.h" 28 #include "machine.h" 29 #include "auxtrace.h" 30 #include "util.h" /* O_CLOEXEC for older systems */ 31 #include "debug.h" 32 #include "string2.h" 33 #include "vdso.h" 34 35 static const char * const debuglink_paths[] = { 36 "%.0s%s", 37 "%s/%s", 38 "%s/.debug/%s", 39 "/usr/lib/debug%s/%s" 40 }; 41 42 char dso__symtab_origin(const struct dso *dso) 43 { 44 static const char origin[] = { 45 [DSO_BINARY_TYPE__KALLSYMS] = 'k', 46 [DSO_BINARY_TYPE__VMLINUX] = 'v', 47 [DSO_BINARY_TYPE__JAVA_JIT] = 'j', 48 [DSO_BINARY_TYPE__DEBUGLINK] = 'l', 49 [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', 50 [DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO] = 'D', 51 [DSO_BINARY_TYPE__FEDORA_DEBUGINFO] = 'f', 52 [DSO_BINARY_TYPE__UBUNTU_DEBUGINFO] = 'u', 53 [DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO] = 'x', 54 [DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO] = 'o', 55 [DSO_BINARY_TYPE__BUILDID_DEBUGINFO] = 'b', 56 [DSO_BINARY_TYPE__SYSTEM_PATH_DSO] = 'd', 57 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K', 58 [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP] = 'm', 59 [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g', 60 [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G', 61 [DSO_BINARY_TYPE__GUEST_KMODULE_COMP] = 'M', 62 [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V', 63 }; 64 65 if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND) 66 return '!'; 67 return origin[dso->symtab_type]; 68 } 69 70 int dso__read_binary_type_filename(const struct dso *dso, 71 enum dso_binary_type type, 72 char *root_dir, char *filename, size_t size) 73 { 74 char build_id_hex[SBUILD_ID_SIZE]; 75 int ret = 0; 76 size_t len; 77 78 switch (type) { 79 case DSO_BINARY_TYPE__DEBUGLINK: 80 { 81 const char *last_slash; 82 char dso_dir[PATH_MAX]; 83 char symfile[PATH_MAX]; 84 unsigned int i; 85 86 len = __symbol__join_symfs(filename, size, dso->long_name); 87 last_slash = filename + len; 88 while (last_slash != filename && *last_slash != '/') 89 last_slash--; 90 91 strncpy(dso_dir, filename, last_slash - filename); 92 dso_dir[last_slash-filename] = '\0'; 93 94 if (!is_regular_file(filename)) { 95 ret = -1; 96 break; 97 } 98 99 ret = filename__read_debuglink(filename, symfile, PATH_MAX); 100 if (ret) 101 break; 102 103 /* Check predefined locations where debug file might reside */ 104 ret = -1; 105 for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) { 106 snprintf(filename, size, 107 debuglink_paths[i], dso_dir, symfile); 108 if (is_regular_file(filename)) { 109 ret = 0; 110 break; 111 } 112 } 113 114 break; 115 } 116 case DSO_BINARY_TYPE__BUILD_ID_CACHE: 117 if (dso__build_id_filename(dso, filename, size, false) == NULL) 118 ret = -1; 119 break; 120 121 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO: 122 if (dso__build_id_filename(dso, filename, size, true) == NULL) 123 ret = -1; 124 break; 125 126 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO: 127 len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); 128 snprintf(filename + len, size - len, "%s.debug", dso->long_name); 129 break; 130 131 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO: 132 len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); 133 snprintf(filename + len, size - len, "%s", dso->long_name); 134 break; 135 136 case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO: 137 /* 138 * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in 139 * /usr/lib/debug/lib when it is expected to be in 140 * /usr/lib/debug/usr/lib 141 */ 142 if (strlen(dso->long_name) < 9 || 143 strncmp(dso->long_name, "/usr/lib/", 9)) { 144 ret = -1; 145 break; 146 } 147 len = __symbol__join_symfs(filename, size, "/usr/lib/debug"); 148 snprintf(filename + len, size - len, "%s", dso->long_name + 4); 149 break; 150 151 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO: 152 { 153 const char *last_slash; 154 size_t dir_size; 155 156 last_slash = dso->long_name + dso->long_name_len; 157 while (last_slash != dso->long_name && *last_slash != '/') 158 last_slash--; 159 160 len = __symbol__join_symfs(filename, size, ""); 161 dir_size = last_slash - dso->long_name + 2; 162 if (dir_size > (size - len)) { 163 ret = -1; 164 break; 165 } 166 len += scnprintf(filename + len, dir_size, "%s", dso->long_name); 167 len += scnprintf(filename + len , size - len, ".debug%s", 168 last_slash); 169 break; 170 } 171 172 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO: 173 if (!dso->has_build_id) { 174 ret = -1; 175 break; 176 } 177 178 build_id__sprintf(&dso->bid, build_id_hex); 179 len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/"); 180 snprintf(filename + len, size - len, "%.2s/%s.debug", 181 build_id_hex, build_id_hex + 2); 182 break; 183 184 case DSO_BINARY_TYPE__VMLINUX: 185 case DSO_BINARY_TYPE__GUEST_VMLINUX: 186 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO: 187 __symbol__join_symfs(filename, size, dso->long_name); 188 break; 189 190 case DSO_BINARY_TYPE__GUEST_KMODULE: 191 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP: 192 path__join3(filename, size, symbol_conf.symfs, 193 root_dir, dso->long_name); 194 break; 195 196 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE: 197 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP: 198 __symbol__join_symfs(filename, size, dso->long_name); 199 break; 200 201 case DSO_BINARY_TYPE__KCORE: 202 case DSO_BINARY_TYPE__GUEST_KCORE: 203 snprintf(filename, size, "%s", dso->long_name); 204 break; 205 206 default: 207 case DSO_BINARY_TYPE__KALLSYMS: 208 case DSO_BINARY_TYPE__GUEST_KALLSYMS: 209 case DSO_BINARY_TYPE__JAVA_JIT: 210 case DSO_BINARY_TYPE__BPF_PROG_INFO: 211 case DSO_BINARY_TYPE__BPF_IMAGE: 212 case DSO_BINARY_TYPE__OOL: 213 case DSO_BINARY_TYPE__NOT_FOUND: 214 ret = -1; 215 break; 216 } 217 218 return ret; 219 } 220 221 enum { 222 COMP_ID__NONE = 0, 223 }; 224 225 static const struct { 226 const char *fmt; 227 int (*decompress)(const char *input, int output); 228 bool (*is_compressed)(const char *input); 229 } compressions[] = { 230 [COMP_ID__NONE] = { .fmt = NULL, }, 231 #ifdef HAVE_ZLIB_SUPPORT 232 { "gz", gzip_decompress_to_file, gzip_is_compressed }, 233 #endif 234 #ifdef HAVE_LZMA_SUPPORT 235 { "xz", lzma_decompress_to_file, lzma_is_compressed }, 236 #endif 237 { NULL, NULL, NULL }, 238 }; 239 240 static int is_supported_compression(const char *ext) 241 { 242 unsigned i; 243 244 for (i = 1; compressions[i].fmt; i++) { 245 if (!strcmp(ext, compressions[i].fmt)) 246 return i; 247 } 248 return COMP_ID__NONE; 249 } 250 251 bool is_kernel_module(const char *pathname, int cpumode) 252 { 253 struct kmod_path m; 254 int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK; 255 256 WARN_ONCE(mode != cpumode, 257 "Internal error: passing unmasked cpumode (%x) to is_kernel_module", 258 cpumode); 259 260 switch (mode) { 261 case PERF_RECORD_MISC_USER: 262 case PERF_RECORD_MISC_HYPERVISOR: 263 case PERF_RECORD_MISC_GUEST_USER: 264 return false; 265 /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */ 266 default: 267 if (kmod_path__parse(&m, pathname)) { 268 pr_err("Failed to check whether %s is a kernel module or not. Assume it is.", 269 pathname); 270 return true; 271 } 272 } 273 274 return m.kmod; 275 } 276 277 bool dso__needs_decompress(struct dso *dso) 278 { 279 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || 280 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; 281 } 282 283 int filename__decompress(const char *name, char *pathname, 284 size_t len, int comp, int *err) 285 { 286 char tmpbuf[] = KMOD_DECOMP_NAME; 287 int fd = -1; 288 289 /* 290 * We have proper compression id for DSO and yet the file 291 * behind the 'name' can still be plain uncompressed object. 292 * 293 * The reason is behind the logic we open the DSO object files, 294 * when we try all possible 'debug' objects until we find the 295 * data. So even if the DSO is represented by 'krava.xz' module, 296 * we can end up here opening ~/.debug/....23432432/debug' file 297 * which is not compressed. 298 * 299 * To keep this transparent, we detect this and return the file 300 * descriptor to the uncompressed file. 301 */ 302 if (!compressions[comp].is_compressed(name)) 303 return open(name, O_RDONLY); 304 305 fd = mkstemp(tmpbuf); 306 if (fd < 0) { 307 *err = errno; 308 return -1; 309 } 310 311 if (compressions[comp].decompress(name, fd)) { 312 *err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE; 313 close(fd); 314 fd = -1; 315 } 316 317 if (!pathname || (fd < 0)) 318 unlink(tmpbuf); 319 320 if (pathname && (fd >= 0)) 321 strlcpy(pathname, tmpbuf, len); 322 323 return fd; 324 } 325 326 static int decompress_kmodule(struct dso *dso, const char *name, 327 char *pathname, size_t len) 328 { 329 if (!dso__needs_decompress(dso)) 330 return -1; 331 332 if (dso->comp == COMP_ID__NONE) 333 return -1; 334 335 return filename__decompress(name, pathname, len, dso->comp, 336 &dso->load_errno); 337 } 338 339 int dso__decompress_kmodule_fd(struct dso *dso, const char *name) 340 { 341 return decompress_kmodule(dso, name, NULL, 0); 342 } 343 344 int dso__decompress_kmodule_path(struct dso *dso, const char *name, 345 char *pathname, size_t len) 346 { 347 int fd = decompress_kmodule(dso, name, pathname, len); 348 349 close(fd); 350 return fd >= 0 ? 0 : -1; 351 } 352 353 /* 354 * Parses kernel module specified in @path and updates 355 * @m argument like: 356 * 357 * @comp - true if @path contains supported compression suffix, 358 * false otherwise 359 * @kmod - true if @path contains '.ko' suffix in right position, 360 * false otherwise 361 * @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name 362 * of the kernel module without suffixes, otherwise strudup-ed 363 * base name of @path 364 * @ext - if (@alloc_ext && @comp) is true, it contains strdup-ed string 365 * the compression suffix 366 * 367 * Returns 0 if there's no strdup error, -ENOMEM otherwise. 368 */ 369 int __kmod_path__parse(struct kmod_path *m, const char *path, 370 bool alloc_name) 371 { 372 const char *name = strrchr(path, '/'); 373 const char *ext = strrchr(path, '.'); 374 bool is_simple_name = false; 375 376 memset(m, 0x0, sizeof(*m)); 377 name = name ? name + 1 : path; 378 379 /* 380 * '.' is also a valid character for module name. For example: 381 * [aaa.bbb] is a valid module name. '[' should have higher 382 * priority than '.ko' suffix. 383 * 384 * The kernel names are from machine__mmap_name. Such 385 * name should belong to kernel itself, not kernel module. 386 */ 387 if (name[0] == '[') { 388 is_simple_name = true; 389 if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) || 390 (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) || 391 (strncmp(name, "[vdso]", 6) == 0) || 392 (strncmp(name, "[vdso32]", 8) == 0) || 393 (strncmp(name, "[vdsox32]", 9) == 0) || 394 (strncmp(name, "[vsyscall]", 10) == 0)) { 395 m->kmod = false; 396 397 } else 398 m->kmod = true; 399 } 400 401 /* No extension, just return name. */ 402 if ((ext == NULL) || is_simple_name) { 403 if (alloc_name) { 404 m->name = strdup(name); 405 return m->name ? 0 : -ENOMEM; 406 } 407 return 0; 408 } 409 410 m->comp = is_supported_compression(ext + 1); 411 if (m->comp > COMP_ID__NONE) 412 ext -= 3; 413 414 /* Check .ko extension only if there's enough name left. */ 415 if (ext > name) 416 m->kmod = !strncmp(ext, ".ko", 3); 417 418 if (alloc_name) { 419 if (m->kmod) { 420 if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1) 421 return -ENOMEM; 422 } else { 423 if (asprintf(&m->name, "%s", name) == -1) 424 return -ENOMEM; 425 } 426 427 strreplace(m->name, '-', '_'); 428 } 429 430 return 0; 431 } 432 433 void dso__set_module_info(struct dso *dso, struct kmod_path *m, 434 struct machine *machine) 435 { 436 if (machine__is_host(machine)) 437 dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE; 438 else 439 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; 440 441 /* _KMODULE_COMP should be next to _KMODULE */ 442 if (m->kmod && m->comp) { 443 dso->symtab_type++; 444 dso->comp = m->comp; 445 } 446 447 dso__set_short_name(dso, strdup(m->name), true); 448 } 449 450 /* 451 * Global list of open DSOs and the counter. 452 */ 453 static LIST_HEAD(dso__data_open); 454 static long dso__data_open_cnt; 455 static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER; 456 457 static void dso__list_add(struct dso *dso) 458 { 459 list_add_tail(&dso->data.open_entry, &dso__data_open); 460 dso__data_open_cnt++; 461 } 462 463 static void dso__list_del(struct dso *dso) 464 { 465 list_del_init(&dso->data.open_entry); 466 WARN_ONCE(dso__data_open_cnt <= 0, 467 "DSO data fd counter out of bounds."); 468 dso__data_open_cnt--; 469 } 470 471 static void close_first_dso(void); 472 473 static int do_open(char *name) 474 { 475 int fd; 476 char sbuf[STRERR_BUFSIZE]; 477 478 do { 479 fd = open(name, O_RDONLY|O_CLOEXEC); 480 if (fd >= 0) 481 return fd; 482 483 pr_debug("dso open failed: %s\n", 484 str_error_r(errno, sbuf, sizeof(sbuf))); 485 if (!dso__data_open_cnt || errno != EMFILE) 486 break; 487 488 close_first_dso(); 489 } while (1); 490 491 return -1; 492 } 493 494 static int __open_dso(struct dso *dso, struct machine *machine) 495 { 496 int fd = -EINVAL; 497 char *root_dir = (char *)""; 498 char *name = malloc(PATH_MAX); 499 bool decomp = false; 500 501 if (!name) 502 return -ENOMEM; 503 504 if (machine) 505 root_dir = machine->root_dir; 506 507 if (dso__read_binary_type_filename(dso, dso->binary_type, 508 root_dir, name, PATH_MAX)) 509 goto out; 510 511 if (!is_regular_file(name)) { 512 char *new_name; 513 514 if (errno != ENOENT || dso->nsinfo == NULL) 515 goto out; 516 517 new_name = filename_with_chroot(dso->nsinfo->pid, name); 518 if (!new_name) 519 goto out; 520 521 free(name); 522 name = new_name; 523 } 524 525 if (dso__needs_decompress(dso)) { 526 char newpath[KMOD_DECOMP_LEN]; 527 size_t len = sizeof(newpath); 528 529 if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) { 530 fd = -dso->load_errno; 531 goto out; 532 } 533 534 decomp = true; 535 strcpy(name, newpath); 536 } 537 538 fd = do_open(name); 539 540 if (decomp) 541 unlink(name); 542 543 out: 544 free(name); 545 return fd; 546 } 547 548 static void check_data_close(void); 549 550 /** 551 * dso_close - Open DSO data file 552 * @dso: dso object 553 * 554 * Open @dso's data file descriptor and updates 555 * list/count of open DSO objects. 556 */ 557 static int open_dso(struct dso *dso, struct machine *machine) 558 { 559 int fd; 560 struct nscookie nsc; 561 562 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) 563 nsinfo__mountns_enter(dso->nsinfo, &nsc); 564 fd = __open_dso(dso, machine); 565 if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) 566 nsinfo__mountns_exit(&nsc); 567 568 if (fd >= 0) { 569 dso__list_add(dso); 570 /* 571 * Check if we crossed the allowed number 572 * of opened DSOs and close one if needed. 573 */ 574 check_data_close(); 575 } 576 577 return fd; 578 } 579 580 static void close_data_fd(struct dso *dso) 581 { 582 if (dso->data.fd >= 0) { 583 close(dso->data.fd); 584 dso->data.fd = -1; 585 dso->data.file_size = 0; 586 dso__list_del(dso); 587 } 588 } 589 590 /** 591 * dso_close - Close DSO data file 592 * @dso: dso object 593 * 594 * Close @dso's data file descriptor and updates 595 * list/count of open DSO objects. 596 */ 597 static void close_dso(struct dso *dso) 598 { 599 close_data_fd(dso); 600 } 601 602 static void close_first_dso(void) 603 { 604 struct dso *dso; 605 606 dso = list_first_entry(&dso__data_open, struct dso, data.open_entry); 607 close_dso(dso); 608 } 609 610 static rlim_t get_fd_limit(void) 611 { 612 struct rlimit l; 613 rlim_t limit = 0; 614 615 /* Allow half of the current open fd limit. */ 616 if (getrlimit(RLIMIT_NOFILE, &l) == 0) { 617 if (l.rlim_cur == RLIM_INFINITY) 618 limit = l.rlim_cur; 619 else 620 limit = l.rlim_cur / 2; 621 } else { 622 pr_err("failed to get fd limit\n"); 623 limit = 1; 624 } 625 626 return limit; 627 } 628 629 static rlim_t fd_limit; 630 631 /* 632 * Used only by tests/dso-data.c to reset the environment 633 * for tests. I dont expect we should change this during 634 * standard runtime. 635 */ 636 void reset_fd_limit(void) 637 { 638 fd_limit = 0; 639 } 640 641 static bool may_cache_fd(void) 642 { 643 if (!fd_limit) 644 fd_limit = get_fd_limit(); 645 646 if (fd_limit == RLIM_INFINITY) 647 return true; 648 649 return fd_limit > (rlim_t) dso__data_open_cnt; 650 } 651 652 /* 653 * Check and close LRU dso if we crossed allowed limit 654 * for opened dso file descriptors. The limit is half 655 * of the RLIMIT_NOFILE files opened. 656 */ 657 static void check_data_close(void) 658 { 659 bool cache_fd = may_cache_fd(); 660 661 if (!cache_fd) 662 close_first_dso(); 663 } 664 665 /** 666 * dso__data_close - Close DSO data file 667 * @dso: dso object 668 * 669 * External interface to close @dso's data file descriptor. 670 */ 671 void dso__data_close(struct dso *dso) 672 { 673 pthread_mutex_lock(&dso__data_open_lock); 674 close_dso(dso); 675 pthread_mutex_unlock(&dso__data_open_lock); 676 } 677 678 static void try_to_open_dso(struct dso *dso, struct machine *machine) 679 { 680 enum dso_binary_type binary_type_data[] = { 681 DSO_BINARY_TYPE__BUILD_ID_CACHE, 682 DSO_BINARY_TYPE__SYSTEM_PATH_DSO, 683 DSO_BINARY_TYPE__NOT_FOUND, 684 }; 685 int i = 0; 686 687 if (dso->data.fd >= 0) 688 return; 689 690 if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) { 691 dso->data.fd = open_dso(dso, machine); 692 goto out; 693 } 694 695 do { 696 dso->binary_type = binary_type_data[i++]; 697 698 dso->data.fd = open_dso(dso, machine); 699 if (dso->data.fd >= 0) 700 goto out; 701 702 } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND); 703 out: 704 if (dso->data.fd >= 0) 705 dso->data.status = DSO_DATA_STATUS_OK; 706 else 707 dso->data.status = DSO_DATA_STATUS_ERROR; 708 } 709 710 /** 711 * dso__data_get_fd - Get dso's data file descriptor 712 * @dso: dso object 713 * @machine: machine object 714 * 715 * External interface to find dso's file, open it and 716 * returns file descriptor. It should be paired with 717 * dso__data_put_fd() if it returns non-negative value. 718 */ 719 int dso__data_get_fd(struct dso *dso, struct machine *machine) 720 { 721 if (dso->data.status == DSO_DATA_STATUS_ERROR) 722 return -1; 723 724 if (pthread_mutex_lock(&dso__data_open_lock) < 0) 725 return -1; 726 727 try_to_open_dso(dso, machine); 728 729 if (dso->data.fd < 0) 730 pthread_mutex_unlock(&dso__data_open_lock); 731 732 return dso->data.fd; 733 } 734 735 void dso__data_put_fd(struct dso *dso __maybe_unused) 736 { 737 pthread_mutex_unlock(&dso__data_open_lock); 738 } 739 740 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by) 741 { 742 u32 flag = 1 << by; 743 744 if (dso->data.status_seen & flag) 745 return true; 746 747 dso->data.status_seen |= flag; 748 749 return false; 750 } 751 752 #ifdef HAVE_LIBBPF_SUPPORT 753 static ssize_t bpf_read(struct dso *dso, u64 offset, char *data) 754 { 755 struct bpf_prog_info_node *node; 756 ssize_t size = DSO__DATA_CACHE_SIZE; 757 u64 len; 758 u8 *buf; 759 760 node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id); 761 if (!node || !node->info_linear) { 762 dso->data.status = DSO_DATA_STATUS_ERROR; 763 return -1; 764 } 765 766 len = node->info_linear->info.jited_prog_len; 767 buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns; 768 769 if (offset >= len) 770 return -1; 771 772 size = (ssize_t)min(len - offset, (u64)size); 773 memcpy(data, buf + offset, size); 774 return size; 775 } 776 777 static int bpf_size(struct dso *dso) 778 { 779 struct bpf_prog_info_node *node; 780 781 node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id); 782 if (!node || !node->info_linear) { 783 dso->data.status = DSO_DATA_STATUS_ERROR; 784 return -1; 785 } 786 787 dso->data.file_size = node->info_linear->info.jited_prog_len; 788 return 0; 789 } 790 #endif // HAVE_LIBBPF_SUPPORT 791 792 static void 793 dso_cache__free(struct dso *dso) 794 { 795 struct rb_root *root = &dso->data.cache; 796 struct rb_node *next = rb_first(root); 797 798 pthread_mutex_lock(&dso->lock); 799 while (next) { 800 struct dso_cache *cache; 801 802 cache = rb_entry(next, struct dso_cache, rb_node); 803 next = rb_next(&cache->rb_node); 804 rb_erase(&cache->rb_node, root); 805 free(cache); 806 } 807 pthread_mutex_unlock(&dso->lock); 808 } 809 810 static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset) 811 { 812 const struct rb_root *root = &dso->data.cache; 813 struct rb_node * const *p = &root->rb_node; 814 const struct rb_node *parent = NULL; 815 struct dso_cache *cache; 816 817 while (*p != NULL) { 818 u64 end; 819 820 parent = *p; 821 cache = rb_entry(parent, struct dso_cache, rb_node); 822 end = cache->offset + DSO__DATA_CACHE_SIZE; 823 824 if (offset < cache->offset) 825 p = &(*p)->rb_left; 826 else if (offset >= end) 827 p = &(*p)->rb_right; 828 else 829 return cache; 830 } 831 832 return NULL; 833 } 834 835 static struct dso_cache * 836 dso_cache__insert(struct dso *dso, struct dso_cache *new) 837 { 838 struct rb_root *root = &dso->data.cache; 839 struct rb_node **p = &root->rb_node; 840 struct rb_node *parent = NULL; 841 struct dso_cache *cache; 842 u64 offset = new->offset; 843 844 pthread_mutex_lock(&dso->lock); 845 while (*p != NULL) { 846 u64 end; 847 848 parent = *p; 849 cache = rb_entry(parent, struct dso_cache, rb_node); 850 end = cache->offset + DSO__DATA_CACHE_SIZE; 851 852 if (offset < cache->offset) 853 p = &(*p)->rb_left; 854 else if (offset >= end) 855 p = &(*p)->rb_right; 856 else 857 goto out; 858 } 859 860 rb_link_node(&new->rb_node, parent, p); 861 rb_insert_color(&new->rb_node, root); 862 863 cache = NULL; 864 out: 865 pthread_mutex_unlock(&dso->lock); 866 return cache; 867 } 868 869 static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data, 870 u64 size, bool out) 871 { 872 u64 cache_offset = offset - cache->offset; 873 u64 cache_size = min(cache->size - cache_offset, size); 874 875 if (out) 876 memcpy(data, cache->data + cache_offset, cache_size); 877 else 878 memcpy(cache->data + cache_offset, data, cache_size); 879 return cache_size; 880 } 881 882 static ssize_t file_read(struct dso *dso, struct machine *machine, 883 u64 offset, char *data) 884 { 885 ssize_t ret; 886 887 pthread_mutex_lock(&dso__data_open_lock); 888 889 /* 890 * dso->data.fd might be closed if other thread opened another 891 * file (dso) due to open file limit (RLIMIT_NOFILE). 892 */ 893 try_to_open_dso(dso, machine); 894 895 if (dso->data.fd < 0) { 896 dso->data.status = DSO_DATA_STATUS_ERROR; 897 ret = -errno; 898 goto out; 899 } 900 901 ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset); 902 out: 903 pthread_mutex_unlock(&dso__data_open_lock); 904 return ret; 905 } 906 907 static struct dso_cache *dso_cache__populate(struct dso *dso, 908 struct machine *machine, 909 u64 offset, ssize_t *ret) 910 { 911 u64 cache_offset = offset & DSO__DATA_CACHE_MASK; 912 struct dso_cache *cache; 913 struct dso_cache *old; 914 915 cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE); 916 if (!cache) { 917 *ret = -ENOMEM; 918 return NULL; 919 } 920 #ifdef HAVE_LIBBPF_SUPPORT 921 if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) 922 *ret = bpf_read(dso, cache_offset, cache->data); 923 else 924 #endif 925 if (dso->binary_type == DSO_BINARY_TYPE__OOL) 926 *ret = DSO__DATA_CACHE_SIZE; 927 else 928 *ret = file_read(dso, machine, cache_offset, cache->data); 929 930 if (*ret <= 0) { 931 free(cache); 932 return NULL; 933 } 934 935 cache->offset = cache_offset; 936 cache->size = *ret; 937 938 old = dso_cache__insert(dso, cache); 939 if (old) { 940 /* we lose the race */ 941 free(cache); 942 cache = old; 943 } 944 945 return cache; 946 } 947 948 static struct dso_cache *dso_cache__find(struct dso *dso, 949 struct machine *machine, 950 u64 offset, 951 ssize_t *ret) 952 { 953 struct dso_cache *cache = __dso_cache__find(dso, offset); 954 955 return cache ? cache : dso_cache__populate(dso, machine, offset, ret); 956 } 957 958 static ssize_t dso_cache_io(struct dso *dso, struct machine *machine, 959 u64 offset, u8 *data, ssize_t size, bool out) 960 { 961 struct dso_cache *cache; 962 ssize_t ret = 0; 963 964 cache = dso_cache__find(dso, machine, offset, &ret); 965 if (!cache) 966 return ret; 967 968 return dso_cache__memcpy(cache, offset, data, size, out); 969 } 970 971 /* 972 * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks 973 * in the rb_tree. Any read to already cached data is served 974 * by cached data. Writes update the cache only, not the backing file. 975 */ 976 static ssize_t cached_io(struct dso *dso, struct machine *machine, 977 u64 offset, u8 *data, ssize_t size, bool out) 978 { 979 ssize_t r = 0; 980 u8 *p = data; 981 982 do { 983 ssize_t ret; 984 985 ret = dso_cache_io(dso, machine, offset, p, size, out); 986 if (ret < 0) 987 return ret; 988 989 /* Reached EOF, return what we have. */ 990 if (!ret) 991 break; 992 993 BUG_ON(ret > size); 994 995 r += ret; 996 p += ret; 997 offset += ret; 998 size -= ret; 999 1000 } while (size); 1001 1002 return r; 1003 } 1004 1005 static int file_size(struct dso *dso, struct machine *machine) 1006 { 1007 int ret = 0; 1008 struct stat st; 1009 char sbuf[STRERR_BUFSIZE]; 1010 1011 pthread_mutex_lock(&dso__data_open_lock); 1012 1013 /* 1014 * dso->data.fd might be closed if other thread opened another 1015 * file (dso) due to open file limit (RLIMIT_NOFILE). 1016 */ 1017 try_to_open_dso(dso, machine); 1018 1019 if (dso->data.fd < 0) { 1020 ret = -errno; 1021 dso->data.status = DSO_DATA_STATUS_ERROR; 1022 goto out; 1023 } 1024 1025 if (fstat(dso->data.fd, &st) < 0) { 1026 ret = -errno; 1027 pr_err("dso cache fstat failed: %s\n", 1028 str_error_r(errno, sbuf, sizeof(sbuf))); 1029 dso->data.status = DSO_DATA_STATUS_ERROR; 1030 goto out; 1031 } 1032 dso->data.file_size = st.st_size; 1033 1034 out: 1035 pthread_mutex_unlock(&dso__data_open_lock); 1036 return ret; 1037 } 1038 1039 int dso__data_file_size(struct dso *dso, struct machine *machine) 1040 { 1041 if (dso->data.file_size) 1042 return 0; 1043 1044 if (dso->data.status == DSO_DATA_STATUS_ERROR) 1045 return -1; 1046 #ifdef HAVE_LIBBPF_SUPPORT 1047 if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) 1048 return bpf_size(dso); 1049 #endif 1050 return file_size(dso, machine); 1051 } 1052 1053 /** 1054 * dso__data_size - Return dso data size 1055 * @dso: dso object 1056 * @machine: machine object 1057 * 1058 * Return: dso data size 1059 */ 1060 off_t dso__data_size(struct dso *dso, struct machine *machine) 1061 { 1062 if (dso__data_file_size(dso, machine)) 1063 return -1; 1064 1065 /* For now just estimate dso data size is close to file size */ 1066 return dso->data.file_size; 1067 } 1068 1069 static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine, 1070 u64 offset, u8 *data, ssize_t size, 1071 bool out) 1072 { 1073 if (dso__data_file_size(dso, machine)) 1074 return -1; 1075 1076 /* Check the offset sanity. */ 1077 if (offset > dso->data.file_size) 1078 return -1; 1079 1080 if (offset + size < offset) 1081 return -1; 1082 1083 return cached_io(dso, machine, offset, data, size, out); 1084 } 1085 1086 /** 1087 * dso__data_read_offset - Read data from dso file offset 1088 * @dso: dso object 1089 * @machine: machine object 1090 * @offset: file offset 1091 * @data: buffer to store data 1092 * @size: size of the @data buffer 1093 * 1094 * External interface to read data from dso file offset. Open 1095 * dso data file and use cached_read to get the data. 1096 */ 1097 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, 1098 u64 offset, u8 *data, ssize_t size) 1099 { 1100 if (dso->data.status == DSO_DATA_STATUS_ERROR) 1101 return -1; 1102 1103 return data_read_write_offset(dso, machine, offset, data, size, true); 1104 } 1105 1106 /** 1107 * dso__data_read_addr - Read data from dso address 1108 * @dso: dso object 1109 * @machine: machine object 1110 * @add: virtual memory address 1111 * @data: buffer to store data 1112 * @size: size of the @data buffer 1113 * 1114 * External interface to read data from dso address. 1115 */ 1116 ssize_t dso__data_read_addr(struct dso *dso, struct map *map, 1117 struct machine *machine, u64 addr, 1118 u8 *data, ssize_t size) 1119 { 1120 u64 offset = map->map_ip(map, addr); 1121 return dso__data_read_offset(dso, machine, offset, data, size); 1122 } 1123 1124 /** 1125 * dso__data_write_cache_offs - Write data to dso data cache at file offset 1126 * @dso: dso object 1127 * @machine: machine object 1128 * @offset: file offset 1129 * @data: buffer to write 1130 * @size: size of the @data buffer 1131 * 1132 * Write into the dso file data cache, but do not change the file itself. 1133 */ 1134 ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine, 1135 u64 offset, const u8 *data_in, ssize_t size) 1136 { 1137 u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */ 1138 1139 if (dso->data.status == DSO_DATA_STATUS_ERROR) 1140 return -1; 1141 1142 return data_read_write_offset(dso, machine, offset, data, size, false); 1143 } 1144 1145 /** 1146 * dso__data_write_cache_addr - Write data to dso data cache at dso address 1147 * @dso: dso object 1148 * @machine: machine object 1149 * @add: virtual memory address 1150 * @data: buffer to write 1151 * @size: size of the @data buffer 1152 * 1153 * External interface to write into the dso file data cache, but do not change 1154 * the file itself. 1155 */ 1156 ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map, 1157 struct machine *machine, u64 addr, 1158 const u8 *data, ssize_t size) 1159 { 1160 u64 offset = map->map_ip(map, addr); 1161 return dso__data_write_cache_offs(dso, machine, offset, data, size); 1162 } 1163 1164 struct map *dso__new_map(const char *name) 1165 { 1166 struct map *map = NULL; 1167 struct dso *dso = dso__new(name); 1168 1169 if (dso) { 1170 map = map__new2(0, dso); 1171 dso__put(dso); 1172 } 1173 1174 return map; 1175 } 1176 1177 struct dso *machine__findnew_kernel(struct machine *machine, const char *name, 1178 const char *short_name, int dso_type) 1179 { 1180 /* 1181 * The kernel dso could be created by build_id processing. 1182 */ 1183 struct dso *dso = machine__findnew_dso(machine, name); 1184 1185 /* 1186 * We need to run this in all cases, since during the build_id 1187 * processing we had no idea this was the kernel dso. 1188 */ 1189 if (dso != NULL) { 1190 dso__set_short_name(dso, short_name, false); 1191 dso->kernel = dso_type; 1192 } 1193 1194 return dso; 1195 } 1196 1197 static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated) 1198 { 1199 struct rb_root *root = dso->root; 1200 1201 if (name == NULL) 1202 return; 1203 1204 if (dso->long_name_allocated) 1205 free((char *)dso->long_name); 1206 1207 if (root) { 1208 rb_erase(&dso->rb_node, root); 1209 /* 1210 * __dsos__findnew_link_by_longname_id() isn't guaranteed to 1211 * add it back, so a clean removal is required here. 1212 */ 1213 RB_CLEAR_NODE(&dso->rb_node); 1214 dso->root = NULL; 1215 } 1216 1217 dso->long_name = name; 1218 dso->long_name_len = strlen(name); 1219 dso->long_name_allocated = name_allocated; 1220 1221 if (root) 1222 __dsos__findnew_link_by_longname_id(root, dso, NULL, id); 1223 } 1224 1225 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated) 1226 { 1227 dso__set_long_name_id(dso, name, NULL, name_allocated); 1228 } 1229 1230 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated) 1231 { 1232 if (name == NULL) 1233 return; 1234 1235 if (dso->short_name_allocated) 1236 free((char *)dso->short_name); 1237 1238 dso->short_name = name; 1239 dso->short_name_len = strlen(name); 1240 dso->short_name_allocated = name_allocated; 1241 } 1242 1243 int dso__name_len(const struct dso *dso) 1244 { 1245 if (!dso) 1246 return strlen("[unknown]"); 1247 if (verbose > 0) 1248 return dso->long_name_len; 1249 1250 return dso->short_name_len; 1251 } 1252 1253 bool dso__loaded(const struct dso *dso) 1254 { 1255 return dso->loaded; 1256 } 1257 1258 bool dso__sorted_by_name(const struct dso *dso) 1259 { 1260 return dso->sorted_by_name; 1261 } 1262 1263 void dso__set_sorted_by_name(struct dso *dso) 1264 { 1265 dso->sorted_by_name = true; 1266 } 1267 1268 struct dso *dso__new_id(const char *name, struct dso_id *id) 1269 { 1270 struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1); 1271 1272 if (dso != NULL) { 1273 strcpy(dso->name, name); 1274 if (id) 1275 dso->id = *id; 1276 dso__set_long_name_id(dso, dso->name, id, false); 1277 dso__set_short_name(dso, dso->name, false); 1278 dso->symbols = dso->symbol_names = RB_ROOT_CACHED; 1279 dso->data.cache = RB_ROOT; 1280 dso->inlined_nodes = RB_ROOT_CACHED; 1281 dso->srclines = RB_ROOT_CACHED; 1282 dso->data.fd = -1; 1283 dso->data.status = DSO_DATA_STATUS_UNKNOWN; 1284 dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; 1285 dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND; 1286 dso->is_64_bit = (sizeof(void *) == 8); 1287 dso->loaded = 0; 1288 dso->rel = 0; 1289 dso->sorted_by_name = 0; 1290 dso->has_build_id = 0; 1291 dso->has_srcline = 1; 1292 dso->a2l_fails = 1; 1293 dso->kernel = DSO_SPACE__USER; 1294 dso->needs_swap = DSO_SWAP__UNSET; 1295 dso->comp = COMP_ID__NONE; 1296 RB_CLEAR_NODE(&dso->rb_node); 1297 dso->root = NULL; 1298 INIT_LIST_HEAD(&dso->node); 1299 INIT_LIST_HEAD(&dso->data.open_entry); 1300 pthread_mutex_init(&dso->lock, NULL); 1301 refcount_set(&dso->refcnt, 1); 1302 } 1303 1304 return dso; 1305 } 1306 1307 struct dso *dso__new(const char *name) 1308 { 1309 return dso__new_id(name, NULL); 1310 } 1311 1312 void dso__delete(struct dso *dso) 1313 { 1314 if (!RB_EMPTY_NODE(&dso->rb_node)) 1315 pr_err("DSO %s is still in rbtree when being deleted!\n", 1316 dso->long_name); 1317 1318 /* free inlines first, as they reference symbols */ 1319 inlines__tree_delete(&dso->inlined_nodes); 1320 srcline__tree_delete(&dso->srclines); 1321 symbols__delete(&dso->symbols); 1322 1323 if (dso->short_name_allocated) { 1324 zfree((char **)&dso->short_name); 1325 dso->short_name_allocated = false; 1326 } 1327 1328 if (dso->long_name_allocated) { 1329 zfree((char **)&dso->long_name); 1330 dso->long_name_allocated = false; 1331 } 1332 1333 dso__data_close(dso); 1334 auxtrace_cache__free(dso->auxtrace_cache); 1335 dso_cache__free(dso); 1336 dso__free_a2l(dso); 1337 zfree(&dso->symsrc_filename); 1338 nsinfo__zput(dso->nsinfo); 1339 pthread_mutex_destroy(&dso->lock); 1340 free(dso); 1341 } 1342 1343 struct dso *dso__get(struct dso *dso) 1344 { 1345 if (dso) 1346 refcount_inc(&dso->refcnt); 1347 return dso; 1348 } 1349 1350 void dso__put(struct dso *dso) 1351 { 1352 if (dso && refcount_dec_and_test(&dso->refcnt)) 1353 dso__delete(dso); 1354 } 1355 1356 void dso__set_build_id(struct dso *dso, struct build_id *bid) 1357 { 1358 dso->bid = *bid; 1359 dso->has_build_id = 1; 1360 } 1361 1362 bool dso__build_id_equal(const struct dso *dso, struct build_id *bid) 1363 { 1364 if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) { 1365 /* 1366 * For the backward compatibility, it allows a build-id has 1367 * trailing zeros. 1368 */ 1369 return !memcmp(dso->bid.data, bid->data, bid->size) && 1370 !memchr_inv(&dso->bid.data[bid->size], 0, 1371 dso->bid.size - bid->size); 1372 } 1373 1374 return dso->bid.size == bid->size && 1375 memcmp(dso->bid.data, bid->data, dso->bid.size) == 0; 1376 } 1377 1378 void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine) 1379 { 1380 char path[PATH_MAX]; 1381 1382 if (machine__is_default_guest(machine)) 1383 return; 1384 sprintf(path, "%s/sys/kernel/notes", machine->root_dir); 1385 if (sysfs__read_build_id(path, &dso->bid) == 0) 1386 dso->has_build_id = true; 1387 } 1388 1389 int dso__kernel_module_get_build_id(struct dso *dso, 1390 const char *root_dir) 1391 { 1392 char filename[PATH_MAX]; 1393 /* 1394 * kernel module short names are of the form "[module]" and 1395 * we need just "module" here. 1396 */ 1397 const char *name = dso->short_name + 1; 1398 1399 snprintf(filename, sizeof(filename), 1400 "%s/sys/module/%.*s/notes/.note.gnu.build-id", 1401 root_dir, (int)strlen(name) - 1, name); 1402 1403 if (sysfs__read_build_id(filename, &dso->bid) == 0) 1404 dso->has_build_id = true; 1405 1406 return 0; 1407 } 1408 1409 static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp) 1410 { 1411 char sbuild_id[SBUILD_ID_SIZE]; 1412 1413 build_id__sprintf(&dso->bid, sbuild_id); 1414 return fprintf(fp, "%s", sbuild_id); 1415 } 1416 1417 size_t dso__fprintf(struct dso *dso, FILE *fp) 1418 { 1419 struct rb_node *nd; 1420 size_t ret = fprintf(fp, "dso: %s (", dso->short_name); 1421 1422 if (dso->short_name != dso->long_name) 1423 ret += fprintf(fp, "%s, ", dso->long_name); 1424 ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT "); 1425 ret += dso__fprintf_buildid(dso, fp); 1426 ret += fprintf(fp, ")\n"); 1427 for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) { 1428 struct symbol *pos = rb_entry(nd, struct symbol, rb_node); 1429 ret += symbol__fprintf(pos, fp); 1430 } 1431 1432 return ret; 1433 } 1434 1435 enum dso_type dso__type(struct dso *dso, struct machine *machine) 1436 { 1437 int fd; 1438 enum dso_type type = DSO__TYPE_UNKNOWN; 1439 1440 fd = dso__data_get_fd(dso, machine); 1441 if (fd >= 0) { 1442 type = dso__type_fd(fd); 1443 dso__data_put_fd(dso); 1444 } 1445 1446 return type; 1447 } 1448 1449 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen) 1450 { 1451 int idx, errnum = dso->load_errno; 1452 /* 1453 * This must have a same ordering as the enum dso_load_errno. 1454 */ 1455 static const char *dso_load__error_str[] = { 1456 "Internal tools/perf/ library error", 1457 "Invalid ELF file", 1458 "Can not read build id", 1459 "Mismatching build id", 1460 "Decompression failure", 1461 }; 1462 1463 BUG_ON(buflen == 0); 1464 1465 if (errnum >= 0) { 1466 const char *err = str_error_r(errnum, buf, buflen); 1467 1468 if (err != buf) 1469 scnprintf(buf, buflen, "%s", err); 1470 1471 return 0; 1472 } 1473 1474 if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END) 1475 return -1; 1476 1477 idx = errnum - __DSO_LOAD_ERRNO__START; 1478 scnprintf(buf, buflen, "%s", dso_load__error_str[idx]); 1479 return 0; 1480 } 1481