// SPDX-License-Identifier: GPL-2.0
#include <asm/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <bpf/libbpf.h>
#include "bpf-event.h"
#include "compress.h"
#include "env.h"
#include "namespaces.h"
#include "path.h"
#include "map.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
#include "dsos.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h" /* O_CLOEXEC for older systems */
#include "debug.h"
#include "string2.h"
#include "vdso.h"

static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};
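
/*
 * Illustrative sketch (the paths are hypothetical, not taken from a real
 * system): for a DSO at /usr/bin/foo whose .gnu_debuglink section names
 * "foo.debug", the DSO_BINARY_TYPE__DEBUGLINK case below expands the
 * templates above with (dso_dir, symfile) = ("/usr/bin", "foo.debug")
 * and probes, in order:
 *
 *	foo.debug                          ("%.0s%s", debuglink as given)
 *	/usr/bin/foo.debug                 ("%s/%s")
 *	/usr/bin/.debug/foo.debug          ("%s/.debug/%s")
 *	/usr/lib/debug/usr/bin/foo.debug   ("/usr/lib/debug%s/%s")
 *
 * The first candidate that is a regular file wins.
 */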

char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO]	= 'x',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}

int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		len = __symbol__join_symfs(filename, size, dso->long_name);
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash - filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
				 debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size, false) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		if (dso__build_id_filename(dso, filename, size, true) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
		/*
		 * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
		 * /usr/lib/debug/lib when it is expected to be in
		 * /usr/lib/debug/usr/lib
		 */
		if (strlen(dso->long_name) < 9 ||
		    strncmp(dso->long_name, "/usr/lib/", 9)) {
			ret = -1;
			break;
		}
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name + 4);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso->long_name);
		len += scnprintf(filename + len, size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__NOT_FOUND:
		ret = -1;
		break;
	}

	return ret;
}

enum {
	COMP_ID__NONE = 0,
};

static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
	bool (*is_compressed)(const char *input);
} compressions[] = {
	[COMP_ID__NONE] = { .fmt = NULL, },
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
#endif
	{ NULL, NULL, NULL },
};

static int is_supported_compression(const char *ext)
{
	unsigned i;

	for (i = 1; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return i;
	}
	return COMP_ID__NONE;
}
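
/*
 * For example, with lzma support built in, is_supported_compression("xz")
 * returns the index of the "xz" slot above, while an extension that is not
 * in the table (say "zst") yields COMP_ID__NONE. Slot 0 is reserved so that
 * a zero compression id always means "not compressed".
 */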
"Internal error: passing unmasked cpumode (%x) to is_kernel_module", 257 cpumode); 258 259 switch (mode) { 260 case PERF_RECORD_MISC_USER: 261 case PERF_RECORD_MISC_HYPERVISOR: 262 case PERF_RECORD_MISC_GUEST_USER: 263 return false; 264 /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */ 265 default: 266 if (kmod_path__parse(&m, pathname)) { 267 pr_err("Failed to check whether %s is a kernel module or not. Assume it is.", 268 pathname); 269 return true; 270 } 271 } 272 273 return m.kmod; 274 } 275 276 bool dso__needs_decompress(struct dso *dso) 277 { 278 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || 279 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; 280 } 281 282 static int decompress_kmodule(struct dso *dso, const char *name, 283 char *pathname, size_t len) 284 { 285 char tmpbuf[] = KMOD_DECOMP_NAME; 286 int fd = -1; 287 288 if (!dso__needs_decompress(dso)) 289 return -1; 290 291 if (dso->comp == COMP_ID__NONE) 292 return -1; 293 294 /* 295 * We have proper compression id for DSO and yet the file 296 * behind the 'name' can still be plain uncompressed object. 297 * 298 * The reason is behind the logic we open the DSO object files, 299 * when we try all possible 'debug' objects until we find the 300 * data. So even if the DSO is represented by 'krava.xz' module, 301 * we can end up here opening ~/.debug/....23432432/debug' file 302 * which is not compressed. 303 * 304 * To keep this transparent, we detect this and return the file 305 * descriptor to the uncompressed file. 306 */ 307 if (!compressions[dso->comp].is_compressed(name)) 308 return open(name, O_RDONLY); 309 310 fd = mkstemp(tmpbuf); 311 if (fd < 0) { 312 dso->load_errno = errno; 313 return -1; 314 } 315 316 if (compressions[dso->comp].decompress(name, fd)) { 317 dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE; 318 close(fd); 319 fd = -1; 320 } 321 322 if (!pathname || (fd < 0)) 323 unlink(tmpbuf); 324 325 if (pathname && (fd >= 0)) 326 strlcpy(pathname, tmpbuf, len); 327 328 return fd; 329 } 330 331 int dso__decompress_kmodule_fd(struct dso *dso, const char *name) 332 { 333 return decompress_kmodule(dso, name, NULL, 0); 334 } 335 336 int dso__decompress_kmodule_path(struct dso *dso, const char *name, 337 char *pathname, size_t len) 338 { 339 int fd = decompress_kmodule(dso, name, pathname, len); 340 341 close(fd); 342 return fd >= 0 ? 0 : -1; 343 } 344 345 /* 346 * Parses kernel module specified in @path and updates 347 * @m argument like: 348 * 349 * @comp - true if @path contains supported compression suffix, 350 * false otherwise 351 * @kmod - true if @path contains '.ko' suffix in right position, 352 * false otherwise 353 * @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name 354 * of the kernel module without suffixes, otherwise strudup-ed 355 * base name of @path 356 * @ext - if (@alloc_ext && @comp) is true, it contains strdup-ed string 357 * the compression suffix 358 * 359 * Returns 0 if there's no strdup error, -ENOMEM otherwise. 360 */ 361 int __kmod_path__parse(struct kmod_path *m, const char *path, 362 bool alloc_name) 363 { 364 const char *name = strrchr(path, '/'); 365 const char *ext = strrchr(path, '.'); 366 bool is_simple_name = false; 367 368 memset(m, 0x0, sizeof(*m)); 369 name = name ? name + 1 : path; 370 371 /* 372 * '.' is also a valid character for module name. For example: 373 * [aaa.bbb] is a valid module name. '[' should have higher 374 * priority than '.ko' suffix. 375 * 376 * The kernel names are from machine__mmap_name. 

/*
 * Parses the kernel module specified in @path and updates
 * the @m argument like:
 *
 * @comp - compression id from the compressions[] table if @path has a
 *         supported compression suffix, COMP_ID__NONE otherwise
 * @kmod - true if @path contains a '.ko' suffix in the right position,
 *         false otherwise
 * @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed base
 *         name of the kernel module without suffixes, otherwise the
 *         strdup-ed base name of @path
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * names belong to the kernel itself, not to a kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vdso32]", 8) == 0) ||
		    (strncmp(name, "[vdsox32]", 9) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	m->comp = is_supported_compression(ext + 1);
	if (m->comp > COMP_ID__NONE)
		ext -= 3;

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		strreplace(m->name, '-', '_');
	}

	return 0;
}
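
/*
 * Worked example for __kmod_path__parse() (the path is hypothetical):
 * parsing "/lib/modules/.../kernel/crypto/aes.ko.xz" with alloc_name set
 * gives m->kmod = true, m->comp pointing at the "xz" slot of
 * compressions[] (when lzma support is built in) and m->name = "[aes]".
 * Dashes in the base name are turned into underscores, so a module file
 * called "snd-hda-core.ko" would be reported as "[snd_hda_core]".
 */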

void dso__set_module_info(struct dso *dso, struct kmod_path *m,
			  struct machine *machine)
{
	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

	/* _KMODULE_COMP should be next to _KMODULE */
	if (m->kmod && m->comp) {
		dso->symtab_type++;
		dso->comp = m->comp;
	}

	dso__set_short_name(dso, strdup(m->name), true);
}

/*
 * Global list of open DSOs and the counter.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;

static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}

static void dso__list_del(struct dso *dso)
{
	list_del_init(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

static int do_open(char *name)
{
	int fd;
	char sbuf[STRERR_BUFSIZE];

	do {
		fd = open(name, O_RDONLY|O_CLOEXEC);
		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd = -EINVAL;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);
	bool decomp = false;

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					   root_dir, name, PATH_MAX))
		goto out;

	if (!is_regular_file(name))
		goto out;

	if (dso__needs_decompress(dso)) {
		char newpath[KMOD_DECOMP_LEN];
		size_t len = sizeof(newpath);

		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
			fd = -dso->load_errno;
			goto out;
		}

		decomp = true;
		strcpy(name, newpath);
	}

	fd = do_open(name);

	if (decomp)
		unlink(name);

out:
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 * @machine: machine object
 *
 * Opens @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	struct nscookie nsc;

	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_enter(dso->nsinfo, &nsc);
	fd = __open_dso(dso, machine);
	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_exit(&nsc);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

static void close_data_fd(struct dso *dso)
{
	if (dso->data.fd >= 0) {
		close(dso->data.fd);
		dso->data.fd = -1;
		dso->data.file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Closes @dso's data file descriptor and updates the
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}

static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}

static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}

static bool may_cache_fd(void)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close LRU dso if we crossed allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}
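
/*
 * Example of the effect (the limit value is illustrative): with a
 * RLIMIT_NOFILE soft limit of 1024, roughly half of it (512) DSO file
 * descriptors may stay cached; once the count reaches that bound,
 * check_data_close() closes the descriptor at the head of
 * dso__data_open, i.e. the one that was opened earliest.
 */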

/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}

static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 *
 * External interface to find dso's file, open it and
 * return its file descriptor. It should be paired with
 * dso__data_put_fd() if it returns a non-negative value.
 */
int dso__data_get_fd(struct dso *dso, struct machine *machine)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
		return -1;

	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0)
		pthread_mutex_unlock(&dso__data_open_lock);

	return dso->data.fd;
}

void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}

bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso->data.status_seen & flag)
		return true;

	dso->data.status_seen |= flag;

	return false;
}

static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
	struct bpf_prog_info_node *node;
	ssize_t size = DSO__DATA_CACHE_SIZE;
	u64 len;
	u8 *buf;

	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
	if (!node || !node->info_linear) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	len = node->info_linear->info.jited_prog_len;
	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;

	if (offset >= len)
		return -1;

	size = (ssize_t)min(len - offset, (u64)size);
	memcpy(data, buf + offset, size);
	return size;
}

static int bpf_size(struct dso *dso)
{
	struct bpf_prog_info_node *node;

	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
	if (!node || !node->info_linear) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	dso->data.file_size = node->info_linear->info.jited_prog_len;
	return 0;
}

static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node *next = rb_first(root);

	pthread_mutex_lock(&dso->lock);
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	pthread_mutex_unlock(&dso->lock);
}

static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso->data.cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}

static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
				 u64 size, bool out)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size   = min(cache->size - cache_offset, size);

	if (out)
		memcpy(data, cache->data + cache_offset, cache_size);
	else
		memcpy(cache->data + cache_offset, data, cache_size);
	return cache_size;
}

static ssize_t file_read(struct dso *dso, struct machine *machine,
			 u64 offset, char *data)
{
	ssize_t ret;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		ret = -errno;
		goto out;
	}

	ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

static struct dso_cache *dso_cache__populate(struct dso *dso,
					     struct machine *machine,
					     u64 offset, ssize_t *ret)
{
	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
	struct dso_cache *cache;
	struct dso_cache *old;

	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
	if (!cache) {
		*ret = -ENOMEM;
		return NULL;
	}

	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		*ret = bpf_read(dso, cache_offset, cache->data);
	else if (dso->binary_type == DSO_BINARY_TYPE__OOL)
		*ret = DSO__DATA_CACHE_SIZE;
	else
		*ret = file_read(dso, machine, cache_offset, cache->data);

	if (*ret <= 0) {
		free(cache);
		return NULL;
	}

	cache->offset = cache_offset;
	cache->size   = *ret;

	old = dso_cache__insert(dso, cache);
	if (old) {
		/* we lose the race */
		free(cache);
		cache = old;
	}

	return cache;
}

static struct dso_cache *dso_cache__find(struct dso *dso,
					 struct machine *machine,
					 u64 offset,
					 ssize_t *ret)
{
	struct dso_cache *cache = __dso_cache__find(dso, offset);

	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
}
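
/*
 * Illustrative example (assuming the usual 4 KiB DSO__DATA_CACHE_SIZE):
 * a request for 16 bytes at offset 0x1234 maps to the chunk starting at
 * cache offset 0x1000. If that chunk is already in the rb-tree it is
 * served directly; otherwise dso_cache__populate() reads the whole chunk
 * from the file (or BPF image) and inserts it, keeping later accesses to
 * nearby offsets cheap.
 */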

static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
			    u64 offset, u8 *data, ssize_t size, bool out)
{
	struct dso_cache *cache;
	ssize_t ret = 0;

	cache = dso_cache__find(dso, machine, offset, &ret);
	if (!cache)
		return ret;

	return dso_cache__memcpy(cache, offset, data, size, out);
}

/*
 * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
 * in the rb_tree. Any read of already cached data is served
 * from the cache. Writes update the cache only, not the backing file.
 */
static ssize_t cached_io(struct dso *dso, struct machine *machine,
			 u64 offset, u8 *data, ssize_t size, bool out)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_io(dso, machine, offset, p, size, out);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r      += ret;
		p      += ret;
		offset += ret;
		size   -= ret;

	} while (size);

	return r;
}

static int file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if another thread opened another
	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}

int dso__data_file_size(struct dso *dso, struct machine *machine)
{
	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return bpf_size(dso);

	return file_size(dso, machine);
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}

static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
				      u64 offset, u8 *data, ssize_t size,
				      bool out)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	if (offset + size < offset)
		return -1;

	return cached_io(dso, machine, offset, data, size, out);
}
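
/*
 * Usage sketch for the read interface below (the buffer size and error
 * handling are illustrative choices):
 *
 *	u8 buf[64];
 *	ssize_t n;
 *
 *	n = dso__data_read_offset(dso, machine, 0, buf, sizeof(buf));
 *	if (n < 0)
 *		... the file could not be opened or the offset is bad ...
 *
 * A short positive return (n < sizeof(buf)) simply means EOF was reached.
 */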

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso file offset. Opens
 * the dso data file and uses cached_io to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, true);
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map object
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from a dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);
	return dso__data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_write_cache_offs - Write data to dso data cache at file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data_in: buffer to write
 * @size: size of the @data_in buffer
 *
 * Write into the dso file data cache, but do not change the file itself.
 */
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
				   u64 offset, const u8 *data_in, ssize_t size)
{
	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, false);
}

/**
 * dso__data_write_cache_addr - Write data to dso data cache at dso address
 * @dso: dso object
 * @map: map object
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to write
 * @size: size of the @data buffer
 *
 * External interface to write into the dso file data cache, but do not change
 * the file itself.
 */
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
				   struct machine *machine, u64 addr,
				   const u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);
	return dso__data_write_cache_offs(dso, machine, offset, data, size);
}

struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso);

	return map;
}

struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}

static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dsos__findnew_link_by_longname_id() isn't guaranteed to
		 * add it back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name = name;
	dso->long_name_len = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dsos__findnew_link_by_longname_id(root, dso, NULL, id);
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	dso__set_long_name_id(dso, name, NULL, name_allocated);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (name == NULL)
		return;

	if (dso->short_name_allocated)
		free((char *)dso->short_name);

	dso->short_name		  = name;
	dso->short_name_len	  = strlen(name);
	dso->short_name_allocated = name_allocated;
}

int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	if (verbose > 0)
		return dso->long_name_len;

	return dso->short_name_len;
}

bool dso__loaded(const struct dso *dso)
{
	return dso->loaded;
}

bool dso__sorted_by_name(const struct dso *dso)
{
	return dso->sorted_by_name;
}

void dso__set_sorted_by_name(struct dso *dso)
{
	dso->sorted_by_name = true;
}

struct dso *dso__new_id(const char *name, struct dso_id *id)
{
	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);

	if (dso != NULL) {
		strcpy(dso->name, name);
		if (id)
			dso->id = *id;
		dso__set_long_name_id(dso, dso->name, id, false);
		dso__set_short_name(dso, dso->name, false);
		dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
		dso->data.cache = RB_ROOT;
		dso->inlined_nodes = RB_ROOT_CACHED;
		dso->srclines = RB_ROOT_CACHED;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_build_id = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_SPACE__USER;
		dso->needs_swap = DSO_SWAP__UNSET;
		dso->comp = COMP_ID__NONE;
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
		INIT_LIST_HEAD(&dso->node);
		INIT_LIST_HEAD(&dso->data.open_entry);
		pthread_mutex_init(&dso->lock, NULL);
		refcount_set(&dso->refcnt, 1);
	}

	return dso;
}

struct dso *dso__new(const char *name)
{
	return dso__new_id(name, NULL);
}

void dso__delete(struct dso *dso)
{
	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);

	/* free inlines first, as they reference symbols */
	inlines__tree_delete(&dso->inlined_nodes);
	srcline__tree_delete(&dso->srclines);
	symbols__delete(&dso->symbols);

	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	nsinfo__zput(dso->nsinfo);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}
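
/*
 * Reference-counting sketch: a new dso starts with a refcount of 1 (see
 * dso__new_id() above); every holder that wants to keep the pointer takes
 * its own reference and drops it when done, e.g.
 *
 *	struct dso *dso = dso__get(other_holder->dso);
 *	...
 *	dso__put(dso);	... the last put calls dso__delete() ...
 *
 * ('other_holder' above is just a placeholder for whatever object the
 * pointer was borrowed from.)
 */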
struct dso *dso__get(struct dso *dso)
{
	if (dso)
		refcount_inc(&dso->refcnt);
	return dso;
}

void dso__put(struct dso *dso)
{
	if (dso && refcount_dec_and_test(&dso->refcnt))
		dso__delete(dso);
}

void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}

bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}

void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];

	if (machine__is_default_guest(machine))
		return;
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	if (sysfs__read_build_id(path, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}

size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}

size_t dso__fprintf(struct dso *dso, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);

	if (dso->short_name != dso->long_name)
		ret += fprintf(fp, "%s, ", dso->long_name);
	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}

enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	fd = dso__data_get_fd(dso, machine);
	if (fd >= 0) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}

int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = dso->load_errno;
	/*
	 * This must have the same ordering as enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		const char *err = str_error_r(errnum, buf, buflen);

		if (err != buf)
			scnprintf(buf, buflen, "%s", err);

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}