// SPDX-License-Identifier: GPL-2.0
#include <asm/bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/libbpf.h>
#include "bpf-event.h"
#include "bpf-utils.h"
#endif
#include "compress.h"
#include "env.h"
#include "namespaces.h"
#include "path.h"
#include "map.h"
#include "symbol.h"
#include "srcline.h"
#include "dso.h"
#include "dsos.h"
#include "machine.h"
#include "auxtrace.h"
#include "util.h" /* O_CLOEXEC for older systems */
#include "debug.h"
#include "string2.h"
#include "vdso.h"
#include "annotate-data.h"
#include "libdw.h"

/*
 * printf templates used to probe candidate locations of a debuglink file.
 * Arguments are always (dso_dir, symfile); the first entry uses "%.0s" to
 * consume the directory argument without printing it, yielding the bare
 * symfile name.
 */
static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};

/*
 * Replace @dso's namespace info with @nsi, dropping the reference held on
 * the previous one.  Ownership of @nsi transfers to the dso (no get here).
 */
void dso__set_nsinfo(struct dso *dso, struct nsinfo *nsi)
{
	nsinfo__put(RC_CHK_ACCESS(dso)->nsinfo);
	RC_CHK_ACCESS(dso)->nsinfo = nsi;
}

/*
 * Return the single-character tag identifying where @dso's symbol table
 * came from (used in verbose output), or '!' when unknown/not found.
 */
char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO]	= 'x',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
		[DSO_BINARY_TYPE__GNU_DEBUGDATA]		= 'n',
	};

	if (dso == NULL || dso__symtab_type(dso) == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso__symtab_type(dso)];
}

/*
 * Whether @dso's binary type refers to an actual object file on disk
 * (as opposed to synthetic sources such as kallsyms, JIT or BPF images).
 * Note DSO_BINARY_TYPE__NOT_FOUND deliberately falls into the "true"
 * default together with all file-backed types.
 */
bool dso__is_object_file(const struct dso *dso)
{
	switch (dso__binary_type(dso)) {
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
		return false;
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__GNU_DEBUGDATA:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return true;
	}
}

/*
 * Build, into @filename (of @size bytes), the on-disk path where the
 * object/debuginfo of the given @type would live for @dso.  @root_dir is
 * the guest machine root (used for guest kmodules).  Returns 0 on success,
 * -1 when the type has no file or the path cannot be constructed.  Does
 * not check file existence except where noted (DEBUGLINK).
 */
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   const char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		/* Start with the symfs-prefixed path of the DSO itself. */
		len = __symbol__join_symfs(filename, size, dso__long_name(dso));

		/* Find the final '/' to split off the directory part. */
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		/* Manually NUL-terminated right below, so strncpy is safe here. */
		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash-filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		/* Read the .gnu_debuglink section to get the debug file name. */
		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
				 debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size, false) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		if (dso__build_id_filename(dso, filename, size, true) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
		/*
		 * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in
		 * /usr/lib/debug/lib when it is expected to be in
		 * /usr/lib/debug/usr/lib
		 */
		if (strlen(dso__long_name(dso)) < 9 ||
		    strncmp(dso__long_name(dso), "/usr/lib/", 9)) {
			ret = -1;
			break;
		}
		/* + 4 skips the "/usr" prefix, yielding /usr/lib/debug/lib/... */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso__long_name(dso) + 4);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		const char *last_slash;
		size_t dir_size;

		/* Locate the final '/' in the long name. */
		last_slash = dso__long_name(dso) + dso__long_name_len(dso);
		while (last_slash != dso__long_name(dso) && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");

		/* Directory part + '/' + NUL; used as scnprintf bound below. */
		dir_size = last_slash - dso__long_name(dso) + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		/*
		 * First scnprintf is bounded by dir_size so it truncates the
		 * long name right after the directory; the second appends
		 * ".debug/<basename>" — i.e. <dir>/.debug/<base>.
		 */
		len += scnprintf(filename + len, dir_size, "%s", dso__long_name(dso));
		len += scnprintf(filename + len , size - len, ".debug%s",
				 last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso__has_build_id(dso)) {
			ret = -1;
			break;
		}

		build_id__snprintf(dso__bid(dso), build_id_hex, sizeof(build_id_hex));
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		/* Layout is <first 2 hex chars>/<rest>.debug, as used by debuggers. */
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__GNU_DEBUGDATA:
		__symbol__join_symfs(filename, size, dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		/* Guest modules live under the guest machine's root dir. */
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		/* kcore paths are used verbatim, no symfs prefix. */
		snprintf(filename, size, "%s", dso__long_name(dso));
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__NOT_FOUND:
		/* These types are not backed by a regular file. */
		ret = -1;
		break;
	}

	return ret;
}

enum {
	COMP_ID__NONE = 0,
};

/*
 * Table of supported DSO compression formats, indexed by compression id.
 * Index 0 (COMP_ID__NONE) is a placeholder; real entries start at 1 and
 * the table is NULL-terminated for iteration.
 */
static const struct {
	const char *fmt;
	int (*decompress)(const char *input, int output);
	bool (*is_compressed)(const char *input);
} compressions[] = {
	[COMP_ID__NONE] = { .fmt = NULL, },
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
#endif
	{ NULL, NULL, NULL },
};
/*
 * Map a file-name extension (without the dot) to a compression id, or
 * COMP_ID__NONE when the extension is not a supported compression format.
 */
static int is_supported_compression(const char *ext)
{
	unsigned i;

	/* Entry 0 is the NONE placeholder, so start at 1. */
	for (i = 1; compressions[i].fmt; i++) {
		if (!strcmp(ext, compressions[i].fmt))
			return i;
	}
	return COMP_ID__NONE;
}

/*
 * Decide whether @pathname refers to a kernel module, based on @cpumode
 * (which must already be masked with PERF_RECORD_MISC_CPUMODE_MASK) and
 * on parsing the path itself.  On parse failure we conservatively assume
 * it is a module.
 */
bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
			       pathname);
			return true;
		}
	}

	return m.kmod;
}

/* True when @dso is a compressed kernel module (host or guest). */
bool dso__needs_decompress(struct dso *dso)
{
	return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}

/*
 * Decompress @name using compression id @comp into a temp file and return
 * an open fd to it.  If @pathname is non-NULL, the temp file path is
 * copied there and the file is kept; otherwise it is unlinked (the fd
 * keeps it alive).  On failure returns -1 and stores the reason in *@err.
 */
int filename__decompress(const char *name, char *pathname,
			 size_t len, int comp, int *err)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd = -1;

	/*
	 * We have proper compression id for DSO and yet the file
	 * behind the 'name' can still be plain uncompressed object.
	 *
	 * The reason is behind the logic we open the DSO object files,
	 * when we try all possible 'debug' objects until we find the
	 * data. So even if the DSO is represented by 'krava.xz' module,
	 * we can end up here opening ~/.debug/....23432432/debug' file
	 * which is not compressed.
	 *
	 * To keep this transparent, we detect this and return the file
	 * descriptor to the uncompressed file.
	 */
	if (!compressions[comp].is_compressed(name))
		return open(name, O_RDONLY);

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		*err = errno;
		return -1;
	}

	if (compressions[comp].decompress(name, fd)) {
		*err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	/* Drop the temp file unless the caller asked to keep its path. */
	if (!pathname || (fd < 0))
		unlink(tmpbuf);

	if (pathname && (fd >= 0))
		strlcpy(pathname, tmpbuf, len);

	return fd;
}

/*
 * Decompress kernel module @name for @dso; errors are recorded in the
 * dso's load_errno.  Returns the fd of the decompressed file or -1.
 */
static int decompress_kmodule(struct dso *dso, const char *name,
			      char *pathname, size_t len)
{
	if (!dso__needs_decompress(dso))
		return -1;

	if (dso__comp(dso) == COMP_ID__NONE)
		return -1;

	return filename__decompress(name, pathname, len, dso__comp(dso), dso__load_errno(dso));
}

/* Decompress @name and return an fd to the (unlinked) result, or -1. */
int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
	return decompress_kmodule(dso, name, NULL, 0);
}

/*
 * Decompress @name into a temp file whose path is stored in @pathname.
 * The fd is closed immediately; the caller only needs the path (and is
 * responsible for unlinking it).  Returns 0 on success, -1 on failure.
 */
int dso__decompress_kmodule_path(struct dso *dso, const char *name,
				 char *pathname, size_t len)
{
	int fd = decompress_kmodule(dso, name, pathname, len);

	close(fd);
	return fd >= 0 ? 0 : -1;
}

/*
 * Parses kernel module specified in @path and updates
 * @m argument like:
 *
 *    @comp - supported compression id if @path contains a matching
 *            suffix, COMP_ID__NONE otherwise
 *    @kmod - true if @path contains '.ko' suffix in right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *            of the kernel module without suffixes, otherwise strudup-ed
 *            base name of @path
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * name should belong to kernel itself, not kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vdso32]", 8) == 0) ||
		    (strncmp(name, "[vdsox32]", 9) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	m->comp = is_supported_compression(ext + 1);
	if (m->comp > COMP_ID__NONE)
		ext -= 3;	/* step back over ".ko" before the comp suffix */

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			/* Module names are reported bracketed, e.g. [nfsd]. */
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		/* Kernel reports module names with '_', paths may use '-'. */
		strreplace(m->name, '-', '_');
	}

	return 0;
}

/*
 * Mark @dso as a kernel module using parse result @m: pick the host/guest
 * symtab type, bump it to the matching _COMP type for compressed modules,
 * and set the bracketed short name.
 */
void dso__set_module_info(struct dso *dso, struct kmod_path *m,
			  struct machine *machine)
{
	if (machine__is_host(machine))
		dso__set_symtab_type(dso, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE);
	else
		dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KMODULE);

	/* _KMODULE_COMP should be next to _KMODULE */
	if (m->kmod && m->comp) {
		dso__set_symtab_type(dso, dso__symtab_type(dso) + 1);
		dso__set_comp(dso, m->comp);
	}

	dso__set_is_kmod(dso);
	dso__set_short_name(dso, strdup(m->name), true);
}
/*
 * Global list of open DSOs and the counter.
 */
struct mutex _dso__data_open_lock;
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt GUARDED_BY(_dso__data_open_lock);

/* pthread_once callback: one-time init of the data-open mutex. */
static void dso__data_open_lock_init(void)
{
	mutex_init(&_dso__data_open_lock);
}

/*
 * Return the (lazily initialized) global lock protecting the open-DSO
 * list and counter.
 */
static struct mutex *dso__data_open_lock(void) LOCK_RETURNED(_dso__data_open_lock)
{
	static pthread_once_t data_open_lock_once = PTHREAD_ONCE_INIT;

	pthread_once(&data_open_lock_once, dso__data_open_lock_init);

	return &_dso__data_open_lock;
}

/* Add @dso to the tail (MRU end) of the open-DSO list and bump the count. */
static void dso__list_add(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	list_add_tail(&dso__data(dso)->open_entry, &dso__data_open);
#ifdef REFCNT_CHECKING
	dso__data(dso)->dso = dso__get(dso);
#endif
	/* Assume the dso is part of dsos, hence the optional reference count above. */
	assert(dso__dsos(dso));
	dso__data_open_cnt++;
}

/* Remove @dso from the open-DSO list and drop the count. */
static void dso__list_del(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	list_del_init(&dso__data(dso)->open_entry);
#ifdef REFCNT_CHECKING
	/*
	 * Drop the lock around dso__put(): the put may free the dso and
	 * re-enter code that takes the data-open lock.
	 */
	mutex_unlock(dso__data_open_lock());
	dso__put(dso__data(dso)->dso);
	mutex_lock(dso__data_open_lock());
#endif
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}

static void close_first_dso(void);

/*
 * open(2) @name read-only; on EMFILE (fd limit hit) keep closing the
 * oldest open DSO and retrying until it succeeds or nothing is left to
 * close.  Returns the fd or -1.
 */
static int do_open(char *name) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	do {
		int fd = open(name, O_RDONLY|O_CLOEXEC);

		if (fd >= 0)
			return fd;

		pr_debug("dso open failed: %m\n");
		if (!dso__data_open_cnt || errno != EMFILE)
			break;

		close_first_dso();
	} while (1);

	return -1;
}

/* Rebase @filename under the root of @dso's namespace (via /proc/<pid>/root). */
char *dso__filename_with_chroot(const struct dso *dso, const char *filename)
{
	return filename_with_chroot(nsinfo__pid(dso__nsinfo_const(dso)), filename);
}

/*
 * Resolve @dso's binary-type path into a freshly malloc'd string, falling
 * back to the namespace-chroot path on ENOENT, and decompressing kernel
 * modules when needed (*@decomp set true; caller should unlink the temp
 * file).  Returns NULL on failure; caller frees the result.
 */
static char *dso__get_filename(struct dso *dso, const char *root_dir,
			       bool *decomp)
{
	char *name = malloc(PATH_MAX);

	*decomp = false;

	if (name == NULL)
		return NULL;

	if (dso__read_binary_type_filename(dso, dso__binary_type(dso),
					   root_dir, name, PATH_MAX))
		goto out;

	if (!is_regular_file(name)) {
		char *new_name;

		/* Only retry via the namespace root for plain "not found". */
		if (errno != ENOENT || dso__nsinfo(dso) == NULL)
			goto out;

		new_name = dso__filename_with_chroot(dso, name);
		if (!new_name)
			goto out;

		free(name);
		name = new_name;
	}

	if (dso__needs_decompress(dso)) {
		char newpath[KMOD_DECOMP_LEN];
		size_t len = sizeof(newpath);

		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
			errno = *dso__load_errno(dso);
			goto out;
		}

		*decomp = true;
		/* KMOD_DECOMP_LEN <= PATH_MAX is assumed here — the temp path fits. */
		strcpy(name, newpath);
	}
	return name;

out:
	free(name);
	return NULL;
}

/*
 * Resolve @dso's file name and open it.  Returns the fd, or a negative
 * errno when the name could not be resolved.  The temp file from
 * decompression is unlinked once open (the fd keeps it alive).
 */
static int __open_dso(struct dso *dso, struct machine *machine)
	EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	int fd = -EINVAL;
	char *name;
	bool decomp = false;

	mutex_lock(dso__lock(dso));

	name = dso__get_filename(dso, machine ? machine->root_dir : "", &decomp);
	if (name)
		fd = do_open(name);
	else
		fd = -errno;

	if (decomp)
		unlink(name);

	mutex_unlock(dso__lock(dso));
	free(name);
	return fd;
}

static void check_data_close(void);

/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Open @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
	EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	int fd;
	struct nscookie nsc;

	/*
	 * Build-id cache paths live on the host, so only enter the dso's
	 * mount namespace for other binary types.
	 */
	if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
		mutex_lock(dso__lock(dso));
		nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
		mutex_unlock(dso__lock(dso));
	}
	fd = __open_dso(dso, machine);
	if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_exit(&nsc);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}

/* Close @dso's data fd (if open), reset cached size and unlist it. */
static void close_data_fd(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	if (dso__data(dso)->fd >= 0) {
		close(dso__data(dso)->fd);
		dso__data(dso)->fd = -1;
		dso__data(dso)->file_size = 0;
		dso__list_del(dso);
	}
}

/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	close_data_fd(dso);
}

/* Close the least-recently-opened DSO (head of the open list). */
static void close_first_dso(void) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	struct dso_data *dso_data;
	struct dso *dso;

	dso_data = list_first_entry(&dso__data_open, struct dso_data, open_entry);
#ifdef REFCNT_CHECKING
	dso = dso_data->dso;
#else
	dso = container_of(dso_data, struct dso, data);
#endif
	close_dso(dso);
}

/* Query RLIMIT_NOFILE; infinity passes through, errors yield a limit of 1. */
static rlim_t get_fd_limit(void)
{
	struct rlimit l;
	rlim_t limit = 0;

	/* Allow half of the current open fd limit. */
	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
		if (l.rlim_cur == RLIM_INFINITY)
			limit = l.rlim_cur;
		else
			limit = l.rlim_cur / 2;
	} else {
		pr_err("failed to get fd limit\n");
		limit = 1;
	}

	return limit;
}

/* Cached fd limit; 0 means "not queried yet". */
static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I dont expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}

/* True when another DSO fd may stay cached without exceeding the limit. */
static bool may_cache_fd(void) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}

/*
 * Check and close LRU dso if we crossed allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	bool cache_fd = may_cache_fd();

	if (!cache_fd)
		close_first_dso();
}
/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 */
void dso__data_close(struct dso *dso)
{
	mutex_lock(dso__data_open_lock());
	close_dso(dso);
	mutex_unlock(dso__data_open_lock());
}

/*
 * Ensure @dso's data fd is open, probing the build-id cache then the
 * system path when the binary type is still unknown, and record the
 * resulting OK/ERROR status.  No-op if the fd is already open.
 */
static void try_to_open_dso(struct dso *dso, struct machine *machine)
	EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;
	struct dso_data *dso_data = dso__data(dso);

	if (dso_data->fd >= 0)
		return;

	/* Known binary type: open it directly. */
	if (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND) {
		dso_data->fd = open_dso(dso, machine);
		goto out;
	}

	/* Unknown: try each candidate type until one opens (or NOT_FOUND). */
	do {
		dso__set_binary_type(dso, binary_type_data[i++]);

		dso_data->fd = open_dso(dso, machine);
		if (dso_data->fd >= 0)
			goto out;

	} while (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso_data->fd >= 0)
		dso_data->status = DSO_DATA_STATUS_OK;
	else
		dso_data->status = DSO_DATA_STATUS_ERROR;
}

/**
 * dso__data_get_fd - Get dso's data file descriptor
 * @dso: dso object
 * @machine: machine object
 * @fd: out parameter receiving the file descriptor (or -1)
 *
 * External interface to find dso's file, open it and
 * returns file descriptor.  It should be paired with
 * dso__data_put_fd() if it returns non-negative value.
 *
 * Note: on success this returns with the data-open lock HELD, so the fd
 * cannot be closed underneath the caller; dso__data_put_fd() releases it.
 */
bool dso__data_get_fd(struct dso *dso, struct machine *machine, int *fd)
{
	*fd = -1;
	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
		return false;

	mutex_lock(dso__data_open_lock());

	try_to_open_dso(dso, machine);

	*fd = dso__data(dso)->fd;
	if (*fd >= 0)
		return true;	/* lock intentionally kept held — see above */

	mutex_unlock(dso__data_open_lock());
	return false;
}

/* Release the lock taken by a successful dso__data_get_fd(). */
void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	mutex_unlock(dso__data_open_lock());
}

/*
 * Test-and-set whether @dso's data status was already seen by the given
 * observer @by.  Returns true if it had been seen before, false the first
 * time (and marks it seen).
 */
bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
{
	u32 flag = 1 << by;

	if (dso__data(dso)->status_seen & flag)
		return true;

	dso__data(dso)->status_seen |= flag;

	return false;
}

#ifdef HAVE_LIBBPF_SUPPORT
/*
 * Read up to one cache chunk of a BPF program's jited instructions at
 * @offset into @data.  Returns bytes copied, or -1 on lookup failure
 * (marking the dso's data status as ERROR) or out-of-range offset.
 */
static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
	struct bpf_prog_info_node *node;
	ssize_t size = DSO__DATA_CACHE_SIZE;
	struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);
	u64 len;
	u8 *buf;

	node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
	if (!node || !node->info_linear) {
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	len = node->info_linear->info.jited_prog_len;
	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;

	if (offset >= len)
		return -1;

	/* Clamp to the remaining jited program bytes. */
	size = (ssize_t)min(len - offset, (u64)size);
	memcpy(data, buf + offset, size);
	return size;
}

/*
 * Cache the jited program length as @dso's file size.  Returns 0 on
 * success, -1 if the prog info cannot be found (status set to ERROR).
 */
static int bpf_size(struct dso *dso)
{
	struct bpf_prog_info_node *node;
	struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);

	node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
	if (!node || !node->info_linear) {
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	dso__data(dso)->file_size = node->info_linear->info.jited_prog_len;
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
/* Free every chunk in @dso's data cache rb-tree (under the dso lock). */
static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso__data(dso)->cache;
	struct rb_node *next = rb_first(root);

	mutex_lock(dso__lock(dso));
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		/* Advance before erase — rb_erase invalidates the node. */
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	mutex_unlock(dso__lock(dso));
}

/*
 * Look up the cache chunk covering @offset in @dso's cache tree, or NULL.
 * Chunks are keyed by start offset and span DSO__DATA_CACHE_SIZE bytes.
 */
static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso__data(dso)->cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}

/*
 * Insert @new into @dso's cache tree.  Returns NULL on success; if a
 * chunk covering the same offset already exists (another thread won the
 * race), returns that existing chunk instead and leaves the tree alone.
 */
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso__data(dso)->cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	mutex_lock(dso__lock(dso));
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			goto out;	/* lost the race; return existing chunk */
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	mutex_unlock(dso__lock(dso));
	return cache;
}

/*
 * Copy between @data and the cached chunk at @offset: out of the cache
 * when @out is true, into it otherwise.  Returns the number of bytes
 * actually copied (bounded by what the chunk holds past @offset).
 */
static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
				 u64 size, bool out)
{
	u64 cache_offset = offset - cache->offset;
	u64 cache_size   = min(cache->size - cache_offset, size);

	if (out)
		memcpy(data, cache->data + cache_offset, cache_size);
	else
		memcpy(cache->data + cache_offset, data, cache_size);
	return cache_size;
}

/*
 * pread one cache-chunk worth of data from @dso's file at @offset,
 * (re)opening the fd under the data-open lock first.  Returns bytes read
 * or a negative errno.
 */
static ssize_t file_read(struct dso *dso, struct machine *machine,
			 u64 offset, char *data)
{
	ssize_t ret;

	mutex_lock(dso__data_open_lock());

	/*
	 * dso__data(dso)->fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso__data(dso)->fd < 0) {
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		ret = -errno;
		goto out;
	}

	ret = pread(dso__data(dso)->fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
	mutex_unlock(dso__data_open_lock());
	return ret;
}

/*
 * Allocate, fill (from BPF info, zeroed OOL data, or the file) and insert
 * the cache chunk containing @offset.  On a lost insert race the already-
 * present chunk is returned instead.  Returns NULL on error with *@ret
 * holding the (<= 0) read result.
 */
static struct dso_cache *dso_cache__populate(struct dso *dso,
					     struct machine *machine,
					     u64 offset, ssize_t *ret)
{
	/* Align down to the chunk boundary containing @offset. */
	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
	struct dso_cache *cache;
	struct dso_cache *old;

	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
	if (!cache) {
		*ret = -ENOMEM;
		return NULL;
	}
#ifdef HAVE_LIBBPF_SUPPORT
	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
		*ret = bpf_read(dso, cache_offset, cache->data);
	else
#endif
	if (dso__binary_type(dso) == DSO_BINARY_TYPE__OOL)
		*ret = DSO__DATA_CACHE_SIZE;	/* OOL data stays zero-filled */
	else
		*ret = file_read(dso, machine, cache_offset, cache->data);

	if (*ret <= 0) {
		free(cache);
		return NULL;
	}

	cache->offset = cache_offset;
	cache->size   = *ret;

	old = dso_cache__insert(dso, cache);
	if (old) {
		/* we lose the race */
		free(cache);
		cache = old;
	}

	return cache;
}

/* Find the chunk covering @offset, populating it on a cache miss. */
static struct dso_cache *dso_cache__find(struct dso *dso,
					 struct machine *machine,
					 u64 offset,
					 ssize_t *ret)
{
	struct dso_cache *cache = __dso_cache__find(dso, offset);

	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
}

/*
 * Perform one chunk's worth of cached I/O at @offset.  Returns bytes
 * transferred, or the (<= 0) populate result when the chunk could not be
 * obtained.
 */
static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
			    u64 offset, u8 *data, ssize_t size, bool out)
{
	struct dso_cache *cache;
	ssize_t ret = 0;

	cache = dso_cache__find(dso, machine, offset, &ret);
	if (!cache)
		return ret;

	return dso_cache__memcpy(cache, offset, data, size, out);
}

/*
 * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks
 * in the rb_tree. Any read to already cached data is served
 * by cached data. Writes update the cache only, not the backing file.
 */
static ssize_t cached_io(struct dso *dso, struct machine *machine,
			 u64 offset, u8 *data, ssize_t size, bool out)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_io(dso, machine, offset, p, size, out);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r      += ret;
		p      += ret;
		offset += ret;
		size   -= ret;

	} while (size);

	return r;
}
/*
 * fstat @dso's data file (opening it if needed) and cache its size in
 * dso__data(dso)->file_size.  Returns 0 on success or a negative errno,
 * setting the data status to ERROR on failure.
 */
static int file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;

	mutex_lock(dso__data_open_lock());

	/*
	 * dso__data(dso)->fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso__data(dso)->fd < 0) {
		ret = -errno;
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso__data(dso)->fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %m\n");
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso__data(dso)->file_size = st.st_size;

out:
	mutex_unlock(dso__data_open_lock());
	return ret;
}

/*
 * Ensure dso__data(dso)->file_size is populated (from BPF prog info or
 * the backing file).  Returns 0 on success, negative on failure.
 */
int dso__data_file_size(struct dso *dso, struct machine *machine)
{
	if (dso__data(dso)->file_size)
		return 0;

	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
		return -1;
#ifdef HAVE_LIBBPF_SUPPORT
	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return bpf_size(dso);
#endif
	return file_size(dso, machine);
}

/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso__data(dso)->file_size;
}

/*
 * Common entry for cached reads (@out true) and cache-only writes
 * (@out false): validate @offset against the file size and guard the
 * offset+size overflow, then delegate to cached_io().
 */
static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
				      u64 offset, u8 *data, ssize_t size,
				      bool out)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso__data(dso)->file_size)
		return -1;

	/* Reject offset+size wraparound; EOF within range is handled by cached_io. */
	if (offset + size < offset)
		return -1;

	return cached_io(dso, machine, offset, data, size, out);
}

/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso file offset. Open
 * dso data file and use cached_read to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, true);
}

/*
 * Determine @dso's ELF e_machine value.  Synthetic/kernel types report
 * EM_HOST; file-backed types are sniffed by reading the ELF ident and the
 * e_machine field (byte-swapped if the file's endianness differs).
 * Returns EM_NONE when unknown or not a valid ELF file.
 */
uint16_t dso__e_machine(struct dso *dso, struct machine *machine)
{
	uint16_t e_machine = EM_NONE;
	int fd;

	switch (dso__binary_type(dso)) {
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__JAVA_JIT:
		/* Kernel/JIT/BPF content is always for the running host. */
		return EM_HOST;
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
	case DSO_BINARY_TYPE__GNU_DEBUGDATA:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		break;	/* file-backed: sniff the ELF header below */
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return EM_NONE;
	}

	mutex_lock(dso__data_open_lock());

	/*
	 * dso__data(dso)->fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);
	fd = dso__data(dso)->fd;
	if (fd >= 0) {
		unsigned char e_ident[EI_NIDENT];

		/* e_ident is at file offset 0 for both ELF classes. */
		_Static_assert(offsetof(Elf32_Ehdr, e_ident) == 0, "Unexpected offset");
		_Static_assert(offsetof(Elf64_Ehdr, e_ident) == 0, "Unexpected offset");
		if (pread(fd, &e_ident, sizeof(e_ident), 0) == sizeof(e_ident) &&
		    memcmp(e_ident, ELFMAG, SELFMAG) == 0 &&
		    e_ident[EI_CLASS] > ELFCLASSNONE && e_ident[EI_CLASS] < ELFCLASSNUM &&
		    e_ident[EI_DATA] > ELFDATANONE && e_ident[EI_DATA] < ELFDATANUM &&
		    e_ident[EI_VERSION] == EV_CURRENT) {
			/* e_machine sits at offset 18 in both ELF classes. */
			_Static_assert(offsetof(Elf32_Ehdr, e_machine) == 18, "Unexpected offset");
			_Static_assert(offsetof(Elf64_Ehdr, e_machine) == 18, "Unexpected offset");

			if (dso__needs_swap(dso) == DSO_SWAP__UNSET)
				dso__swap_init(dso, e_ident[EI_DATA]);

			if (dso__needs_swap(dso) != DSO_SWAP__UNSET &&
			    pread(fd, &e_machine, sizeof(e_machine), 18) == sizeof(e_machine) &&
			    e_machine < EM_NUM)
				e_machine = DSO__SWAP(dso, uint16_t, e_machine);
			else
				e_machine = EM_NONE;
		}
	}
	mutex_unlock(dso__data_open_lock());
	return e_machine;
}

/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map used to translate @addr to a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map__map_ip(map, addr);

	return dso__data_read_offset(dso, machine, offset, data, size);
}

/**
 * dso__data_write_cache_offs - Write data to dso data cache at file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data_in: buffer to write
 * @size: size of the @data_in buffer
 *
 * Write into the dso file data cache, but do not change the file itself.
 */
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
				   u64 offset, const u8 *data_in, ssize_t size)
{
	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */

	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size, false);
}

/**
 * dso__data_write_cache_addr - Write data to dso data cache at dso address
 * @dso: dso object
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to write
 * @size: size of the @data buffer
 *
 * External interface to write into the dso file data cache, but do not change
 * the file itself.
 */
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
				   struct machine *machine, u64 addr,
				   const u8 *data, ssize_t size)
{
	u64 offset = map__map_ip(map, addr);

	return dso__data_write_cache_offs(dso, machine, offset, data, size);
}

/*
 * Create a new map starting at address 0, backed by a freshly created dso
 * named @name.  The local dso reference is dropped after map creation;
 * presumably map__new2() took its own — confirm against its definition.
 */
struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso) {
		map = map__new2(0, dso);
		dso__put(dso);
	}

	return map;
}

/*
 * Find or create the kernel dso named @name and mark it as kernel space
 * with the given short name.
 */
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso__set_kernel(dso, dso_type);
	}

	return dso;
}

/* Install @name as the long name, freeing any previously owned string. */
static void __dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
{
	if (dso__long_name_allocated(dso))
		free((char *)dso__long_name(dso));

	RC_CHK_ACCESS(dso)->long_name = name;
	RC_CHK_ACCESS(dso)->long_name_len = strlen(name);
	/* Record whether the dso now owns @name and must free it later. */
	dso__set_long_name_allocated(dso, name_allocated);
}

static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
{
	struct dsos *dsos = dso__dsos(dso);

	if (name == NULL)
		return;

	if (dsos) {
		/*
		 * Need to avoid re-sorting the dsos breaking by non-atomically
		 * renaming the dso.
		 */
		down_write(&dsos->lock);
		__dso__set_long_name_id(dso, name, name_allocated);
		dsos->sorted = false;
		up_write(&dsos->lock);
	} else {
		__dso__set_long_name_id(dso, name, name_allocated);
	}
}

/*
 * Compare two dso_ids field by field.  Note the inverted sign convention:
 * the larger value compares as "smaller" (-1).  Each group of fields is
 * only compared when both sides have it marked valid/defined.
 */
static int __dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
{
	if (a->mmap2_valid && b->mmap2_valid) {
		if (a->maj > b->maj) return -1;
		if (a->maj < b->maj) return 1;

		if (a->min > b->min) return -1;
		if (a->min < b->min) return 1;

		if (a->ino > b->ino) return -1;
		if (a->ino < b->ino) return 1;
	}
	if (a->mmap2_ino_generation_valid && b->mmap2_ino_generation_valid) {
		if (a->ino_generation > b->ino_generation) return -1;
		if (a->ino_generation < b->ino_generation) return 1;
	}
	if (build_id__is_defined(&a->build_id) && build_id__is_defined(&b->build_id)) {
		if (a->build_id.size != b->build_id.size)
			return a->build_id.size < b->build_id.size ? -1 : 1;
		return memcmp(a->build_id.data, b->build_id.data, a->build_id.size);
	}
	return 0;
}

/* Sentinel id carrying no identifying data; dso_id__cmp() matches it to all. */
const struct dso_id dso_id_empty = {
	{
		.maj = 0,
		.min = 0,
		.ino = 0,
		.ino_generation = 0,
	},
	.mmap2_valid = false,
	.mmap2_ino_generation_valid = false,
	{
		.size = 0,
	}
};

/*
 * Merge any identifying fields of @id that @dso does not yet have.  Never
 * overwrites already-valid data; the sort flag is only dropped when
 * something actually changed.
 */
void __dso__improve_id(struct dso *dso, const struct dso_id *id)
{
	struct dsos *dsos = dso__dsos(dso);
	struct dso_id *dso_id = dso__id(dso);
	bool changed = false;

	/* dsos write lock held by caller.
	 */

	if (id->mmap2_valid && !dso_id->mmap2_valid) {
		dso_id->maj = id->maj;
		dso_id->min = id->min;
		dso_id->ino = id->ino;
		dso_id->mmap2_valid = true;
		changed = true;
	}
	if (id->mmap2_ino_generation_valid && !dso_id->mmap2_ino_generation_valid) {
		dso_id->ino_generation = id->ino_generation;
		dso_id->mmap2_ino_generation_valid = true;
		changed = true;
	}
	if (build_id__is_defined(&id->build_id) && !build_id__is_defined(&dso_id->build_id)) {
		dso_id->build_id = id->build_id;
		changed = true;
	}
	/* The id takes part in dsos ordering, so a change invalidates the sort. */
	if (changed && dsos)
		dsos->sorted = false;
}

int dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
{
	if (a == &dso_id_empty || b == &dso_id_empty) {
		/* There is no valid data to compare so the comparison always returns identical. */
		return 0;
	}

	return __dso_id__cmp(a, b);
}

/* Compare two dsos by their ids. */
int dso__cmp_id(struct dso *a, struct dso *b)
{
	return __dso_id__cmp(dso__id(a), dso__id(b));
}

void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	dso__set_long_name_id(dso, name, name_allocated);
}

/* Install @name as the short name, freeing any previously owned string. */
static void __dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (dso__short_name_allocated(dso))
		free((char *)dso__short_name(dso));

	RC_CHK_ACCESS(dso)->short_name = name;
	RC_CHK_ACCESS(dso)->short_name_len = strlen(name);
	/* Record whether the dso now owns @name and must free it later. */
	dso__set_short_name_allocated(dso, name_allocated);
}

void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct dsos *dsos = dso__dsos(dso);

	if (name == NULL)
		return;

	if (dsos) {
		/*
		 * Need to avoid re-sorting the dsos breaking by non-atomically
		 * renaming the dso.
		 */
		down_write(&dsos->lock);
		__dso__set_short_name(dso, name, name_allocated);
		dsos->sorted = false;
		up_write(&dsos->lock);
	} else {
		__dso__set_short_name(dso, name, name_allocated);
	}
}

/* Length of the name used to display this dso ("[unknown]" when NULL). */
int dso__name_len(const struct dso *dso)
{
	if (!dso)
		return strlen("[unknown]");
	/* Verbose output prefers the full (long) name. */
	if (verbose > 0)
		return dso__long_name_len(dso);

	return dso__short_name_len(dso);
}

bool dso__loaded(const struct dso *dso)
{
	return RC_CHK_ACCESS(dso)->loaded;
}

bool dso__sorted_by_name(const struct dso *dso)
{
	return RC_CHK_ACCESS(dso)->sorted_by_name;
}

void dso__set_sorted_by_name(struct dso *dso)
{
	RC_CHK_ACCESS(dso)->sorted_by_name = true;
}

/*
 * Allocate and initialize a dso.  A single allocation also holds the name,
 * which is copied into storage at the struct's tail (dso->name); both long
 * and short names initially point at that copy.
 */
struct dso *dso__new_id(const char *name, const struct dso_id *id)
{
	RC_STRUCT(dso) *dso = zalloc(sizeof(*dso) + strlen(name) + 1);
	struct dso *res;
	struct dso_data *data;

	if (!dso)
		return NULL;

	if (ADD_RC_CHK(res, dso)) {
		strcpy(dso->name, name);
		if (id)
			dso->id = *id;
		dso__set_long_name_id(res, dso->name, false);
		dso__set_short_name(res, dso->name, false);
		dso->symbols = RB_ROOT_CACHED;
		dso->symbol_names = NULL;
		dso->symbol_names_len = 0;
		dso->inlined_nodes = RB_ROOT_CACHED;
		dso->srclines = RB_ROOT_CACHED;
		dso->data_types = RB_ROOT;
		dso->global_vars = RB_ROOT;
		dso->data.fd = -1;
		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_SPACE__USER;
		dso->is_kmod = 0;
		dso->needs_swap = DSO_SWAP__UNSET;
		dso->comp = COMP_ID__NONE;
		mutex_init(&dso->lock);
		refcount_set(&dso->refcnt, 1);
		data = &dso->data;
		data->cache = RB_ROOT;
		data->fd = -1;
		data->status = DSO_DATA_STATUS_UNKNOWN;
		INIT_LIST_HEAD(&data->open_entry);
#ifdef REFCNT_CHECKING
		data->dso = NULL; /* Set when on the open_entry list. */
#endif
	}
	return res;
}

struct dso *dso__new(const char *name)
{
	return dso__new_id(name, NULL);
}

/* Tear down a dso; reached when the last reference is put. */
void dso__delete(struct dso *dso)
{
	if (dso__dsos(dso))
		pr_err("DSO %s is still in rbtree when being deleted!\n", dso__long_name(dso));

	/* free inlines first, as they reference symbols */
	inlines__tree_delete(&RC_CHK_ACCESS(dso)->inlined_nodes);
	srcline__tree_delete(&RC_CHK_ACCESS(dso)->srclines);
	symbols__delete(&RC_CHK_ACCESS(dso)->symbols);
	RC_CHK_ACCESS(dso)->symbol_names_len = 0;
	zfree(&RC_CHK_ACCESS(dso)->symbol_names);
	annotated_data_type__tree_delete(dso__data_types(dso));
	global_var_type__tree_delete(dso__global_vars(dso));

	/* Names are only freed when this dso owns the allocation. */
	if (RC_CHK_ACCESS(dso)->short_name_allocated) {
		zfree((char **)&RC_CHK_ACCESS(dso)->short_name);
		RC_CHK_ACCESS(dso)->short_name_allocated = false;
	}

	if (RC_CHK_ACCESS(dso)->long_name_allocated) {
		zfree((char **)&RC_CHK_ACCESS(dso)->long_name);
		RC_CHK_ACCESS(dso)->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(RC_CHK_ACCESS(dso)->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	dso__free_libdw(dso);
	dso__free_symsrc_filename(dso);
	nsinfo__zput(RC_CHK_ACCESS(dso)->nsinfo);
	mutex_destroy(dso__lock(dso));
	RC_CHK_FREE(dso);
}

/* Take a new reference; returns the handle the caller should use. */
struct dso *dso__get(struct dso *dso)
{
	struct dso *result;

	if (RC_CHK_GET(result, dso))
		refcount_inc(&RC_CHK_ACCESS(dso)->refcnt);

	return result;
}

/* Drop a reference, deleting the dso when the count reaches zero. */
void dso__put(struct dso *dso)
{
#ifdef REFCNT_CHECKING
	/*
	 * NOTE(review): refcnt == 2 presumably means only the data-open
	 * bookkeeping still holds a reference besides the caller, so the
	 * data fd is closed before the final put — confirm.
	 */
	if (dso &&
	    dso__data(dso) && refcount_read(&RC_CHK_ACCESS(dso)->refcnt) == 2)
		dso__data_close(dso);
#endif
	if (dso && refcount_dec_and_test(&RC_CHK_ACCESS(dso)->refcnt))
		dso__delete(dso);
	else
		RC_CHK_PUT(dso);
}

/*
 * Decide whether multi-byte values read from the DSO must be byte-swapped,
 * by comparing the DSO's ELF data encoding with the host's endianness.
 */
int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;

	dso__set_needs_swap(dso, DSO_SWAP__NO);

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso__set_needs_swap(dso, DSO_SWAP__YES);
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso__set_needs_swap(dso, DSO_SWAP__YES);
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}

void dso__set_build_id(struct dso *dso, const struct build_id *bid)
{
	dso__id(dso)->build_id = *bid;
}

/*
 * Compare the dso's build-id with @bid.  A stored BUILD_ID_SIZE-long id
 * may be zero-padded beyond a shorter @bid; that still counts as equal.
 */
bool dso__build_id_equal(const struct dso *dso, const struct build_id *bid)
{
	const struct build_id *dso_bid = dso__bid(dso);

	if (dso_bid->size > bid->size && dso_bid->size == BUILD_ID_SIZE) {
		/*
		 * For the backward compatibility, it allows a build-id has
		 * trailing zeros.
		 */
		return !memcmp(dso_bid->data, bid->data, bid->size) &&
		       !memchr_inv(&dso_bid->data[bid->size], 0,
				   dso_bid->size - bid->size);
	}

	return dso_bid->size == bid->size &&
	       memcmp(dso_bid->data, bid->data, dso_bid->size) == 0;
}

/* Read the running kernel's build-id from sysfs notes into @dso. */
void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
{
	char path[PATH_MAX];
	struct build_id bid = { .size = 0, };

	if (machine__is_default_guest(machine))
		return;
	/*
	 * NOTE(review): sprintf into a PATH_MAX buffer assumes root_dir is
	 * short enough — snprintf would be safer; confirm callers bound it.
	 */
	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
	sysfs__read_build_id(path, &bid);
	dso__set_build_id(dso, &bid);
}

int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	struct build_id bid = { .size = 0, };
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso__short_name(dso) + 1;

	/* %.*s with strlen(name) - 1 drops the trailing ']'. */
	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	sysfs__read_build_id(filename, &bid);
	dso__set_build_id(dso, &bid);
	return 0;
}

/* Format the dso's build-id into a string and print it to @fp. */
static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
	return fprintf(fp, "%s", sbuild_id);
}

/* Dump the dso's names, state, build-id and all symbols to @fp. */
size_t dso__fprintf(struct dso *dso, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = fprintf(fp, "dso: %s (", dso__short_name(dso));

	if (dso__short_name(dso) != dso__long_name(dso))
		ret += fprintf(fp, "%s, ", dso__long_name(dso));
	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ?
		"" : "NOT ");
	ret += dso__fprintf_buildid(dso, fp);
	ret += fprintf(fp, ")\n");
	for (nd = rb_first_cached(dso__symbols(dso)); nd; nd = rb_next(nd)) {
		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
		ret += symbol__fprintf(pos, fp);
	}

	return ret;
}

/* Determine the dso type by inspecting its open data file. */
enum dso_type dso__type(struct dso *dso, struct machine *machine)
{
	int fd = -1;
	enum dso_type type = DSO__TYPE_UNKNOWN;

	if (dso__data_get_fd(dso, machine, &fd)) {
		type = dso__type_fd(fd);
		dso__data_put_fd(dso);
	}

	return type;
}

/*
 * Render a human-readable message for the dso's last load error into @buf.
 * Non-negative values are plain errnos; other values must fall inside
 * [__DSO_LOAD_ERRNO__START, __DSO_LOAD_ERRNO__END) and index the table.
 */
int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = *dso__load_errno(dso);
	/*
	 * This must have a same ordering as the enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		errno = errnum;
		scnprintf(buf, buflen, "%m");

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}

/* Does @dso_name match "/tmp/perf-<pid>.map"?  On match *tid gets the pid. */
bool perf_pid_map_tid(const char *dso_name, int *tid)
{
	return sscanf(dso_name, "/tmp/perf-%d.map", tid) == 1;
}

bool is_perf_pid_map_name(const char *dso_name)
{
	int tid;

	return perf_pid_map_tid(dso_name, &tid);
}

/* Carries the target ip in and the discovered file offset out. */
struct find_file_offset_data {
	u64 ip;
	u64 offset;
};

/* This will be called for each PHDR in an ELF binary */
static int find_file_offset(u64 start, u64 len, u64 pgoff, void *arg)
{
	struct find_file_offset_data *data = arg;

	/* Returning 1 stops the iteration once the covering segment is found. */
	if (start <= data->ip && data->ip < start
	    + len) {
		data->offset = pgoff + data->ip - start;
		return 1;
	}
	return 0;
}

/*
 * Map @start (a virtual address) to a file offset in @symfs_filename via
 * its program headers, then read @len bytes from there into a freshly
 * allocated buffer.  On success *out_buf owns the allocation; on failure
 * NULL is returned with errno set.
 */
static const u8 *__dso__read_symbol(struct dso *dso, const char *symfs_filename,
				    u64 start, size_t len,
				    u8 **out_buf, u64 *out_buf_len, bool *is_64bit)
{
	struct nscookie nsc;
	int fd;
	ssize_t count;
	struct find_file_offset_data data = {
		.ip = start,
	};
	u8 *code_buf = NULL;
	int saved_errno;

	/*
	 * Open inside the dso's mount namespace; preserve open()'s errno
	 * across the switch back to the original namespace.
	 */
	nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
	fd = open(symfs_filename, O_RDONLY);
	saved_errno = errno;
	nsinfo__mountns_exit(&nsc);
	if (fd < 0) {
		errno = saved_errno;
		return NULL;
	}
	if (file__read_maps(fd, /*exe=*/true, find_file_offset, &data, is_64bit) <= 0) {
		close(fd);
		errno = ENOENT;
		return NULL;
	}
	code_buf = malloc(len);
	if (code_buf == NULL) {
		close(fd);
		errno = ENOMEM;
		return NULL;
	}
	/* Short reads count as failure; pread's errno survives the close(). */
	count = pread(fd, code_buf, len, data.offset);
	saved_errno = errno;
	close(fd);
	if ((u64)count != len) {
		free(code_buf);
		errno = saved_errno;
		return NULL;
	}
	*out_buf = code_buf;
	*out_buf_len = len;
	return code_buf;
}

/*
 * Read a symbol into memory for disassembly by a library like capstone or
 * libLLVM. If memory is allocated out_buf holds it.
 */
const u8 *dso__read_symbol(struct dso *dso, const char *symfs_filename,
			   const struct map *map, const struct symbol *sym,
			   u8 **out_buf, u64 *out_buf_len, bool *is_64bit)
{
	u64 start = map__rip_2objdump(map, sym->start);
	u64 end = map__rip_2objdump(map, sym->end);
	size_t len = end - start;

	*out_buf = NULL;
	*out_buf_len = 0;
	*is_64bit = false;

	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE) {
		/*
		 * Note, there is fallback BPF image disassembly in the objdump
		 * version but it currently does nothing.
		 */
		errno = EOPNOTSUPP;
		return NULL;
	}
	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) {
#ifdef HAVE_LIBBPF_SUPPORT
		struct bpf_prog_info_node *info_node;
		struct perf_bpil *info_linear;

		*is_64bit = sizeof(void *) == sizeof(u64);
		info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env,
							 dso__bpf_prog(dso)->id);
		if (!info_node) {
			errno = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
			return NULL;
		}
		info_linear = info_node->info_linear;
		assert(len <= info_linear->info.jited_prog_len);
		/* JITed code comes from the prog info; nothing is allocated here. */
		*out_buf_len = len;
		return (const u8 *)(uintptr_t)(info_linear->info.jited_prog_insns);
#else
		pr_debug("No BPF program disassembly support\n");
		errno = EOPNOTSUPP;
		return NULL;
#endif
	}
	return __dso__read_symbol(dso, symfs_filename, start, len,
				  out_buf, out_buf_len, is_64bit);
}

/*
 * Open a debuginfo handle for @dso.  If producing the filename required
 * decompression to a temporary file, that file is unlinked again before
 * returning; the name string itself is freed here in all cases.
 */
struct debuginfo *dso__debuginfo(struct dso *dso)
{
	char *name;
	bool decomp = false;
	struct debuginfo *dinfo = NULL;

	mutex_lock(dso__lock(dso));

	name = dso__get_filename(dso, "", &decomp);
	if (name)
		dinfo = debuginfo__new(name);

	if (decomp)
		unlink(name);

	mutex_unlock(dso__lock(dso));
	free(name);
	return dinfo;
}