#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "symbol.h"
#include "demangle-java.h"
#include "demangle-rust.h"
#include "machine.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
#include "debug.h"

#ifndef EM_AARCH64
#define EM_AARCH64	183  /* ARM 64 bit */
#endif

typedef Elf64_Nhdr GElf_Nhdr;

#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);

static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
	return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
static inline char *bfd_demangle(void __maybe_unused *v,
				 const char __maybe_unused *c,
				 int __maybe_unused i)
{
	return NULL;
}
#else
#define PACKAGE 'perf'
#include <bfd.h>
#endif
#endif

#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
{
	pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
	return -1;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t index
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))
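
/*
 * Example usage (illustrative sketch): walk a symbol table the way
 * dso__load_sym() below does.  Here "syms", "nr_syms" and "symstrs"
 * stand for the symbol table Elf_Data, its entry count and its string
 * table, respectively:
 *
 *	GElf_Sym sym;
 *	uint32_t idx;
 *
 *	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
 *		const char *name = elf_sym__name(&sym, symstrs);
 *		...
 *	}
 */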

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS;
}

static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sym__is_function(sym);
	case MAP__VARIABLE:
		return elf_sym__is_object(sym);
	default:
		return false;
	}
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
			  enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sec__is_text(shdr, secstrs);
	case MAP__VARIABLE:
		return elf_sec__is_data(shdr, secstrs);
	default:
		return false;
	}
}

static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}

Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
 * .dynsym or .symtab).
 * And always look at the original dso, not at debuginfo packages, which
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the idxes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}
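
/*
 * Illustrative example of what the loops above synthesize: for a
 * .rela.plt (or .rel.plt) entry whose relocation refers to the .dynsym
 * entry for "malloc", a symbol named "malloc@plt" is created, sized to
 * one PLT entry (shdr_plt.sh_entsize) and placed at the running
 * plt_offset.
 */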
/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)

static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}

int filename__read_build_id(const char *filename, void *bf, size_t size)
{
	int fd, err = -1;
	Elf *elf;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	err = elf_read_build_id(elf, bf, size);

	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}
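
/*
 * Both the section-based parser above and the raw-stream parser below
 * rely on the standard ELF note record layout: an Nhdr, followed by
 * n_namesz bytes of name and n_descsz bytes of descriptor, each padded
 * to a 4-byte boundary (e.g. NOTE_ALIGN(5) == 8).  For a GNU build-id
 * note the name is "GNU" and the descriptor holds the build-id bytes.
 */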
int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
{
	int fd, err = -1;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, build_id, sz) == (ssize_t)sz) {
					memset(build_id + sz, 0, size - sz);
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			int n = namesz + descsz;
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}

int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int fd, err = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_elf_end;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".gnu_debuglink", NULL);
	if (sec == NULL)
		goto out_elf_end;

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out_elf_end;

	/* the start of this section is a zero-terminated string */
	strncpy(debuglink, data->d_buf, size);

	err = 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;

	dso->needs_swap = DSO_SWAP__NO;

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}

static int decompress_kmodule(struct dso *dso, const char *name,
			      enum dso_binary_type type)
{
	int fd = -1;
	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";
	struct kmod_path m;

	if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		return -1;

	if (type == DSO_BINARY_TYPE__BUILD_ID_CACHE)
		name = dso->long_name;

	if (kmod_path__parse_ext(&m, name) || !m.comp)
		return -1;

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		dso->load_errno = errno;
		goto out;
	}

	if (!decompress_to_file(m.ext, name, fd)) {
		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	unlink(tmpbuf);

out:
	free(m.ext);
	return fd;
}

bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}

bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}

void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}

bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
	return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
}

int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	if (dso__needs_decompress(dso)) {
		fd = decompress_kmodule(dso, name, type);
		if (fd < 0)
			return -1;
	} else {
		fd = open(name, O_RDONLY);
		if (fd < 0) {
			dso->load_errno = errno;
			return -1;
		}
	}

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
		dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
		goto out_elf_end;
	}

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) {
			dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
			goto out_elf_end;
		}

		if (!dso__build_id_equal(dso, build_id)) {
			pr_debug("%s: build id mismatch for %s.\n", __func__, name);
			dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
			goto out_elf_end;
		}
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
					 NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
					 &ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
					 &ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_TYPE_USER)
		ss->adjust_symbols = true;
	else
		ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);

	ss->name = strdup(name);
	if (!ss->name) {
		dso->load_errno = errno;
		goto out_elf_end;
	}

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}

/**
 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found.  Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}

/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}
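
/*
 * Worked example (hypothetical addresses): if the reference symbol is
 * "_text" with an unrelocated address of 0xffffffff81000000 in vmlinux
 * and a runtime (kallsyms) address of 0xffffffff81200000, ref_reloc()
 * returns 0x200000, the amount by which the running kernel was
 * relocated.
 */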
static bool want_demangle(bool is_kernel_sym)
{
	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}

void __weak arch__sym_update(struct symbol *s __maybe_unused,
			     GElf_Sym *sym __maybe_unused) { }

int dso__load_sym(struct dso *dso, struct map *map,
		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
		  symbol_filter_t filter, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	GElf_Shdr tshdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	if (kmap && !kmaps)
		return -1;

	dso->symtab_type = syms_ss->type;
	dso->is_64_bit = syms_ss->is_64_bit;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols[map->type]);

	if (!syms_ss->symtab) {
		/*
		 * If the vmlinux is stripped, fail so we will fall back
		 * to using kallsyms. The vmlinux runtime symbols aren't
		 * of much use.
		 */
		if (dso->kernel)
			goto out_elf_end;

		syms_ss->symtab  = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (elf_section_by_name(elf, &ehdr, &tshdr, ".text", NULL))
		dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	/*
	 * Handle any relocation of vdso necessary because older kernels
	 * attempted to prelink vdso to its virtual address.
	 */
	if (dso__is_vdso(dso))
		map->reloc = map->start - dso->text_offset;

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso.  For
	 * function mappings, flag the fixups.
	 */
	if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__is_a(&sym, map->type))
			continue;

		/* Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so will confuse the profile
		 * output: */
		if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
			if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
			    && (elf_name[2] == '\0' || elf_name[2] == '.'))
				continue;
		}

		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
							 sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have a value of SHN_ABS in their st_shndx) fail at
		 * elf_getscn().  And it marks the loading as a failure so
		 * already loaded symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/* On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it */
		if ((ehdr.e_machine == EM_ARM) &&
		    (map->type == MAP__FUNCTION) &&
		    (sym.st_value & 1))
			--sym.st_value;

		if (dso->kernel || kmodule) {
			char dso_name[PATH_MAX];

			/* Adjust symbol to map to file offset */
			if (adjust_kernel_syms)
				sym.st_value -= shdr.sh_addr - shdr.sh_offset;

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    dso->short_name_len)) == 0)
				goto new_symbol;

			if (strcmp(section_name, ".text") == 0) {
				/*
				 * The initial kernel mapping is based on
				 * kallsyms and identity maps.  Overwrite it to
				 * map to the kernel dso.
				 */
				if (remap_kernel && dso->kernel) {
					remap_kernel = false;
					map->start = shdr.sh_addr +
						     ref_reloc(kmap);
					map->end = map->start + shdr.sh_size;
					map->pgoff = shdr.sh_offset;
					map->map_ip = map__map_ip;
					map->unmap_ip = map__unmap_ip;
					/* Ensure maps are correctly ordered */
					if (kmaps) {
						map__get(map);
						map_groups__remove(kmaps, map);
						map_groups__insert(kmaps, map);
						map__put(map);
					}
				}

				/*
				 * The initial module mapping is based on
				 * /proc/modules mapped to offset zero.
				 * Overwrite it to map to the module dso.
				 */
				if (remap_kernel && kmodule) {
					remap_kernel = false;
					map->pgoff = shdr.sh_offset;
				}

				curr_map = map;
				curr_dso = dso;
				goto new_symbol;
			}

			if (!kmap)
				goto new_symbol;

			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", dso->short_name, section_name);

			curr_map = map_groups__find_by_name(kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				if (kmodule)
					start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
					goto out_elf_end;
				curr_dso->kernel = dso->kernel;
				curr_dso->long_name = dso->long_name;
				curr_dso->long_name_len = dso->long_name_len;
				curr_map = map__new2(start, curr_dso,
						     map->type);
				dso__put(curr_dso);
				if (curr_map == NULL) {
					goto out_elf_end;
				}
				if (adjust_kernel_syms) {
					curr_map->start = shdr.sh_addr +
							  ref_reloc(kmap);
					curr_map->end = curr_map->start +
							shdr.sh_size;
					curr_map->pgoff = shdr.sh_offset;
				} else {
					curr_map->map_ip = identity__map_ip;
					curr_map->unmap_ip = identity__map_ip;
				}
				curr_dso->symtab_type = dso->symtab_type;
				map_groups__insert(kmaps, curr_map);
				/*
				 * Add it before we drop the reference to curr_map,
				 * i.e. while we still are sure to have a reference
				 * to this DSO via curr_map->dso.
				 */
				dsos__add(&map->groups->machine->dsos, curr_dso);
				/* kmaps already got it */
				map__put(curr_map);
				dso__set_loaded(curr_dso, map->type);
			} else
				curr_dso = curr_map->dso;

			goto new_symbol;
		}

		if ((used_opd && runtime_ss->adjust_symbols)
		    || (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}
new_symbol:
		/*
		 * We need to figure out if the object was created from C++ sources.
		 * DWARF DW_compile_unit has this, but we don't always have access
		 * to it...
		 */
		if (want_demangle(dso->kernel || kmodule)) {
			int demangle_flags = DMGL_NO_OPTS;
			if (verbose)
				demangle_flags = DMGL_PARAMS | DMGL_ANSI;

			demangled = bfd_demangle(NULL, elf_name, demangle_flags);
			if (demangled == NULL)
				demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
			else if (rust_is_mangled(demangled))
				/*
				 * Input to Rust demangling is the BFD-demangled
				 * name which it Rust-demangles in place.
				 */
				rust_demangle_sym(demangled);

			if (demangled != NULL)
				elf_name = demangled;
		}
		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		arch__sym_update(f, &sym);

		if (filter && filter(curr_map, f))
			symbol__delete(f);
		else {
			symbols__insert(&curr_dso->symbols[curr_map->type], f);
			nr++;
		}
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		if (!symbol_conf.allow_aliases)
			symbols__fixup_duplicate(&dso->symbols[map->type]);
		symbols__fixup_end(&dso->symbols[map->type]);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			__map_groups__fixup_end(kmaps, map->type);
		}
	}
	err = nr;
out_elf_end:
	return err;
}

static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}
		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;
		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}

int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
		    bool *is_64_bit)
{
	int err;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		return -1;

	if (is_64_bit)
		*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	err = elf_read_maps(elf, exe, mapfn, data);

	elf_end(elf);
	return err;
}

enum dso_type dso__type_fd(int fd)
{
	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
	GElf_Ehdr ehdr;
	Elf_Kind ek;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_end;

	if (gelf_getclass(elf) == ELFCLASS64) {
		dso_type = DSO__TYPE_64BIT;
		goto out_end;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL)
		goto out_end;

	if (ehdr.e_machine == EM_X86_64)
		dso_type = DSO__TYPE_X32BIT;
	else
		dso_type = DSO__TYPE_32BIT;
out_end:
	elf_end(elf);
out:
	return dso_type;
}

static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}

struct kcore {
	int fd;
	int elfclass;
	Elf *elf;
	GElf_Ehdr ehdr;
};

static int kcore__open(struct kcore *kcore, const char *filename)
{
	GElf_Ehdr *ehdr;

	kcore->fd = open(filename, O_RDONLY);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
	if (!kcore->elf)
		goto out_close;

	kcore->elfclass = gelf_getclass(kcore->elf);
	if (kcore->elfclass == ELFCLASSNONE)
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	return -1;
}

static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
		       bool temp)
{
	kcore->elfclass = elfclass;

	if (temp)
		kcore->fd = mkstemp(filename);
	else
		kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
	if (!kcore->elf)
		goto out_close;

	if (!gelf_newehdr(kcore->elf, elfclass))
		goto out_end;

	memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	unlink(filename);
	return -1;
}

static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}

static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type      = kehdr->e_type;
	ehdr->e_machine   = kehdr->e_machine;
	ehdr->e_version   = kehdr->e_version;
	ehdr->e_entry     = 0;
	ehdr->e_shoff     = 0;
	ehdr->e_flags     = kehdr->e_flags;
	ehdr->e_phnum     = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum     = 0;
	ehdr->e_shstrndx  = 0;

	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff     = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff     = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}

static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
			   u64 addr, u64 len)
{
	GElf_Phdr phdr = {
		.p_type		= PT_LOAD,
		.p_flags	= PF_R | PF_W | PF_X,
		.p_offset	= offset,
		.p_vaddr	= addr,
		.p_paddr	= 0,
		.p_filesz	= len,
		.p_memsz	= len,
		.p_align	= page_size,
	};

	if (!gelf_update_phdr(kcore->elf, idx, &phdr))
		return -1;

	return 0;
}

static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}

struct phdr_data {
	off_t offset;
	u64 addr;
	u64 len;
};

struct kcore_copy_info {
	u64 stext;
	u64 etext;
	u64 first_symbol;
	u64 last_symbol;
	u64 first_module;
	u64 last_module_symbol;
	struct phdr_data kernel_map;
	struct phdr_data modules_map;
};

static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
					u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (strchr(name, '[')) {
		if (start > kci->last_module_symbol)
			kci->last_module_symbol = start;
		return 0;
	}

	if (!kci->first_symbol || start < kci->first_symbol)
		kci->first_symbol = start;

	if (!kci->last_symbol || start > kci->last_symbol)
		kci->last_symbol = start;

	if (!strcmp(name, "_stext")) {
		kci->stext = start;
		return 0;
	}

	if (!strcmp(name, "_etext")) {
		kci->etext = start;
		return 0;
	}

	return 0;
}

static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
				      const char *dir)
{
	char kallsyms_filename[PATH_MAX];

	scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);

	if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
		return -1;

	if (kallsyms__parse(kallsyms_filename, kci,
			    kcore_copy__process_kallsyms) < 0)
		return -1;

	return 0;
}

static int kcore_copy__process_modules(void *arg,
				       const char *name __maybe_unused,
				       u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!kci->first_module || start < kci->first_module)
		kci->first_module = start;

	return 0;
}

static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
				     const char *dir)
{
	char modules_filename[PATH_MAX];

	scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);

	if (symbol__restricted_filename(modules_filename, "/proc/modules"))
		return -1;

	if (modules__parse(modules_filename, kci,
			   kcore_copy__process_modules) < 0)
		return -1;

	return 0;
}

static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
			    u64 s, u64 e)
{
	if (p->addr || s < start || s >= end)
		return;

	p->addr = s;
	p->offset = (s - start) + pgoff;
	p->len = e < end ? e - s : end - s;
}

static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_copy_info *kci = data;
	u64 end = start + len;

	kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
			kci->etext);

	kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
			kci->last_module_symbol);

	return 0;
}

static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
{
	if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
		return -1;

	return 0;
}

static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
				 Elf *elf)
{
	if (kcore_copy__parse_kallsyms(kci, dir))
		return -1;

	if (kcore_copy__parse_modules(kci, dir))
		return -1;

	if (kci->stext)
		kci->stext = round_down(kci->stext, page_size);
	else
		kci->stext = round_down(kci->first_symbol, page_size);

	if (kci->etext) {
		kci->etext = round_up(kci->etext, page_size);
	} else if (kci->last_symbol) {
		kci->etext = round_up(kci->last_symbol, page_size);
		kci->etext += page_size;
	}

	kci->first_module = round_down(kci->first_module, page_size);

	if (kci->last_module_symbol) {
		kci->last_module_symbol = round_up(kci->last_module_symbol,
						   page_size);
		kci->last_module_symbol += page_size;
	}

	if (!kci->stext || !kci->etext)
		return -1;

	if (kci->first_module && !kci->last_module_symbol)
		return -1;

	return kcore_copy__read_maps(kci, elf);
}

static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
				 const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return copyfile_mode(from_filename, to_filename, 0400);
}

static int kcore_copy__unlink(const char *dir, const char *name)
{
	char filename[PATH_MAX];

	scnprintf(filename, PATH_MAX, "%s/%s", dir, name);

	return unlink(filename);
}

static int kcore_copy__compare_fds(int from, int to)
{
	char *buf_from;
	char *buf_to;
	ssize_t ret;
	size_t len;
	int err = -1;

	buf_from = malloc(page_size);
	buf_to = malloc(page_size);
	if (!buf_from || !buf_to)
		goto out;

	while (1) {
		/* Use read because mmap won't work on proc files */
		ret = read(from, buf_from, page_size);
		if (ret < 0)
			goto out;

		if (!ret)
			break;

		len = ret;

		if (readn(to, buf_to, len) != (int)len)
			goto out;

		if (memcmp(buf_from, buf_to, len))
			goto out;
	}

	err = 0;
out:
	free(buf_to);
	free(buf_from);
	return err;
}

static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int from, to, err = -1;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to < 0)
		goto out_close_from;

	err = kcore_copy__compare_fds(from, to);

	close(to);
out_close_from:
	close(from);
	return err;
}

static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
				    const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return kcore_copy__compare_files(from_filename, to_filename);
}

/**
 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
 * @from_dir: from directory
 * @to_dir: to directory
 *
 * This function copies kallsyms, modules and kcore files from one directory to
 * another.  kallsyms and modules are copied entirely.  Only code segments are
 * copied from kcore.  It is assumed that two segments suffice: one for the
 * kernel proper and one for all the modules.  The code segments are determined
 * from kallsyms and modules files.  The kernel map starts at _stext or the
 * lowest function symbol, and ends at _etext or the highest function symbol.
 * The module map starts at the lowest module address and ends at the highest
 * module symbol.  Start addresses are rounded down to the nearest page.  End
 * addresses are rounded up to the nearest page.  An extra page is added to the
 * highest kernel symbol and highest module symbol to, hopefully, encompass that
 * symbol too.  Because it contains only code sections, the resulting kcore is
 * unusual.  One significant peculiarity is that the mapping (start -> pgoff)
 * is not the same for the kernel map and the modules map.  That happens because
 * the data is copied adjacently whereas the original kcore has gaps.  Finally,
 * kallsyms and modules files are compared with their copies to check that
 * modules have not been loaded or unloaded while the copies were taking place.
 *
 * Return: %0 on success, %-1 on failure.
 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 2;
	int idx = 0, err = -1;
	off_t offset = page_size, sz, modules_offset = 0;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];

	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	if (!kci.modules_map.addr)
		count -= 1;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
			    kci.kernel_map.len))
		goto out_extract_close;

	if (kci.modules_map.addr) {
		modules_offset = offset + kci.kernel_map.len;
		if (kcore__add_phdr(&extract, idx, modules_offset,
				    kci.modules_map.addr, kci.modules_map.len))
			goto out_extract_close;
	}

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
		       kci.kernel_map.len))
		goto out_extract_close;

	if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
					 extract.fd, modules_offset,
					 kci.modules_map.len))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	return err;
}
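
/*
 * Illustrative layout of the file written by kcore_copy() above, when
 * both maps are present: the new ELF header and program headers occupy
 * the first page (kcore__write() must fit within "offset", i.e. one
 * page), the kernel text segment is copied at file offset page_size,
 * and the modules segment follows immediately after it.
 */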
int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;
	int idx = 0, err = -1;
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}

void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}

#ifdef HAVE_GELF_GETNOTE_SUPPORT
/**
 * populate_sdt_note : Parse raw data and identify SDT note
 * @elf: elf of the opened file
 * @data: raw data of a section with description offset applied
 * @len: note description size
 * @sdt_notes: List to add the SDT note
 *
 * Responsible for parsing the @data in section .note.stapsdt in @elf and,
 * if it is an SDT note, appending it to the @sdt_notes list.
 */
static int populate_sdt_note(Elf **elf, const char *data, size_t len,
			     struct list_head *sdt_notes)
{
	const char *provider, *name;
	struct sdt_note *tmp = NULL;
	GElf_Ehdr ehdr;
	GElf_Addr base_off = 0;
	GElf_Shdr shdr;
	int ret = -EINVAL;

	union {
		Elf64_Addr a64[NR_ADDR];
		Elf32_Addr a32[NR_ADDR];
	} buf;

	Elf_Data dst = {
		.d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
		.d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
		.d_off = 0, .d_align = 0
	};
	Elf_Data src = {
		.d_buf = (void *) data, .d_type = ELF_T_ADDR,
		.d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
		.d_align = 0
	};

	tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
	if (!tmp) {
		ret = -ENOMEM;
		goto out_err;
	}

	INIT_LIST_HEAD(&tmp->note_list);

	if (len < dst.d_size + 3)
		goto out_free_note;

	/* Translation from file representation to memory representation */
	if (gelf_xlatetom(*elf, &dst, &src,
			  elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
		pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
		goto out_free_note;
	}

	/* Populate the fields of sdt_note */
	provider = data + dst.d_size;

	name = (const char *)memchr(provider, '\0', data + len - provider);
	if (name++ == NULL)
		goto out_free_note;

	tmp->provider = strdup(provider);
	if (!tmp->provider) {
		ret = -ENOMEM;
		goto out_free_note;
	}
	tmp->name = strdup(name);
	if (!tmp->name) {
		ret = -ENOMEM;
		goto out_free_prov;
	}

	if (gelf_getclass(*elf) == ELFCLASS32) {
		memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
		tmp->bit32 = true;
	} else {
		memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
		tmp->bit32 = false;
	}

	if (!gelf_getehdr(*elf, &ehdr)) {
		pr_debug("%s : cannot get elf header.\n", __func__);
		ret = -EBADF;
		goto out_free_name;
	}

	/* Adjust the prelink effect :
	 * Find out the .stapsdt.base section.
	 * This scn will help us to handle prelinking (if present).
	 * Compare the retrieved file offset of the base section with the
	 * base address in the description of the SDT note.  If it is
	 * different, then accordingly, adjust the note location.
	 */
	if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL)) {
		base_off = shdr.sh_offset;
		if (base_off) {
			if (tmp->bit32)
				tmp->addr.a32[0] = tmp->addr.a32[0] + base_off -
					tmp->addr.a32[1];
			else
				tmp->addr.a64[0] = tmp->addr.a64[0] + base_off -
					tmp->addr.a64[1];
		}
	}

	list_add_tail(&tmp->note_list, sdt_notes);
	return 0;

out_free_name:
	free(tmp->name);
out_free_prov:
	free(tmp->provider);
out_free_note:
	free(tmp);
out_err:
	return ret;
}

/**
 * construct_sdt_notes_list : constructs a list of SDT notes
 * @elf : elf to look into
 * @sdt_notes : empty list_head
 *
 * Scans the sections in 'elf' for the section
 * .note.stapsdt.  It then calls populate_sdt_note to find
 * out the SDT events and populates the 'sdt_notes'.
 */
static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
{
	GElf_Ehdr ehdr;
	Elf_Scn *scn = NULL;
	Elf_Data *data;
	GElf_Shdr shdr;
	size_t shstrndx, next;
	GElf_Nhdr nhdr;
	size_t name_off, desc_off, offset;
	int ret = 0;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		ret = -EBADF;
		goto out_ret;
	}
	if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
		ret = -EBADF;
		goto out_ret;
	}

	/* Look for the required section */
	scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
	if (!scn) {
		ret = -ENOENT;
		goto out_ret;
	}

	if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
		ret = -ENOENT;
		goto out_ret;
	}

	data = elf_getdata(scn, NULL);

	/* Get the SDT notes */
	for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
					      &desc_off)) > 0; offset = next) {
		if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
		    !memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
			    sizeof(SDT_NOTE_NAME))) {
			/* Check the type of the note */
			if (nhdr.n_type != SDT_NOTE_TYPE)
				goto out_ret;

			ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
						nhdr.n_descsz, sdt_notes);
			if (ret < 0)
				goto out_ret;
		}
	}
	if (list_empty(sdt_notes))
		ret = -ENOENT;

out_ret:
	return ret;
}

/**
 * get_sdt_note_list : Wrapper to construct a list of sdt notes
 * @head : empty list_head
 * @target : file to find SDT notes from
 *
 * This opens the file, initializes
 * the ELF and then calls construct_sdt_notes_list.
 */
int get_sdt_note_list(struct list_head *head, const char *target)
{
	Elf *elf;
	int fd, ret;

	fd = open(target, O_RDONLY);
	if (fd < 0)
		return -EBADF;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (!elf) {
		ret = -EBADF;
		goto out_close;
	}
	ret = construct_sdt_notes_list(elf, head);
	elf_end(elf);
out_close:
	close(fd);
	return ret;
}
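
/*
 * Illustrative use from a hypothetical caller (the path is only an
 * example): build the list, count the notes, then release them with
 * the helpers below:
 *
 *	LIST_HEAD(sdt_notes);
 *
 *	if (get_sdt_note_list(&sdt_notes, "/usr/bin/prog") == 0) {
 *		int nr = sdt_notes__get_count(&sdt_notes);
 *		...
 *		cleanup_sdt_note_list(&sdt_notes);
 *	}
 */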
/**
 * cleanup_sdt_note_list : free the sdt notes' list
 * @sdt_notes: sdt notes' list
 *
 * Free up the SDT notes in @sdt_notes.
 * Returns the number of SDT notes free'd.
 */
int cleanup_sdt_note_list(struct list_head *sdt_notes)
{
	struct sdt_note *tmp, *pos;
	int nr_free = 0;

	list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
		list_del(&pos->note_list);
		free(pos->name);
		free(pos->provider);
		free(pos);
		nr_free++;
	}
	return nr_free;
}

/**
 * sdt_notes__get_count: Counts the number of sdt events
 * @start: list_head to sdt_notes list
 *
 * Returns the number of SDT notes in a list
 */
int sdt_notes__get_count(struct list_head *start)
{
	struct sdt_note *sdt_ptr;
	int count = 0;

	list_for_each_entry(sdt_ptr, start, note_list)
		count++;
	return count;
}
#endif

void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}