#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "symbol.h"
#include "machine.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
#include "debug.h"

#ifndef EM_AARCH64
#define EM_AARCH64	183  /* ARM 64 bit */
#endif

#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);

static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
	return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
static inline char *bfd_demangle(void __maybe_unused *v,
				 const char __maybe_unused *c,
				 int __maybe_unused i)
{
	return NULL;
}
#else
#define PACKAGE 'perf'
#include <bfd.h>
#endif
#endif

#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t idx
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS;
}

static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sym__is_function(sym);
	case MAP__VARIABLE:
		return elf_sym__is_object(sym);
	default:
		return false;
	}
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
			  enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sec__is_text(shdr, secstrs);
	case MAP__VARIABLE:
		return elf_sec__is_data(shdr, secstrs);
	default:
		return false;
	}
}

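/*
 * Find the section that contains @addr.  Section indexes here are 1-based
 * (index 0 is the reserved NULL section); (size_t)-1 is returned when no
 * section covers the address.
 */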
static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}

Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

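/*
 * A minimal usage sketch of the helpers above, as dso__synthesize_plt_symbols()
 * below applies them: look up a relocation section by name, then walk its
 * entries (the variable names here are illustrative only):
 *
 *	GElf_Shdr shdr;
 *	Elf_Scn *scn = elf_section_by_name(elf, &ehdr, &shdr, ".rela.plt", NULL);
 *	Elf_Data *reldata = elf_getdata(scn, NULL);
 *	GElf_Rela pos_mem, *pos;
 *	uint32_t idx, nr = shdr.sh_size / shdr.sh_entsize;
 *
 *	elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr)
 *		... GELF_R_SYM(pos->r_info) ...
 */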

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, which aren't in the symtabs (be it
 * .dynsym or .symtab).
 * Always look at the original dso, not at debuginfo packages, which
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the idxes to the GOT
	 * and the symbols in the .dynsym they refer to.
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}

/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)
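/*
 * For example, NOTE_ALIGN(5) == 8 and NOTE_ALIGN(8) == 8: the GNU build-id
 * note name "GNU\0" (n_namesz == 4) needs no padding, while a 20-byte SHA-1
 * descriptor is already 4-byte aligned.
 */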

static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}

int filename__read_build_id(const char *filename, void *bf, size_t size)
{
	int fd, err = -1;
	Elf *elf;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	err = elf_read_build_id(elf, bf, size);

	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
{
	int fd, err = -1;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, build_id, sz) == (ssize_t)sz) {
					memset(build_id + sz, 0, size - sz);
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			int n = namesz + descsz;
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}

int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int fd, err = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_elf_end;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".gnu_debuglink", NULL);
	if (sec == NULL)
		goto out_elf_end;

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out_elf_end;

	/* the start of this section is a zero-terminated string */
	strncpy(debuglink, data->d_buf, size);

	err = 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

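/*
 * Decide whether the DSO's data needs byte swapping by comparing the ELF
 * data encoding (EI_DATA) against the endianness of the running host.
 */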
static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;

	dso->needs_swap = DSO_SWAP__NO;

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}

static int decompress_kmodule(struct dso *dso, const char *name,
			      enum dso_binary_type type)
{
	int fd;
	const char *ext = strrchr(name, '.');
	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";

	if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		return -1;

	if (!ext || !is_supported_compression(ext + 1)) {
		ext = strrchr(dso->name, '.');
		if (!ext || !is_supported_compression(ext + 1))
			return -1;
	}

	fd = mkstemp(tmpbuf);
	if (fd < 0)
		return -1;

	if (!decompress_to_file(ext + 1, name, fd)) {
		close(fd);
		fd = -1;
	}

	unlink(tmpbuf);

	return fd;
}

bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}

bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}

void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}

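/*
 * Open @name (decompressing kernel modules when necessary), validate the ELF
 * header and, if the dso carries one, its build-id, then cache the .symtab,
 * .dynsym and .opd sections in @ss for later symbol loading.
 */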
int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	if (dso__needs_decompress(dso))
		fd = decompress_kmodule(dso, name, type);
	else
		fd = open(name, O_RDONLY);

	if (fd < 0)
		return -1;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
		goto out_elf_end;

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
			goto out_elf_end;

		if (!dso__build_id_equal(dso, build_id))
			goto out_elf_end;
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
					 NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
					 &ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
					 &ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_TYPE_USER) {
		GElf_Shdr shdr;
		ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
				      ehdr.e_type == ET_REL ||
				      dso__is_vdso(dso) ||
				      elf_section_by_name(elf, &ehdr, &shdr,
							  ".gnu.prelink_undo",
							  NULL) != NULL);
	} else {
		ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
				     ehdr.e_type == ET_REL;
	}

	ss->name = strdup(name);
	if (!ss->name)
		goto out_elf_end;

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}

/**
 * ref_reloc_sym_not_found - check if the kernel relocation symbol has not yet been found.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found. Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}

/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}

static bool want_demangle(bool is_kernel_sym)
{
	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}

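/*
 * Load symbols from @syms_ss into @dso/@map, using @runtime_ss for section
 * headers and data (the two differ when symbols come from a separate debuginfo
 * file).  Returns the number of symbols added, or -1 on error.
 */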
int dso__load_sym(struct dso *dso, struct map *map,
		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
		  symbol_filter_t filter, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	dso->symtab_type = syms_ss->type;
	dso->is_64_bit = syms_ss->is_64_bit;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols[map->type]);

	if (!syms_ss->symtab) {
		/*
		 * If the vmlinux is stripped, fail so we will fall back
		 * to using kallsyms. The vmlinux runtime symbols aren't
		 * of much use.
		 */
		if (dso->kernel)
			goto out_elf_end;

		syms_ss->symtab  = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso. For
	 * function mappings, flag the fixups.
	 */
	if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__is_a(&sym, map->type))
			continue;

		/*
		 * Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so they would confuse the profile
		 * output:
		 */
		if (ehdr.e_machine == EM_ARM) {
			if (!strcmp(elf_name, "$a") ||
			    !strcmp(elf_name, "$d") ||
			    !strcmp(elf_name, "$t"))
				continue;
		}

		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
							 sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have a value of SHN_ABS in their st_shndx) fail at
		 * elf_getscn(), and that marks the loading as a failure, so
		 * already-loaded symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/*
		 * On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it.
		 */
		if ((ehdr.e_machine == EM_ARM) &&
		    (map->type == MAP__FUNCTION) &&
		    (sym.st_value & 1))
			--sym.st_value;

		if (dso->kernel || kmodule) {
			char dso_name[PATH_MAX];

			/* Adjust symbol to map to file offset */
			if (adjust_kernel_syms)
				sym.st_value -= shdr.sh_addr - shdr.sh_offset;

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    dso->short_name_len)) == 0)
				goto new_symbol;

			if (strcmp(section_name, ".text") == 0) {
				/*
				 * The initial kernel mapping is based on
				 * kallsyms and identity maps. Overwrite it to
				 * map to the kernel dso.
				 */
				if (remap_kernel && dso->kernel) {
					remap_kernel = false;
					map->start = shdr.sh_addr +
						     ref_reloc(kmap);
					map->end = map->start + shdr.sh_size;
					map->pgoff = shdr.sh_offset;
					map->map_ip = map__map_ip;
					map->unmap_ip = map__unmap_ip;
					/* Ensure maps are correctly ordered */
					map_groups__remove(kmap->kmaps, map);
					map_groups__insert(kmap->kmaps, map);
				}

				/*
				 * The initial module mapping is based on
				 * /proc/modules mapped to offset zero.
				 * Overwrite it to map to the module dso.
				 */
				if (remap_kernel && kmodule) {
					remap_kernel = false;
					map->pgoff = shdr.sh_offset;
				}

				curr_map = map;
				curr_dso = dso;
				goto new_symbol;
			}

			if (!kmap)
				goto new_symbol;

			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", dso->short_name, section_name);

			curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				if (kmodule)
					start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
					goto out_elf_end;
				curr_dso->kernel = dso->kernel;
				curr_dso->long_name = dso->long_name;
				curr_dso->long_name_len = dso->long_name_len;
				curr_map = map__new2(start, curr_dso,
						     map->type);
				if (curr_map == NULL) {
					dso__delete(curr_dso);
					goto out_elf_end;
				}
				if (adjust_kernel_syms) {
					curr_map->start = shdr.sh_addr +
							  ref_reloc(kmap);
					curr_map->end = curr_map->start +
							shdr.sh_size;
					curr_map->pgoff = shdr.sh_offset;
				} else {
					curr_map->map_ip = identity__map_ip;
					curr_map->unmap_ip = identity__map_ip;
				}
				curr_dso->symtab_type = dso->symtab_type;
				map_groups__insert(kmap->kmaps, curr_map);
				/*
				 * The new DSO should go to the kernel DSOS
				 */
				dsos__add(&map->groups->machine->kernel_dsos,
					  curr_dso);
				dso__set_loaded(curr_dso, map->type);
			} else
				curr_dso = curr_map->dso;

			goto new_symbol;
		}

		if ((used_opd && runtime_ss->adjust_symbols)
				|| (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}
new_symbol:
		/*
		 * We need to figure out if the object was created from C++ sources
		 * DWARF DW_compile_unit has this, but we don't always have access
		 * to it...
		 */
		if (want_demangle(dso->kernel || kmodule)) {
			int demangle_flags = DMGL_NO_OPTS;
			if (verbose)
				demangle_flags = DMGL_PARAMS | DMGL_ANSI;

			demangled = bfd_demangle(NULL, elf_name, demangle_flags);
			if (demangled != NULL)
				elf_name = demangled;
		}
		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		if (filter && filter(curr_map, f))
			symbol__delete(f);
		else {
			symbols__insert(&curr_dso->symbols[curr_map->type], f);
			nr++;
		}
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		symbols__fixup_duplicate(&dso->symbols[map->type]);
		symbols__fixup_end(&dso->symbols[map->type]);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			__map_groups__fixup_end(kmap->kmaps, map->type);
		}
	}
	err = nr;
out_elf_end:
	return err;
}

static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}
		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;
		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}

int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
		    bool *is_64_bit)
{
	int err;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		return -1;

	if (is_64_bit)
		*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	err = elf_read_maps(elf, exe, mapfn, data);

	elf_end(elf);
	return err;
}

enum dso_type dso__type_fd(int fd)
{
	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
	GElf_Ehdr ehdr;
	Elf_Kind ek;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_end;

	if (gelf_getclass(elf) == ELFCLASS64) {
		dso_type = DSO__TYPE_64BIT;
		goto out_end;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL)
		goto out_end;

	if (ehdr.e_machine == EM_X86_64)
		dso_type = DSO__TYPE_X32BIT;
	else
		dso_type = DSO__TYPE_32BIT;
out_end:
	elf_end(elf);
out:
	return dso_type;
}

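/*
 * Copy @len bytes from @from at @from_offs to @to at @to_offs, one page at a
 * time.  Plain read()/write() is used because the source may be a proc file
 * such as /proc/kcore, which cannot be mmap'ed.
 */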
static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}

struct kcore {
	int fd;
	int elfclass;
	Elf *elf;
	GElf_Ehdr ehdr;
};

static int kcore__open(struct kcore *kcore, const char *filename)
{
	GElf_Ehdr *ehdr;

	kcore->fd = open(filename, O_RDONLY);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
	if (!kcore->elf)
		goto out_close;

	kcore->elfclass = gelf_getclass(kcore->elf);
	if (kcore->elfclass == ELFCLASSNONE)
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	return -1;
}

static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
		       bool temp)
{
	GElf_Ehdr *ehdr;

	kcore->elfclass = elfclass;

	if (temp)
		kcore->fd = mkstemp(filename);
	else
		kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
	if (!kcore->elf)
		goto out_close;

	if (!gelf_newehdr(kcore->elf, elfclass))
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	unlink(filename);
	return -1;
}

static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}

static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type      = kehdr->e_type;
	ehdr->e_machine   = kehdr->e_machine;
	ehdr->e_version   = kehdr->e_version;
	ehdr->e_entry     = 0;
	ehdr->e_shoff     = 0;
	ehdr->e_flags     = kehdr->e_flags;
	ehdr->e_phnum     = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum     = 0;
	ehdr->e_shstrndx  = 0;

	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff     = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff     = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize    = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}

static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
			   u64 addr, u64 len)
{
	GElf_Phdr gphdr;
	GElf_Phdr *phdr;

	phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
	if (!phdr)
		return -1;

	phdr->p_type   = PT_LOAD;
	phdr->p_flags  = PF_R | PF_W | PF_X;
	phdr->p_offset = offset;
	phdr->p_vaddr  = addr;
	phdr->p_paddr  = 0;
	phdr->p_filesz = len;
	phdr->p_memsz  = len;
	phdr->p_align  = page_size;

	if (!gelf_update_phdr(kcore->elf, idx, phdr))
		return -1;

	return 0;
}

static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}

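/*
 * phdr_data describes one program segment to be written to the extracted
 * kcore; kcore_copy_info collects the kallsyms/modules address ranges and the
 * resulting kernel and modules segments.
 */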
struct phdr_data {
	off_t offset;
	u64 addr;
	u64 len;
};

struct kcore_copy_info {
	u64 stext;
	u64 etext;
	u64 first_symbol;
	u64 last_symbol;
	u64 first_module;
	u64 last_module_symbol;
	struct phdr_data kernel_map;
	struct phdr_data modules_map;
};

static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
					u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (strchr(name, '[')) {
		if (start > kci->last_module_symbol)
			kci->last_module_symbol = start;
		return 0;
	}

	if (!kci->first_symbol || start < kci->first_symbol)
		kci->first_symbol = start;

	if (!kci->last_symbol || start > kci->last_symbol)
		kci->last_symbol = start;

	if (!strcmp(name, "_stext")) {
		kci->stext = start;
		return 0;
	}

	if (!strcmp(name, "_etext")) {
		kci->etext = start;
		return 0;
	}

	return 0;
}

static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
				      const char *dir)
{
	char kallsyms_filename[PATH_MAX];

	scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);

	if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
		return -1;

	if (kallsyms__parse(kallsyms_filename, kci,
			    kcore_copy__process_kallsyms) < 0)
		return -1;

	return 0;
}

static int kcore_copy__process_modules(void *arg,
				       const char *name __maybe_unused,
				       u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!kci->first_module || start < kci->first_module)
		kci->first_module = start;

	return 0;
}

static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
				     const char *dir)
{
	char modules_filename[PATH_MAX];

	scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);

	if (symbol__restricted_filename(modules_filename, "/proc/modules"))
		return -1;

	if (modules__parse(modules_filename, kci,
			   kcore_copy__process_modules) < 0)
		return -1;

	return 0;
}

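/*
 * Record in @p the part of the symbol range [@s, @e) that falls inside the
 * kcore program segment [@start, @end) mapped at file offset @pgoff.  Only
 * the first matching segment is recorded.
 */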
static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
			    u64 s, u64 e)
{
	if (p->addr || s < start || s >= end)
		return;

	p->addr = s;
	p->offset = (s - start) + pgoff;
	p->len = e < end ? e - s : end - s;
}

static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_copy_info *kci = data;
	u64 end = start + len;

	kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
			kci->etext);

	kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
			kci->last_module_symbol);

	return 0;
}

static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
{
	if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
		return -1;

	return 0;
}

static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
				 Elf *elf)
{
	if (kcore_copy__parse_kallsyms(kci, dir))
		return -1;

	if (kcore_copy__parse_modules(kci, dir))
		return -1;

	if (kci->stext)
		kci->stext = round_down(kci->stext, page_size);
	else
		kci->stext = round_down(kci->first_symbol, page_size);

	if (kci->etext) {
		kci->etext = round_up(kci->etext, page_size);
	} else if (kci->last_symbol) {
		kci->etext = round_up(kci->last_symbol, page_size);
		kci->etext += page_size;
	}

	kci->first_module = round_down(kci->first_module, page_size);

	if (kci->last_module_symbol) {
		kci->last_module_symbol = round_up(kci->last_module_symbol,
						   page_size);
		kci->last_module_symbol += page_size;
	}

	if (!kci->stext || !kci->etext)
		return -1;

	if (kci->first_module && !kci->last_module_symbol)
		return -1;

	return kcore_copy__read_maps(kci, elf);
}

static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
				 const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return copyfile_mode(from_filename, to_filename, 0400);
}

static int kcore_copy__unlink(const char *dir, const char *name)
{
	char filename[PATH_MAX];

	scnprintf(filename, PATH_MAX, "%s/%s", dir, name);

	return unlink(filename);
}

static int kcore_copy__compare_fds(int from, int to)
{
	char *buf_from;
	char *buf_to;
	ssize_t ret;
	size_t len;
	int err = -1;

	buf_from = malloc(page_size);
	buf_to = malloc(page_size);
	if (!buf_from || !buf_to)
		goto out;

	while (1) {
		/* Use read because mmap won't work on proc files */
		ret = read(from, buf_from, page_size);
		if (ret < 0)
			goto out;

		if (!ret)
			break;

		len = ret;

		if (readn(to, buf_to, len) != (int)len)
			goto out;

		if (memcmp(buf_from, buf_to, len))
			goto out;
	}

	err = 0;
out:
	free(buf_to);
	free(buf_from);
	return err;
}

static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int from, to, err = -1;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to < 0)
		goto out_close_from;

	err = kcore_copy__compare_fds(from, to);

	close(to);
out_close_from:
	close(from);
	return err;
}

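/*
 * Compare @name in @from_dir with the copy made in @to_dir, to detect that
 * the file changed while the copy was being made.
 */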
static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
				    const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return kcore_copy__compare_files(from_filename, to_filename);
}

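/*
 * A sketch of typical usage (the directory names are illustrative only):
 * copy the running kernel's symbol data into a cache directory with
 * kcore_copy("/proc", "/path/to/cache/dir").
 */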

/**
 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
 * @from_dir: from directory
 * @to_dir: to directory
 *
 * This function copies kallsyms, modules and kcore files from one directory to
 * another. kallsyms and modules are copied entirely. Only code segments are
 * copied from kcore. It is assumed that two segments suffice: one for the
 * kernel proper and one for all the modules. The code segments are determined
 * from kallsyms and modules files. The kernel map starts at _stext or the
 * lowest function symbol, and ends at _etext or the highest function symbol.
 * The module map starts at the lowest module address and ends at the highest
 * module symbol. Start addresses are rounded down to the nearest page. End
 * addresses are rounded up to the nearest page. An extra page is added to the
 * highest kernel symbol and highest module symbol to, hopefully, encompass that
 * symbol too. Because it contains only code sections, the resulting kcore is
 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
 * is not the same for the kernel map and the modules map. That happens because
 * the data is copied adjacently whereas the original kcore has gaps. Finally,
 * kallsyms and modules files are compared with their copies to check that
 * modules have not been loaded or unloaded while the copies were taking place.
 *
 * Return: %0 on success, %-1 on failure.
 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 2;
	int idx = 0, err = -1;
	off_t offset = page_size, sz, modules_offset = 0;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];

	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	if (!kci.modules_map.addr)
		count -= 1;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
			    kci.kernel_map.len))
		goto out_extract_close;

	if (kci.modules_map.addr) {
		modules_offset = offset + kci.kernel_map.len;
		if (kcore__add_phdr(&extract, idx, modules_offset,
				    kci.modules_map.addr, kci.modules_map.len))
			goto out_extract_close;
	}

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
		       kci.kernel_map.len))
		goto out_extract_close;

	if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
					 extract.fd, modules_offset,
					 kci.modules_map.len))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	return err;
}

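/*
 * Extract a single chunk of kcore, described by @kce (kcore_filename, offs,
 * addr, len), into a temporary ELF file whose name is written back to
 * kce->extract_filename.
 */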
int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;
	int idx = 0, err = -1;
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}

void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}

void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}