// SPDX-License-Identifier: GPL-2.0
#include "symbol.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include "map.h"
#include "thread.h"
#include "vdso.h"
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include <linux/string.h>
#include "srcline.h"
#include "namespaces.h"
#include "unwind.h"
#include "srccode.h"

static void __maps__insert(struct maps *maps, struct map *map);
static void __maps__insert_name(struct maps *maps, struct map *map);

static inline int is_anon_memory(const char *filename, u32 flags)
{
	return flags & MAP_HUGETLB ||
	       !strcmp(filename, "//anon") ||
	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", 6) ||
	       !strncmp(filename, "/SYSV", 5)  ||
	       !strcmp(filename, "[heap]");
}

static inline int is_android_lib(const char *filename)
{
	return !strncmp(filename, "/data/app-lib", 13) ||
	       !strncmp(filename, "/system/lib", 11);
}

static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (!strncmp(filename, "/data/app-lib", 13)) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	if (!strncmp(filename, "/system/lib/", 11)) {
		char *ndk, *app;
		const char *arch;
		size_t ndk_length;
		size_t app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

		if (!arch)
			return false;

		new_length = 27 + ndk_length +
			     app_length + lib_length +
			     strlen(arch);

		if (new_length > PATH_MAX)
			return false;
		snprintf(newfilename, new_length,
			 "%s/platforms/%s/arch-%s/usr/lib/%s",
			 ndk, app, arch, libname);

		return true;
	}
	return false;
}

void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map->start    = start;
	map->end      = end;
	map->pgoff    = pgoff;
	map->reloc    = 0;
	map->dso      = dso__get(dso);
	map->map_ip   = map__map_ip;
	map->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&map->rb_node);
	map->groups   = NULL;
	map->erange_warned = false;
	refcount_set(&map->refcnt, 1);
}

/*
 * Build a map for one mmap record: classify @filename (anonymous memory,
 * vdso, Android library, memory without a backing DSO), rewrite executable
 * anonymous maps to the /tmp/perf-<pid>.map convention, then find or create
 * the backing DSO in @machine and initialize the map with it.  Returns NULL
 * on allocation failure or when no DSO can be found or created.
 */
struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
		     u64 ino_gen, u32 prot, u32 flags, char *filename,
		     struct thread *thread)
{
	struct map *map = malloc(sizeof(*map));
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;

	if (map != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename, flags);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		map->maj = d_maj;
		map->min = d_min;
		map->ino = ino;
		map->ino_generation = ino_gen;
		map->prot = prot;
		map->flags = flags;
		nsi = nsinfo__get(thread->nsinfo);

		if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
			snprintf(newfilename, sizeof(newfilename),
				 "/tmp/perf-%d.map", nsi->pid);
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			/* The vdso maps are always on the host and not the
			 * container.  Ensure that we don't use setns to look
			 * them up.
			 */
			nnsi = nsinfo__copy(nsi);
			if (nnsi) {
				nsinfo__put(nsi);
				nnsi->need_setns = false;
				nsi = nnsi;
			}
			pgoff = 0;
			dso = machine__findnew_vdso(machine, thread);
		} else
			dso = machine__findnew_dso(machine, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(map, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->map_ip = map->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (!(prot & PROT_EXEC))
				dso__set_loaded(dso);
		}
		dso->nsinfo = nsi;
		dso__put(dso);
	}
	return map;
out_delete:
	nsinfo__put(nsi);
	free(map);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso)
{
	struct map *map = calloc(1, (sizeof(*map) +
				     (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, start, 0, 0, dso);
	}

	return map;
}

/*
 * Use this and __map__is_kmodule() for map instances that are in
 * machine->kmaps, and thus have map->groups->machine all properly set, to
 * disambiguate between the kernel and modules.
 *
 * When the need arises, introduce map__is_{kernel,kmodule}() that
 * checks (map->groups != NULL && map->groups->machine != NULL &&
 * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
 */
bool __map__is_kernel(const struct map *map)
{
	return machine__kernel_map(map->groups->machine) == map;
}

bool __map__is_extra_kernel_map(const struct map *map)
{
	struct kmap *kmap = __map__kmap((struct map *)map);

	return kmap && kmap->name[0];
}

bool __map__is_bpf_prog(const struct map *map)
{
	const char *name;

	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return true;

	/*
	 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
	 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
	 * guess the type based on name.
	 */
	name = map->dso->short_name;
	return name && (strstr(name, "bpf_prog_") == name);
}

bool map__has_symbols(const struct map *map)
{
	return dso__has_symbols(map->dso);
}

static void map__exit(struct map *map)
{
	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
	dso__zput(map->dso);
}

void map__delete(struct map *map)
{
	map__exit(map);
	free(map);
}

void map__put(struct map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		map__delete(map);
}

/*
 * Adjust the map boundaries from the symbols loaded for its DSO:
 * map__fixup_start() sets ->start to the first symbol's start address and
 * map__fixup_end() sets ->end to the last symbol's end address.  Used for
 * maps created with map__new2(), whose extent is only known once their
 * symbols have been loaded.
 */
void map__fixup_start(struct map *map)
{
	struct rb_root_cached *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_first_cached(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->start = sym->start;
	}
}

void map__fixup_end(struct map *map)
{
	struct rb_root_cached *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_last(&symbols->rb_root);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

/*
 * Load the symbol table of this map's DSO if it hasn't been loaded yet.
 * Returns 0 when symbols are available and -1 otherwise, emitting a debug
 * message that includes the build-id when one is known.
 */
int map__load(struct map *map)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso))
		return 0;

	nr = dso__load(map->dso, map);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[SBUILD_ID_SIZE];

			build_id__sprintf(map->dso->build_id,
					  sizeof(map->dso->build_id),
					  sbuild_id);
			pr_debug("%s with build id %s not found", name, sbuild_id);
		} else
			pr_debug("Failed to open %s", name);

		pr_debug(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_debug("%.*s was updated (is prelink enabled?). "
				"Restart the long running apps that use it!\n",
				(int)real_len, name);
		} else {
			pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}

struct symbol *map__find_symbol(struct map *map, u64 addr)
{
	if (map__load(map) < 0)
		return NULL;

	return dso__find_symbol(map->dso, addr);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
{
	if (map__load(map) < 0)
		return NULL;

	if (!dso__sorted_by_name(map->dso))
		dso__sort_by_name(map->dso);

	return dso__find_symbol_by_name(map->dso, name);
}

struct map *map__clone(struct map *from)
{
	struct map *map = memdup(from, sizeof(*map));

	if (map != NULL) {
		refcount_set(&map->refcnt, 1);
		RB_CLEAR_NODE(&map->rb_node);
		dso__get(map->dso);
		map->groups = NULL;
	}

	return map;
}

size_t map__fprintf(struct map *map, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map->start, map->end, map->pgoff, map->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	const char *dsoname = "[unknown]";

	if (map && map->dso) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else
			dsoname = map->dso->name;
	}

	return fprintf(fp, "%s", dsoname);
}

char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
{
	if (map == NULL)
		return SRCLINE_UNKNOWN;
	return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
}

int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
			 FILE *fp)
{
	int ret = 0;

	if (map && map->dso) {
		char *srcline = map__srcline(map, addr, NULL);
		if (srcline != SRCLINE_UNKNOWN)
			ret = fprintf(fp, "%s%s", prefix, srcline);
		free_srcline(srcline);
	}
	return ret;
}

/*
 * Print the source code line for @addr, prefixed with its line number.
 * @state caches the last printed file/line so consecutive calls for the same
 * source line print it only once.
 */
int map__fprintf_srccode(struct map *map, u64 addr,
			 FILE *fp,
			 struct srccode_state *state)
{
	char *srcfile;
	int ret = 0;
	unsigned line;
	int len;
	char *srccode;

	if (!map || !map->dso)
		return 0;
	srcfile = get_srcline_split(map->dso,
				    map__rip_2objdump(map, addr),
				    &line);
	if (!srcfile)
		return 0;

	/* Avoid redundant printing */
	if (state &&
	    state->srcfile &&
	    !strcmp(state->srcfile, srcfile) &&
	    state->line == line) {
		free(srcfile);
		return 0;
	}

	srccode = find_sourceline(srcfile, line, &len);
	if (!srccode)
		goto out_free_line;

	ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
	state->srcfile = srcfile;
	state->line = line;
	return ret;

out_free_line:
	free(srcfile);
	return ret;
}

void srccode_state_free(struct srccode_state *state)
{
	zfree(&state->srcfile);
	state->line = 0;
}

/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	struct kmap *kmap = __map__kmap(map);

	/*
	 * vmlinux does not have program headers for PTI entry trampolines and
	 * kcore may not either. However the trampoline object code is on the
	 * main kernel map, so just use that instead.
	 */
	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
		struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);

		if (kernel_map)
			map = kernel_map;
	}

	if (!map->dso->adjust_symbols)
		return rip;

	if (map->dso->rel)
		return rip - map->pgoff;

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return rip + map->dso->text_offset;

	return map->unmap_ip(map, rip) - map->reloc;
}

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	if (!map->dso->adjust_symbols)
		return map->unmap_ip(map, ip);

	if (map->dso->rel)
		return map->unmap_ip(map, ip + map->pgoff);

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return map->unmap_ip(map, ip - map->dso->text_offset);

	return ip + map->reloc;
}
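
/*
 * A 'struct maps' keeps its maps in two rbtrees protected by a reader-writer
 * lock: ->entries is ordered by map->start for address lookups, ->names by
 * the DSO short name.  Each tree holds its own reference on every inserted
 * map.
 */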
static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	maps->names = RB_ROOT;
	init_rwsem(&maps->lock);
}

void map_groups__init(struct map_groups *mg, struct machine *machine)
{
	maps__init(&mg->maps);
	mg->machine = machine;
	refcount_set(&mg->refcnt, 1);
}

void map_groups__insert(struct map_groups *mg, struct map *map)
{
	maps__insert(&mg->maps, map);
	map->groups = mg;
}

static void __maps__purge(struct maps *maps)
{
	struct rb_root *root = &maps->entries;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_init(&pos->rb_node, root);
		map__put(pos);
	}
}

static void __maps__purge_names(struct maps *maps)
{
	struct rb_root *root = &maps->names;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node_name);

		next = rb_next(&pos->rb_node_name);
		rb_erase_init(&pos->rb_node_name, root);
		map__put(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	down_write(&maps->lock);
	__maps__purge(maps);
	__maps__purge_names(maps);
	up_write(&maps->lock);
}

void map_groups__exit(struct map_groups *mg)
{
	maps__exit(&mg->maps);
}

bool map_groups__empty(struct map_groups *mg)
{
	return !maps__first(&mg->maps);
}

struct map_groups *map_groups__new(struct machine *machine)
{
	struct map_groups *mg = malloc(sizeof(*mg));

	if (mg != NULL)
		map_groups__init(mg, machine);

	return mg;
}

void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	free(mg);
}

void map_groups__put(struct map_groups *mg)
{
	if (mg && refcount_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       u64 addr, struct map **mapp)
{
	struct map *map = map_groups__find(mg, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr));
	}

	return NULL;
}

static bool map__contains_symbol(struct map *map, struct symbol *sym)
{
	u64 ip = map->unmap_ip(map, sym->start);

	return ip >= map->start && ip < map->end;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
					 struct map **mapp)
{
	struct symbol *sym;
	struct rb_node *nd;

	down_read(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		sym = map__find_symbol_by_name(pos, name);

		if (sym == NULL)
			continue;
		if (!map__contains_symbol(pos, sym)) {
			sym = NULL;
			continue;
		}
		if (mapp != NULL)
			*mapp = pos;
		goto out;
	}

	sym = NULL;
out:
	up_read(&maps->lock);
	return sym;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       const char *name,
					       struct map **mapp)
{
	return maps__find_symbol_by_name(&mg->maps, name, mapp);
}
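
/*
 * Resolve an addr_map_symbol: if @ams->addr falls outside ams->map, look the
 * address up again in the map's map_groups, then fill in the map-relative
 * address (al_addr) and the symbol.  Returns 0 if a symbol was found, -1
 * otherwise.
 */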
int map_groups__find_ams(struct addr_map_symbol *ams)
{
	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
		if (ams->map->groups == NULL)
			return -1;
		ams->map = map_groups__find(ams->map->groups, ams->addr);
		if (ams->map == NULL)
			return -1;
	}

	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
	ams->sym = map__find_symbol(ams->map, ams->al_addr);

	return ams->sym ? 0 : -1;
}

static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct rb_node *nd;

	down_read(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	up_read(&maps->lock);

	return printed;
}

size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
{
	return maps__fprintf(&mg->maps, fp);
}

static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
	__maps__insert(&mg->maps, map);
	__maps__insert_name(&mg->maps, map);
	map->groups = mg;
}
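
/*
 * Make room for @map in @maps: every existing map that overlaps it is
 * removed from the tree, and any non-overlapped leading/trailing parts are
 * re-inserted as new 'before'/'after' clones.  Overlaps are reported on @fp
 * when verbose >= 2.
 */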
static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next, *first;
	int err = 0;

	down_write(&maps->lock);

	root = &maps->entries;

	/*
	 * Find first map where end > map->start.
	 * Same as find_vma() in kernel.
	 */
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		if (pos->end > map->start) {
			first = next;
			if (pos->start <= map->start)
				break;
			next = next->rb_left;
		} else
			next = next->rb_right;
	}

	next = first;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (pos->start >= map->end)
			break;

		if (verbose >= 2) {

			if (use_browser) {
				pr_debug("overlapping maps in %s (disable tui for more info)\n",
					 map->dso->name);
			} else {
				fputs("overlapping maps:\n", fp);
				map__fprintf(map, fp);
				map__fprintf(pos, fp);
			}
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map->start;
			__map_groups__insert(pos->groups, before);
			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map->end;
			__map_groups__insert(pos->groups, after);
			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos);

		if (err)
			goto out;
	}

	err = 0;
out:
	up_write(&maps->lock);
	return err;
}

int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps, map, fp);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct thread *thread, struct map_groups *parent)
{
	struct map_groups *mg = thread->mg;
	int err = -ENOMEM;
	struct map *map;
	struct maps *maps = &parent->maps;

	down_read(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *new = map__clone(map);
		if (new == NULL)
			goto out_unlock;

		err = unwind__prepare_access(thread, new, NULL);
		if (err)
			goto out_unlock;

		map_groups__insert(mg, new);
		map__put(new);
	}

	err = 0;
out_unlock:
	up_read(&maps->lock);
	return err;
}
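
/*
 * Insert @map into the ->entries tree (keyed by start address) or the
 * ->names tree (keyed by DSO short name), taking a reference on it.  These
 * helpers expect maps->lock to be held for writing; maps__insert() below
 * takes the lock itself.
 */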
static void __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->entries.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, &maps->entries);
	map__get(map);
}

static void __maps__insert_name(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->names.rb_node;
	struct rb_node *parent = NULL;
	struct map *m;
	int rc;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node_name);
		rc = strcmp(m->dso->short_name, map->dso->short_name);
		if (rc < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&map->rb_node_name, parent, p);
	rb_insert_color(&map->rb_node_name, &maps->names);
	map__get(map);
}

void maps__insert(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	__maps__insert(maps, map);
	__maps__insert_name(maps, map);
	up_write(&maps->lock);
}

static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);

	rb_erase_init(&map->rb_node_name, &maps->names);
	map__put(map);
}

void maps__remove(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	__maps__remove(maps, map);
	up_write(&maps->lock);
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node *p;
	struct map *m;

	down_read(&maps->lock);

	p = maps->entries.rb_node;
	while (p != NULL) {
		m = rb_entry(p, struct map, rb_node);
		if (ip < m->start)
			p = p->rb_left;
		else if (ip >= m->end)
			p = p->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	up_read(&maps->lock);
	return m;
}

struct map *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(&maps->entries);

	if (first)
		return rb_entry(first, struct map, rb_node);
	return NULL;
}

struct map *map__next(struct map *map)
{
	struct rb_node *next = rb_next(&map->rb_node);

	if (next)
		return rb_entry(next, struct map, rb_node);
	return NULL;
}

struct kmap *__map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel)
		return NULL;
	return (struct kmap *)(map + 1);
}

struct kmap *map__kmap(struct map *map)
{
	struct kmap *kmap = __map__kmap(map);

	if (!kmap)
		pr_err("Internal error: map__kmap with a non-kernel map\n");
	return kmap;
}

struct map_groups *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}