// SPDX-License-Identifier: GPL-2.0
#include "symbol.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include "map.h"
#include "thread.h"
#include "vdso.h"
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include <linux/string.h>
#include "srcline.h"
#include "namespaces.h"
#include "unwind.h"
#include "srccode.h"

static void __maps__insert(struct maps *maps, struct map *map);
static void __maps__insert_name(struct maps *maps, struct map *map);

static inline int is_anon_memory(const char *filename, u32 flags)
{
	return flags & MAP_HUGETLB ||
	       !strcmp(filename, "//anon") ||
	       !strncmp(filename, "/dev/zero", sizeof("/dev/zero") - 1) ||
	       !strncmp(filename, "/anon_hugepage", sizeof("/anon_hugepage") - 1);
}

static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", 6) ||
	       !strncmp(filename, "/SYSV", 5)  ||
	       !strcmp(filename, "[heap]");
}

static inline int is_android_lib(const char *filename)
{
	return !strncmp(filename, "/data/app-lib", 13) ||
	       !strncmp(filename, "/system/lib", 11);
}

static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (!strncmp(filename, "/data/app-lib", 13)) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	if (!strncmp(filename, "/system/lib/", 11)) {
		char *ndk, *app;
		const char *arch;
		size_t ndk_length;
		size_t app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

		if (!arch)
			return false;

		new_length = 27 + ndk_length +
			     app_length + lib_length
			   + strlen(arch);

		if (new_length > PATH_MAX)
			return false;
		snprintf(newfilename, new_length,
			"%s/platforms/%s/arch-%s/usr/lib/%s",
			ndk, app, arch, libname);

		return true;
	}
	return false;
}

void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map->start = start;
	map->end = end;
	map->pgoff = pgoff;
	map->reloc = 0;
	map->dso = dso__get(dso);
	map->map_ip = map__map_ip;
	map->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&map->rb_node);
	map->groups = NULL;
	map->erange_warned = false;
	refcount_set(&map->refcnt, 1);
}

struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, u32 d_maj, u32 d_min, u64 ino,
		     u64 ino_gen, u32 prot, u32 flags, char *filename,
		     struct thread *thread)
{
	struct map *map = malloc(sizeof(*map));
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;

	if (map != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename, flags);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		map->maj = d_maj;
		map->min = d_min;
		map->ino = ino;
		map->ino_generation = ino_gen;
		map->prot = prot;
		map->flags = flags;
		nsi = nsinfo__get(thread->nsinfo);

		if ((anon || no_dso) && nsi && (prot & PROT_EXEC)) {
			snprintf(newfilename, sizeof(newfilename),
				 "/tmp/perf-%d.map", nsi->pid);
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			/* The vdso maps are always on the host and not the
			 * container.  Ensure that we don't use setns to look
			 * them up.
			 */
			nnsi = nsinfo__copy(nsi);
			if (nnsi) {
				nsinfo__put(nsi);
				nnsi->need_setns = false;
				nsi = nnsi;
			}
			pgoff = 0;
			dso = machine__findnew_vdso(machine, thread);
		} else
			dso = machine__findnew_dso(machine, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(map, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			map->map_ip = map->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (!(prot & PROT_EXEC))
				dso__set_loaded(dso);
		}
		dso->nsinfo = nsi;
		dso__put(dso);
	}
	return map;
out_delete:
	nsinfo__put(nsi);
	free(map);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 */
struct map *map__new2(u64 start, struct dso *dso)
{
	struct map *map = calloc(1, (sizeof(*map) +
				     (dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, start, 0, 0, dso);
	}

	return map;
}

/*
 * Use this and __map__is_kmodule() for map instances that are in
 * machine->kmaps, and thus have map->groups->machine all properly set, to
 * disambiguate between the kernel and modules.
 *
 * When the need arises, introduce map__is_{kernel,kmodule}() that
 * checks (map->groups != NULL && map->groups->machine != NULL &&
 * map->dso->kernel) before calling __map__is_{kernel,kmodule}().
 */
bool __map__is_kernel(const struct map *map)
{
	return machine__kernel_map(map->groups->machine) == map;
}

bool __map__is_extra_kernel_map(const struct map *map)
{
	struct kmap *kmap = __map__kmap((struct map *)map);

	return kmap && kmap->name[0];
}

bool __map__is_bpf_prog(const struct map *map)
{
	const char *name;

	if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return true;

	/*
	 * If PERF_RECORD_BPF_EVENT is not included, the dso will not have
	 * type of DSO_BINARY_TYPE__BPF_PROG_INFO. In such cases, we can
	 * guess the type based on name.
	 */
	name = map->dso->short_name;
	return name && (strstr(name, "bpf_prog_") == name);
}

bool map__has_symbols(const struct map *map)
{
	return dso__has_symbols(map->dso);
}

static void map__exit(struct map *map)
{
	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
	dso__zput(map->dso);
}

void map__delete(struct map *map)
{
	map__exit(map);
	free(map);
}

void map__put(struct map *map)
{
	if (map && refcount_dec_and_test(&map->refcnt))
		map__delete(map);
}

void map__fixup_start(struct map *map)
{
	struct rb_root_cached *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_first_cached(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->start = sym->start;
	}
}

void map__fixup_end(struct map *map)
{
	struct rb_root_cached *symbols = &map->dso->symbols;
	struct rb_node *nd = rb_last(&symbols->rb_root);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

int map__load(struct map *map)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso))
		return 0;

	nr = dso__load(map->dso, map);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[SBUILD_ID_SIZE];

			build_id__sprintf(map->dso->build_id,
					  sizeof(map->dso->build_id),
					  sbuild_id);
			pr_debug("%s with build id %s not found", name, sbuild_id);
		} else
			pr_debug("Failed to open %s", name);

		pr_debug(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_debug("%.*s was updated (is prelink enabled?). "
				 "Restart the long running apps that use it!\n",
				 (int)real_len, name);
		} else {
			pr_debug("no symbols found in %s, maybe install a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}

struct symbol *map__find_symbol(struct map *map, u64 addr)
{
	if (map__load(map) < 0)
		return NULL;

	return dso__find_symbol(map->dso, addr);
}

struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
{
	if (map__load(map) < 0)
		return NULL;

	if (!dso__sorted_by_name(map->dso))
		dso__sort_by_name(map->dso);

	return dso__find_symbol_by_name(map->dso, name);
}

struct map *map__clone(struct map *from)
{
	struct map *map = memdup(from, sizeof(*map));

	if (map != NULL) {
		refcount_set(&map->refcnt, 1);
		RB_CLEAR_NODE(&map->rb_node);
		dso__get(map->dso);
		map->groups = NULL;
	}

	return map;
}

size_t map__fprintf(struct map *map, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map->start, map->end, map->pgoff, map->dso->name);
}

size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	char buf[symbol_conf.pad_output_len_dso + 1];
	const char *dsoname = "[unknown]";

	if (map && map->dso) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else
			dsoname = map->dso->name;
	}

	if (symbol_conf.pad_output_len_dso) {
		scnprintf_pad(buf, symbol_conf.pad_output_len_dso, "%s", dsoname);
		dsoname = buf;
	}

	return fprintf(fp, "%s", dsoname);
}

char *map__srcline(struct map *map, u64 addr, struct symbol *sym)
{
	if (map == NULL)
		return SRCLINE_UNKNOWN;
	return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr);
}

int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
			 FILE *fp)
{
	int ret = 0;

	if (map && map->dso) {
		char *srcline = map__srcline(map, addr, NULL);
		if (srcline != SRCLINE_UNKNOWN)
			ret = fprintf(fp, "%s%s", prefix, srcline);
		free_srcline(srcline);
	}
	return ret;
}

int map__fprintf_srccode(struct map *map, u64 addr,
			 FILE *fp,
			 struct srccode_state *state)
{
	char *srcfile;
	int ret = 0;
	unsigned line;
	int len;
	char *srccode;

	if (!map || !map->dso)
		return 0;
	srcfile = get_srcline_split(map->dso,
				    map__rip_2objdump(map, addr),
				    &line);
	if (!srcfile)
		return 0;

	/* Avoid redundant printing */
	if (state &&
	    state->srcfile &&
	    !strcmp(state->srcfile, srcfile) &&
	    state->line == line) {
		free(srcfile);
		return 0;
	}

	srccode = find_sourceline(srcfile, line, &len);
	if (!srccode)
		goto out_free_line;

	ret = fprintf(fp, "|%-8d %.*s", line, len, srccode);
	state->srcfile = srcfile;
	state->line = line;
	return ret;

out_free_line:
	free(srcfile);
	return ret;
}


void srccode_state_free(struct srccode_state *state)
{
	zfree(&state->srcfile);
	state->line = 0;
}

/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	struct kmap *kmap = __map__kmap(map);

	/*
	 * vmlinux does not have program headers for PTI entry trampolines and
	 * kcore may not either. However the trampoline object code is on the
	 * main kernel map, so just use that instead.
	 */
	if (kmap && is_entry_trampoline(kmap->name) && kmap->kmaps && kmap->kmaps->machine) {
		struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine);

		if (kernel_map)
			map = kernel_map;
	}

	if (!map->dso->adjust_symbols)
		return rip;

	if (map->dso->rel)
		return rip - map->pgoff;

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return rip + map->dso->text_offset;

	return map->unmap_ip(map, rip) - map->reloc;
}
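
/*
 * Worked example for the last case above (hypothetical numbers, and
 * assuming the default map__unmap_ip() from map.h, i.e.
 * ip + map->start - map->pgoff): with map->start = 0x400000,
 * map->pgoff = 0 and map->reloc = 0, a dso-relative rip of 0x1234 maps to
 *
 *	map->unmap_ip(map, 0x1234) - map->reloc = 0x401234
 *
 * which is the kind of absolute address objdump expects for ET_EXEC-like
 * objects.
 */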

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	if (!map->dso->adjust_symbols)
		return map->unmap_ip(map, ip);

	if (map->dso->rel)
		return map->unmap_ip(map, ip + map->pgoff);

	/*
	 * kernel modules also have DSO_TYPE_USER in dso->kernel,
	 * but all kernel modules are ET_REL, so won't get here.
	 */
	if (map->dso->kernel == DSO_TYPE_USER)
		return map->unmap_ip(map, ip - map->dso->text_offset);

	return ip + map->reloc;
}

static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	maps->names = RB_ROOT;
	init_rwsem(&maps->lock);
}

void map_groups__init(struct map_groups *mg, struct machine *machine)
{
	maps__init(&mg->maps);
	mg->machine = machine;
	refcount_set(&mg->refcnt, 1);
}

void map_groups__insert(struct map_groups *mg, struct map *map)
{
	maps__insert(&mg->maps, map);
	map->groups = mg;
}

static void __maps__purge(struct maps *maps)
{
	struct rb_root *root = &maps->entries;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_init(&pos->rb_node, root);
		map__put(pos);
	}
}

static void __maps__purge_names(struct maps *maps)
{
	struct rb_root *root = &maps->names;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node_name);

		next = rb_next(&pos->rb_node_name);
		rb_erase_init(&pos->rb_node_name, root);
		map__put(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	down_write(&maps->lock);
	__maps__purge(maps);
	__maps__purge_names(maps);
	up_write(&maps->lock);
}

void map_groups__exit(struct map_groups *mg)
{
	maps__exit(&mg->maps);
}

bool map_groups__empty(struct map_groups *mg)
{
	return !maps__first(&mg->maps);
}

struct map_groups *map_groups__new(struct machine *machine)
{
	struct map_groups *mg = malloc(sizeof(*mg));

	if (mg != NULL)
		map_groups__init(mg, machine);

	return mg;
}

void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	free(mg);
}

void map_groups__put(struct map_groups *mg)
{
	if (mg && refcount_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}

struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       u64 addr, struct map **mapp)
{
	struct map *map = map_groups__find(mg, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr));
	}

	return NULL;
}

static bool map__contains_symbol(struct map *map, struct symbol *sym)
{
	u64 ip = map->unmap_ip(map, sym->start);

	return ip >= map->start && ip < map->end;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
					 struct map **mapp)
{
	struct symbol *sym;
	struct rb_node *nd;

	down_read(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		sym = map__find_symbol_by_name(pos, name);

		if (sym == NULL)
			continue;
		if (!map__contains_symbol(pos, sym)) {
			sym = NULL;
			continue;
		}
		if (mapp != NULL)
			*mapp = pos;
		goto out;
	}

	sym = NULL;
out:
	up_read(&maps->lock);
	return sym;
}

struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       const char *name,
					       struct map **mapp)
{
	return maps__find_symbol_by_name(&mg->maps, name, mapp);
}
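
/*
 * Usage sketch for the lookup helpers above (illustrative; "mg" and
 * "sample_addr" are placeholders for a thread's map groups and a sampled
 * address): the map is found by address, the address is translated into
 * the dso's address space with map->map_ip(), and the symbol is looked up
 * there:
 *
 *	struct map *map = NULL;
 *	struct symbol *sym = map_groups__find_symbol(mg, sample_addr, &map);
 */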

int map_groups__find_ams(struct addr_map_symbol *ams)
{
	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
		if (ams->map->groups == NULL)
			return -1;
		ams->map = map_groups__find(ams->map->groups, ams->addr);
		if (ams->map == NULL)
			return -1;
	}

	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
	ams->sym = map__find_symbol(ams->map, ams->al_addr);

	return ams->sym ? 0 : -1;
}

static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct rb_node *nd;

	down_read(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	up_read(&maps->lock);

	return printed;
}

size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
{
	return maps__fprintf(&mg->maps, fp);
}

static void __map_groups__insert(struct map_groups *mg, struct map *map)
{
	__maps__insert(&mg->maps, map);
	__maps__insert_name(&mg->maps, map);
	map->groups = mg;
}

static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next, *first;
	int err = 0;

	down_write(&maps->lock);

	root = &maps->entries;

	/*
	 * Find first map where end > map->start.
	 * Same as find_vma() in kernel.
	 */
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		if (pos->end > map->start) {
			first = next;
			if (pos->start <= map->start)
				break;
			next = next->rb_left;
		} else
			next = next->rb_right;
	}

	next = first;
	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (pos->start >= map->end)
			break;

		if (verbose >= 2) {

			if (use_browser) {
				pr_debug("overlapping maps in %s (disable tui for more info)\n",
					 map->dso->name);
			} else {
				fputs("overlapping maps:\n", fp);
				map__fprintf(map, fp);
				map__fprintf(pos, fp);
			}
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map->start;
			__map_groups__insert(pos->groups, before);
			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map->end;
			__map_groups__insert(pos->groups, after);
			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos);

		if (err)
			goto out;
	}

	err = 0;
out:
	up_write(&maps->lock);
	return err;
}

int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps, map, fp);
}
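
/*
 * Worked example of the splitting above (hypothetical addresses): if an
 * existing map covers [0x1000, 0x5000) and a new map covering
 * [0x2000, 0x3000) comes in, the old map is removed and replaced by two
 * clones, "before" covering [0x1000, 0x2000) and "after" covering
 * [0x3000, 0x5000), leaving [0x2000, 0x3000) to the new map that the
 * caller then inserts.
 */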

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct thread *thread, struct map_groups *parent)
{
	struct map_groups *mg = thread->mg;
	int err = -ENOMEM;
	struct map *map;
	struct maps *maps = &parent->maps;

	down_read(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *new = map__clone(map);
		if (new == NULL)
			goto out_unlock;

		err = unwind__prepare_access(thread, new, NULL);
		if (err)
			goto out_unlock;

		map_groups__insert(mg, new);
		map__put(new);
	}

	err = 0;
out_unlock:
	up_read(&maps->lock);
	return err;
}

static void __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->entries.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, &maps->entries);
	map__get(map);
}

static void __maps__insert_name(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->names.rb_node;
	struct rb_node *parent = NULL;
	struct map *m;
	int rc;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node_name);
		rc = strcmp(m->dso->short_name, map->dso->short_name);
		if (rc < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&map->rb_node_name, parent, p);
	rb_insert_color(&map->rb_node_name, &maps->names);
	map__get(map);
}

void maps__insert(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	__maps__insert(maps, map);
	__maps__insert_name(maps, map);
	up_write(&maps->lock);
}
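
/*
 * Reference counting sketch (illustrative): a map inserted above is pinned
 * by both rb-trees, one reference from "entries" and one from "names", on
 * top of whatever the caller holds:
 *
 *	struct map *map = map__new(...);	// refcnt == 1 (caller)
 *	maps__insert(maps, map);		// refcnt == 3
 *	map__put(map);				// caller drops its reference
 *
 * maps__remove() below drops the two tree references again.
 */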

static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);

	rb_erase_init(&map->rb_node_name, &maps->names);
	map__put(map);
}

void maps__remove(struct maps *maps, struct map *map)
{
	down_write(&maps->lock);
	__maps__remove(maps, map);
	up_write(&maps->lock);
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node *p;
	struct map *m;

	down_read(&maps->lock);

	p = maps->entries.rb_node;
	while (p != NULL) {
		m = rb_entry(p, struct map, rb_node);
		if (ip < m->start)
			p = p->rb_left;
		else if (ip >= m->end)
			p = p->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	up_read(&maps->lock);
	return m;
}

struct map *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(&maps->entries);

	if (first)
		return rb_entry(first, struct map, rb_node);
	return NULL;
}

struct map *map__next(struct map *map)
{
	struct rb_node *next = rb_next(&map->rb_node);

	if (next)
		return rb_entry(next, struct map, rb_node);
	return NULL;
}

struct kmap *__map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel)
		return NULL;
	return (struct kmap *)(map + 1);
}

struct kmap *map__kmap(struct map *map)
{
	struct kmap *kmap = __map__kmap(map);

	if (!kmap)
		pr_err("Internal error: map__kmap with a non-kernel map\n");
	return kmap;
}

struct map_groups *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}