#include "symbol.h"
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include "map.h"
#include "thread.h"
#include "strlist.h"
#include "vdso.h"
#include "build-id.h"
#include "util.h"
#include "debug.h"
#include "machine.h"
#include <linux/string.h>

static void __maps__insert(struct maps *maps, struct map *map);

/* Human readable names for the map types, indexed by enum map_type. */
const char *map_type__name[MAP__NR_TYPES] = {
	[MAP__FUNCTION] = "Functions",
	[MAP__VARIABLE] = "Variables",
};

/* True if @filename denotes anonymous memory (no file backing the mapping). */
static inline int is_anon_memory(const char *filename)
{
	return !strcmp(filename, "//anon") ||
	       !strcmp(filename, "/dev/zero (deleted)") ||
	       !strcmp(filename, "/anon_hugepage (deleted)");
}

/*
 * True for mappings that have no DSO to load symbols from:
 * thread stacks, SysV shared memory segments and the heap.
 */
static inline int is_no_dso_memory(const char *filename)
{
	return !strncmp(filename, "[stack", 6) ||
	       !strncmp(filename, "/SYSV", 5) ||
	       !strcmp(filename, "[heap]");
}

/* True if @filename looks like an on-device Android library path. */
static inline int is_android_lib(const char *filename)
{
	return !strncmp(filename, "/data/app-lib", 13) ||
	       !strncmp(filename, "/system/lib", 11);
}

/*
 * Rewrite an on-device Android library path into a host-side path using the
 * APP_ABI, APK_PATH, NDK_ROOT and APP_PLATFORM environment variables.
 *
 * @filename:    path as recorded in the mmap event
 * @newfilename: output buffer, expected to hold at least PATH_MAX bytes
 *
 * Return: true if @newfilename was filled in, false to keep @filename as is.
 */
static inline bool replace_android_lib(const char *filename, char *newfilename)
{
	const char *libname;
	char *app_abi;
	size_t app_abi_length, new_length;
	size_t lib_length = 0;

	/*
	 * NOTE(review): libname keeps its leading '/', so the snprintf
	 * formats below emit a doubled slash ("libs//libfoo.so") -- confirm
	 * whether that is intended or whether libname should be advanced
	 * past the '/'.
	 */
	libname = strrchr(filename, '/');
	if (libname)
		lib_length = strlen(libname);

	app_abi = getenv("APP_ABI");
	if (!app_abi)
		return false;

	app_abi_length = strlen(app_abi);

	if (!strncmp(filename, "/data/app-lib", 13)) {
		char *apk_path;

		if (!app_abi_length)
			return false;

		/* 7 covers the literal parts of "libs/%s/%s" plus the NUL. */
		new_length = 7 + app_abi_length + lib_length;

		apk_path = getenv("APK_PATH");
		if (apk_path) {
			new_length += strlen(apk_path) + 1;
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "%s/libs/%s/%s", apk_path, app_abi, libname);
		} else {
			if (new_length > PATH_MAX)
				return false;
			snprintf(newfilename, new_length,
				 "libs/%s/%s", app_abi, libname);
		}
		return true;
	}

	/*
	 * NOTE(review): the literal is 12 chars but only 11 are compared,
	 * matching is_android_lib() above ("/system/lib") -- confirm the
	 * trailing '/' in the literal is intentional.
	 */
	if (!strncmp(filename, "/system/lib/", 11)) {
		char *ndk, *app;
		const char *arch;
		size_t ndk_length;
		size_t app_length;

		ndk = getenv("NDK_ROOT");
		app = getenv("APP_PLATFORM");

		if (!(ndk && app))
			return false;

		ndk_length = strlen(ndk);
		app_length = strlen(app);

		if (!(ndk_length && app_length && app_abi_length))
			return false;

		/* Map the ABI string to the NDK "arch-*" directory name. */
		arch = !strncmp(app_abi, "arm", 3) ? "arm" :
		       !strncmp(app_abi, "mips", 4) ? "mips" :
		       !strncmp(app_abi, "x86", 3) ? "x86" : NULL;

		if (!arch)
			return false;

		/* 27 covers the literal parts of the format plus the NUL. */
		new_length = 27 + ndk_length +
			     app_length + lib_length
			     + strlen(arch);

		if (new_length > PATH_MAX)
			return false;
		snprintf(newfilename, new_length,
			 "%s/platforms/%s/arch-%s/usr/lib/%s",
			 ndk, app, arch, libname);

		return true;
	}
	return false;
}

/*
 * Fill in a pre-allocated struct map and take the initial reference.
 * The identity map_ip/unmap_ip pair may be installed later by callers
 * (see map__new) for maps with no usable DSO.
 */
void map__init(struct map *map, enum map_type type,
	       u64 start, u64 end, u64 pgoff, struct dso *dso)
{
	map->type     = type;
	map->start    = start;
	map->end      = end;
	map->pgoff    = pgoff;
	map->reloc    = 0;
	map->dso      = dso;
	map->map_ip   = map__map_ip;
	map->unmap_ip = map__unmap_ip;
	RB_CLEAR_NODE(&map->rb_node);
	map->groups   = NULL;
	map->referenced = false;
	map->erange_warned = false;
	atomic_set(&map->refcnt, 1);
}

/*
 * Create a map for an mmap event.  Special cases anonymous memory (rewritten
 * to the /tmp/perf-$PID.map JIT convention for MAP__FUNCTION), Android
 * libraries (path translated to a host path), the vDSO (uses the synthesized
 * vDSO dso and forces pgoff to 0), and no-DSO areas such as stack/heap.
 *
 * Return: new map with refcount 1, or NULL on allocation/DSO lookup failure.
 */
struct map *map__new(struct machine *machine, u64 start, u64 len,
		     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
		     u64 ino_gen, u32 prot, u32 flags, char *filename,
		     enum map_type type, struct thread *thread)
{
	struct map *map = malloc(sizeof(*map));

	if (map != NULL) {
		char newfilename[PATH_MAX];
		struct dso *dso;
		int anon, no_dso, vdso, android;

		android = is_android_lib(filename);
		anon = is_anon_memory(filename);
		vdso = is_vdso_map(filename);
		no_dso = is_no_dso_memory(filename);

		map->maj = d_maj;
		map->min = d_min;
		map->ino = ino;
		map->ino_generation = ino_gen;
		map->prot = prot;
		map->flags = flags;

		/* JIT engines dump symbols to /tmp/perf-$PID.map. */
		if ((anon || no_dso) && type == MAP__FUNCTION) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
			filename = newfilename;
		}

		if (android) {
			if (replace_android_lib(filename, newfilename))
				filename = newfilename;
		}

		if (vdso) {
			pgoff = 0;
			dso = vdso__dso_findnew(machine, thread);
		} else
			dso = __dsos__findnew(&machine->user_dsos, filename);

		if (dso == NULL)
			goto out_delete;

		map__init(map, type, start, start + len, pgoff, dso);

		if (anon || no_dso) {
			/* No file to relocate against: addresses are identity mapped. */
			map->map_ip = map->unmap_ip = identity__map_ip;

			/*
			 * Set memory without DSO as loaded. All map__find_*
			 * functions still return NULL, and we avoid the
			 * unnecessary map__load warning.
			 */
			if (type != MAP__FUNCTION)
				dso__set_loaded(dso, map->type);
		}
	}
	return map;
out_delete:
	free(map);
	return NULL;
}

/*
 * Constructor variant for modules (where we know from /proc/modules where
 * they are loaded) and for vmlinux, where only after we load all the
 * symbols we'll know where it starts and ends.
 *
 * For kernel DSOs a struct kmap is co-allocated right after the map; it is
 * reached later via map__kmap().
 */
struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
{
	struct map *map = calloc(1, (sizeof(*map) +
				(dso->kernel ? sizeof(struct kmap) : 0)));
	if (map != NULL) {
		/*
		 * ->end will be filled after we load all the symbols
		 */
		map__init(map, type, start, 0, 0, dso);
	}

	return map;
}

/* Free a map. It must already be unlinked from any maps rb tree. */
void map__delete(struct map *map)
{
	BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
	free(map);
}

/* Drop a reference, freeing the map when the count hits zero. */
void map__put(struct map *map)
{
	if (map && atomic_dec_and_test(&map->refcnt))
		map__delete(map);
}

/* Snap map->start to the first symbol in the DSO, if any. */
void map__fixup_start(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols[map->type];
	struct rb_node *nd = rb_first(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->start = sym->start;
	}
}

/* Snap map->end to the last symbol in the DSO, if any. */
void map__fixup_end(struct map *map)
{
	struct rb_root *symbols = &map->dso->symbols[map->type];
	struct rb_node *nd = rb_last(symbols);
	if (nd != NULL) {
		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
		map->end = sym->end;
	}
}

#define DSO__DELETED "(deleted)"

/*
 * Load the symbol table for the map's DSO if not already loaded, applying
 * @filter to each candidate symbol.
 *
 * Return: 0 on success (or already loaded), -1 on failure or when no
 * symbols were found (a warning is printed in both failure cases).
 */
int map__load(struct map *map, symbol_filter_t filter)
{
	const char *name = map->dso->long_name;
	int nr;

	if (dso__loaded(map->dso, map->type))
		return 0;

	nr = dso__load(map->dso, map, filter);
	if (nr < 0) {
		if (map->dso->has_build_id) {
			char sbuild_id[BUILD_ID_SIZE * 2 + 1];

			build_id__sprintf(map->dso->build_id,
					  sizeof(map->dso->build_id),
					  sbuild_id);
			pr_warning("%s with build id %s not found",
				   name, sbuild_id);
		} else
			pr_warning("Failed to open %s", name);

		pr_warning(", continuing without symbols\n");
		return -1;
	} else if (nr == 0) {
#ifdef HAVE_LIBELF_SUPPORT
		/*
		 * Zero symbols from a name ending in " (deleted)" usually
		 * means the file was replaced on disk (e.g. by prelink).
		 */
		const size_t len = strlen(name);
		const size_t real_len = len - sizeof(DSO__DELETED);

		if (len > sizeof(DSO__DELETED) &&
		    strcmp(name + real_len + 1, DSO__DELETED) == 0) {
			pr_warning("%.*s was updated (is prelink enabled?). "
				"Restart the long running apps that use it!\n",
				   (int)real_len, name);
		} else {
			pr_warning("no symbols found in %s, maybe install "
				   "a debug package?\n", name);
		}
#endif
		return -1;
	}

	return 0;
}

/* Weak default: architectures may override symbol name comparison. */
int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
{
	return strcmp(namea, nameb);
}

/* Find the symbol containing map-relative address @addr, loading the DSO if needed. */
struct symbol *map__find_symbol(struct map *map, u64 addr,
				symbol_filter_t filter)
{
	if (map__load(map, filter) < 0)
		return NULL;

	return dso__find_symbol(map->dso, map->type, addr);
}

/* Find a symbol by name, sorting the DSO's symbols by name on first use. */
struct symbol *map__find_symbol_by_name(struct map *map, const char *name,
					symbol_filter_t filter)
{
	if (map__load(map, filter) < 0)
		return NULL;

	if (!dso__sorted_by_name(map->dso, map->type))
		dso__sort_by_name(map->dso, map->type);

	return dso__find_symbol_by_name(map->dso, map->type, name);
}

/*
 * Shallow copy of a map.
 * NOTE(review): the clone shares the rb_node/refcnt bytes of the original;
 * callers are expected to (re)insert it before use -- confirm against callers.
 */
struct map *map__clone(struct map *map)
{
	return memdup(map, sizeof(*map));
}

/* Return 1 if the address ranges of @l and @r intersect, else 0. */
int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

/* Print "start-end pgoff dso-name" for the map. Returns bytes printed. */
size_t map__fprintf(struct map *map, FILE *fp)
{
	return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
		       map->start, map->end, map->pgoff, map->dso->name);
}

/*
 * Print the DSO name for the map, preferring the long name when
 * symbol_conf.show_kernel_path is set; "[unknown]" when there is no DSO.
 */
size_t map__fprintf_dsoname(struct map *map, FILE *fp)
{
	const char *dsoname = "[unknown]";

	if (map && map->dso && (map->dso->name || map->dso->long_name)) {
		if (symbol_conf.show_kernel_path && map->dso->long_name)
			dsoname = map->dso->long_name;
		else if (map->dso->name)
			dsoname = map->dso->name;
	}

	return fprintf(fp, "%s", dsoname);
}

/*
 * Print "@prefix + source file:line" for @addr if srcline info is available.
 * Returns bytes printed (0 when the srcline is unknown or there is no DSO).
 */
int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
			 FILE *fp)
{
	char *srcline;
	int ret = 0;

	if (map && map->dso) {
		srcline = get_srcline(map->dso,
				      map__rip_2objdump(map, addr), NULL, true);
		if (srcline != SRCLINE_UNKNOWN)
			ret = fprintf(fp, "%s%s", prefix, srcline);
		free_srcline(srcline);
	}
	return ret;
}

/**
 * map__rip_2objdump - convert symbol start address to objdump address.
 * @map: memory map
 * @rip: symbol start address
 *
 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
 * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
 * relative to section start.
 *
 * Return: Address suitable for passing to "objdump --start-address="
 */
u64 map__rip_2objdump(struct map *map, u64 rip)
{
	if (!map->dso->adjust_symbols)
		return rip;

	if (map->dso->rel)
		return rip - map->pgoff;

	return map->unmap_ip(map, rip) - map->reloc;
}

/**
 * map__objdump_2mem - convert objdump address to a memory address.
 * @map: memory map
 * @ip: objdump address
 *
 * Closely related to map__rip_2objdump(), this function takes an address from
 * objdump and converts it to a memory address. Note this assumes that @map
 * contains the address. To be sure the result is valid, check it forwards
 * e.g. map__rip_2objdump(map->map_ip(map, map__objdump_2mem(map, ip))) == ip
 *
 * Return: Memory address.
 */
u64 map__objdump_2mem(struct map *map, u64 ip)
{
	if (!map->dso->adjust_symbols)
		return map->unmap_ip(map, ip);

	if (map->dso->rel)
		return map->unmap_ip(map, ip + map->pgoff);

	return ip + map->reloc;
}

/* Initialize an empty maps container: rb tree, rwlock and removed list. */
static void maps__init(struct maps *maps)
{
	maps->entries = RB_ROOT;
	pthread_rwlock_init(&maps->lock, NULL);
	INIT_LIST_HEAD(&maps->removed_maps);
}

/* Initialize a map_groups: one maps container per map type, refcount 1. */
void map_groups__init(struct map_groups *mg, struct machine *machine)
{
	int i;
	for (i = 0; i < MAP__NR_TYPES; ++i) {
		maps__init(&mg->maps[i]);
	}
	mg->machine = machine;
	atomic_set(&mg->refcnt, 1);
}

/* Unlink and drop every map in the rb tree. Caller holds maps->lock. */
static void __maps__purge(struct maps *maps)
{
	struct rb_root *root = &maps->entries;
	struct rb_node *next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_init(&pos->rb_node, root);
		map__put(pos);
	}
}

/* Unlink and drop every map parked on the removed list. Caller holds maps->lock. */
static void __maps__purge_removed_maps(struct maps *maps)
{
	struct map *pos, *n;

	list_for_each_entry_safe(pos, n, &maps->removed_maps, node) {
		list_del_init(&pos->node);
		map__put(pos);
	}
}

/* Release all maps held by the container, under the write lock. */
static void maps__exit(struct maps *maps)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__purge(maps);
	__maps__purge_removed_maps(maps);
	pthread_rwlock_unlock(&maps->lock);
}

/* Release all maps of all types held by the map_groups. */
void map_groups__exit(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		maps__exit(&mg->maps[i]);
}

/* True when no map of any type is present, live or removed. */
bool map_groups__empty(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		if (maps__first(&mg->maps[i]))
			return false;
		if (!list_empty(&mg->maps[i].removed_maps))
			return false;
	}

	return true;
}

/* Allocate and initialize a map_groups. Returns NULL on allocation failure. */
struct map_groups *map_groups__new(struct machine *machine)
{
	struct map_groups *mg = malloc(sizeof(*mg));

	if (mg != NULL)
		map_groups__init(mg, machine);

	return mg;
}

/* Tear down and free a map_groups. */
void map_groups__delete(struct map_groups *mg)
{
	map_groups__exit(mg);
	free(mg);
}

/* Drop a reference, deleting the map_groups when the count hits zero. */
void map_groups__put(struct map_groups *mg)
{
	if (mg && atomic_dec_and_test(&mg->refcnt))
		map_groups__delete(mg);
}

/*
 * Find the symbol containing the memory address @addr in the maps of @type.
 * On success optionally returns the containing map via @mapp.
 */
struct symbol *map_groups__find_symbol(struct map_groups *mg,
				       enum map_type type, u64 addr,
				       struct map **mapp,
				       symbol_filter_t filter)
{
	struct map *map = map_groups__find(mg, type, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map, filter) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr), filter);
	}

	return NULL;
}

/*
 * Linear scan of all maps of @type for a symbol named @name; first match
 * wins. On success optionally returns the containing map via @mapp.
 */
struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
					       enum map_type type,
					       const char *name,
					       struct map **mapp,
					       symbol_filter_t filter)
{
	struct maps *maps = &mg->maps[type];
	struct symbol *sym;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);

		sym = map__find_symbol_by_name(pos, name, filter);

		if (sym == NULL)
			continue;
		if (mapp != NULL)
			*mapp = pos;
		goto out;
	}

	sym = NULL;
out:
	pthread_rwlock_unlock(&maps->lock);
	return sym;
}

/*
 * Resolve an addr_map_symbol: if ams->addr falls outside its current map,
 * re-find the right map in the same groups, then fill in al_addr and sym.
 *
 * Return: 0 when a symbol was found, -1 otherwise.
 */
int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
{
	if (ams->addr < ams->map->start || ams->addr >= ams->map->end) {
		if (ams->map->groups == NULL)
			return -1;
		ams->map = map_groups__find(ams->map->groups, ams->map->type,
					    ams->addr);
		if (ams->map == NULL)
			return -1;
	}

	ams->al_addr = ams->map->map_ip(ams->map, ams->addr);
	ams->sym = map__find_symbol(ams->map, ams->al_addr, filter);

	return ams->sym ? 0 : -1;
}

/* Dump every map in the container; with verbose > 2 also dump each DSO. */
static size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&maps->lock);

	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
		struct map *pos = rb_entry(nd, struct map, rb_node);
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 2) {
			printed += dso__fprintf(pos->dso, pos->type, fp);
			printed += fprintf(fp, "--\n");
		}
	}

	pthread_rwlock_unlock(&maps->lock);

	return printed;
}

/* Dump all maps of one type with a type-name header. */
size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
				  FILE *fp)
{
	size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
	return printed += maps__fprintf(&mg->maps[type], fp);
}

/* Dump the live maps of every type. */
static size_t map_groups__fprintf_maps(struct map_groups *mg, FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_maps(mg, i, fp);
	return printed;
}

/* Dump the removed (still referenced) maps of one type. */
static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
						 enum map_type type, FILE *fp)
{
	struct map *pos;
	size_t printed = 0;

	list_for_each_entry(pos, &mg->maps[type].removed_maps, node) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos, fp);
		if (verbose > 1) {
			printed += dso__fprintf(pos->dso, type, fp);
			printed += fprintf(fp, "--\n");
		}
	}
	return printed;
}

/* Dump the removed maps of every type. */
static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
					       FILE *fp)
{
	size_t printed = 0, i;
	for (i = 0; i < MAP__NR_TYPES; ++i)
		printed += __map_groups__fprintf_removed_maps(mg, i, fp);
	return printed;
}

/* Dump live maps followed by removed maps. */
size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
{
	size_t printed = map_groups__fprintf_maps(mg, fp);
	printed += fprintf(fp, "Removed maps:\n");
	return printed + map_groups__fprintf_removed_maps(mg, fp);
}
/*
 * Remove from @maps any map that overlaps the incoming @map, cloning new
 * "before"/"after" maps for the portions of an overlapped map that the new
 * map does not cover.  Maps still referenced elsewhere are parked on
 * maps->removed_maps instead of being dropped.
 *
 * Return: 0 on success, -ENOMEM if a clone failed.
 */
static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next;
	int err = 0;

	pthread_rwlock_wrlock(&maps->lock);

	root = &maps->entries;
	next = rb_first(root);

	while (next) {
		struct map *pos = rb_entry(next, struct map, rb_node);
		/* Advance before pos is possibly erased from the tree. */
		next = rb_next(&pos->rb_node);

		if (!map__overlap(pos, map))
			continue;

		if (verbose >= 2) {
			fputs("overlapping maps:\n", fp);
			map__fprintf(map, fp);
			map__fprintf(pos, fp);
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map->start > pos->start) {
			struct map *before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			/* Truncate the clone to the part preceding the new map. */
			before->end = map->start;
			__maps__insert(maps, before);
			if (verbose >= 2)
				map__fprintf(before, fp);
		}

		if (map->end < pos->end) {
			struct map *after = map__clone(pos);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			/* Truncate the clone to the part following the new map. */
			after->start = map->end;
			__maps__insert(maps, after);
			if (verbose >= 2)
				map__fprintf(after, fp);
		}
put_map:
		/*
		 * If we have references, just move them to a separate list.
		 */
		if (pos->referenced)
			list_add_tail(&pos->node, &maps->removed_maps);
		else
			map__put(pos);

		if (err)
			goto out;
	}

	err = 0;
out:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}

/* Fix up overlaps in the maps container matching @map's type. */
int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
				   FILE *fp)
{
	return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int map_groups__clone(struct map_groups *mg,
		      struct map_groups *parent, enum map_type type)
{
	int err = -ENOMEM;
	struct map *map;
	struct maps *maps = &parent->maps[type];

	pthread_rwlock_rdlock(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		struct map *new = map__clone(map);
		if (new == NULL)
			goto out_unlock;
		map_groups__insert(mg, new);
	}

	err = 0;
out_unlock:
	pthread_rwlock_unlock(&maps->lock);
	return err;
}

/*
 * Insert @map into the rb tree keyed by map->start and take a reference.
 * Caller holds maps->lock for writing.
 */
static void __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps->entries.rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map->start;
	struct map *m;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&map->rb_node, parent, p);
	rb_insert_color(&map->rb_node, &maps->entries);
	map__get(map);
}

/* Locked wrapper around __maps__insert(). */
void maps__insert(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__insert(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}

/* Unlink @map from the tree and drop the tree's reference. Caller holds the lock. */
static void __maps__remove(struct maps *maps, struct map *map)
{
	rb_erase_init(&map->rb_node, &maps->entries);
	map__put(map);
}

/* Locked wrapper around __maps__remove(). */
void maps__remove(struct maps *maps, struct map *map)
{
	pthread_rwlock_wrlock(&maps->lock);
	__maps__remove(maps, map);
	pthread_rwlock_unlock(&maps->lock);
}

/*
 * Find the map containing address @ip (start <= ip < end), or NULL.
 * NOTE(review): no reference is taken on the returned map -- confirm
 * callers hold whatever keeps it alive.
 */
struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node **p, *parent = NULL;
	struct map *m;

	pthread_rwlock_rdlock(&maps->lock);

	p = &maps->entries.rb_node;
	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map, rb_node);
		if (ip < m->start)
			p = &(*p)->rb_left;
		else if (ip >= m->end)
			p = &(*p)->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	pthread_rwlock_unlock(&maps->lock);
	return m;
}

/* First map (lowest start address) in the container, or NULL if empty. */
struct map *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(&maps->entries);

	if (first)
		return rb_entry(first, struct map, rb_node);
	return NULL;
}

/* In-order successor of @map in its rb tree, or NULL at the end. */
struct map *map__next(struct map *map)
{
	struct rb_node *next = rb_next(&map->rb_node);

	if (next)
		return rb_entry(next, struct map, rb_node);
	return NULL;
}

/*
 * Return the struct kmap co-allocated right after a kernel map by
 * map__new2(); error and NULL for non-kernel maps, which have no kmap.
 */
struct kmap *map__kmap(struct map *map)
{
	if (!map->dso || !map->dso->kernel) {
		pr_err("Internal error: map__kmap with a non-kernel map\n");
		return NULL;
	}
	return (struct kmap *)(map + 1);
}

/* Return the map_groups recorded in a kernel map's kmap, or NULL with an error. */
struct map_groups *map__kmaps(struct map *map)
{
	struct kmap *kmap = map__kmap(map);

	if (!kmap || !kmap->kmaps) {
		pr_err("Internal error: map__kmaps with a non-kernel map\n");
		return NULL;
	}
	return kmap->kmaps;
}