// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "rwsem.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"
#include <internal/rc_check.h>

/*
 * Locking/sorting note:
 *
 * Sorting is done with the write lock, iteration and binary searching happens
 * under the read lock requiring being sorted. There is a race between sorting
 * releasing the write lock and acquiring the read lock for iteration/searching
 * where another thread could insert and break the sorting of the maps. In
 * practice inserting maps should be rare meaning that the race shouldn't lead
 * to live lock. Removal of maps doesn't break being sorted.
 */

/*
 * A reference-counted container of struct map entries, kept in one (or two)
 * flat arrays that are lazily sorted on demand rather than on every insert.
 */
DECLARE_RC_STRUCT(maps) {
	struct rw_semaphore lock;
	/**
	 * @maps_by_address: array of maps sorted by their starting address if
	 * maps_by_address_sorted is true.
	 */
	struct map **maps_by_address;
	/**
	 * @maps_by_name: optional array of maps sorted by their dso name if
	 * maps_by_name_sorted is true.
	 */
	struct map **maps_by_name;
	struct machine *machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	void *addr_space;
	const struct unwind_libunwind_ops *unwind_libunwind_ops;
#endif
	refcount_t refcnt;
	/**
	 * @nr_maps: number of maps_by_address, and possibly maps_by_name,
	 * entries that contain maps.
	 */
	unsigned int nr_maps;
	/**
	 * @nr_maps_allocated: number of entries in maps_by_address and possibly
	 * maps_by_name.
	 */
	unsigned int nr_maps_allocated;
	/**
	 * @last_search_by_name_idx: cache of last found by name entry's index
	 * as frequent searches for the same dso name are common.
	 */
	unsigned int last_search_by_name_idx;
	/** @maps_by_address_sorted: is maps_by_address sorted. */
	bool maps_by_address_sorted;
	/** @maps_by_name_sorted: is maps_by_name sorted. */
	bool maps_by_name_sorted;
	/** @ends_broken: does the map contain a map where end values are unset/unsorted? */
	bool ends_broken;
};

/*
 * Debug-build sanity checks for the container. Callers must hold at least the
 * read lock (typically it is invoked just before releasing the write lock).
 * Compiled out entirely when NDEBUG is defined.
 */
static void check_invariants(const struct maps *maps __maybe_unused)
{
#ifndef NDEBUG
	assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated);
	for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
		struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];

		/* Check map is well-formed. */
		assert(map__end(map) == 0 || map__start(map) <= map__end(map));
		/* Expect at least 1 reference count. */
		assert(refcount_read(map__refcnt(map)) > 0);

		if (map__dso(map) && dso__kernel(map__dso(map)))
			assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));

		if (i > 0) {
			struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];

			/* If addresses are sorted... */
			if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) {
				/* Maps should be in start address order. */
				assert(map__start(prev) <= map__start(map));
				/*
				 * If the ends of maps aren't broken (during
				 * construction) then they should be ordered
				 * too.
				 */
				if (!RC_CHK_ACCESS(maps)->ends_broken) {
					assert(map__end(prev) <= map__end(map));
					assert(map__end(prev) <= map__start(map) ||
					       map__start(prev) == map__start(map));
				}
			}
		}
	}
	if (RC_CHK_ACCESS(maps)->maps_by_name) {
		for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
			struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];

			/*
			 * Maps by name maps should be in maps_by_address, so
			 * the reference count should be higher.
			 */
			assert(refcount_read(map__refcnt(map)) > 1);
		}
	}
#endif
}

static struct map **maps__maps_by_address(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address;
}

static void maps__set_maps_by_address(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_address = new;

}

static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
{
	RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
}

static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps)
{
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
}

/* Not in the header, to aid reference counting. */
static struct map **maps__maps_by_name(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name;

}

static void maps__set_maps_by_name(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_name = new;

}

static bool maps__maps_by_address_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address_sorted;
}

static void maps__set_maps_by_address_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = value;
}

static bool maps__maps_by_name_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name_sorted;
}

static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = value;
}

struct machine *maps__machine(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->machine;
}

unsigned int maps__nr_maps(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->nr_maps;
}

refcount_t *maps__refcnt(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->refcnt;
}

#ifdef HAVE_LIBUNWIND_SUPPORT
void *maps__addr_space(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->addr_space;
}

void maps__set_addr_space(struct maps *maps, void *addr_space)
{
	RC_CHK_ACCESS(maps)->addr_space = addr_space;
}

const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
}

void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
{
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
}
#endif

static struct rw_semaphore *maps__lock(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->lock;
}

/* Initialize a freshly allocated maps: empty, refcount 1, address-sorted. */
static void maps__init(struct maps *maps, struct machine *machine)
{
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->maps_by_address = NULL;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
	RC_CHK_ACCESS(maps)->machine = machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	RC_CHK_ACCESS(maps)->addr_space = NULL;
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL;
#endif
	refcount_set(maps__refcnt(maps), 1);
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
	RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0;
	/* An empty array is trivially sorted. */
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = true;
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = false;
}

/*
 * Drop all map references and free both arrays. Only called from
 * maps__delete() when the last reference is gone, so no locking here.
 * NOTE(review): zfree() here zeroes the *local* copies of the array pointers,
 * not the struct fields — harmless only because the struct itself is freed
 * immediately afterwards; confirm if this is ever called elsewhere.
 */
static void maps__exit(struct maps *maps)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);

	for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
		map__zput(maps_by_address[i]);
		if (maps_by_name)
			map__zput(maps_by_name[i]);
	}
	zfree(&maps_by_address);
	zfree(&maps_by_name);
	unwind__finish_access(maps);
}

/* Allocate a new maps with refcount 1, or return NULL on allocation failure. */
struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}

static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	RC_CHK_FREE(maps);
}

struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}

void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		RC_CHK_PUT(maps);
}

/* Discard the by-name array; it is rebuilt lazily by maps__sort_by_name(). */
static void __maps__free_maps_by_name(struct maps *maps)
{
	if (!maps__maps_by_name(maps))
		return;

	/*
	 * Free everything to try to do it from the rbtree in the next search
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);

	/* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false */
	maps__set_maps_by_name_sorted(maps, false);
}

/*
 * qsort/bsearch comparator: order by start, then end, then pointer identity so
 * that equal-address maps still have a stable, total order.
 */
static int map__start_cmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	u64 map_a_start = map__start(map_a);
	u64 map_b_start = map__start(map_b);

	if (map_a_start == map_b_start) {
		u64 map_a_end = map__end(map_a);
		u64 map_b_end = map__end(map_b);

		if (map_a_end == map_b_end) {
			/* Ensure maps with the same addresses have a fixed order. */
			if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b))
				return 0;
			return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b)
				? 1 : -1;
		}
		return map_a_end > map_b_end ? 1 : -1;
	}
	return map_a_start > map_b_start ? 1 : -1;
}

/* Sort by address; caller must hold the write lock. No-op if already sorted. */
static void __maps__sort_by_address(struct maps *maps)
{
	if (maps__maps_by_address_sorted(maps))
		return;

	qsort(maps__maps_by_address(maps),
	      maps__nr_maps(maps),
	      sizeof(struct map *),
	      map__start_cmp);
	maps__set_maps_by_address_sorted(maps, true);
}

static void maps__sort_by_address(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__sort_by_address(maps);
	up_write(maps__lock(maps));
}

/*
 * Comparator for the by-name array: dso short name first, falling back to the
 * address order so distinct maps with equal names still sort deterministically.
 */
static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	const struct dso *dso_a = map__dso(map_a);
	const struct dso *dso_b = map__dso(map_b);
	int ret = strcmp(dso__short_name(dso_a), dso__short_name(dso_b));

	if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) {
		/* Ensure distinct but name equal maps have an order. */
		return map__start_cmp(a, b);
	}
	return ret;
}

/*
 * Build (if needed) and sort the by-name array under the write lock.
 * Returns 0 on success or -ENOMEM if the array couldn't be allocated.
 */
static int maps__sort_by_name(struct maps *maps)
{
	int err = 0;

	down_write(maps__lock(maps));
	if (!maps__maps_by_name_sorted(maps)) {
		struct map **maps_by_name = maps__maps_by_name(maps);

		if (!maps_by_name) {
			maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated *
					      sizeof(*maps_by_name));
			if (!maps_by_name)
				err = -ENOMEM;
			else {
				struct map **maps_by_address = maps__maps_by_address(maps);
				unsigned int n = maps__nr_maps(maps);

				maps__set_maps_by_name(maps, maps_by_name);
				/* Each entry holds its own map reference. */
				for (unsigned int i = 0; i < n; i++)
					maps_by_name[i] = map__get(maps_by_address[i]);
			}
		}
		if (!err) {
			qsort(maps_by_name,
			      maps__nr_maps(maps),
			      sizeof(struct map *),
			      map__strcmp);
			maps__set_maps_by_name_sorted(maps, true);
		}
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
	return err;
}

/*
 * Find the index of map in maps_by_address: binary search when sorted, linear
 * scan otherwise. On failure logs an error and returns -1, which wraps to
 * UINT_MAX given the unsigned return type — callers are expected to pass a map
 * known to be present.
 * NOTE(review): the pr_err() message lacks a trailing '\n'.
 */
static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);

	if (maps__maps_by_address_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps),
				sizeof(*mapp), map__start_cmp);

		if (mapp)
			return mapp - maps_by_address;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps");
	return -1;
}

/* By-name counterpart of maps__by_address_index(); same error convention. */
static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_name = maps__maps_by_name(maps);

	if (maps__maps_by_name_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps_by_name, maps__nr_maps(maps),
				sizeof(*mapp), map__strcmp);

		if (mapp)
			return mapp - maps_by_name;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps");
	return -1;
}

/* For kernel dso maps, point the kmap back at the owning maps. */
static void map__set_kmap_maps(struct map *map, struct maps *maps)
{
	struct dso *dso;

	if (map == NULL)
		return;

	dso = map__dso(map);

	if (dso && dso__kernel(dso)) {
		struct kmap *kmap = map__kmap(map);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}
}

/*
 * Append new to the end of maps_by_address (growing both arrays as needed) and
 * recompute the sorted flags. Caller holds the write lock.
 */
static int __maps__insert(struct maps *maps, struct map *new)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;

	if (nr_maps + 1 > nr_allocate) {
		/* Geometric growth, starting at 32 entries. */
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	/* Insert the value at the end. */
	maps_by_address[nr_maps] = map__get(new);
	map__set_kmap_maps(new, maps);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new);

	nr_maps++;
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;

	/*
	 * Recompute if things are sorted. If things are inserted in a sorted
	 * manner, for example by processing /proc/pid/maps, then no
	 * sorting/resorting will be necessary.
	 */
	if (nr_maps == 1) {
		/* If there's just 1 entry then maps are sorted. */
		maps__set_maps_by_address_sorted(maps, true);
		maps__set_maps_by_name_sorted(maps, maps_by_name != NULL);
	} else {
		/* Sorted if maps were already sorted and this map starts after the last one. */
		maps__set_maps_by_address_sorted(maps,
			maps__maps_by_address_sorted(maps) &&
			map__end(maps_by_address[nr_maps - 2]) <= map__start(new));
		maps__set_maps_by_name_sorted(maps, false);
	}
	/* An end below the start marks the entry as needing maps__fixup_end(). */
	if (map__end(new) < map__start(new))
		RC_CHK_ACCESS(maps)->ends_broken = true;

	return 0;
}

int maps__insert(struct maps *maps, struct map *map)
{
	int ret;

	down_write(maps__lock(maps));
	ret = __maps__insert(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
	return ret;
}

/*
 * Remove map from both arrays by sliding later entries down, dropping one
 * reference per array. Caller holds the write lock; map must be present.
 */
static void __maps__remove(struct maps *maps, struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int address_idx;

	/* Slide later mappings over the one to remove */
	address_idx = maps__by_address_index(maps, map);
	map__put(maps_by_address[address_idx]);
	memmove(&maps_by_address[address_idx],
		&maps_by_address[address_idx + 1],
		(nr_maps - address_idx - 1) * sizeof(*maps_by_address));

	if (maps_by_name) {
		unsigned int name_idx = maps__by_name_index(maps, map);

		map__put(maps_by_name[name_idx]);
		memmove(&maps_by_name[name_idx],
			&maps_by_name[name_idx + 1],
			(nr_maps - name_idx - 1) * sizeof(*maps_by_name));
	}

	--RC_CHK_ACCESS(maps)->nr_maps;
}

void maps__remove(struct maps *maps, struct map *map)
{
	down_write(maps__lock(maps));
	__maps__remove(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
}

bool maps__empty(struct maps *maps)
{
	bool res;

	down_read(maps__lock(maps));
	res = maps__nr_maps(maps) == 0;
	up_read(maps__lock(maps));

	return res;
}

/* Identity comparison of the underlying objects, not a deep comparison. */
bool maps__equal(struct maps *a, struct maps *b)
{
	return RC_CHK_EQUAL(a, b);
}

/*
 * Invoke cb for each map in address order under the read lock, stopping early
 * if cb returns non-zero (that value is returned). Sorts first if needed, per
 * the locking/sorting note at the top of the file.
 */
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{
	bool done = false;
	int ret = 0;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			/*
			 * maps__for_each_map callbacks may buggily/unsafely
			 * insert into maps_by_address. Deliberately reload
			 * maps__nr_maps and maps_by_address on each iteration
			 * to avoid using memory freed by maps__insert growing
			 * the array - this may cause maps to be skipped or
			 * repeated.
			 */
			for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
				struct map **maps_by_address = maps__maps_by_address(maps);
				struct map *map = maps_by_address[i];

				ret = cb(map, data);
				if (ret)
					break;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return ret;
}

/* Remove every map for which cb returns true, under one write lock. */
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
{
	struct map **maps_by_address;

	down_write(maps__lock(maps));

	maps_by_address = maps__maps_by_address(maps);
	for (unsigned int i = 0; i < maps__nr_maps(maps);) {
		if (cb(maps_by_address[i], data))
			__maps__remove(maps, maps_by_address[i]);
		else
			i++;
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
}

/*
 * Resolve addr to a symbol. If mapp is non-NULL the map reference is handed to
 * the caller (who must put it); otherwise it is released here.
 */
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);
	struct symbol *result = NULL;

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0)
		result = map__find_symbol(map, map__map_ip(map, addr));

	if (mapp)
		*mapp = map;
	else
		map__put(map);

	return result;
}

struct maps__find_symbol_by_name_args {
	struct map **mapp;
	const char *name;
	struct symbol *sym;
};

/* Per-map callback: returns 1 (stop iteration) once a containing map is found. */
static int maps__find_symbol_by_name_cb(struct map *map, void *data)
{
	struct maps__find_symbol_by_name_args *args = data;

	args->sym = map__find_symbol_by_name(map, args->name);
	if (!args->sym)
		return 0;

	if (!map__contains_symbol(map, args->sym)) {
		args->sym = NULL;
		return 0;
	}

	if (args->mapp != NULL)
		*args->mapp = map__get(map);
	return 1;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct maps__find_symbol_by_name_args args = {
		.mapp = mapp,
		.name = name,
		.sym = NULL,
	};

	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
	return args.sym;
}

/*
 * Fill in al_addr/sym for ams->addr, re-resolving ams->ms.map first if the
 * address falls outside the currently cached map. Returns 0 on success, -1 if
 * no map or symbol could be found.
 */
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}

struct maps__fprintf_args {
	FILE *fp;
	size_t printed;
};

static int maps__fprintf_cb(struct map *map, void *data)
{
	struct maps__fprintf_args *args = data;

	args->printed += fprintf(args->fp, "Map:");
	args->printed += map__fprintf(map, args->fp);
	if (verbose > 2) {
		args->printed += dso__fprintf(map__dso(map), args->fp);
		args->printed += fprintf(args->fp, "--\n");
	}
	return 0;
}

size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	struct maps__fprintf_args args = {
		.fp = fp,
		.printed = 0,
	};

	maps__for_each_map(maps, maps__fprintf_cb, &args);

	return args.printed;
}

/*
 * Find first map where end > map->start.
 * Same as find_vma() in kernel. Requires maps_by_address to be sorted; returns
 * nr_maps when no such entry exists.
 */
static unsigned int first_ending_after(struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1;

	assert(maps__maps_by_address_sorted(maps));
	if (low <= high && map__end(maps_by_address[0]) > map__start(map))
		return 0;

	while (low <= high) {
		int mid = (low + high) / 2;
		struct map *pos = maps_by_address[mid];

		if (map__end(pos) > map__start(map)) {
			first = mid;
			if (map__start(pos) <= map__start(map)) {
				/* Entry overlaps map. */
				break;
			}
			high = mid - 1;
		} else
			low = mid + 1;
	}
	return first;
}

/*
 * Insert new1 (and optionally new2, which must follow new1) at position
 * first_after_index in the address-sorted array, preserving the sort. The
 * by-name array, if present, just receives the entries at its end and is
 * marked unsorted. Caller holds the write lock.
 */
static int __maps__insert_sorted(struct maps *maps, unsigned int first_after_index,
				 struct map *new1, struct map *new2)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;
	unsigned int to_add = new2 ? 2 : 1;

	assert(maps__maps_by_address_sorted(maps));
	assert(first_after_index == nr_maps ||
	       map__end(new1) <= map__start(maps_by_address[first_after_index]));
	assert(!new2 || map__end(new1) <= map__start(new2));
	assert(first_after_index == nr_maps || !new2 ||
	       map__end(new2) <= map__start(maps_by_address[first_after_index]));

	if (nr_maps + to_add > nr_allocate) {
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new1));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new1));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	/* Open a gap of to_add slots at the insertion point. */
	memmove(&maps_by_address[first_after_index+to_add],
		&maps_by_address[first_after_index],
		(nr_maps - first_after_index) * sizeof(new1));
	maps_by_address[first_after_index] = map__get(new1);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new1);
	if (new2) {
		maps_by_address[first_after_index + 1] = map__get(new2);
		if (maps_by_name)
			maps_by_name[nr_maps + 1] = map__get(new2);
	}
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps + to_add;
	maps__set_maps_by_name_sorted(maps, false);
	map__set_kmap_maps(new1, maps);
	map__set_kmap_maps(new2, maps);

	check_invariants(maps);
	return 0;
}

/*
 * Adds new to maps, if new overlaps existing entries then the existing maps are
 * adjusted or removed so that new fits without overlapping any entries.
 */
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err = 0;
	FILE *fp = debug_file();
	unsigned int i, ni = INT_MAX; // Some gcc complain, but depends on maps_by_name...

	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	/*
	 * Iterate through entries where the end of the existing entry is
	 * greater-than the new map's start.
	 */
	for (i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
		struct map **maps_by_address = maps__maps_by_address(maps);
		struct map **maps_by_name = maps__maps_by_name(maps);
		struct map *pos = maps_by_address[i];
		struct map *before = NULL, *after = NULL;

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (map__start(pos) >= map__end(new))
			break;

		if (use_browser) {
			pr_debug("overlapping maps in %s (disable tui for more info)\n",
				 dso__name(map__dso(new)));
		} else if (verbose >= 2) {
			pr_debug("overlapping maps:\n");
			map__fprintf(new, fp);
			map__fprintf(pos, fp);
		}

		if (maps_by_name)
			ni = maps__by_name_index(maps, pos);

		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(new) > map__start(pos)) {
			/* Map starts within existing map. Need to shorten the existing map. */
			before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto out_err;
			}
			map__set_end(before, map__start(new));

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
		}
		if (map__end(new) < map__end(pos)) {
			/* The new map isn't as long as the existing map. */
			after = map__clone(pos);

			if (after == NULL) {
				map__zput(before);
				err = -ENOMEM;
				goto out_err;
			}

			map__set_start(after, map__end(new));
			map__add_pgoff(after, map__end(new) - map__start(pos));
			assert(map__map_ip(pos, map__end(new)) ==
			       map__map_ip(after, map__end(new)));

			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
		}
		/*
		 * If adding one entry, for `before` or `after`, we can replace
		 * the existing entry. If both `before` and `after` are
		 * necessary than an insert is needed. If the existing entry
		 * entirely overlaps the existing entry it can just be removed.
		 */
		if (before) {
			map__put(maps_by_address[i]);
			maps_by_address[i] = before;
			map__set_kmap_maps(before, maps);

			if (maps_by_name) {
				map__put(maps_by_name[ni]);
				maps_by_name[ni] = map__get(before);
			}

			/* Maps are still ordered, go to next one. */
			i++;
			if (after) {
				/*
				 * 'before' and 'after' mean 'new' split the
				 * 'pos' mapping and therefore there are no
				 * later mappings.
				 */
				err = __maps__insert_sorted(maps, i, new, after);
				map__put(after);
				check_invariants(maps);
				return err;
			}
			check_invariants(maps);
		} else if (after) {
			/*
			 * 'after' means 'new' split 'pos' and there are no
			 * later mappings.
			 */
			map__put(maps_by_address[i]);
			maps_by_address[i] = map__get(new);
			map__set_kmap_maps(new, maps);

			if (maps_by_name) {
				map__put(maps_by_name[ni]);
				maps_by_name[ni] = map__get(new);
			}

			err = __maps__insert_sorted(maps, i + 1, after, NULL);
			map__put(after);
			check_invariants(maps);
			return err;
		} else {
			struct map *next = NULL;

			if (i + 1 < maps__nr_maps(maps))
				next = maps_by_address[i + 1];

			if (!next || map__start(next) >= map__end(new)) {
				/*
				 * Replace existing mapping and end knowing
				 * there aren't later overlapping or any
				 * mappings.
				 */
				map__put(maps_by_address[i]);
				maps_by_address[i] = map__get(new);
				map__set_kmap_maps(new, maps);

				if (maps_by_name) {
					map__put(maps_by_name[ni]);
					maps_by_name[ni] = map__get(new);
				}

				check_invariants(maps);
				return err;
			}
			__maps__remove(maps, pos);
			check_invariants(maps);
			/*
			 * Maps are ordered but no need to increase `i` as the
			 * later maps were moved down.
			 */
		}
	}
	/* Add the map. */
	err = __maps__insert_sorted(maps, i, new, NULL);
out_err:
	return err;
}

int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err;

	down_write(maps__lock(maps));
	err = __maps__fixup_overlap_and_insert(maps, new);
	up_write(maps__lock(maps));
	return err;
}

/*
 * Clone every map in parent into dest. The fast path bulk-copies when dest is
 * empty; otherwise each clone goes through __maps__insert(). Returns 0 or a
 * negative errno.
 */
int maps__copy_from(struct maps *dest, struct maps *parent)
{
	/* Note, if struct map were immutable then cloning could use ref counts. */
	struct map **parent_maps_by_address;
	int err = 0;
	unsigned int n;

	down_write(maps__lock(dest));
	down_read(maps__lock(parent));

	parent_maps_by_address = maps__maps_by_address(parent);
	n = maps__nr_maps(parent);
	if (maps__nr_maps(dest) == 0) {
		/* No existing mappings so just copy from parent to avoid reallocs in insert. */
		unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated;
		struct map **dest_maps_by_address =
			malloc(nr_maps_allocated * sizeof(struct map *));
		struct map **dest_maps_by_name = NULL;

		if (!dest_maps_by_address)
			err = -ENOMEM;
		else {
			/* by_name allocation failure is tolerated: NULL just disables it. */
			if (maps__maps_by_name(parent)) {
				dest_maps_by_name =
					malloc(nr_maps_allocated * sizeof(struct map *));
			}

			RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address;
			RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name;
			RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated;
		}

		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err) {
					dest_maps_by_address[i] = new;
					map__set_kmap_maps(new, dest);
					if (dest_maps_by_name)
						dest_maps_by_name[i] = map__get(new);
					/* Keep nr_maps consistent for cleanup on later failure. */
					RC_CHK_ACCESS(dest)->nr_maps = i + 1;
				}
			}
			if (err)
				map__put(new);
		}
		maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent));
		if (!err) {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx =
				RC_CHK_ACCESS(parent)->last_search_by_name_idx;
			maps__set_maps_by_name_sorted(dest,
						      dest_maps_by_name &&
						      maps__maps_by_name_sorted(parent));
		} else {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
			maps__set_maps_by_name_sorted(dest, false);
		}
	} else {
		/* Unexpected copying to a maps containing entries. */
		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err)
					err = __maps__insert(dest, new);
			}
			map__put(new);
		}
	}
	check_invariants(dest);

	up_read(maps__lock(parent));
	up_write(maps__lock(dest));
	return err;
}

/* bsearch comparator: does ip fall within [start, end) of the entry's map? */
static int map__addr_cmp(const void *key, const void *entry)
{
	const u64 ip = *(const u64 *)key;
	const struct map *map = *(const struct map * const *)entry;

	if (ip < map__start(map))
		return -1;
	if (ip >= map__end(map))
		return 1;
	return 0;
}

/*
 * Find the map containing ip, returning a new reference or NULL. Sorts the
 * address array first if required (see locking/sorting note).
 */
struct map *maps__find(struct maps *maps, u64 ip)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			struct map **mapp = NULL;
			struct map **maps_by_address = maps__maps_by_address(maps);
			unsigned int nr_maps = maps__nr_maps(maps);

			if (maps_by_address && nr_maps)
				mapp = bsearch(&ip, maps_by_address, nr_maps, sizeof(*mapp),
					       map__addr_cmp);
			if (mapp)
				result = map__get(*mapp);
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return result;
}

static int map__strcmp_name(const void *name, const void *b)
{
	const struct dso *dso = map__dso(*(const struct map **)b);

	return strcmp(name, dso__short_name(dso));
}

/*
 * Find a map whose dso short name equals name, returning a new reference or
 * NULL. Tries the cached last-hit index, then binary search over the by-name
 * array, falling back to a linear scan of the address array if the by-name
 * array cannot be allocated.
 */
struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		unsigned int i;

		down_read(maps__lock(maps));

		/* First check last found entry. */
		i = RC_CHK_ACCESS(maps)->last_search_by_name_idx;
		if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
			struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);

			if (dso && strcmp(dso__short_name(dso), name) == 0) {
				result = map__get(maps__maps_by_name(maps)[i]);
				done = true;
			}
		}

		/* Second search sorted array. */
		if (!done && maps__maps_by_name_sorted(maps)) {
			struct map **mapp =
				bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__strcmp_name);

			if (mapp) {
				result = map__get(*mapp);
				i = mapp - maps__maps_by_name(maps);
				RC_CHK_ACCESS(maps)->last_search_by_name_idx = i;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done) {
			/* Sort and retry binary search. */
			if (maps__sort_by_name(maps)) {
				/*
				 * Memory allocation failed do linear search
				 * through address sorted maps.
				 */
				struct map **maps_by_address;
				unsigned int n;

				down_read(maps__lock(maps));
				maps_by_address =  maps__maps_by_address(maps);
				n = maps__nr_maps(maps);
				for (i = 0; i < n; i++) {
					struct map *pos = maps_by_address[i];
					struct dso *dso = map__dso(pos);

					if (dso && strcmp(dso__short_name(dso), name) == 0) {
						result = map__get(pos);
						break;
					}
				}
				up_read(maps__lock(maps));
				done = true;
			}
		}
	}
	return result;
}

/* Return (a reference to) the map immediately after map in address order. */
struct map *maps__find_next_entry(struct maps *maps, struct map *map)
{
	unsigned int i;
	struct map *result = NULL;

	down_read(maps__lock(maps));
	/* Re-check after reacquiring: another thread may have unsorted it. */
	while (!maps__maps_by_address_sorted(maps)) {
		up_read(maps__lock(maps));
		maps__sort_by_address(maps);
		down_read(maps__lock(maps));
	}
	i = maps__by_address_index(maps, map);
	if (++i < maps__nr_maps(maps))
		result = map__get(maps__maps_by_address(maps)[i]);

	up_read(maps__lock(maps));
	return result;
}

/*
 * Fix unset/overlapping map ends: clamp each map's end to the next map's
 * start, and extend the final map's unset end to the top of the address space.
 * Clears ends_broken.
 */
void maps__fixup_end(struct maps *maps)
{
	struct map **maps_by_address;
	unsigned int n;

	down_write(maps__lock(maps));
	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	maps_by_address = maps__maps_by_address(maps);
	n = maps__nr_maps(maps);
	for (unsigned int i = 1; i < n; i++) {
		struct map *prev = maps_by_address[i - 1];
		struct map *curr = maps_by_address[i];

		if (!map__end(prev) || map__end(prev) > map__start(curr))
			map__set_end(prev, map__start(curr));
	}

	/*
	 * We still haven't the actual symbols, so guess the
	 * last map final address.
	 */
	if (n > 0 && !map__end(maps_by_address[n - 1]))
		map__set_end(maps_by_address[n - 1], ~0ULL);

	RC_CHK_ACCESS(maps)->ends_broken = false;
	check_invariants(maps);

	up_write(maps__lock(maps));
}

/*
 * Merges map into maps by splitting the new map within the existing map
 * regions.
 */
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	unsigned int first_after_, kmaps__nr_maps;
	struct map **kmaps_maps_by_address;
	struct map **merged_maps_by_address;
	unsigned int merged_nr_maps_allocated;

	/* First try under a read lock. */
	while (true) {
		down_read(maps__lock(kmaps));
		if (maps__maps_by_address_sorted(kmaps))
			break;

		up_read(maps__lock(kmaps));

		/* First after binary search requires sorted maps. Sort and try again. */
		maps__sort_by_address(kmaps);
	}
	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);

	if (first_after_ >= maps__nr_maps(kmaps) ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		up_read(maps__lock(kmaps));
		return maps__insert(kmaps, new_map);
	}
	up_read(maps__lock(kmaps));

	/* Plain insert with a read-lock failed, try again now with the write lock. */
	down_write(maps__lock(kmaps));
	if (!maps__maps_by_address_sorted(kmaps))
		__maps__sort_by_address(kmaps);

	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);
	kmaps__nr_maps = maps__nr_maps(kmaps);

	if (first_after_ >= kmaps__nr_maps ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices.
*/ 1273 int ret = __maps__insert(kmaps, new_map); 1274 1275 check_invariants(kmaps); 1276 up_write(maps__lock(kmaps)); 1277 return ret; 1278 } 1279 /* Array to merge into, possibly 1 more for the sake of new_map. */ 1280 merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated; 1281 if (kmaps__nr_maps + 1 == merged_nr_maps_allocated) 1282 merged_nr_maps_allocated++; 1283 1284 merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address)); 1285 if (!merged_maps_by_address) { 1286 up_write(maps__lock(kmaps)); 1287 return -ENOMEM; 1288 } 1289 maps__set_maps_by_address(kmaps, merged_maps_by_address); 1290 maps__set_maps_by_address_sorted(kmaps, true); 1291 __maps__free_maps_by_name(kmaps); 1292 maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated); 1293 1294 /* Copy entries before the new_map that can't overlap. */ 1295 for (unsigned int i = 0; i < first_after_; i++) 1296 merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]); 1297 1298 maps__set_nr_maps(kmaps, first_after_); 1299 1300 /* Add the new map, it will be split when the later overlapping mappings are added. */ 1301 __maps__insert(kmaps, new_map); 1302 1303 /* Insert mappings after new_map, splitting new_map in the process. */ 1304 for (unsigned int i = first_after_; i < kmaps__nr_maps; i++) 1305 __maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]); 1306 1307 /* Copy the maps from merged into kmaps. */ 1308 for (unsigned int i = 0; i < kmaps__nr_maps; i++) 1309 map__zput(kmaps_maps_by_address[i]); 1310 1311 free(kmaps_maps_by_address); 1312 check_invariants(kmaps); 1313 up_write(maps__lock(kmaps)); 1314 return 0; 1315 } 1316 1317 void maps__load_first(struct maps *maps) 1318 { 1319 down_read(maps__lock(maps)); 1320 1321 if (maps__nr_maps(maps) > 0) 1322 map__load(maps__maps_by_address(maps)[0]); 1323 1324 up_read(maps__lock(maps)); 1325 } 1326