1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <stdlib.h> 4 #include <linux/zalloc.h> 5 #include "debug.h" 6 #include "dso.h" 7 #include "map.h" 8 #include "maps.h" 9 #include "rwsem.h" 10 #include "thread.h" 11 #include "ui/ui.h" 12 #include "unwind.h" 13 #include "unwind-libdw.h" 14 #include <internal/rc_check.h> 15 16 /* 17 * Locking/sorting note: 18 * 19 * Sorting is done with the write lock, iteration and binary searching happens 20 * under the read lock requiring being sorted. There is a race between sorting 21 * releasing the write lock and acquiring the read lock for iteration/searching 22 * where another thread could insert and break the sorting of the maps. In 23 * practice inserting maps should be rare meaning that the race shouldn't lead 24 * to live lock. Removal of maps doesn't break being sorted. 25 */ 26 27 DECLARE_RC_STRUCT(maps) { 28 struct rw_semaphore lock; 29 /** 30 * @maps_by_address: array of maps sorted by their starting address if 31 * maps_by_address_sorted is true. 32 */ 33 struct map **maps_by_address; 34 /** 35 * @maps_by_name: optional array of maps sorted by their dso name if 36 * maps_by_name_sorted is true. 37 */ 38 struct map **maps_by_name; 39 struct machine *machine; 40 #ifdef HAVE_LIBUNWIND_SUPPORT 41 void *addr_space; 42 const struct unwind_libunwind_ops *unwind_libunwind_ops; 43 #endif 44 #ifdef HAVE_LIBDW_SUPPORT 45 void *libdw_addr_space_dwfl; 46 #endif 47 refcount_t refcnt; 48 /** 49 * @nr_maps: number of maps_by_address, and possibly maps_by_name, 50 * entries that contain maps. 51 */ 52 unsigned int nr_maps; 53 /** 54 * @nr_maps_allocated: number of entries in maps_by_address and possibly 55 * maps_by_name. 56 */ 57 unsigned int nr_maps_allocated; 58 /** 59 * @last_search_by_name_idx: cache of last found by name entry's index 60 * as frequent searches for the same dso name are common. 61 */ 62 unsigned int last_search_by_name_idx; 63 /** @maps_by_address_sorted: is maps_by_address sorted. */ 64 bool maps_by_address_sorted; 65 /** @maps_by_name_sorted: is maps_by_name sorted. */ 66 bool maps_by_name_sorted; 67 /** @ends_broken: does the map contain a map where end values are unset/unsorted? */ 68 bool ends_broken; 69 }; 70 71 static void check_invariants(const struct maps *maps __maybe_unused) 72 { 73 #ifndef NDEBUG 74 assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated); 75 for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) { 76 struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i]; 77 78 /* Check map is well-formed. */ 79 assert(map__end(map) == 0 || map__start(map) <= map__end(map)); 80 /* Expect at least 1 reference count. */ 81 assert(refcount_read(map__refcnt(map)) > 0); 82 83 if (map__dso(map) && dso__kernel(map__dso(map))) 84 assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps)); 85 86 if (i > 0) { 87 struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1]; 88 89 /* If addresses are sorted... */ 90 if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) { 91 /* Maps should be in start address order. */ 92 assert(map__start(prev) <= map__start(map)); 93 /* 94 * If the ends of maps aren't broken (during 95 * construction) then they should be ordered 96 * too. 97 */ 98 if (!RC_CHK_ACCESS(maps)->ends_broken) { 99 assert(map__end(prev) <= map__end(map)); 100 assert(map__end(prev) <= map__start(map) || 101 map__start(prev) == map__start(map)); 102 } 103 } 104 } 105 } 106 if (RC_CHK_ACCESS(maps)->maps_by_name) { 107 for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) { 108 struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i]; 109 110 /* 111 * Maps by name maps should be in maps_by_address, so 112 * the reference count should be higher. 113 */ 114 assert(refcount_read(map__refcnt(map)) > 1); 115 } 116 } 117 #endif 118 } 119 120 static struct map **maps__maps_by_address(const struct maps *maps) 121 { 122 return RC_CHK_ACCESS(maps)->maps_by_address; 123 } 124 125 static void maps__set_maps_by_address(struct maps *maps, struct map **new) 126 { 127 RC_CHK_ACCESS(maps)->maps_by_address = new; 128 129 } 130 131 static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated) 132 { 133 RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated; 134 } 135 136 static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps) 137 { 138 RC_CHK_ACCESS(maps)->nr_maps = nr_maps; 139 } 140 141 /* Not in the header, to aid reference counting. */ 142 static struct map **maps__maps_by_name(const struct maps *maps) 143 { 144 return RC_CHK_ACCESS(maps)->maps_by_name; 145 146 } 147 148 static void maps__set_maps_by_name(struct maps *maps, struct map **new) 149 { 150 RC_CHK_ACCESS(maps)->maps_by_name = new; 151 152 } 153 154 static bool maps__maps_by_address_sorted(const struct maps *maps) 155 { 156 return RC_CHK_ACCESS(maps)->maps_by_address_sorted; 157 } 158 159 static void maps__set_maps_by_address_sorted(struct maps *maps, bool value) 160 { 161 RC_CHK_ACCESS(maps)->maps_by_address_sorted = value; 162 } 163 164 static bool maps__maps_by_name_sorted(const struct maps *maps) 165 { 166 return RC_CHK_ACCESS(maps)->maps_by_name_sorted; 167 } 168 169 static void maps__set_maps_by_name_sorted(struct maps *maps, bool value) 170 { 171 RC_CHK_ACCESS(maps)->maps_by_name_sorted = value; 172 } 173 174 struct machine *maps__machine(const struct maps *maps) 175 { 176 return RC_CHK_ACCESS(maps)->machine; 177 } 178 179 unsigned int maps__nr_maps(const struct maps *maps) 180 { 181 return RC_CHK_ACCESS(maps)->nr_maps; 182 } 183 184 refcount_t *maps__refcnt(struct maps *maps) 185 { 186 return &RC_CHK_ACCESS(maps)->refcnt; 187 } 188 189 #ifdef HAVE_LIBUNWIND_SUPPORT 190 void *maps__addr_space(const struct maps *maps) 191 { 192 return RC_CHK_ACCESS(maps)->addr_space; 193 } 194 195 void maps__set_addr_space(struct maps *maps, void *addr_space) 196 { 197 RC_CHK_ACCESS(maps)->addr_space = addr_space; 198 } 199 200 const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps) 201 { 202 return RC_CHK_ACCESS(maps)->unwind_libunwind_ops; 203 } 204 205 void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops) 206 { 207 RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops; 208 } 209 #endif 210 #ifdef HAVE_LIBDW_SUPPORT 211 void *maps__libdw_addr_space_dwfl(const struct maps *maps) 212 { 213 return RC_CHK_ACCESS(maps)->libdw_addr_space_dwfl; 214 } 215 216 void maps__set_libdw_addr_space_dwfl(struct maps *maps, void *dwfl) 217 { 218 RC_CHK_ACCESS(maps)->libdw_addr_space_dwfl = dwfl; 219 } 220 #endif 221 222 static struct rw_semaphore *maps__lock(struct maps *maps) 223 { 224 return &RC_CHK_ACCESS(maps)->lock; 225 } 226 227 static void maps__init(struct maps *maps, struct machine *machine) 228 { 229 init_rwsem(maps__lock(maps)); 230 RC_CHK_ACCESS(maps)->maps_by_address = NULL; 231 RC_CHK_ACCESS(maps)->maps_by_name = NULL; 232 RC_CHK_ACCESS(maps)->machine = machine; 233 #ifdef HAVE_LIBUNWIND_SUPPORT 234 RC_CHK_ACCESS(maps)->addr_space = NULL; 235 RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL; 236 #endif 237 #ifdef HAVE_LIBDW_SUPPORT 238 RC_CHK_ACCESS(maps)->libdw_addr_space_dwfl = NULL; 239 #endif 240 refcount_set(maps__refcnt(maps), 1); 241 RC_CHK_ACCESS(maps)->nr_maps = 0; 242 RC_CHK_ACCESS(maps)->nr_maps_allocated = 0; 243 RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0; 244 RC_CHK_ACCESS(maps)->maps_by_address_sorted = true; 245 RC_CHK_ACCESS(maps)->maps_by_name_sorted = false; 246 } 247 248 static void maps__exit(struct maps *maps) 249 { 250 struct map **maps_by_address = maps__maps_by_address(maps); 251 struct map **maps_by_name = maps__maps_by_name(maps); 252 253 for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { 254 map__zput(maps_by_address[i]); 255 if (maps_by_name) 256 map__zput(maps_by_name[i]); 257 } 258 zfree(&maps_by_address); 259 zfree(&maps_by_name); 260 unwind__finish_access(maps); 261 #ifdef HAVE_LIBDW_SUPPORT 262 libdw__invalidate_dwfl(maps, maps__libdw_addr_space_dwfl(maps)); 263 #endif 264 } 265 266 struct maps *maps__new(struct machine *machine) 267 { 268 struct maps *result; 269 RC_STRUCT(maps) *maps = zalloc(sizeof(*maps)); 270 271 if (ADD_RC_CHK(result, maps)) 272 maps__init(result, machine); 273 274 return result; 275 } 276 277 static void maps__delete(struct maps *maps) 278 { 279 maps__exit(maps); 280 RC_CHK_FREE(maps); 281 } 282 283 struct maps *maps__get(struct maps *maps) 284 { 285 struct maps *result; 286 287 if (RC_CHK_GET(result, maps)) 288 refcount_inc(maps__refcnt(maps)); 289 290 return result; 291 } 292 293 void maps__put(struct maps *maps) 294 { 295 if (maps && refcount_dec_and_test(maps__refcnt(maps))) 296 maps__delete(maps); 297 else 298 RC_CHK_PUT(maps); 299 } 300 301 static void __maps__free_maps_by_name(struct maps *maps) 302 { 303 if (!maps__maps_by_name(maps)) 304 return; 305 306 /* 307 * Free everything to try to do it from the rbtree in the next search 308 */ 309 for (unsigned int i = 0; i < maps__nr_maps(maps); i++) 310 map__put(maps__maps_by_name(maps)[i]); 311 312 zfree(&RC_CHK_ACCESS(maps)->maps_by_name); 313 314 /* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false */ 315 maps__set_maps_by_name_sorted(maps, false); 316 } 317 318 static int map__start_cmp(const void *a, const void *b) 319 { 320 const struct map *map_a = *(const struct map * const *)a; 321 const struct map *map_b = *(const struct map * const *)b; 322 u64 map_a_start = map__start(map_a); 323 u64 map_b_start = map__start(map_b); 324 325 if (map_a_start == map_b_start) { 326 u64 map_a_end = map__end(map_a); 327 u64 map_b_end = map__end(map_b); 328 329 if (map_a_end == map_b_end) { 330 /* Ensure maps with the same addresses have a fixed order. */ 331 if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b)) 332 return 0; 333 return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b) 334 ? 1 : -1; 335 } 336 return map_a_end > map_b_end ? 1 : -1; 337 } 338 return map_a_start > map_b_start ? 1 : -1; 339 } 340 341 static void __maps__sort_by_address(struct maps *maps) 342 { 343 if (maps__maps_by_address_sorted(maps)) 344 return; 345 346 qsort(maps__maps_by_address(maps), 347 maps__nr_maps(maps), 348 sizeof(struct map *), 349 map__start_cmp); 350 maps__set_maps_by_address_sorted(maps, true); 351 } 352 353 static void maps__sort_by_address(struct maps *maps) 354 { 355 down_write(maps__lock(maps)); 356 __maps__sort_by_address(maps); 357 up_write(maps__lock(maps)); 358 } 359 360 static int map__strcmp(const void *a, const void *b) 361 { 362 const struct map *map_a = *(const struct map * const *)a; 363 const struct map *map_b = *(const struct map * const *)b; 364 const struct dso *dso_a = map__dso(map_a); 365 const struct dso *dso_b = map__dso(map_b); 366 int ret = strcmp(dso__short_name(dso_a), dso__short_name(dso_b)); 367 368 if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) { 369 /* Ensure distinct but name equal maps have an order. */ 370 return map__start_cmp(a, b); 371 } 372 return ret; 373 } 374 375 static int maps__sort_by_name(struct maps *maps) 376 { 377 int err = 0; 378 379 down_write(maps__lock(maps)); 380 if (!maps__maps_by_name_sorted(maps)) { 381 struct map **maps_by_name = maps__maps_by_name(maps); 382 383 if (!maps_by_name) { 384 maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated * 385 sizeof(*maps_by_name)); 386 if (!maps_by_name) 387 err = -ENOMEM; 388 else { 389 struct map **maps_by_address = maps__maps_by_address(maps); 390 unsigned int n = maps__nr_maps(maps); 391 392 maps__set_maps_by_name(maps, maps_by_name); 393 for (unsigned int i = 0; i < n; i++) 394 maps_by_name[i] = map__get(maps_by_address[i]); 395 } 396 } 397 if (!err) { 398 qsort(maps_by_name, 399 maps__nr_maps(maps), 400 sizeof(struct map *), 401 map__strcmp); 402 maps__set_maps_by_name_sorted(maps, true); 403 } 404 } 405 check_invariants(maps); 406 up_write(maps__lock(maps)); 407 return err; 408 } 409 410 static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map) 411 { 412 struct map **maps_by_address = maps__maps_by_address(maps); 413 414 if (maps__maps_by_address_sorted(maps)) { 415 struct map **mapp = 416 bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps), 417 sizeof(*mapp), map__start_cmp); 418 419 if (mapp) 420 return mapp - maps_by_address; 421 } else { 422 for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { 423 if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map)) 424 return i; 425 } 426 } 427 pr_err("Map missing from maps"); 428 return -1; 429 } 430 431 static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map) 432 { 433 struct map **maps_by_name = maps__maps_by_name(maps); 434 435 if (maps__maps_by_name_sorted(maps)) { 436 struct map **mapp = 437 bsearch(&map, maps_by_name, maps__nr_maps(maps), 438 sizeof(*mapp), map__strcmp); 439 440 if (mapp) 441 return mapp - maps_by_name; 442 } else { 443 for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { 444 if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map)) 445 return i; 446 } 447 } 448 pr_err("Map missing from maps"); 449 return -1; 450 } 451 452 static void map__set_kmap_maps(struct map *map, struct maps *maps) 453 { 454 struct dso *dso; 455 456 if (map == NULL) 457 return; 458 459 dso = map__dso(map); 460 461 if (dso && dso__kernel(dso)) { 462 struct kmap *kmap = map__kmap(map); 463 464 if (kmap) 465 kmap->kmaps = maps; 466 else 467 pr_err("Internal error: kernel dso with non kernel map\n"); 468 } 469 } 470 471 static int __maps__insert(struct maps *maps, struct map *new) 472 { 473 struct map **maps_by_address = maps__maps_by_address(maps); 474 struct map **maps_by_name = maps__maps_by_name(maps); 475 unsigned int nr_maps = maps__nr_maps(maps); 476 unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated; 477 478 if (nr_maps + 1 > nr_allocate) { 479 nr_allocate = !nr_allocate ? 32 : nr_allocate * 2; 480 481 maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new)); 482 if (!maps_by_address) 483 return -ENOMEM; 484 485 maps__set_maps_by_address(maps, maps_by_address); 486 if (maps_by_name) { 487 maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new)); 488 if (!maps_by_name) { 489 /* 490 * If by name fails, just disable by name and it will 491 * recompute next time it is required. 492 */ 493 __maps__free_maps_by_name(maps); 494 } 495 maps__set_maps_by_name(maps, maps_by_name); 496 } 497 RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate; 498 } 499 /* Insert the value at the end. */ 500 maps_by_address[nr_maps] = map__get(new); 501 map__set_kmap_maps(new, maps); 502 if (maps_by_name) 503 maps_by_name[nr_maps] = map__get(new); 504 505 nr_maps++; 506 RC_CHK_ACCESS(maps)->nr_maps = nr_maps; 507 508 /* 509 * Recompute if things are sorted. If things are inserted in a sorted 510 * manner, for example by processing /proc/pid/maps, then no 511 * sorting/resorting will be necessary. 512 */ 513 if (nr_maps == 1) { 514 /* If there's just 1 entry then maps are sorted. */ 515 maps__set_maps_by_address_sorted(maps, true); 516 maps__set_maps_by_name_sorted(maps, maps_by_name != NULL); 517 } else { 518 /* Sorted if maps were already sorted and this map starts after the last one. */ 519 maps__set_maps_by_address_sorted(maps, 520 maps__maps_by_address_sorted(maps) && 521 map__end(maps_by_address[nr_maps - 2]) <= map__start(new)); 522 maps__set_maps_by_name_sorted(maps, false); 523 } 524 if (map__end(new) < map__start(new)) 525 RC_CHK_ACCESS(maps)->ends_broken = true; 526 527 return 0; 528 } 529 530 int maps__insert(struct maps *maps, struct map *map) 531 { 532 int ret; 533 534 down_write(maps__lock(maps)); 535 ret = __maps__insert(maps, map); 536 check_invariants(maps); 537 up_write(maps__lock(maps)); 538 return ret; 539 } 540 541 static void __maps__remove(struct maps *maps, struct map *map) 542 { 543 struct map **maps_by_address = maps__maps_by_address(maps); 544 struct map **maps_by_name = maps__maps_by_name(maps); 545 unsigned int nr_maps = maps__nr_maps(maps); 546 unsigned int address_idx; 547 548 /* Slide later mappings over the one to remove */ 549 address_idx = maps__by_address_index(maps, map); 550 map__put(maps_by_address[address_idx]); 551 memmove(&maps_by_address[address_idx], 552 &maps_by_address[address_idx + 1], 553 (nr_maps - address_idx - 1) * sizeof(*maps_by_address)); 554 555 if (maps_by_name) { 556 unsigned int name_idx = maps__by_name_index(maps, map); 557 558 map__put(maps_by_name[name_idx]); 559 memmove(&maps_by_name[name_idx], 560 &maps_by_name[name_idx + 1], 561 (nr_maps - name_idx - 1) * sizeof(*maps_by_name)); 562 } 563 564 --RC_CHK_ACCESS(maps)->nr_maps; 565 } 566 567 void maps__remove(struct maps *maps, struct map *map) 568 { 569 down_write(maps__lock(maps)); 570 __maps__remove(maps, map); 571 check_invariants(maps); 572 up_write(maps__lock(maps)); 573 #ifdef HAVE_LIBDW_SUPPORT 574 libdw__invalidate_dwfl(maps, maps__libdw_addr_space_dwfl(maps)); 575 #endif 576 } 577 578 bool maps__empty(struct maps *maps) 579 { 580 bool res; 581 582 down_read(maps__lock(maps)); 583 res = maps__nr_maps(maps) == 0; 584 up_read(maps__lock(maps)); 585 586 return res; 587 } 588 589 bool maps__equal(struct maps *a, struct maps *b) 590 { 591 return RC_CHK_EQUAL(a, b); 592 } 593 594 int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data) 595 { 596 bool done = false; 597 int ret = 0; 598 599 /* See locking/sorting note. */ 600 while (!done) { 601 down_read(maps__lock(maps)); 602 if (maps__maps_by_address_sorted(maps)) { 603 /* 604 * maps__for_each_map callbacks may buggily/unsafely 605 * insert into maps_by_address. Deliberately reload 606 * maps__nr_maps and maps_by_address on each iteration 607 * to avoid using memory freed by maps__insert growing 608 * the array - this may cause maps to be skipped or 609 * repeated. 610 */ 611 for (unsigned int i = 0; i < maps__nr_maps(maps); i++) { 612 struct map **maps_by_address = maps__maps_by_address(maps); 613 struct map *map = maps_by_address[i]; 614 615 ret = cb(map, data); 616 if (ret) 617 break; 618 } 619 done = true; 620 } 621 up_read(maps__lock(maps)); 622 if (!done) 623 maps__sort_by_address(maps); 624 } 625 return ret; 626 } 627 628 void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data) 629 { 630 struct map **maps_by_address; 631 bool removed = false; 632 633 down_write(maps__lock(maps)); 634 635 maps_by_address = maps__maps_by_address(maps); 636 for (unsigned int i = 0; i < maps__nr_maps(maps);) { 637 if (cb(maps_by_address[i], data)) { 638 __maps__remove(maps, maps_by_address[i]); 639 removed = true; 640 } else { 641 i++; 642 } 643 } 644 check_invariants(maps); 645 up_write(maps__lock(maps)); 646 if (removed) { 647 #ifdef HAVE_LIBDW_SUPPORT 648 libdw__invalidate_dwfl(maps, maps__libdw_addr_space_dwfl(maps)); 649 #endif 650 } 651 } 652 653 struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp) 654 { 655 struct map *map = maps__find(maps, addr); 656 struct symbol *result = NULL; 657 658 /* Ensure map is loaded before using map->map_ip */ 659 if (map != NULL && map__load(map) >= 0) 660 result = map__find_symbol(map, map__map_ip(map, addr)); 661 662 if (mapp) 663 *mapp = map; 664 else 665 map__put(map); 666 667 return result; 668 } 669 670 struct maps__find_symbol_by_name_args { 671 struct map **mapp; 672 const char *name; 673 struct symbol *sym; 674 }; 675 676 static int maps__find_symbol_by_name_cb(struct map *map, void *data) 677 { 678 struct maps__find_symbol_by_name_args *args = data; 679 680 args->sym = map__find_symbol_by_name(map, args->name); 681 if (!args->sym) 682 return 0; 683 684 if (!map__contains_symbol(map, args->sym)) { 685 args->sym = NULL; 686 return 0; 687 } 688 689 if (args->mapp != NULL) 690 *args->mapp = map__get(map); 691 return 1; 692 } 693 694 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp) 695 { 696 struct maps__find_symbol_by_name_args args = { 697 .mapp = mapp, 698 .name = name, 699 .sym = NULL, 700 }; 701 702 maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args); 703 return args.sym; 704 } 705 706 int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams) 707 { 708 if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) { 709 if (maps == NULL) 710 return -1; 711 ams->ms.map = maps__find(maps, ams->addr); 712 if (ams->ms.map == NULL) 713 return -1; 714 } 715 716 ams->al_addr = map__map_ip(ams->ms.map, ams->addr); 717 ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr); 718 719 return ams->ms.sym ? 0 : -1; 720 } 721 722 struct maps__fprintf_args { 723 FILE *fp; 724 size_t printed; 725 }; 726 727 static int maps__fprintf_cb(struct map *map, void *data) 728 { 729 struct maps__fprintf_args *args = data; 730 731 args->printed += fprintf(args->fp, "Map:"); 732 args->printed += map__fprintf(map, args->fp); 733 if (verbose > 2) { 734 args->printed += dso__fprintf(map__dso(map), args->fp); 735 args->printed += fprintf(args->fp, "--\n"); 736 } 737 return 0; 738 } 739 740 size_t maps__fprintf(struct maps *maps, FILE *fp) 741 { 742 struct maps__fprintf_args args = { 743 .fp = fp, 744 .printed = 0, 745 }; 746 747 maps__for_each_map(maps, maps__fprintf_cb, &args); 748 749 return args.printed; 750 } 751 752 /* 753 * Find first map where end > map->start. 754 * Same as find_vma() in kernel. 755 */ 756 static unsigned int first_ending_after(struct maps *maps, const struct map *map) 757 { 758 struct map **maps_by_address = maps__maps_by_address(maps); 759 int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1; 760 761 assert(maps__maps_by_address_sorted(maps)); 762 if (low <= high && map__end(maps_by_address[0]) > map__start(map)) 763 return 0; 764 765 while (low <= high) { 766 int mid = (low + high) / 2; 767 struct map *pos = maps_by_address[mid]; 768 769 if (map__end(pos) > map__start(map)) { 770 first = mid; 771 if (map__start(pos) <= map__start(map)) { 772 /* Entry overlaps map. */ 773 break; 774 } 775 high = mid - 1; 776 } else 777 low = mid + 1; 778 } 779 return first; 780 } 781 782 static int __maps__insert_sorted(struct maps *maps, unsigned int first_after_index, 783 struct map *new1, struct map *new2) 784 { 785 struct map **maps_by_address = maps__maps_by_address(maps); 786 struct map **maps_by_name = maps__maps_by_name(maps); 787 unsigned int nr_maps = maps__nr_maps(maps); 788 unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated; 789 unsigned int to_add = new2 ? 2 : 1; 790 791 assert(maps__maps_by_address_sorted(maps)); 792 assert(first_after_index == nr_maps || 793 map__end(new1) <= map__start(maps_by_address[first_after_index])); 794 assert(!new2 || map__end(new1) <= map__start(new2)); 795 assert(first_after_index == nr_maps || !new2 || 796 map__end(new2) <= map__start(maps_by_address[first_after_index])); 797 798 if (nr_maps + to_add > nr_allocate) { 799 nr_allocate = !nr_allocate ? 32 : nr_allocate * 2; 800 801 maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new1)); 802 if (!maps_by_address) 803 return -ENOMEM; 804 805 maps__set_maps_by_address(maps, maps_by_address); 806 if (maps_by_name) { 807 maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new1)); 808 if (!maps_by_name) { 809 /* 810 * If by name fails, just disable by name and it will 811 * recompute next time it is required. 812 */ 813 __maps__free_maps_by_name(maps); 814 } 815 maps__set_maps_by_name(maps, maps_by_name); 816 } 817 RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate; 818 } 819 memmove(&maps_by_address[first_after_index+to_add], 820 &maps_by_address[first_after_index], 821 (nr_maps - first_after_index) * sizeof(new1)); 822 maps_by_address[first_after_index] = map__get(new1); 823 if (maps_by_name) 824 maps_by_name[nr_maps] = map__get(new1); 825 if (new2) { 826 maps_by_address[first_after_index + 1] = map__get(new2); 827 if (maps_by_name) 828 maps_by_name[nr_maps + 1] = map__get(new2); 829 } 830 RC_CHK_ACCESS(maps)->nr_maps = nr_maps + to_add; 831 maps__set_maps_by_name_sorted(maps, false); 832 map__set_kmap_maps(new1, maps); 833 map__set_kmap_maps(new2, maps); 834 835 check_invariants(maps); 836 return 0; 837 } 838 839 /* 840 * Adds new to maps, if new overlaps existing entries then the existing maps are 841 * adjusted or removed so that new fits without overlapping any entries. 842 */ 843 static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) 844 { 845 int err = 0; 846 FILE *fp = debug_file(); 847 unsigned int i, ni = INT_MAX; // Some gcc complain, but depends on maps_by_name... 848 849 if (!maps__maps_by_address_sorted(maps)) 850 __maps__sort_by_address(maps); 851 852 /* 853 * Iterate through entries where the end of the existing entry is 854 * greater-than the new map's start. 855 */ 856 for (i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) { 857 struct map **maps_by_address = maps__maps_by_address(maps); 858 struct map **maps_by_name = maps__maps_by_name(maps); 859 struct map *pos = maps_by_address[i]; 860 struct map *before = NULL, *after = NULL; 861 862 /* 863 * Stop if current map starts after map->end. 864 * Maps are ordered by start: next will not overlap for sure. 865 */ 866 if (map__start(pos) >= map__end(new)) 867 break; 868 869 if (use_browser) { 870 pr_debug("overlapping maps in %s (disable tui for more info)\n", 871 dso__name(map__dso(new))); 872 } else if (verbose >= 2) { 873 pr_debug("overlapping maps:\n"); 874 map__fprintf(new, fp); 875 map__fprintf(pos, fp); 876 } 877 878 if (maps_by_name) 879 ni = maps__by_name_index(maps, pos); 880 881 /* 882 * Now check if we need to create new maps for areas not 883 * overlapped by the new map: 884 */ 885 if (map__start(new) > map__start(pos)) { 886 /* Map starts within existing map. Need to shorten the existing map. */ 887 before = map__clone(pos); 888 889 if (before == NULL) { 890 err = -ENOMEM; 891 goto out_err; 892 } 893 map__set_end(before, map__start(new)); 894 895 if (verbose >= 2 && !use_browser) 896 map__fprintf(before, fp); 897 } 898 if (map__end(new) < map__end(pos)) { 899 /* The new map isn't as long as the existing map. */ 900 after = map__clone(pos); 901 902 if (after == NULL) { 903 map__zput(before); 904 err = -ENOMEM; 905 goto out_err; 906 } 907 908 map__set_start(after, map__end(new)); 909 map__add_pgoff(after, map__end(new) - map__start(pos)); 910 assert(map__map_ip(pos, map__end(new)) == 911 map__map_ip(after, map__end(new))); 912 913 if (verbose >= 2 && !use_browser) 914 map__fprintf(after, fp); 915 } 916 /* 917 * If adding one entry, for `before` or `after`, we can replace 918 * the existing entry. If both `before` and `after` are 919 * necessary than an insert is needed. If the existing entry 920 * entirely overlaps the existing entry it can just be removed. 921 */ 922 if (before) { 923 map__put(maps_by_address[i]); 924 maps_by_address[i] = before; 925 map__set_kmap_maps(before, maps); 926 927 if (maps_by_name) { 928 map__put(maps_by_name[ni]); 929 maps_by_name[ni] = map__get(before); 930 } 931 932 /* Maps are still ordered, go to next one. */ 933 i++; 934 if (after) { 935 /* 936 * 'before' and 'after' mean 'new' split the 937 * 'pos' mapping and therefore there are no 938 * later mappings. 939 */ 940 err = __maps__insert_sorted(maps, i, new, after); 941 map__put(after); 942 check_invariants(maps); 943 return err; 944 } 945 check_invariants(maps); 946 } else if (after) { 947 /* 948 * 'after' means 'new' split 'pos' and there are no 949 * later mappings. 950 */ 951 map__put(maps_by_address[i]); 952 maps_by_address[i] = map__get(new); 953 map__set_kmap_maps(new, maps); 954 955 if (maps_by_name) { 956 map__put(maps_by_name[ni]); 957 maps_by_name[ni] = map__get(new); 958 } 959 960 err = __maps__insert_sorted(maps, i + 1, after, NULL); 961 map__put(after); 962 check_invariants(maps); 963 return err; 964 } else { 965 struct map *next = NULL; 966 unsigned int nr_maps = maps__nr_maps(maps); 967 968 if (i + 1 < nr_maps) 969 next = maps_by_address[i + 1]; 970 971 if (!next || map__start(next) >= map__end(new)) { 972 /* 973 * Replace existing mapping and end knowing 974 * there aren't later overlapping or any 975 * mappings. 976 */ 977 map__put(maps_by_address[i]); 978 maps_by_address[i] = map__get(new); 979 map__set_kmap_maps(new, maps); 980 981 if (maps_by_name) { 982 map__put(maps_by_name[ni]); 983 maps_by_name[ni] = map__get(new); 984 } 985 986 check_invariants(maps); 987 return err; 988 } 989 /* 990 * pos fully covers the previous mapping so remove 991 * it. The following is an inlined version of 992 * maps__remove that reuses the already computed 993 * indices. 994 */ 995 map__put(maps_by_address[i]); 996 memmove(&maps_by_address[i], 997 &maps_by_address[i + 1], 998 (nr_maps - i - 1) * sizeof(*maps_by_address)); 999 1000 if (maps_by_name) { 1001 map__put(maps_by_name[ni]); 1002 memmove(&maps_by_name[ni], 1003 &maps_by_name[ni + 1], 1004 (nr_maps - ni - 1) * sizeof(*maps_by_name)); 1005 } 1006 --RC_CHK_ACCESS(maps)->nr_maps; 1007 check_invariants(maps); 1008 /* 1009 * Maps are ordered but no need to increase `i` as the 1010 * later maps were moved down. 1011 */ 1012 } 1013 } 1014 /* Add the map. */ 1015 err = __maps__insert_sorted(maps, i, new, NULL); 1016 out_err: 1017 return err; 1018 } 1019 1020 int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) 1021 { 1022 int err; 1023 1024 down_write(maps__lock(maps)); 1025 err = __maps__fixup_overlap_and_insert(maps, new); 1026 up_write(maps__lock(maps)); 1027 return err; 1028 } 1029 1030 int maps__copy_from(struct maps *dest, struct maps *parent) 1031 { 1032 /* Note, if struct map were immutable then cloning could use ref counts. */ 1033 struct map **parent_maps_by_address; 1034 int err = 0; 1035 unsigned int n; 1036 1037 down_write(maps__lock(dest)); 1038 down_read(maps__lock(parent)); 1039 1040 parent_maps_by_address = maps__maps_by_address(parent); 1041 n = maps__nr_maps(parent); 1042 if (maps__nr_maps(dest) == 0) { 1043 /* No existing mappings so just copy from parent to avoid reallocs in insert. */ 1044 unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated; 1045 struct map **dest_maps_by_address = 1046 malloc(nr_maps_allocated * sizeof(struct map *)); 1047 struct map **dest_maps_by_name = NULL; 1048 1049 if (!dest_maps_by_address) 1050 err = -ENOMEM; 1051 else { 1052 if (maps__maps_by_name(parent)) { 1053 dest_maps_by_name = 1054 malloc(nr_maps_allocated * sizeof(struct map *)); 1055 } 1056 1057 RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address; 1058 RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name; 1059 RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated; 1060 } 1061 1062 for (unsigned int i = 0; !err && i < n; i++) { 1063 struct map *pos = parent_maps_by_address[i]; 1064 struct map *new = map__clone(pos); 1065 1066 if (!new) 1067 err = -ENOMEM; 1068 else { 1069 err = unwind__prepare_access(dest, new, NULL); 1070 if (!err) { 1071 dest_maps_by_address[i] = new; 1072 map__set_kmap_maps(new, dest); 1073 if (dest_maps_by_name) 1074 dest_maps_by_name[i] = map__get(new); 1075 RC_CHK_ACCESS(dest)->nr_maps = i + 1; 1076 } 1077 } 1078 if (err) 1079 map__put(new); 1080 } 1081 maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent)); 1082 if (!err) { 1083 RC_CHK_ACCESS(dest)->last_search_by_name_idx = 1084 RC_CHK_ACCESS(parent)->last_search_by_name_idx; 1085 maps__set_maps_by_name_sorted(dest, 1086 dest_maps_by_name && 1087 maps__maps_by_name_sorted(parent)); 1088 } else { 1089 RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0; 1090 maps__set_maps_by_name_sorted(dest, false); 1091 } 1092 } else { 1093 /* Unexpected copying to a maps containing entries. */ 1094 for (unsigned int i = 0; !err && i < n; i++) { 1095 struct map *pos = parent_maps_by_address[i]; 1096 struct map *new = map__clone(pos); 1097 1098 if (!new) 1099 err = -ENOMEM; 1100 else { 1101 err = unwind__prepare_access(dest, new, NULL); 1102 if (!err) 1103 err = __maps__insert(dest, new); 1104 } 1105 map__put(new); 1106 } 1107 } 1108 check_invariants(dest); 1109 1110 up_read(maps__lock(parent)); 1111 up_write(maps__lock(dest)); 1112 return err; 1113 } 1114 1115 static int map__addr_cmp(const void *key, const void *entry) 1116 { 1117 const u64 ip = *(const u64 *)key; 1118 const struct map *map = *(const struct map * const *)entry; 1119 1120 if (ip < map__start(map)) 1121 return -1; 1122 if (ip >= map__end(map)) 1123 return 1; 1124 return 0; 1125 } 1126 1127 struct map *maps__find(struct maps *maps, u64 ip) 1128 { 1129 struct map *result = NULL; 1130 bool done = false; 1131 1132 /* See locking/sorting note. */ 1133 while (!done) { 1134 down_read(maps__lock(maps)); 1135 if (maps__maps_by_address_sorted(maps)) { 1136 struct map **mapp = NULL; 1137 struct map **maps_by_address = maps__maps_by_address(maps); 1138 unsigned int nr_maps = maps__nr_maps(maps); 1139 1140 if (maps_by_address && nr_maps) 1141 mapp = bsearch(&ip, maps_by_address, nr_maps, sizeof(*mapp), 1142 map__addr_cmp); 1143 if (mapp) 1144 result = map__get(*mapp); 1145 done = true; 1146 } 1147 up_read(maps__lock(maps)); 1148 if (!done) 1149 maps__sort_by_address(maps); 1150 } 1151 return result; 1152 } 1153 1154 static int map__strcmp_name(const void *name, const void *b) 1155 { 1156 const struct dso *dso = map__dso(*(const struct map **)b); 1157 1158 return strcmp(name, dso__short_name(dso)); 1159 } 1160 1161 struct map *maps__find_by_name(struct maps *maps, const char *name) 1162 { 1163 struct map *result = NULL; 1164 bool done = false; 1165 1166 /* See locking/sorting note. */ 1167 while (!done) { 1168 unsigned int i; 1169 1170 down_read(maps__lock(maps)); 1171 1172 /* First check last found entry. */ 1173 i = RC_CHK_ACCESS(maps)->last_search_by_name_idx; 1174 if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) { 1175 struct dso *dso = map__dso(maps__maps_by_name(maps)[i]); 1176 1177 if (dso && strcmp(dso__short_name(dso), name) == 0) { 1178 result = map__get(maps__maps_by_name(maps)[i]); 1179 done = true; 1180 } 1181 } 1182 1183 /* Second search sorted array. */ 1184 if (!done && maps__maps_by_name_sorted(maps)) { 1185 struct map **mapp = 1186 bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps), 1187 sizeof(*mapp), map__strcmp_name); 1188 1189 if (mapp) { 1190 result = map__get(*mapp); 1191 i = mapp - maps__maps_by_name(maps); 1192 RC_CHK_ACCESS(maps)->last_search_by_name_idx = i; 1193 } 1194 done = true; 1195 } 1196 up_read(maps__lock(maps)); 1197 if (!done) { 1198 /* Sort and retry binary search. */ 1199 if (maps__sort_by_name(maps)) { 1200 /* 1201 * Memory allocation failed do linear search 1202 * through address sorted maps. 1203 */ 1204 struct map **maps_by_address; 1205 unsigned int n; 1206 1207 down_read(maps__lock(maps)); 1208 maps_by_address = maps__maps_by_address(maps); 1209 n = maps__nr_maps(maps); 1210 for (i = 0; i < n; i++) { 1211 struct map *pos = maps_by_address[i]; 1212 struct dso *dso = map__dso(pos); 1213 1214 if (dso && strcmp(dso__short_name(dso), name) == 0) { 1215 result = map__get(pos); 1216 break; 1217 } 1218 } 1219 up_read(maps__lock(maps)); 1220 done = true; 1221 } 1222 } 1223 } 1224 return result; 1225 } 1226 1227 struct map *maps__find_next_entry(struct maps *maps, struct map *map) 1228 { 1229 unsigned int i; 1230 struct map *result = NULL; 1231 1232 down_read(maps__lock(maps)); 1233 while (!maps__maps_by_address_sorted(maps)) { 1234 up_read(maps__lock(maps)); 1235 maps__sort_by_address(maps); 1236 down_read(maps__lock(maps)); 1237 } 1238 i = maps__by_address_index(maps, map); 1239 if (++i < maps__nr_maps(maps)) 1240 result = map__get(maps__maps_by_address(maps)[i]); 1241 1242 up_read(maps__lock(maps)); 1243 return result; 1244 } 1245 1246 void maps__fixup_end(struct maps *maps) 1247 { 1248 struct map **maps_by_address; 1249 unsigned int n; 1250 1251 down_write(maps__lock(maps)); 1252 if (!maps__maps_by_address_sorted(maps)) 1253 __maps__sort_by_address(maps); 1254 1255 maps_by_address = maps__maps_by_address(maps); 1256 n = maps__nr_maps(maps); 1257 for (unsigned int i = 1; i < n; i++) { 1258 struct map *prev = maps_by_address[i - 1]; 1259 struct map *curr = maps_by_address[i]; 1260 1261 if (!map__end(prev) || map__end(prev) > map__start(curr)) 1262 map__set_end(prev, map__start(curr)); 1263 } 1264 1265 /* 1266 * We still haven't the actual symbols, so guess the 1267 * last map final address. 1268 */ 1269 if (n > 0 && !map__end(maps_by_address[n - 1])) 1270 map__set_end(maps_by_address[n - 1], ~0ULL); 1271 1272 RC_CHK_ACCESS(maps)->ends_broken = false; 1273 check_invariants(maps); 1274 1275 up_write(maps__lock(maps)); 1276 } 1277 1278 /* 1279 * Merges map into maps by splitting the new map within the existing map 1280 * regions. 1281 */ 1282 int maps__merge_in(struct maps *kmaps, struct map *new_map) 1283 { 1284 unsigned int first_after_, kmaps__nr_maps; 1285 struct map **kmaps_maps_by_address; 1286 struct map **merged_maps_by_address; 1287 unsigned int merged_nr_maps_allocated; 1288 1289 /* First try under a read lock. */ 1290 while (true) { 1291 down_read(maps__lock(kmaps)); 1292 if (maps__maps_by_address_sorted(kmaps)) 1293 break; 1294 1295 up_read(maps__lock(kmaps)); 1296 1297 /* First after binary search requires sorted maps. Sort and try again. */ 1298 maps__sort_by_address(kmaps); 1299 } 1300 first_after_ = first_ending_after(kmaps, new_map); 1301 kmaps_maps_by_address = maps__maps_by_address(kmaps); 1302 1303 if (first_after_ >= maps__nr_maps(kmaps) || 1304 map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) { 1305 /* No overlap so regular insert suffices. */ 1306 up_read(maps__lock(kmaps)); 1307 return maps__insert(kmaps, new_map); 1308 } 1309 up_read(maps__lock(kmaps)); 1310 1311 /* Plain insert with a read-lock failed, try again now with the write lock. */ 1312 down_write(maps__lock(kmaps)); 1313 if (!maps__maps_by_address_sorted(kmaps)) 1314 __maps__sort_by_address(kmaps); 1315 1316 first_after_ = first_ending_after(kmaps, new_map); 1317 kmaps_maps_by_address = maps__maps_by_address(kmaps); 1318 kmaps__nr_maps = maps__nr_maps(kmaps); 1319 1320 if (first_after_ >= kmaps__nr_maps || 1321 map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) { 1322 /* No overlap so regular insert suffices. */ 1323 int ret = __maps__insert(kmaps, new_map); 1324 1325 check_invariants(kmaps); 1326 up_write(maps__lock(kmaps)); 1327 return ret; 1328 } 1329 /* Array to merge into, possibly 1 more for the sake of new_map. */ 1330 merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated; 1331 if (kmaps__nr_maps + 1 == merged_nr_maps_allocated) 1332 merged_nr_maps_allocated++; 1333 1334 merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address)); 1335 if (!merged_maps_by_address) { 1336 up_write(maps__lock(kmaps)); 1337 return -ENOMEM; 1338 } 1339 maps__set_maps_by_address(kmaps, merged_maps_by_address); 1340 maps__set_maps_by_address_sorted(kmaps, true); 1341 __maps__free_maps_by_name(kmaps); 1342 maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated); 1343 1344 /* Copy entries before the new_map that can't overlap. */ 1345 for (unsigned int i = 0; i < first_after_; i++) 1346 merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]); 1347 1348 maps__set_nr_maps(kmaps, first_after_); 1349 1350 /* Add the new map, it will be split when the later overlapping mappings are added. */ 1351 __maps__insert(kmaps, new_map); 1352 1353 /* Insert mappings after new_map, splitting new_map in the process. */ 1354 for (unsigned int i = first_after_; i < kmaps__nr_maps; i++) 1355 __maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]); 1356 1357 /* Copy the maps from merged into kmaps. */ 1358 for (unsigned int i = 0; i < kmaps__nr_maps; i++) 1359 map__zput(kmaps_maps_by_address[i]); 1360 1361 free(kmaps_maps_by_address); 1362 check_invariants(kmaps); 1363 up_write(maps__lock(kmaps)); 1364 return 0; 1365 } 1366 1367 void maps__load_first(struct maps *maps) 1368 { 1369 down_read(maps__lock(maps)); 1370 1371 if (maps__nr_maps(maps) > 0) 1372 map__load(maps__maps_by_address(maps)[0]); 1373 1374 up_read(maps__lock(maps)); 1375 } 1376