1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <stdlib.h>
4 #include <linux/zalloc.h>
5 #include "debug.h"
6 #include "dso.h"
7 #include "map.h"
8 #include "maps.h"
9 #include "rwsem.h"
10 #include "thread.h"
11 #include "ui/ui.h"
12 #include "unwind.h"
13 #include "unwind-libdw.h"
14 #include <internal/rc_check.h>
15
16 /*
17 * Locking/sorting note:
18 *
19 * Sorting is done with the write lock, iteration and binary searching happens
20 * under the read lock requiring being sorted. There is a race between sorting
21 * releasing the write lock and acquiring the read lock for iteration/searching
22 * where another thread could insert and break the sorting of the maps. In
23 * practice inserting maps should be rare meaning that the race shouldn't lead
24 * to live lock. Removal of maps doesn't break being sorted.
25 */
26
DECLARE_RC_STRUCT(maps) {
	/** @lock: reader/writer lock protecting the arrays and counters below. */
	struct rw_semaphore lock;
	/**
	 * @maps_by_address: array of maps sorted by their starting address if
	 * maps_by_address_sorted is true.
	 */
	struct map **maps_by_address;
	/**
	 * @maps_by_name: optional array of maps sorted by their dso name if
	 * maps_by_name_sorted is true.
	 */
	struct map **maps_by_name;
	/** @machine: machine these maps belong to (not reference counted here). */
	struct machine *machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	/** @addr_space: libunwind address space, set up lazily for unwinding. */
	void *addr_space;
	/** @unwind_libunwind_ops: unwind callbacks associated with @addr_space. */
	const struct unwind_libunwind_ops *unwind_libunwind_ops;
#endif
#ifdef HAVE_LIBDW_SUPPORT
	/** @libdw_addr_space_dwfl: cached libdw Dwfl handle, invalidated on map changes. */
	void *libdw_addr_space_dwfl;
#endif
	/** @refcnt: reference count for maps__get()/maps__put(). */
	refcount_t refcnt;
	/**
	 * @nr_maps: number of maps_by_address, and possibly maps_by_name,
	 * entries that contain maps.
	 */
	unsigned int nr_maps;
	/**
	 * @nr_maps_allocated: number of entries in maps_by_address and possibly
	 * maps_by_name.
	 */
	unsigned int nr_maps_allocated;
	/**
	 * @last_search_by_name_idx: cache of last found by name entry's index
	 * as frequent searches for the same dso name are common.
	 */
	unsigned int last_search_by_name_idx;
	/** @maps_by_address_sorted: is maps_by_address sorted. */
	bool maps_by_address_sorted;
	/** @maps_by_name_sorted: is maps_by_name sorted. */
	bool maps_by_name_sorted;
	/** @ends_broken: does the map contain a map where end values are unset/unsorted? */
	bool ends_broken;
};
70
/*
 * Debug-build sanity checks of the maps invariants (sizes, refcounts,
 * sort order). Compiled to nothing when NDEBUG is defined. Callers run
 * this while holding the maps lock.
 */
static void check_invariants(const struct maps *maps __maybe_unused)
{
#ifndef NDEBUG
	assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated);
	for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
		struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];

		/* Check map is well-formed. */
		assert(map__end(map) == 0 || map__start(map) <= map__end(map));
		/* Expect at least 1 reference count. */
		assert(refcount_read(map__refcnt(map)) > 0);

		/* Kernel maps must point back at the maps that contain them. */
		if (map__dso(map) && dso__kernel(map__dso(map)))
			assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));

		if (i > 0) {
			struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];

			/* If addresses are sorted... */
			if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) {
				/* Maps should be in start address order. */
				assert(map__start(prev) <= map__start(map));
				/*
				 * If the ends of maps aren't broken (during
				 * construction) then they should be ordered
				 * too.
				 */
				if (!RC_CHK_ACCESS(maps)->ends_broken) {
					assert(map__end(prev) <= map__end(map));
					assert(map__end(prev) <= map__start(map) ||
					       map__start(prev) == map__start(map));
				}
			}
		}
	}
	if (RC_CHK_ACCESS(maps)->maps_by_name) {
		for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
			struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];

			/*
			 * Maps by name maps should be in maps_by_address, so
			 * the reference count should be higher.
			 */
			assert(refcount_read(map__refcnt(map)) > 1);
		}
	}
#endif
}
119
maps__maps_by_address(const struct maps * maps)120 static struct map **maps__maps_by_address(const struct maps *maps)
121 {
122 return RC_CHK_ACCESS(maps)->maps_by_address;
123 }
124
maps__set_maps_by_address(struct maps * maps,struct map ** new)125 static void maps__set_maps_by_address(struct maps *maps, struct map **new)
126 {
127 RC_CHK_ACCESS(maps)->maps_by_address = new;
128
129 }
130
maps__set_nr_maps_allocated(struct maps * maps,unsigned int nr_maps_allocated)131 static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
132 {
133 RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
134 }
135
maps__set_nr_maps(struct maps * maps,unsigned int nr_maps)136 static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps)
137 {
138 RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
139 }
140
141 /* Not in the header, to aid reference counting. */
maps__maps_by_name(const struct maps * maps)142 static struct map **maps__maps_by_name(const struct maps *maps)
143 {
144 return RC_CHK_ACCESS(maps)->maps_by_name;
145
146 }
147
maps__set_maps_by_name(struct maps * maps,struct map ** new)148 static void maps__set_maps_by_name(struct maps *maps, struct map **new)
149 {
150 RC_CHK_ACCESS(maps)->maps_by_name = new;
151
152 }
153
maps__maps_by_address_sorted(const struct maps * maps)154 static bool maps__maps_by_address_sorted(const struct maps *maps)
155 {
156 return RC_CHK_ACCESS(maps)->maps_by_address_sorted;
157 }
158
static void maps__set_maps_by_address_sorted(struct maps *maps, bool value)
{
	RC_STRUCT(maps) *m = RC_CHK_ACCESS(maps);

	m->maps_by_address_sorted = value;
}
163
maps__maps_by_name_sorted(const struct maps * maps)164 static bool maps__maps_by_name_sorted(const struct maps *maps)
165 {
166 return RC_CHK_ACCESS(maps)->maps_by_name_sorted;
167 }
168
static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
{
	RC_STRUCT(maps) *m = RC_CHK_ACCESS(maps);

	m->maps_by_name_sorted = value;
}
173
maps__machine(const struct maps * maps)174 struct machine *maps__machine(const struct maps *maps)
175 {
176 return RC_CHK_ACCESS(maps)->machine;
177 }
178
maps__nr_maps(const struct maps * maps)179 unsigned int maps__nr_maps(const struct maps *maps)
180 {
181 return RC_CHK_ACCESS(maps)->nr_maps;
182 }
183
maps__refcnt(struct maps * maps)184 refcount_t *maps__refcnt(struct maps *maps)
185 {
186 return &RC_CHK_ACCESS(maps)->refcnt;
187 }
188
189 #ifdef HAVE_LIBUNWIND_SUPPORT
maps__addr_space(const struct maps * maps)190 void *maps__addr_space(const struct maps *maps)
191 {
192 return RC_CHK_ACCESS(maps)->addr_space;
193 }
194
maps__set_addr_space(struct maps * maps,void * addr_space)195 void maps__set_addr_space(struct maps *maps, void *addr_space)
196 {
197 RC_CHK_ACCESS(maps)->addr_space = addr_space;
198 }
199
maps__unwind_libunwind_ops(const struct maps * maps)200 const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
201 {
202 return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
203 }
204
maps__set_unwind_libunwind_ops(struct maps * maps,const struct unwind_libunwind_ops * ops)205 void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
206 {
207 RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
208 }
209 #endif
210 #ifdef HAVE_LIBDW_SUPPORT
maps__libdw_addr_space_dwfl(const struct maps * maps)211 void *maps__libdw_addr_space_dwfl(const struct maps *maps)
212 {
213 return RC_CHK_ACCESS(maps)->libdw_addr_space_dwfl;
214 }
215
maps__set_libdw_addr_space_dwfl(struct maps * maps,void * dwfl)216 void maps__set_libdw_addr_space_dwfl(struct maps *maps, void *dwfl)
217 {
218 RC_CHK_ACCESS(maps)->libdw_addr_space_dwfl = dwfl;
219 }
220 #endif
221
maps__lock(struct maps * maps)222 static struct rw_semaphore *maps__lock(struct maps *maps)
223 {
224 return &RC_CHK_ACCESS(maps)->lock;
225 }
226
maps__init(struct maps * maps,struct machine * machine)227 static void maps__init(struct maps *maps, struct machine *machine)
228 {
229 init_rwsem(maps__lock(maps));
230 RC_CHK_ACCESS(maps)->maps_by_address = NULL;
231 RC_CHK_ACCESS(maps)->maps_by_name = NULL;
232 RC_CHK_ACCESS(maps)->machine = machine;
233 #ifdef HAVE_LIBUNWIND_SUPPORT
234 RC_CHK_ACCESS(maps)->addr_space = NULL;
235 RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL;
236 #endif
237 #ifdef HAVE_LIBDW_SUPPORT
238 RC_CHK_ACCESS(maps)->libdw_addr_space_dwfl = NULL;
239 #endif
240 refcount_set(maps__refcnt(maps), 1);
241 RC_CHK_ACCESS(maps)->nr_maps = 0;
242 RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
243 RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0;
244 RC_CHK_ACCESS(maps)->maps_by_address_sorted = true;
245 RC_CHK_ACCESS(maps)->maps_by_name_sorted = false;
246 }
247
/*
 * Release everything owned by @maps: drop one map reference per array slot
 * (two if the by-name array exists), free both arrays and tear down the
 * unwind/libdw state. Only reached via maps__delete once the refcount is 0,
 * so no lock is taken.
 */
static void maps__exit(struct maps *maps)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);

	for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
		map__zput(maps_by_address[i]);
		if (maps_by_name)
			map__zput(maps_by_name[i]);
	}
	/* zfree frees the arrays and NULLs the local copies. */
	zfree(&maps_by_address);
	zfree(&maps_by_name);
	unwind__finish_access(maps);
#ifdef HAVE_LIBDW_SUPPORT
	libdw__invalidate_dwfl(maps, maps__libdw_addr_space_dwfl(maps));
#endif
}
265
/*
 * Allocate and initialise a new maps for @machine. Returns NULL on
 * allocation failure. The caller owns the returned reference.
 */
struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	/* ADD_RC_CHK wraps the raw struct; it is false/NULL when maps is NULL. */
	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}
276
/* Final teardown: release contents, then free the refcount-checked wrapper. */
static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	RC_CHK_FREE(maps);
}
282
/*
 * Take a new reference on @maps. Returns a (possibly distinct checked)
 * handle to the same underlying maps, or NULL when @maps is NULL.
 */
struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	/* RC_CHK_GET is false for a NULL input, in which case result is NULL. */
	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}
292
/*
 * Drop a reference on @maps, deleting it when the count reaches zero.
 * NULL is tolerated. When not deleting, only the checked wrapper for
 * this handle is released.
 */
void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		RC_CHK_PUT(maps);
}
300
/*
 * Drop the optional by-name array: put each map reference it holds and free
 * the array. It will be rebuilt lazily by the next maps__sort_by_name.
 * Caller holds the maps lock for writing.
 */
static void __maps__free_maps_by_name(struct maps *maps)
{
	if (!maps__maps_by_name(maps))
		return;

	/*
	 * Release the extra reference each by-name slot holds; the maps stay
	 * alive via maps_by_address.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);

	/* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false */
	maps__set_maps_by_name_sorted(maps, false);
}
317
map__start_cmp(const void * a,const void * b)318 static int map__start_cmp(const void *a, const void *b)
319 {
320 const struct map *map_a = *(const struct map * const *)a;
321 const struct map *map_b = *(const struct map * const *)b;
322 u64 map_a_start = map__start(map_a);
323 u64 map_b_start = map__start(map_b);
324
325 if (map_a_start == map_b_start) {
326 u64 map_a_end = map__end(map_a);
327 u64 map_b_end = map__end(map_b);
328
329 if (map_a_end == map_b_end) {
330 /* Ensure maps with the same addresses have a fixed order. */
331 if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b))
332 return 0;
333 return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b)
334 ? 1 : -1;
335 }
336 return map_a_end > map_b_end ? 1 : -1;
337 }
338 return map_a_start > map_b_start ? 1 : -1;
339 }
340
__maps__sort_by_address(struct maps * maps)341 static void __maps__sort_by_address(struct maps *maps)
342 {
343 if (maps__maps_by_address_sorted(maps))
344 return;
345
346 qsort(maps__maps_by_address(maps),
347 maps__nr_maps(maps),
348 sizeof(struct map *),
349 map__start_cmp);
350 maps__set_maps_by_address_sorted(maps, true);
351 }
352
/* Locked wrapper around __maps__sort_by_address. */
static void maps__sort_by_address(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__sort_by_address(maps);
	up_write(maps__lock(maps));
}
359
/*
 * qsort/bsearch comparator for the by-name array: order maps by dso short
 * name, breaking ties between distinct maps with the address comparator.
 */
static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	int ret = strcmp(dso__short_name(map__dso(map_a)),
			 dso__short_name(map__dso(map_b)));

	/* Distinct but name-equal maps still need a deterministic order. */
	if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b))
		ret = map__start_cmp(a, b);

	return ret;
}
374
/*
 * Ensure a sorted by-name array exists, building it from maps_by_address on
 * first use. Returns 0 on success or -ENOMEM if the array couldn't be
 * allocated. Takes the maps lock for writing.
 */
static int maps__sort_by_name(struct maps *maps)
{
	int err = 0;

	down_write(maps__lock(maps));
	if (!maps__maps_by_name_sorted(maps)) {
		struct map **maps_by_name = maps__maps_by_name(maps);

		if (!maps_by_name) {
			/* Match the by-address capacity so later inserts fit. */
			maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated *
					      sizeof(*maps_by_name));
			if (!maps_by_name)
				err = -ENOMEM;
			else {
				struct map **maps_by_address = maps__maps_by_address(maps);
				unsigned int n = maps__nr_maps(maps);

				maps__set_maps_by_name(maps, maps_by_name);
				/* Each by-name slot holds its own map reference. */
				for (unsigned int i = 0; i < n; i++)
					maps_by_name[i] = map__get(maps_by_address[i]);
			}
		}
		if (!err) {
			qsort(maps_by_name,
			      maps__nr_maps(maps),
			      sizeof(struct map *),
			      map__strcmp);
			maps__set_maps_by_name_sorted(maps, true);
		}
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
	return err;
}
409
maps__by_address_index(const struct maps * maps,const struct map * map)410 static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map)
411 {
412 struct map **maps_by_address = maps__maps_by_address(maps);
413
414 if (maps__maps_by_address_sorted(maps)) {
415 struct map **mapp =
416 bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps),
417 sizeof(*mapp), map__start_cmp);
418
419 if (mapp)
420 return mapp - maps_by_address;
421 } else {
422 for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
423 if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map))
424 return i;
425 }
426 }
427 pr_err("Map missing from maps");
428 return -1;
429 }
430
maps__by_name_index(const struct maps * maps,const struct map * map)431 static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map)
432 {
433 struct map **maps_by_name = maps__maps_by_name(maps);
434
435 if (maps__maps_by_name_sorted(maps)) {
436 struct map **mapp =
437 bsearch(&map, maps_by_name, maps__nr_maps(maps),
438 sizeof(*mapp), map__strcmp);
439
440 if (mapp)
441 return mapp - maps_by_name;
442 } else {
443 for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
444 if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map))
445 return i;
446 }
447 }
448 pr_err("Map missing from maps");
449 return -1;
450 }
451
map__set_kmap_maps(struct map * map,struct maps * maps)452 static void map__set_kmap_maps(struct map *map, struct maps *maps)
453 {
454 struct dso *dso;
455
456 if (map == NULL)
457 return;
458
459 dso = map__dso(map);
460
461 if (dso && dso__kernel(dso)) {
462 struct kmap *kmap = map__kmap(map);
463
464 if (kmap)
465 kmap->kmaps = maps;
466 else
467 pr_err("Internal error: kernel dso with non kernel map\n");
468 }
469 }
470
/*
 * Append @new to the maps, growing the arrays as needed and updating the
 * sortedness flags. Takes a reference on @new for each array it lands in.
 * Returns 0 or -ENOMEM. Caller holds the maps lock for writing.
 */
static int __maps__insert(struct maps *maps, struct map *new)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;

	if (nr_maps + 1 > nr_allocate) {
		/* Grow geometrically, starting from 32 entries. */
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	/* Insert the value at the end. */
	maps_by_address[nr_maps] = map__get(new);
	map__set_kmap_maps(new, maps);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new);

	nr_maps++;
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;

	/*
	 * Recompute if things are sorted. If things are inserted in a sorted
	 * manner, for example by processing /proc/pid/maps, then no
	 * sorting/resorting will be necessary.
	 */
	if (nr_maps == 1) {
		/* If there's just 1 entry then maps are sorted. */
		maps__set_maps_by_address_sorted(maps, true);
		maps__set_maps_by_name_sorted(maps, maps_by_name != NULL);
	} else {
		/* Sorted if maps were already sorted and this map starts after the last one. */
		maps__set_maps_by_address_sorted(maps,
			maps__maps_by_address_sorted(maps) &&
			map__end(maps_by_address[nr_maps - 2]) <= map__start(new));
		maps__set_maps_by_name_sorted(maps, false);
	}
	/* A map whose end precedes its start is still being constructed. */
	if (map__end(new) < map__start(new))
		RC_CHK_ACCESS(maps)->ends_broken = true;

	return 0;
}
529
/* Locked wrapper around __maps__insert. Returns 0 or -ENOMEM. */
int maps__insert(struct maps *maps, struct map *map)
{
	int ret;

	down_write(maps__lock(maps));
	ret = __maps__insert(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
	return ret;
}
540
/*
 * Remove @map from both arrays, dropping the references the arrays held.
 * @map must be present (the index lookups log an error otherwise).
 * Caller holds the maps lock for writing.
 */
static void __maps__remove(struct maps *maps, struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int address_idx;

	/* Slide later mappings over the one to remove */
	address_idx = maps__by_address_index(maps, map);
	map__put(maps_by_address[address_idx]);
	memmove(&maps_by_address[address_idx],
		&maps_by_address[address_idx + 1],
		(nr_maps - address_idx - 1) * sizeof(*maps_by_address));

	if (maps_by_name) {
		unsigned int name_idx = maps__by_name_index(maps, map);

		map__put(maps_by_name[name_idx]);
		memmove(&maps_by_name[name_idx],
			&maps_by_name[name_idx + 1],
			(nr_maps - name_idx - 1) * sizeof(*maps_by_name));
	}

	--RC_CHK_ACCESS(maps)->nr_maps;
}
566
/*
 * Locked wrapper around __maps__remove; also invalidates the cached libdw
 * Dwfl since the address space changed.
 */
void maps__remove(struct maps *maps, struct map *map)
{
	down_write(maps__lock(maps));
	__maps__remove(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
#ifdef HAVE_LIBDW_SUPPORT
	libdw__invalidate_dwfl(maps, maps__libdw_addr_space_dwfl(maps));
#endif
}
577
maps__empty(struct maps * maps)578 bool maps__empty(struct maps *maps)
579 {
580 bool res;
581
582 down_read(maps__lock(maps));
583 res = maps__nr_maps(maps) == 0;
584 up_read(maps__lock(maps));
585
586 return res;
587 }
588
/* Identity comparison: do both handles refer to the same underlying maps? */
bool maps__equal(struct maps *a, struct maps *b)
{
	return RC_CHK_EQUAL(a, b);
}
593
/*
 * Call @cb for each map in address order, stopping early if it returns
 * non-zero (which is also the return value). If the maps aren't sorted the
 * read lock is dropped, they are sorted under the write lock and the loop
 * retries - see the locking/sorting note at the top of the file.
 */
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{
	bool done = false;
	int ret = 0;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			/*
			 * maps__for_each_map callbacks may buggily/unsafely
			 * insert into maps_by_address. Deliberately reload
			 * maps__nr_maps and maps_by_address on each iteration
			 * to avoid using memory freed by maps__insert growing
			 * the array - this may cause maps to be skipped or
			 * repeated.
			 */
			for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
				struct map **maps_by_address = maps__maps_by_address(maps);
				struct map *map = maps_by_address[i];

				ret = cb(map, data);
				if (ret)
					break;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return ret;
}
627
/*
 * Remove every map for which @cb returns true. The index only advances when
 * nothing was removed because __maps__remove slides later entries down.
 * Invalidates the cached libdw Dwfl if anything was removed.
 */
void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
{
	struct map **maps_by_address;
	bool removed = false;

	down_write(maps__lock(maps));

	maps_by_address = maps__maps_by_address(maps);
	for (unsigned int i = 0; i < maps__nr_maps(maps);) {
		if (cb(maps_by_address[i], data)) {
			__maps__remove(maps, maps_by_address[i]);
			removed = true;
		} else {
			i++;
		}
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
	if (removed) {
#ifdef HAVE_LIBDW_SUPPORT
		libdw__invalidate_dwfl(maps, maps__libdw_addr_space_dwfl(maps));
#endif
	}
}
652
/*
 * Look up the symbol covering @addr. When @mapp is non-NULL the found map
 * reference is handed to the caller through it (may be NULL); otherwise the
 * reference is dropped here.
 */
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct symbol *sym = NULL;
	struct map *map = maps__find(maps, addr);

	/* The map must be loaded before map__map_ip is meaningful. */
	if (map != NULL && map__load(map) >= 0)
		sym = map__find_symbol(map, map__map_ip(map, addr));

	if (mapp)
		*mapp = map;
	else
		map__put(map);

	return sym;
}
669
/* Callback state for maps__find_symbol_by_name's maps__for_each_map walk. */
struct maps__find_symbol_by_name_args {
	struct map **mapp;	/* out: map containing the symbol, if requested */
	const char *name;	/* symbol name to search for */
	struct symbol *sym;	/* out: found symbol or NULL */
};
675
maps__find_symbol_by_name_cb(struct map * map,void * data)676 static int maps__find_symbol_by_name_cb(struct map *map, void *data)
677 {
678 struct maps__find_symbol_by_name_args *args = data;
679
680 args->sym = map__find_symbol_by_name(map, args->name);
681 if (!args->sym)
682 return 0;
683
684 if (!map__contains_symbol(map, args->sym)) {
685 args->sym = NULL;
686 return 0;
687 }
688
689 if (args->mapp != NULL)
690 *args->mapp = map__get(map);
691 return 1;
692 }
693
/*
 * Search all maps for a symbol called @name. On success, when @mapp is
 * non-NULL it receives a reference to the containing map. Returns the
 * symbol or NULL.
 */
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct maps__find_symbol_by_name_args args = {
		.mapp = mapp,
		.name = name,
		.sym = NULL,
	};

	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
	return args.sym;
}
705
/*
 * Resolve @ams->addr to a map-relative address and symbol. If the current
 * ams->ms.map doesn't cover the address, re-find the map in @maps (dropping
 * the old reference). Returns 0 when a symbol was found, -1 otherwise.
 */
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		map__put(ams->ms.map);
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}
722
/* Callback state for maps__fprintf: output stream and running byte count. */
struct maps__fprintf_args {
	FILE *fp;	/* destination stream */
	size_t printed;	/* accumulated number of characters written */
};
727
maps__fprintf_cb(struct map * map,void * data)728 static int maps__fprintf_cb(struct map *map, void *data)
729 {
730 struct maps__fprintf_args *args = data;
731
732 args->printed += fprintf(args->fp, "Map:");
733 args->printed += map__fprintf(map, args->fp);
734 if (verbose > 2) {
735 args->printed += dso__fprintf(map__dso(map), args->fp);
736 args->printed += fprintf(args->fp, "--\n");
737 }
738 return 0;
739 }
740
/* Print every map to @fp; returns the total number of characters written. */
size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	struct maps__fprintf_args args = {
		.fp = fp,
		.printed = 0,
	};

	maps__for_each_map(maps, maps__fprintf_cb, &args);

	return args.printed;
}
752
753 /*
754 * Find first map where end > map->start.
755 * Same as find_vma() in kernel.
756 */
/*
 * Binary search for the index of the first map whose end is greater than
 * map's start, i.e. the first candidate for overlap. Returns nr_maps when
 * no such entry exists. Requires maps_by_address to be sorted; caller holds
 * the maps lock.
 */
static unsigned int first_ending_after(struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1;

	assert(maps__maps_by_address_sorted(maps));
	/* Fast path: the very first entry already ends after map's start. */
	if (low <= high && map__end(maps_by_address[0]) > map__start(map))
		return 0;

	while (low <= high) {
		int mid = (low + high) / 2;
		struct map *pos = maps_by_address[mid];

		if (map__end(pos) > map__start(map)) {
			first = mid;
			if (map__start(pos) <= map__start(map)) {
				/* Entry overlaps map. */
				break;
			}
			/* Keep searching left for an earlier candidate. */
			high = mid - 1;
		} else
			low = mid + 1;
	}
	return first;
}
782
/*
 * Insert @new1 (and optionally @new2, which must start at or after new1's
 * end) at @first_after_index in the address-sorted array, preserving the
 * address sort. By-name entries are appended at the end and the by-name
 * sort flag is cleared. Returns 0 or -ENOMEM. Caller holds the maps lock
 * for writing.
 */
static int __maps__insert_sorted(struct maps *maps, unsigned int first_after_index,
				 struct map *new1, struct map *new2)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;
	unsigned int to_add = new2 ? 2 : 1;

	/* The preconditions that keep the array sorted after insertion. */
	assert(maps__maps_by_address_sorted(maps));
	assert(first_after_index == nr_maps ||
	       map__end(new1) <= map__start(maps_by_address[first_after_index]));
	assert(!new2 || map__end(new1) <= map__start(new2));
	assert(first_after_index == nr_maps || !new2 ||
	       map__end(new2) <= map__start(maps_by_address[first_after_index]));

	if (nr_maps + to_add > nr_allocate) {
		/* Grow geometrically, starting from 32 entries. */
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new1));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new1));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	/* Open a gap of to_add slots at the insertion point. */
	memmove(&maps_by_address[first_after_index+to_add],
		&maps_by_address[first_after_index],
		(nr_maps - first_after_index) * sizeof(new1));
	maps_by_address[first_after_index] = map__get(new1);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new1);
	if (new2) {
		maps_by_address[first_after_index + 1] = map__get(new2);
		if (maps_by_name)
			maps_by_name[nr_maps + 1] = map__get(new2);
	}
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps + to_add;
	/* By-name entries were appended at the end, so it is no longer sorted. */
	maps__set_maps_by_name_sorted(maps, false);
	map__set_kmap_maps(new1, maps);
	map__set_kmap_maps(new2, maps);

	check_invariants(maps);
	return 0;
}
839
840 /*
841 * Adds new to maps, if new overlaps existing entries then the existing maps are
842 * adjusted or removed so that new fits without overlapping any entries.
843 */
/*
 * Insert @new, splitting/trimming/removing any existing maps it overlaps.
 * Each overlapped map may leave behind a 'before' piece (below new's start)
 * and/or an 'after' piece (above new's end); whole-covered maps are removed.
 * Returns 0 or -ENOMEM. Caller holds the maps lock for writing.
 */
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err = 0;
	FILE *fp = debug_file();
	unsigned int i, ni = INT_MAX; // Some gcc complain, but depends on maps_by_name...

	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	/*
	 * Iterate through entries where the end of the existing entry is
	 * greater-than the new map's start.
	 */
	for (i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
		struct map **maps_by_address = maps__maps_by_address(maps);
		struct map **maps_by_name = maps__maps_by_name(maps);
		struct map *pos = maps_by_address[i];
		struct map *before = NULL, *after = NULL;

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (map__start(pos) >= map__end(new))
			break;

		if (use_browser) {
			pr_debug("overlapping maps in %s (disable tui for more info)\n",
				 dso__name(map__dso(new)));
		} else if (verbose >= 2) {
			pr_debug("overlapping maps:\n");
			map__fprintf(new, fp);
			map__fprintf(pos, fp);
		}

		/* Cache pos's by-name index before the arrays are mutated. */
		if (maps_by_name)
			ni = maps__by_name_index(maps, pos);

		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(new) > map__start(pos)) {
			/* Map starts within existing map. Need to shorten the existing map. */
			before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto out_err;
			}
			map__set_end(before, map__start(new));

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
		}
		if (map__end(new) < map__end(pos)) {
			/* The new map isn't as long as the existing map. */
			after = map__clone(pos);

			if (after == NULL) {
				map__zput(before);
				err = -ENOMEM;
				goto out_err;
			}

			map__set_start(after, map__end(new));
			map__add_pgoff(after, map__end(new) - map__start(pos));
			/* The pgoff adjustment must keep address translation intact. */
			assert(map__map_ip(pos, map__end(new)) ==
			       map__map_ip(after, map__end(new)));

			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
		}
		/*
		 * If adding one entry, for `before` or `after`, we can replace
		 * the existing entry. If both `before` and `after` are
		 * necessary than an insert is needed. If the existing entry
		 * entirely overlaps the existing entry it can just be removed.
		 */
		if (before) {
			/* Replace pos in place with its shortened clone. */
			map__put(maps_by_address[i]);
			maps_by_address[i] = before;
			map__set_kmap_maps(before, maps);

			if (maps_by_name) {
				map__put(maps_by_name[ni]);
				maps_by_name[ni] = map__get(before);
			}

			/* Maps are still ordered, go to next one. */
			i++;
			if (after) {
				/*
				 * 'before' and 'after' mean 'new' split the
				 * 'pos' mapping and therefore there are no
				 * later mappings.
				 */
				err = __maps__insert_sorted(maps, i, new, after);
				map__put(after);
				check_invariants(maps);
				return err;
			}
			check_invariants(maps);
		} else if (after) {
			/*
			 * 'after' means 'new' split 'pos' and there are no
			 * later mappings.
			 */
			map__put(maps_by_address[i]);
			maps_by_address[i] = map__get(new);
			map__set_kmap_maps(new, maps);

			if (maps_by_name) {
				map__put(maps_by_name[ni]);
				maps_by_name[ni] = map__get(new);
			}

			err = __maps__insert_sorted(maps, i + 1, after, NULL);
			map__put(after);
			check_invariants(maps);
			return err;
		} else {
			struct map *next = NULL;
			unsigned int nr_maps = maps__nr_maps(maps);

			if (i + 1 < nr_maps)
				next = maps_by_address[i + 1];

			if (!next || map__start(next) >= map__end(new)) {
				/*
				 * Replace existing mapping and end knowing
				 * there aren't later overlapping or any
				 * mappings.
				 */
				map__put(maps_by_address[i]);
				maps_by_address[i] = map__get(new);
				map__set_kmap_maps(new, maps);

				if (maps_by_name) {
					map__put(maps_by_name[ni]);
					maps_by_name[ni] = map__get(new);
				}

				check_invariants(maps);
				return err;
			}
			/*
			 * pos fully covers the previous mapping so remove
			 * it. The following is an inlined version of
			 * maps__remove that reuses the already computed
			 * indices.
			 */
			map__put(maps_by_address[i]);
			memmove(&maps_by_address[i],
				&maps_by_address[i + 1],
				(nr_maps - i - 1) * sizeof(*maps_by_address));

			if (maps_by_name) {
				map__put(maps_by_name[ni]);
				memmove(&maps_by_name[ni],
					&maps_by_name[ni + 1],
					(nr_maps - ni - 1) * sizeof(*maps_by_name));
			}
			--RC_CHK_ACCESS(maps)->nr_maps;
			check_invariants(maps);
			/*
			 * Maps are ordered but no need to increase `i` as the
			 * later maps were moved down.
			 */
		}
	}
	/* Add the map. */
	err = __maps__insert_sorted(maps, i, new, NULL);
out_err:
	return err;
}
1020
/*
 * Insert 'new' into 'maps', first resolving any overlap with existing
 * mappings. Thin locked wrapper around the unlocked helper.
 */
int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int ret;

	down_write(maps__lock(maps));
	ret = __maps__fixup_overlap_and_insert(maps, new);
	up_write(maps__lock(maps));
	return ret;
}
1030
/*
 * Clone every map in @parent into @dest. @dest is locked for writing,
 * @parent for reading. Returns 0 on success or a negative errno; on failure
 * @dest is left internally consistent with however many maps were copied.
 */
int maps__copy_from(struct maps *dest, struct maps *parent)
{
	/* Note, if struct map were immutable then cloning could use ref counts. */
	struct map **parent_maps_by_address;
	int err = 0;
	unsigned int n;

	down_write(maps__lock(dest));
	down_read(maps__lock(parent));

	parent_maps_by_address = maps__maps_by_address(parent);
	n = maps__nr_maps(parent);
	if (maps__nr_maps(dest) == 0) {
		/* No existing mappings so just copy from parent to avoid reallocs in insert. */
		unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated;
		struct map **dest_maps_by_address =
			malloc(nr_maps_allocated * sizeof(struct map *));
		struct map **dest_maps_by_name = NULL;

		if (!dest_maps_by_address)
			err = -ENOMEM;
		else {
			if (maps__maps_by_name(parent)) {
				/*
				 * The by-name index is optional: if this
				 * allocation fails dest just lacks it, and
				 * maps_by_name_sorted is set false below
				 * (the sorted flag is only set when
				 * dest_maps_by_name is non-NULL).
				 */
				dest_maps_by_name =
					malloc(nr_maps_allocated * sizeof(struct map *));
			}

			RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address;
			RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name;
			RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated;
		}

		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err) {
					/* Arrays are filled in parent's (address) order. */
					dest_maps_by_address[i] = new;
					map__set_kmap_maps(new, dest);
					if (dest_maps_by_name)
						dest_maps_by_name[i] = map__get(new);
					/*
					 * Bump nr_maps per entry so a mid-loop
					 * failure leaves dest consistent.
					 */
					RC_CHK_ACCESS(dest)->nr_maps = i + 1;
				}
			}
			if (err)
				map__put(new);
		}
		maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent));
		if (!err) {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx =
				RC_CHK_ACCESS(parent)->last_search_by_name_idx;
			maps__set_maps_by_name_sorted(dest,
						      dest_maps_by_name &&
						      maps__maps_by_name_sorted(parent));
		} else {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
			maps__set_maps_by_name_sorted(dest, false);
		}
	} else {
		/* Unexpected copying to a maps containing entries. */
		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err)
					err = __maps__insert(dest, new);
			}
			/* Drop the clone ref; insert takes its own reference. */
			map__put(new);
		}
	}
	check_invariants(dest);

	up_read(maps__lock(parent));
	up_write(maps__lock(dest));
	return err;
}
1115
map__addr_cmp(const void * key,const void * entry)1116 static int map__addr_cmp(const void *key, const void *entry)
1117 {
1118 const u64 ip = *(const u64 *)key;
1119 const struct map *map = *(const struct map * const *)entry;
1120
1121 if (ip < map__start(map))
1122 return -1;
1123 if (ip >= map__end(map))
1124 return 1;
1125 return 0;
1126 }
1127
/*
 * Find the map containing address 'ip', returning a new reference or NULL.
 * Requires the by-address array to be sorted; if it isn't, drop the read
 * lock, sort, and retry (see the locking/sorting note at the top of file).
 */
struct map *maps__find(struct maps *maps, u64 ip)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	do {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			struct map **by_addr = maps__maps_by_address(maps);
			unsigned int n = maps__nr_maps(maps);
			struct map **found = NULL;

			if (by_addr && n)
				found = bsearch(&ip, by_addr, n, sizeof(*found),
						map__addr_cmp);
			if (found)
				result = map__get(*found);
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	} while (!done);
	return result;
}
1154
/* bsearch comparator: dso short-name of the map *b against the key 'name'. */
static int map__strcmp_name(const void *name, const void *b)
{
	const struct map *map = *(const struct map **)b;

	return strcmp(name, dso__short_name(map__dso(map)));
}
1161
/*
 * Find a map whose dso short name matches 'name', returning a new reference
 * or NULL. Tries, in order: the cached last-found index, a binary search of
 * the by-name sorted array, and - if sorting fails - a linear scan of the
 * by-address array.
 */
struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		unsigned int i;

		down_read(maps__lock(maps));

		/* First check last found entry. */
		i = RC_CHK_ACCESS(maps)->last_search_by_name_idx;
		if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
			struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);

			if (dso && strcmp(dso__short_name(dso), name) == 0) {
				result = map__get(maps__maps_by_name(maps)[i]);
				done = true;
			}
		}

		/* Second search sorted array. */
		if (!done && maps__maps_by_name_sorted(maps)) {
			struct map **mapp =
				bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__strcmp_name);

			if (mapp) {
				result = map__get(*mapp);
				/* Remember the hit for the next lookup. */
				i = mapp - maps__maps_by_name(maps);
				RC_CHK_ACCESS(maps)->last_search_by_name_idx = i;
			}
			/* Sorted search is authoritative whether found or not. */
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done) {
			/* Sort and retry binary search. */
			if (maps__sort_by_name(maps)) {
				/*
				 * Memory allocation failed do linear search
				 * through address sorted maps.
				 */
				struct map **maps_by_address;
				unsigned int n;

				down_read(maps__lock(maps));
				maps_by_address =  maps__maps_by_address(maps);
				n = maps__nr_maps(maps);
				for (i = 0; i < n; i++) {
					struct map *pos = maps_by_address[i];
					struct dso *dso = map__dso(pos);

					if (dso && strcmp(dso__short_name(dso), name) == 0) {
						result = map__get(pos);
						break;
					}
				}
				up_read(maps__lock(maps));
				done = true;
			}
		}
	}
	return result;
}
1227
maps__find_next_entry(struct maps * maps,struct map * map)1228 struct map *maps__find_next_entry(struct maps *maps, struct map *map)
1229 {
1230 unsigned int i;
1231 struct map *result = NULL;
1232
1233 down_read(maps__lock(maps));
1234 while (!maps__maps_by_address_sorted(maps)) {
1235 up_read(maps__lock(maps));
1236 maps__sort_by_address(maps);
1237 down_read(maps__lock(maps));
1238 }
1239 i = maps__by_address_index(maps, map);
1240 if (++i < maps__nr_maps(maps))
1241 result = map__get(maps__maps_by_address(maps)[i]);
1242
1243 up_read(maps__lock(maps));
1244 return result;
1245 }
1246
maps__fixup_end(struct maps * maps)1247 void maps__fixup_end(struct maps *maps)
1248 {
1249 struct map **maps_by_address;
1250 unsigned int n;
1251
1252 down_write(maps__lock(maps));
1253 if (!maps__maps_by_address_sorted(maps))
1254 __maps__sort_by_address(maps);
1255
1256 maps_by_address = maps__maps_by_address(maps);
1257 n = maps__nr_maps(maps);
1258 for (unsigned int i = 1; i < n; i++) {
1259 struct map *prev = maps_by_address[i - 1];
1260 struct map *curr = maps_by_address[i];
1261
1262 if (!map__end(prev) || map__end(prev) > map__start(curr))
1263 map__set_end(prev, map__start(curr));
1264 }
1265
1266 /*
1267 * We still haven't the actual symbols, so guess the
1268 * last map final address.
1269 */
1270 if (n > 0 && !map__end(maps_by_address[n - 1]))
1271 map__set_end(maps_by_address[n - 1], ~0ULL);
1272
1273 RC_CHK_ACCESS(maps)->ends_broken = false;
1274 check_invariants(maps);
1275
1276 up_write(maps__lock(maps));
1277 }
1278
1279 /*
1280 * Merges map into maps by splitting the new map within the existing map
1281 * regions.
1282 */
maps__merge_in(struct maps * kmaps,struct map * new_map)1283 int maps__merge_in(struct maps *kmaps, struct map *new_map)
1284 {
1285 unsigned int first_after_, kmaps__nr_maps;
1286 struct map **kmaps_maps_by_address;
1287 struct map **merged_maps_by_address;
1288 unsigned int merged_nr_maps_allocated;
1289
1290 /* First try under a read lock. */
1291 while (true) {
1292 down_read(maps__lock(kmaps));
1293 if (maps__maps_by_address_sorted(kmaps))
1294 break;
1295
1296 up_read(maps__lock(kmaps));
1297
1298 /* First after binary search requires sorted maps. Sort and try again. */
1299 maps__sort_by_address(kmaps);
1300 }
1301 first_after_ = first_ending_after(kmaps, new_map);
1302 kmaps_maps_by_address = maps__maps_by_address(kmaps);
1303
1304 if (first_after_ >= maps__nr_maps(kmaps) ||
1305 map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
1306 /* No overlap so regular insert suffices. */
1307 up_read(maps__lock(kmaps));
1308 return maps__insert(kmaps, new_map);
1309 }
1310 up_read(maps__lock(kmaps));
1311
1312 /* Plain insert with a read-lock failed, try again now with the write lock. */
1313 down_write(maps__lock(kmaps));
1314 if (!maps__maps_by_address_sorted(kmaps))
1315 __maps__sort_by_address(kmaps);
1316
1317 first_after_ = first_ending_after(kmaps, new_map);
1318 kmaps_maps_by_address = maps__maps_by_address(kmaps);
1319 kmaps__nr_maps = maps__nr_maps(kmaps);
1320
1321 if (first_after_ >= kmaps__nr_maps ||
1322 map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
1323 /* No overlap so regular insert suffices. */
1324 int ret = __maps__insert(kmaps, new_map);
1325
1326 check_invariants(kmaps);
1327 up_write(maps__lock(kmaps));
1328 return ret;
1329 }
1330 /* Array to merge into, possibly 1 more for the sake of new_map. */
1331 merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated;
1332 if (kmaps__nr_maps + 1 == merged_nr_maps_allocated)
1333 merged_nr_maps_allocated++;
1334
1335 merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address));
1336 if (!merged_maps_by_address) {
1337 up_write(maps__lock(kmaps));
1338 return -ENOMEM;
1339 }
1340 maps__set_maps_by_address(kmaps, merged_maps_by_address);
1341 maps__set_maps_by_address_sorted(kmaps, true);
1342 __maps__free_maps_by_name(kmaps);
1343 maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated);
1344
1345 /* Copy entries before the new_map that can't overlap. */
1346 for (unsigned int i = 0; i < first_after_; i++)
1347 merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]);
1348
1349 maps__set_nr_maps(kmaps, first_after_);
1350
1351 /* Add the new map, it will be split when the later overlapping mappings are added. */
1352 __maps__insert(kmaps, new_map);
1353
1354 /* Insert mappings after new_map, splitting new_map in the process. */
1355 for (unsigned int i = first_after_; i < kmaps__nr_maps; i++)
1356 __maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]);
1357
1358 /* Copy the maps from merged into kmaps. */
1359 for (unsigned int i = 0; i < kmaps__nr_maps; i++)
1360 map__zput(kmaps_maps_by_address[i]);
1361
1362 free(kmaps_maps_by_address);
1363 check_invariants(kmaps);
1364 up_write(maps__lock(kmaps));
1365 return 0;
1366 }
1367
maps__load_first(struct maps * maps)1368 void maps__load_first(struct maps *maps)
1369 {
1370 down_read(maps__lock(maps));
1371
1372 if (maps__nr_maps(maps) > 0)
1373 map__load(maps__maps_by_address(maps)[0]);
1374
1375 up_read(maps__lock(maps));
1376 }
1377