// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "rwsem.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"
#include <internal/rc_check.h>

/*
 * Locking/sorting note:
 *
 * Sorting is done with the write lock; iteration and binary searching
 * happen under the read lock and require the maps to be sorted. There is
 * a race between sorting releasing the write lock and acquiring the read
 * lock for iteration/searching, where another thread could insert and
 * break the sorting of the maps. In practice inserting maps should be
 * rare, meaning that the race shouldn't lead to live lock. Removal of
 * maps doesn't break being sorted.
 */
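
/*
 * For example, a reader that needs sorted data loops until it observes a
 * sorted array under the read lock, sorting (under the write lock) between
 * attempts, as maps__find() and maps__for_each_map() below do:
 *
 *	while (!done) {
 *		down_read(maps__lock(maps));
 *		if (maps__maps_by_address_sorted(maps)) {
 *			... iterate or binary search maps_by_address ...
 *			done = true;
 *		}
 *		up_read(maps__lock(maps));
 *		if (!done)
 *			maps__sort_by_address(maps);
 *	}
 */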

DECLARE_RC_STRUCT(maps) {
	struct rw_semaphore lock;
	/**
	 * @maps_by_address: array of maps sorted by their starting address if
	 * maps_by_address_sorted is true.
	 */
	struct map	 **maps_by_address;
	/**
	 * @maps_by_name: optional array of maps sorted by their dso name if
	 * maps_by_name_sorted is true.
	 */
	struct map	 **maps_by_name;
	struct machine	 *machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	void		*addr_space;
	const struct unwind_libunwind_ops *unwind_libunwind_ops;
#endif
	refcount_t	 refcnt;
	/**
	 * @nr_maps: number of maps_by_address, and possibly maps_by_name,
	 * entries that contain maps.
	 */
	unsigned int	 nr_maps;
	/**
	 * @nr_maps_allocated: number of entries in maps_by_address and possibly
	 * maps_by_name.
	 */
	unsigned int	 nr_maps_allocated;
	/**
	 * @last_search_by_name_idx: cache of last found by name entry's index
	 * as frequent searches for the same dso name are common.
	 */
	unsigned int	 last_search_by_name_idx;
	/** @maps_by_address_sorted: is maps_by_address sorted. */
	bool		 maps_by_address_sorted;
	/** @maps_by_name_sorted: is maps_by_name sorted. */
	bool		 maps_by_name_sorted;
	/** @ends_broken: do the maps contain a map whose end is unset or out of order? */
	bool		 ends_broken;
};

static void check_invariants(const struct maps *maps __maybe_unused)
{
#ifndef NDEBUG
	assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated);
	for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
		struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];

		/* Check map is well-formed. */
		assert(map__end(map) == 0 || map__start(map) <= map__end(map));
		/* Expect at least 1 reference count. */
		assert(refcount_read(map__refcnt(map)) > 0);

		if (map__dso(map) && dso__kernel(map__dso(map)))
			assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));

		if (i > 0) {
			struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];

			/* If addresses are sorted... */
			if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) {
				/* Maps should be in start address order. */
				assert(map__start(prev) <= map__start(map));
				/*
				 * If the ends of maps aren't broken (during
				 * construction) then they should be ordered
				 * too.
				 */
				if (!RC_CHK_ACCESS(maps)->ends_broken) {
					assert(map__end(prev) <= map__end(map));
					assert(map__end(prev) <= map__start(map) ||
					       map__start(prev) == map__start(map));
				}
			}
		}
	}
	if (RC_CHK_ACCESS(maps)->maps_by_name) {
		for (unsigned int i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) {
			struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];

			/*
			 * Maps in maps_by_name are also in maps_by_address,
			 * so their reference count should be higher.
			 */
			assert(refcount_read(map__refcnt(map)) > 1);
		}
	}
#endif
}

static struct map **maps__maps_by_address(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address;
}

static void maps__set_maps_by_address(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_address = new;
}

static void maps__set_nr_maps_allocated(struct maps *maps, unsigned int nr_maps_allocated)
{
	RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_maps_allocated;
}

static void maps__set_nr_maps(struct maps *maps, unsigned int nr_maps)
{
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;
}

/* Not in the header, to aid reference counting. */
static struct map **maps__maps_by_name(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name;
}

static void maps__set_maps_by_name(struct maps *maps, struct map **new)
{
	RC_CHK_ACCESS(maps)->maps_by_name = new;
}

static bool maps__maps_by_address_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_address_sorted;
}

static void maps__set_maps_by_address_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = value;
}

static bool maps__maps_by_name_sorted(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->maps_by_name_sorted;
}

static void maps__set_maps_by_name_sorted(struct maps *maps, bool value)
{
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = value;
}

struct machine *maps__machine(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->machine;
}

unsigned int maps__nr_maps(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->nr_maps;
}

refcount_t *maps__refcnt(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->refcnt;
}

#ifdef HAVE_LIBUNWIND_SUPPORT
void *maps__addr_space(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->addr_space;
}

void maps__set_addr_space(struct maps *maps, void *addr_space)
{
	RC_CHK_ACCESS(maps)->addr_space = addr_space;
}

const struct unwind_libunwind_ops *maps__unwind_libunwind_ops(const struct maps *maps)
{
	return RC_CHK_ACCESS(maps)->unwind_libunwind_ops;
}

void maps__set_unwind_libunwind_ops(struct maps *maps, const struct unwind_libunwind_ops *ops)
{
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = ops;
}
#endif

static struct rw_semaphore *maps__lock(struct maps *maps)
{
	return &RC_CHK_ACCESS(maps)->lock;
}

static void maps__init(struct maps *maps, struct machine *machine)
{
	init_rwsem(maps__lock(maps));
	RC_CHK_ACCESS(maps)->maps_by_address = NULL;
	RC_CHK_ACCESS(maps)->maps_by_name = NULL;
	RC_CHK_ACCESS(maps)->machine = machine;
#ifdef HAVE_LIBUNWIND_SUPPORT
	RC_CHK_ACCESS(maps)->addr_space = NULL;
	RC_CHK_ACCESS(maps)->unwind_libunwind_ops = NULL;
#endif
	refcount_set(maps__refcnt(maps), 1);
	RC_CHK_ACCESS(maps)->nr_maps = 0;
	RC_CHK_ACCESS(maps)->nr_maps_allocated = 0;
	RC_CHK_ACCESS(maps)->last_search_by_name_idx = 0;
	RC_CHK_ACCESS(maps)->maps_by_address_sorted = true;
	RC_CHK_ACCESS(maps)->maps_by_name_sorted = false;
}

static void maps__exit(struct maps *maps)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);

	for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
		map__zput(maps_by_address[i]);
		if (maps_by_name)
			map__zput(maps_by_name[i]);
	}
	zfree(&maps_by_address);
	zfree(&maps_by_name);
	unwind__finish_access(maps);
}

struct maps *maps__new(struct machine *machine)
{
	struct maps *result;
	RC_STRUCT(maps) *maps = zalloc(sizeof(*maps));

	if (ADD_RC_CHK(result, maps))
		maps__init(result, machine);

	return result;
}

static void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	RC_CHK_FREE(maps);
}

struct maps *maps__get(struct maps *maps)
{
	struct maps *result;

	if (RC_CHK_GET(result, maps))
		refcount_inc(maps__refcnt(maps));

	return result;
}

void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(maps__refcnt(maps)))
		maps__delete(maps);
	else
		RC_CHK_PUT(maps);
}
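
/*
 * Example of the reference counting contract (illustrative):
 *
 *	struct maps *maps = maps__new(machine);	// refcount == 1
 *	struct maps *ref = maps__get(maps);	// refcount == 2
 *
 *	maps__put(ref);				// refcount == 1
 *	maps__put(maps);			// refcount == 0, freed
 */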

static void __maps__free_maps_by_name(struct maps *maps)
{
	if (!maps__maps_by_name(maps))
		return;

	/*
	 * Free everything; maps_by_name will be lazily rebuilt on the next
	 * search that needs it.
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);

	/* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false */
	maps__set_maps_by_name_sorted(maps, false);
}

static int map__start_cmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	u64 map_a_start = map__start(map_a);
	u64 map_b_start = map__start(map_b);

	if (map_a_start == map_b_start) {
		u64 map_a_end = map__end(map_a);
		u64 map_b_end = map__end(map_b);

		if (map_a_end == map_b_end) {
			/* Ensure maps with the same addresses have a fixed order. */
			if (RC_CHK_ACCESS(map_a) == RC_CHK_ACCESS(map_b))
				return 0;
			return (intptr_t)RC_CHK_ACCESS(map_a) > (intptr_t)RC_CHK_ACCESS(map_b)
				? 1 : -1;
		}
		return map_a_end > map_b_end ? 1 : -1;
	}
	return map_a_start > map_b_start ? 1 : -1;
}
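
/*
 * For example, [0x1000, 0x2000) sorts before [0x1000, 0x3000) because the
 * start addresses tie and the ends are then compared; two distinct map
 * structs both covering [0x1000, 0x2000) fall back to comparing the
 * underlying pointers, so qsort() and bsearch() see a stable total order.
 */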

static void __maps__sort_by_address(struct maps *maps)
{
	if (maps__maps_by_address_sorted(maps))
		return;

	qsort(maps__maps_by_address(maps),
		maps__nr_maps(maps),
		sizeof(struct map *),
		map__start_cmp);
	maps__set_maps_by_address_sorted(maps, true);
}

static void maps__sort_by_address(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__sort_by_address(maps);
	up_write(maps__lock(maps));
}

static int map__strcmp(const void *a, const void *b)
{
	const struct map *map_a = *(const struct map * const *)a;
	const struct map *map_b = *(const struct map * const *)b;
	const struct dso *dso_a = map__dso(map_a);
	const struct dso *dso_b = map__dso(map_b);
	int ret = strcmp(dso__short_name(dso_a), dso__short_name(dso_b));

	if (ret == 0 && RC_CHK_ACCESS(map_a) != RC_CHK_ACCESS(map_b)) {
		/* Ensure distinct but name equal maps have an order. */
		return map__start_cmp(a, b);
	}
	return ret;
}

static int maps__sort_by_name(struct maps *maps)
{
	int err = 0;

	down_write(maps__lock(maps));
	if (!maps__maps_by_name_sorted(maps)) {
		struct map **maps_by_name = maps__maps_by_name(maps);

		if (!maps_by_name) {
			maps_by_name = malloc(RC_CHK_ACCESS(maps)->nr_maps_allocated *
					sizeof(*maps_by_name));
			if (!maps_by_name)
				err = -ENOMEM;
			else {
				struct map **maps_by_address = maps__maps_by_address(maps);
				unsigned int n = maps__nr_maps(maps);

				maps__set_maps_by_name(maps, maps_by_name);
				for (unsigned int i = 0; i < n; i++)
					maps_by_name[i] = map__get(maps_by_address[i]);
			}
		}
		if (!err) {
			qsort(maps_by_name,
				maps__nr_maps(maps),
				sizeof(struct map *),
				map__strcmp);
			maps__set_maps_by_name_sorted(maps, true);
		}
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
	return err;
}
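
/*
 * Note that maps_by_name is built lazily: the first by-name search on a
 * maps without it pays for the allocation, the reference-counted copy and
 * the qsort() above; later lookups are binary searches, helped by the
 * last_search_by_name_idx cache in maps__find_by_name().
 */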

static unsigned int maps__by_address_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);

	if (maps__maps_by_address_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps__maps_by_address(maps), maps__nr_maps(maps),
				sizeof(*mapp), map__start_cmp);

		if (mapp)
			return mapp - maps_by_address;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_address[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps\n");
	return -1;
}

static unsigned int maps__by_name_index(const struct maps *maps, const struct map *map)
{
	struct map **maps_by_name = maps__maps_by_name(maps);

	if (maps__maps_by_name_sorted(maps)) {
		struct map **mapp =
			bsearch(&map, maps_by_name, maps__nr_maps(maps),
				sizeof(*mapp), map__strcmp);

		if (mapp)
			return mapp - maps_by_name;
	} else {
		for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
			if (RC_CHK_ACCESS(maps_by_name[i]) == RC_CHK_ACCESS(map))
				return i;
		}
	}
	pr_err("Map missing from maps\n");
	return -1;
}

static int __maps__insert(struct maps *maps, struct map *new)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	const struct dso *dso = map__dso(new);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;

	if (nr_maps + 1 > nr_allocate) {
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	/* Insert the value at the end. */
	maps_by_address[nr_maps] = map__get(new);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new);

	nr_maps++;
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps;

	/*
	 * Recompute if things are sorted. If things are inserted in a sorted
	 * manner, for example by processing /proc/pid/maps, then no
	 * sorting/resorting will be necessary.
	 */
	if (nr_maps == 1) {
		/* If there's just 1 entry then maps are sorted. */
		maps__set_maps_by_address_sorted(maps, true);
		maps__set_maps_by_name_sorted(maps, maps_by_name != NULL);
	} else {
		/* Sorted if maps were already sorted and this map starts after the last one. */
		maps__set_maps_by_address_sorted(maps,
			maps__maps_by_address_sorted(maps) &&
			map__end(maps_by_address[nr_maps - 2]) <= map__start(new));
		maps__set_maps_by_name_sorted(maps, false);
	}
	if (map__end(new) < map__start(new))
		RC_CHK_ACCESS(maps)->ends_broken = true;
	if (dso && dso__kernel(dso)) {
		struct kmap *kmap = map__kmap(new);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}
	return 0;
}

int maps__insert(struct maps *maps, struct map *map)
{
	int ret;

	down_write(maps__lock(maps));
	ret = __maps__insert(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
	return ret;
}
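
/*
 * Example usage (illustrative; the map construction arguments are elided
 * and error handling is minimal):
 *
 *	struct map *map = map__new(...);
 *
 *	if (maps__insert(maps, map))	// takes its own reference
 *		pr_err("insert failed\n");
 *	map__put(map);			// drop the local reference
 *
 * The backing arrays start at 32 entries and double when full
 * (32, 64, 128, ...), so n insertions cost O(n) amortized reallocation.
 */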

static void __maps__remove(struct maps *maps, struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int address_idx;

	/* Slide later mappings over the one to remove */
	address_idx = maps__by_address_index(maps, map);
	map__put(maps_by_address[address_idx]);
	memmove(&maps_by_address[address_idx],
		&maps_by_address[address_idx + 1],
		(nr_maps - address_idx - 1) * sizeof(*maps_by_address));

	if (maps_by_name) {
		unsigned int name_idx = maps__by_name_index(maps, map);

		map__put(maps_by_name[name_idx]);
		memmove(&maps_by_name[name_idx],
			&maps_by_name[name_idx + 1],
			(nr_maps - name_idx - 1) * sizeof(*maps_by_name));
	}

	--RC_CHK_ACCESS(maps)->nr_maps;
}

void maps__remove(struct maps *maps, struct map *map)
{
	down_write(maps__lock(maps));
	__maps__remove(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));
}

bool maps__empty(struct maps *maps)
{
	bool res;

	down_read(maps__lock(maps));
	res = maps__nr_maps(maps) == 0;
	up_read(maps__lock(maps));

	return res;
}

bool maps__equal(struct maps *a, struct maps *b)
{
	return RC_CHK_EQUAL(a, b);
}

int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{
	bool done = false;
	int ret = 0;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			/*
			 * maps__for_each_map callbacks may buggily/unsafely
			 * insert into maps_by_address. Deliberately reload
			 * maps__nr_maps and maps_by_address on each iteration
			 * to avoid using memory freed by maps__insert growing
			 * the array - this may cause maps to be skipped or
			 * repeated.
			 */
			for (unsigned int i = 0; i < maps__nr_maps(maps); i++) {
				struct map **maps_by_address = maps__maps_by_address(maps);
				struct map *map = maps_by_address[i];

				ret = cb(map, data);
				if (ret)
					break;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return ret;
}
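
/*
 * Example callback counting the maps (illustrative):
 *
 *	static int count_cb(struct map *map __maybe_unused, void *data)
 *	{
 *		unsigned int *cnt = data;
 *
 *		(*cnt)++;
 *		return 0;	// a non-zero return stops the iteration
 *	}
 *
 *	unsigned int cnt = 0;
 *	maps__for_each_map(maps, count_cb, &cnt);
 */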

void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data)
{
	struct map **maps_by_address;

	down_write(maps__lock(maps));

	maps_by_address = maps__maps_by_address(maps);
	for (unsigned int i = 0; i < maps__nr_maps(maps);) {
		if (cb(maps_by_address[i], data))
			__maps__remove(maps, maps_by_address[i]);
		else
			i++;
	}
	check_invariants(maps);
	up_write(maps__lock(maps));
}

struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);
	struct symbol *result = NULL;

	/* Ensure map is loaded before using map__map_ip(). */
	if (map != NULL && map__load(map) >= 0)
		result = map__find_symbol(map, map__map_ip(map, addr));

	if (mapp)
		*mapp = map;
	else
		map__put(map);

	return result;
}

struct maps__find_symbol_by_name_args {
	struct map **mapp;
	const char *name;
	struct symbol *sym;
};

static int maps__find_symbol_by_name_cb(struct map *map, void *data)
{
	struct maps__find_symbol_by_name_args *args = data;

	args->sym = map__find_symbol_by_name(map, args->name);
	if (!args->sym)
		return 0;

	if (!map__contains_symbol(map, args->sym)) {
		args->sym = NULL;
		return 0;
	}

	if (args->mapp != NULL)
		*args->mapp = map__get(map);
	return 1;
}

struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct maps__find_symbol_by_name_args args = {
		.mapp = mapp,
		.name = name,
		.sym = NULL,
	};

	maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args);
	return args.sym;
}

int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = map__map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}

struct maps__fprintf_args {
	FILE *fp;
	size_t printed;
};

static int maps__fprintf_cb(struct map *map, void *data)
{
	struct maps__fprintf_args *args = data;

	args->printed += fprintf(args->fp, "Map:");
	args->printed += map__fprintf(map, args->fp);
	if (verbose > 2) {
		args->printed += dso__fprintf(map__dso(map), args->fp);
		args->printed += fprintf(args->fp, "--\n");
	}
	return 0;
}

size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	struct maps__fprintf_args args = {
		.fp = fp,
		.printed = 0,
	};

	maps__for_each_map(maps, maps__fprintf_cb, &args);

	return args.printed;
}

/*
 * Find first map where end > map->start.
 * Same as find_vma() in kernel.
 */
static unsigned int first_ending_after(struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	int low = 0, high = (int)maps__nr_maps(maps) - 1, first = high + 1;

	assert(maps__maps_by_address_sorted(maps));
	if (low <= high && map__end(maps_by_address[0]) > map__start(map))
		return 0;

	while (low <= high) {
		int mid = (low + high) / 2;
		struct map *pos = maps_by_address[mid];

		if (map__end(pos) > map__start(map)) {
			first = mid;
			if (map__start(pos) <= map__start(map)) {
				/* Entry overlaps map. */
				break;
			}
			high = mid - 1;
		} else
			low = mid + 1;
	}
	return first;
}
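
/*
 * Worked example: with sorted maps [0x1000, 0x2000), [0x3000, 0x4000) and
 * [0x5000, 0x6000), a new map starting at 0x1800 yields index 0 (end
 * 0x2000 > 0x1800), one starting at 0x4000 yields index 2, and one
 * starting at 0x7000 yields nr_maps, i.e. no existing map ends after it.
 */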

static int __maps__insert_sorted(struct maps *maps, unsigned int first_after_index,
				 struct map *new1, struct map *new2)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	struct map **maps_by_name = maps__maps_by_name(maps);
	unsigned int nr_maps = maps__nr_maps(maps);
	unsigned int nr_allocate = RC_CHK_ACCESS(maps)->nr_maps_allocated;
	unsigned int to_add = new2 ? 2 : 1;

	assert(maps__maps_by_address_sorted(maps));
	assert(first_after_index == nr_maps ||
	       map__end(new1) <= map__start(maps_by_address[first_after_index]));
	assert(!new2 || map__end(new1) <= map__start(new2));
	assert(first_after_index == nr_maps || !new2 ||
	       map__end(new2) <= map__start(maps_by_address[first_after_index]));

	if (nr_maps + to_add > nr_allocate) {
		nr_allocate = !nr_allocate ? 32 : nr_allocate * 2;

		maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new1));
		if (!maps_by_address)
			return -ENOMEM;

		maps__set_maps_by_address(maps, maps_by_address);
		if (maps_by_name) {
			maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new1));
			if (!maps_by_name) {
				/*
				 * If by name fails, just disable by name and it will
				 * recompute next time it is required.
				 */
				__maps__free_maps_by_name(maps);
			}
			maps__set_maps_by_name(maps, maps_by_name);
		}
		RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
	}
	memmove(&maps_by_address[first_after_index + to_add],
		&maps_by_address[first_after_index],
		(nr_maps - first_after_index) * sizeof(new1));
	maps_by_address[first_after_index] = map__get(new1);
	if (maps_by_name)
		maps_by_name[nr_maps] = map__get(new1);
	if (new2) {
		maps_by_address[first_after_index + 1] = map__get(new2);
		if (maps_by_name)
			maps_by_name[nr_maps + 1] = map__get(new2);
	}
	RC_CHK_ACCESS(maps)->nr_maps = nr_maps + to_add;
	maps__set_maps_by_name_sorted(maps, false);
	check_invariants(maps);
	return 0;
}

/*
 * Adds new to maps, if new overlaps existing entries then the existing maps are
 * adjusted or removed so that new fits without overlapping any entries.
 */
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err = 0;
	FILE *fp = debug_file();
	unsigned int i;

	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	/*
	 * Iterate through entries where the end of the existing entry is
	 * greater than the new map's start.
	 */
	for (i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
		struct map **maps_by_address = maps__maps_by_address(maps);
		struct map *pos = maps_by_address[i];
		struct map *before = NULL, *after = NULL;

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (map__start(pos) >= map__end(new))
			break;

		if (use_browser) {
			pr_debug("overlapping maps in %s (disable tui for more info)\n",
				dso__name(map__dso(new)));
		} else if (verbose >= 2) {
			pr_debug("overlapping maps:\n");
			map__fprintf(new, fp);
			map__fprintf(pos, fp);
		}

		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(new) > map__start(pos)) {
			/* Map starts within existing map. Need to shorten the existing map. */
			before = map__clone(pos);

			if (before == NULL) {
				err = -ENOMEM;
				goto out_err;
			}
			map__set_end(before, map__start(new));

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
		}
		if (map__end(new) < map__end(pos)) {
			/* The new map isn't as long as the existing map. */
			after = map__clone(pos);

			if (after == NULL) {
				map__zput(before);
				err = -ENOMEM;
				goto out_err;
			}

			map__set_start(after, map__end(new));
			map__add_pgoff(after, map__end(new) - map__start(pos));
			assert(map__map_ip(pos, map__end(new)) ==
			       map__map_ip(after, map__end(new)));

			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
		}
		/*
		 * If adding one entry, for `before` or `after`, we can replace
		 * the existing entry. If both `before` and `after` are
		 * necessary then an insert is needed. If the new entry
		 * entirely overlaps the existing entry it can just be removed.
		 */
		if (before) {
			map__put(maps_by_address[i]);
			maps_by_address[i] = before;
			/* Maps are still ordered, go to next one. */
			i++;
			if (after) {
				/*
				 * 'before' and 'after' mean 'new' split the
				 * 'pos' mapping and therefore there are no
				 * later overlapping mappings.
				 */
				err = __maps__insert_sorted(maps, i, new, after);
				map__put(after);
				check_invariants(maps);
				return err;
			}
			check_invariants(maps);
		} else if (after) {
			/*
			 * 'after' means 'new' split 'pos' and there are no
			 * later overlapping mappings.
			 */
			map__put(maps_by_address[i]);
			maps_by_address[i] = map__get(new);
			err = __maps__insert_sorted(maps, i + 1, after, NULL);
			map__put(after);
			check_invariants(maps);
			return err;
		} else {
			struct map *next = NULL;

			if (i + 1 < maps__nr_maps(maps))
				next = maps_by_address[i + 1];

			if (!next || map__start(next) >= map__end(new)) {
				/*
				 * Replace the existing mapping and finish,
				 * knowing there are no later overlapping
				 * mappings (or no later mappings at all).
				 */
				map__put(maps_by_address[i]);
				maps_by_address[i] = map__get(new);
				check_invariants(maps);
				return err;
			}
			__maps__remove(maps, pos);
			check_invariants(maps);
			/*
			 * Maps are ordered but no need to increase `i` as the
			 * later maps were moved down.
			 */
		}
	}
	/* Add the map. */
	err = __maps__insert_sorted(maps, i, new, NULL);
out_err:
	return err;
}
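
/*
 * For example, inserting new [0x1800, 0x1c00) over an existing
 * pos [0x1000, 0x2000) yields three mappings:
 *
 *	before: [0x1000, 0x1800)  clone of pos with its end clipped
 *	new:    [0x1800, 0x1c00)
 *	after:  [0x1c00, 0x2000)  clone of pos with start and pgoff advanced
 */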

int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err;

	down_write(maps__lock(maps));
	err = __maps__fixup_overlap_and_insert(maps, new);
	up_write(maps__lock(maps));
	return err;
}

int maps__copy_from(struct maps *dest, struct maps *parent)
{
	/* Note, if struct map were immutable then cloning could use ref counts. */
	struct map **parent_maps_by_address;
	int err = 0;
	unsigned int n;

	down_write(maps__lock(dest));
	down_read(maps__lock(parent));

	parent_maps_by_address = maps__maps_by_address(parent);
	n = maps__nr_maps(parent);
	if (maps__nr_maps(dest) == 0) {
		/* No existing mappings so just copy from parent to avoid reallocs in insert. */
		unsigned int nr_maps_allocated = RC_CHK_ACCESS(parent)->nr_maps_allocated;
		struct map **dest_maps_by_address =
			malloc(nr_maps_allocated * sizeof(struct map *));
		struct map **dest_maps_by_name = NULL;

		if (!dest_maps_by_address)
			err = -ENOMEM;
		else {
			if (maps__maps_by_name(parent)) {
				dest_maps_by_name =
					malloc(nr_maps_allocated * sizeof(struct map *));
			}

			RC_CHK_ACCESS(dest)->maps_by_address = dest_maps_by_address;
			RC_CHK_ACCESS(dest)->maps_by_name = dest_maps_by_name;
			RC_CHK_ACCESS(dest)->nr_maps_allocated = nr_maps_allocated;
		}

		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err) {
					dest_maps_by_address[i] = new;
					if (dest_maps_by_name)
						dest_maps_by_name[i] = map__get(new);
					RC_CHK_ACCESS(dest)->nr_maps = i + 1;
				}
			}
			if (err)
				map__put(new);
		}
		maps__set_maps_by_address_sorted(dest, maps__maps_by_address_sorted(parent));
		if (!err) {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx =
				RC_CHK_ACCESS(parent)->last_search_by_name_idx;
			maps__set_maps_by_name_sorted(dest,
						dest_maps_by_name &&
						maps__maps_by_name_sorted(parent));
		} else {
			RC_CHK_ACCESS(dest)->last_search_by_name_idx = 0;
			maps__set_maps_by_name_sorted(dest, false);
		}
	} else {
		/* Unexpected copying to a maps containing entries. */
		for (unsigned int i = 0; !err && i < n; i++) {
			struct map *pos = parent_maps_by_address[i];
			struct map *new = map__clone(pos);

			if (!new)
				err = -ENOMEM;
			else {
				err = unwind__prepare_access(dest, new, NULL);
				if (!err)
					err = __maps__insert(dest, new);
			}
			map__put(new);
		}
	}
	check_invariants(dest);

	up_read(maps__lock(parent));
	up_write(maps__lock(dest));
	return err;
}

static int map__addr_cmp(const void *key, const void *entry)
{
	const u64 ip = *(const u64 *)key;
	const struct map *map = *(const struct map * const *)entry;

	if (ip < map__start(map))
		return -1;
	if (ip >= map__end(map))
		return 1;
	return 0;
}

struct map *maps__find(struct maps *maps, u64 ip)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		down_read(maps__lock(maps));
		if (maps__maps_by_address_sorted(maps)) {
			struct map **mapp =
				bsearch(&ip, maps__maps_by_address(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__addr_cmp);

			if (mapp)
				result = map__get(*mapp);
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done)
			maps__sort_by_address(maps);
	}
	return result;
}
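
/*
 * Example lookup (illustrative; 'ip' stands for any address of interest):
 *
 *	struct map *map = maps__find(maps, ip);
 *
 *	if (map) {
 *		u64 rip = map__map_ip(map, ip);	// dso-relative address
 *
 *		...
 *		map__put(map);	// maps__find() returned a reference
 *	}
 */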

static int map__strcmp_name(const void *name, const void *b)
{
	const struct dso *dso = map__dso(*(const struct map **)b);

	return strcmp(name, dso__short_name(dso));
}

struct map *maps__find_by_name(struct maps *maps, const char *name)
{
	struct map *result = NULL;
	bool done = false;

	/* See locking/sorting note. */
	while (!done) {
		unsigned int i;

		down_read(maps__lock(maps));

		/* First check last found entry. */
		i = RC_CHK_ACCESS(maps)->last_search_by_name_idx;
		if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) {
			struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);

			if (dso && strcmp(dso__short_name(dso), name) == 0) {
				result = map__get(maps__maps_by_name(maps)[i]);
				done = true;
			}
		}

		/* Second search sorted array. */
		if (!done && maps__maps_by_name_sorted(maps)) {
			struct map **mapp =
				bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
					sizeof(*mapp), map__strcmp_name);

			if (mapp) {
				result = map__get(*mapp);
				i = mapp - maps__maps_by_name(maps);
				RC_CHK_ACCESS(maps)->last_search_by_name_idx = i;
			}
			done = true;
		}
		up_read(maps__lock(maps));
		if (!done) {
			/* Sort and retry binary search. */
			if (maps__sort_by_name(maps)) {
				/*
				 * Memory allocation failed, so do a linear
				 * search through the address sorted maps.
				 */
				struct map **maps_by_address;
				unsigned int n;

				down_read(maps__lock(maps));
				maps_by_address = maps__maps_by_address(maps);
				n = maps__nr_maps(maps);
				for (i = 0; i < n; i++) {
					struct map *pos = maps_by_address[i];
					struct dso *dso = map__dso(pos);

					if (dso && strcmp(dso__short_name(dso), name) == 0) {
						result = map__get(pos);
						break;
					}
				}
				up_read(maps__lock(maps));
				done = true;
			}
		}
	}
	return result;
}

struct map *maps__find_next_entry(struct maps *maps, struct map *map)
{
	unsigned int i;
	struct map *result = NULL;

	down_read(maps__lock(maps));
	i = maps__by_address_index(maps, map);
	/* Return the entry following 'map', not 'map' itself. */
	if (i + 1 < maps__nr_maps(maps))
		result = map__get(maps__maps_by_address(maps)[i + 1]);

	up_read(maps__lock(maps));
	return result;
}

void maps__fixup_end(struct maps *maps)
{
	struct map **maps_by_address;
	unsigned int n;

	down_write(maps__lock(maps));
	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	maps_by_address = maps__maps_by_address(maps);
	n = maps__nr_maps(maps);
	for (unsigned int i = 1; i < n; i++) {
		struct map *prev = maps_by_address[i - 1];
		struct map *curr = maps_by_address[i];

		if (!map__end(prev) || map__end(prev) > map__start(curr))
			map__set_end(prev, map__start(curr));
	}

	/*
	 * We still don't have the actual symbols, so guess the
	 * last map's final address.
	 */
	if (n > 0 && !map__end(maps_by_address[n - 1]))
		map__set_end(maps_by_address[n - 1], ~0ULL);

	RC_CHK_ACCESS(maps)->ends_broken = false;
	check_invariants(maps);

	up_write(maps__lock(maps));
}
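
/*
 * For example, maps derived from /proc/kallsyms often have unknown ends:
 *
 *	before: [0x1000, 0) [0x3000, 0) [0x5000, 0)
 *	after:  [0x1000, 0x3000) [0x3000, 0x5000) [0x5000, ~0ULL)
 *
 * Each end is clamped to the next map's start and the last end is guessed.
 */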

/*
 * Merges map into maps by splitting the new map within the existing map
 * regions.
 */
int maps__merge_in(struct maps *kmaps, struct map *new_map)
{
	unsigned int first_after_, kmaps__nr_maps;
	struct map **kmaps_maps_by_address;
	struct map **merged_maps_by_address;
	unsigned int merged_nr_maps_allocated;

	/* First try under a read lock. */
	while (true) {
		down_read(maps__lock(kmaps));
		if (maps__maps_by_address_sorted(kmaps))
			break;

		up_read(maps__lock(kmaps));

		/* First after binary search requires sorted maps. Sort and try again. */
		maps__sort_by_address(kmaps);
	}
	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);

	if (first_after_ >= maps__nr_maps(kmaps) ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		up_read(maps__lock(kmaps));
		return maps__insert(kmaps, new_map);
	}
	up_read(maps__lock(kmaps));

	/* Plain insert with a read-lock failed, try again now with the write lock. */
	down_write(maps__lock(kmaps));
	if (!maps__maps_by_address_sorted(kmaps))
		__maps__sort_by_address(kmaps);

	first_after_ = first_ending_after(kmaps, new_map);
	kmaps_maps_by_address = maps__maps_by_address(kmaps);
	kmaps__nr_maps = maps__nr_maps(kmaps);

	if (first_after_ >= kmaps__nr_maps ||
	    map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) {
		/* No overlap so regular insert suffices. */
		int ret = __maps__insert(kmaps, new_map);

		check_invariants(kmaps);
		up_write(maps__lock(kmaps));
		return ret;
	}
	/* Array to merge into, possibly 1 more for the sake of new_map. */
	merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated;
	if (kmaps__nr_maps + 1 == merged_nr_maps_allocated)
		merged_nr_maps_allocated++;

	merged_maps_by_address = malloc(merged_nr_maps_allocated * sizeof(*merged_maps_by_address));
	if (!merged_maps_by_address) {
		up_write(maps__lock(kmaps));
		return -ENOMEM;
	}
	maps__set_maps_by_address(kmaps, merged_maps_by_address);
	maps__set_maps_by_address_sorted(kmaps, true);
	__maps__free_maps_by_name(kmaps);
	maps__set_nr_maps_allocated(kmaps, merged_nr_maps_allocated);

	/* Copy entries before the new_map that can't overlap. */
	for (unsigned int i = 0; i < first_after_; i++)
		merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]);

	maps__set_nr_maps(kmaps, first_after_);

	/* Add the new map, it will be split when the later overlapping mappings are added. */
	__maps__insert(kmaps, new_map);

	/* Insert mappings after new_map, splitting new_map in the process. */
	for (unsigned int i = first_after_; i < kmaps__nr_maps; i++)
		__maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]);

	/* Drop the references held by the old array and free it. */
	for (unsigned int i = 0; i < kmaps__nr_maps; i++)
		map__zput(kmaps_maps_by_address[i]);

	free(kmaps_maps_by_address);
	check_invariants(kmaps);
	up_write(maps__lock(kmaps));
	return 0;
}
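
/*
 * For example, merging a large kcore text map over an existing module map:
 * with kmaps holding mod_a at [0x2000, 0x3000) and new_map covering
 * [0x1000, 0x5000), __maps__fixup_overlap_and_insert() splits new_map
 * around mod_a, leaving
 *
 *	[0x1000, 0x2000) new_map
 *	[0x2000, 0x3000) mod_a
 *	[0x3000, 0x5000) new_map
 */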

void maps__load_first(struct maps *maps)
{
	down_read(maps__lock(maps));

	if (maps__nr_maps(maps) > 0)
		map__load(maps__maps_by_address(maps)[0]);

	up_read(maps__lock(maps));
}