xref: /linux/tools/perf/util/maps.c (revision f7a858bffcddaaf70c71b6b656e7cc21b6107cec)
// SPDX-License-Identifier: GPL-2.0
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <linux/zalloc.h>
#include "debug.h"
#include "dso.h"
#include "map.h"
#include "maps.h"
#include "thread.h"
#include "ui/ui.h"
#include "unwind.h"

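/* Initialize an empty, refcounted set of maps owned by 'machine'. */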
static void maps__init(struct maps *maps, struct machine *machine)
{
	maps->entries = RB_ROOT;
	init_rwsem(maps__lock(maps));
	maps->machine = machine;
	maps->last_search_by_name = NULL;
	maps->nr_maps = 0;
	maps->maps_by_name = NULL;
	refcount_set(&maps->refcnt, 1);
}

static void __maps__free_maps_by_name(struct maps *maps)
{
	/*
	 * Free the name-sorted array so that the next search by name falls
	 * back to the rbtree and rebuilds it.
	 */
	zfree(&maps->maps_by_name);
	maps->nr_maps_allocated = 0;
}

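/*
 * Insert 'map' into the rbtree, keyed by its start address, taking a new
 * reference on it.  The caller must hold the maps write lock.
 */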
static int __maps__insert(struct maps *maps, struct map *map)
{
	struct rb_node **p = &maps__entries(maps)->rb_node;
	struct rb_node *parent = NULL;
	const u64 ip = map__start(map);
	struct map_rb_node *m, *new_rb_node;

	new_rb_node = malloc(sizeof(*new_rb_node));
	if (!new_rb_node)
		return -ENOMEM;

	RB_CLEAR_NODE(&new_rb_node->rb_node);
	new_rb_node->map = map;

	while (*p != NULL) {
		parent = *p;
		m = rb_entry(parent, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&new_rb_node->rb_node, parent, p);
	rb_insert_color(&new_rb_node->rb_node, maps__entries(maps));
	map__get(map);
	return 0;
}

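/*
 * Locked insertion: add 'map', bump nr_maps, wire up the kmaps back pointer
 * for kernel dsos and, if the name-sorted array already exists, grow it and
 * re-sort it so lookups by name stay consistent.
 */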
int maps__insert(struct maps *maps, struct map *map)
{
	int err;
	const struct dso *dso = map__dso(map);

	down_write(maps__lock(maps));
	err = __maps__insert(maps, map);
	if (err)
		goto out;

	++maps->nr_maps;

	if (dso && dso->kernel) {
		struct kmap *kmap = map__kmap(map);

		if (kmap)
			kmap->kmaps = maps;
		else
			pr_err("Internal error: kernel dso with non kernel map\n");
	}

	/*
	 * If a search by name was already performed, the just-inserted map
	 * has to be added to the name-sorted array, which is then re-sorted.
	 */
	if (maps__maps_by_name(maps)) {
		if (maps__nr_maps(maps) > maps->nr_maps_allocated) {
			int nr_allocate = maps__nr_maps(maps) * 2;
			struct map **maps_by_name = realloc(maps__maps_by_name(maps),
							    nr_allocate * sizeof(map));

			if (maps_by_name == NULL) {
				__maps__free_maps_by_name(maps);
				err = -ENOMEM;
				goto out;
			}

			maps->maps_by_name = maps_by_name;
			maps->nr_maps_allocated = nr_allocate;
		}
		maps__maps_by_name(maps)[maps__nr_maps(maps) - 1] = map;
		__maps__sort_by_name(maps);
	}
 out:
	up_write(maps__lock(maps));
	return err;
}

static void __maps__remove(struct maps *maps, struct map_rb_node *rb_node)
{
	rb_erase_init(&rb_node->rb_node, maps__entries(maps));
	map__put(rb_node->map);
	free(rb_node);
}

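/*
 * Remove 'map' under the write lock: erase its rbtree node, drop its
 * reference and invalidate the name-sorted array and, if needed, the cached
 * last search by name.
 */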
void maps__remove(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	down_write(maps__lock(maps));
	if (maps->last_search_by_name == map)
		maps->last_search_by_name = NULL;

	rb_node = maps__find_node(maps, map);
	assert(rb_node->map == map);
	__maps__remove(maps, rb_node);
	--maps->nr_maps;
	if (maps__maps_by_name(maps))
		__maps__free_maps_by_name(maps);
	up_write(maps__lock(maps));
}

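/* Drop every map and its rbtree node; used by maps__exit() on teardown. */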
static void __maps__purge(struct maps *maps)
{
	struct map_rb_node *pos, *next;

	maps__for_each_entry_safe(maps, pos, next) {
		rb_erase_init(&pos->rb_node, maps__entries(maps));
		map__put(pos->map);
		free(pos);
	}
}

static void maps__exit(struct maps *maps)
{
	down_write(maps__lock(maps));
	__maps__purge(maps);
	up_write(maps__lock(maps));
}

bool maps__empty(struct maps *maps)
{
	return !maps__first(maps);
}

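/* Allocate an empty maps with a single reference; drop it with maps__put(). */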
struct maps *maps__new(struct machine *machine)
{
	struct maps *maps = zalloc(sizeof(*maps));

	if (maps != NULL)
		maps__init(maps, machine);

	return maps;
}

void maps__delete(struct maps *maps)
{
	maps__exit(maps);
	unwind__finish_access(maps);
	free(maps);
}

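/*
 * Grab an extra reference (NULL-safe); paired with maps__put(), which
 * deletes the maps when the last reference goes away.
 */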
struct maps *maps__get(struct maps *maps)
{
	if (maps)
		refcount_inc(&maps->refcnt);

	return maps;
}

void maps__put(struct maps *maps)
{
	if (maps && refcount_dec_and_test(&maps->refcnt))
		maps__delete(maps);
}

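/*
 * Find the map covering 'addr', make sure its symbol table is loaded and
 * resolve the symbol at the corresponding dso-relative address.  If 'mapp'
 * is non-NULL it gets the map that was found.
 */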
struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp)
{
	struct map *map = maps__find(maps, addr);

	/* Ensure map is loaded before using map->map_ip */
	if (map != NULL && map__load(map) >= 0) {
		if (mapp != NULL)
			*mapp = map;
		return map__find_symbol(map, map->map_ip(map, addr));
	}

	return NULL;
}

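/*
 * Walk every map, looking 'name' up in its dso's symbol table and skipping
 * hits that fall outside that particular mapping.  Returns the first match,
 * optionally reporting the map it was found in via 'mapp'.
 */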
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp)
{
	struct symbol *sym;
	struct map_rb_node *pos;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, pos) {
		sym = map__find_symbol_by_name(pos->map, name);

		if (sym == NULL)
			continue;
		if (!map__contains_symbol(pos->map, sym)) {
			sym = NULL;
			continue;
		}
		if (mapp != NULL)
			*mapp = pos->map;
		goto out;
	}

	sym = NULL;
out:
	up_read(maps__lock(maps));
	return sym;
}

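/*
 * Resolve an addr_map_symbol: if the cached map doesn't cover ams->addr,
 * look up the right one, then compute the dso-relative address and its
 * symbol.  Returns 0 on success, -1 if no map or symbol was found.
 */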
int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams)
{
	if (ams->addr < map__start(ams->ms.map) || ams->addr >= map__end(ams->ms.map)) {
		if (maps == NULL)
			return -1;
		ams->ms.map = maps__find(maps, ams->addr);
		if (ams->ms.map == NULL)
			return -1;
	}

	ams->al_addr = ams->ms.map->map_ip(ams->ms.map, ams->addr);
	ams->ms.sym = map__find_symbol(ams->ms.map, ams->al_addr);

	return ams->ms.sym ? 0 : -1;
}

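/* Print all maps to 'fp'; with verbose > 2 also dump each map's dso. */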
size_t maps__fprintf(struct maps *maps, FILE *fp)
{
	size_t printed = 0;
	struct map_rb_node *pos;

	down_read(maps__lock(maps));

	maps__for_each_entry(maps, pos) {
		printed += fprintf(fp, "Map:");
		printed += map__fprintf(pos->map, fp);
		if (verbose > 2) {
			printed += dso__fprintf(map__dso(pos->map), fp);
			printed += fprintf(fp, "--\n");
		}
	}

	up_read(maps__lock(maps));

	return printed;
}

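/*
 * Make room for 'map': remove every existing map that overlaps it and, when
 * only part of an old map is covered, re-insert clones truncated to the
 * non-overlapping leading and/or trailing ranges.
 */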
int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
{
	struct rb_root *root;
	struct rb_node *next, *first;
	int err = 0;

	down_write(maps__lock(maps));

	root = maps__entries(maps);

	/*
	 * Find the first map whose end is above map->start, same idea as
	 * find_vma() in the kernel.
	 */
	next = root->rb_node;
	first = NULL;
	while (next) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);

		if (map__end(pos->map) > map__start(map)) {
			first = next;
			if (map__start(pos->map) <= map__start(map))
				break;
			next = next->rb_left;
		} else
			next = next->rb_right;
	}

	next = first;
	while (next) {
		struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node);
		next = rb_next(&pos->rb_node);

		/*
		 * Stop if the current map starts after map->end: maps are
		 * ordered by start address, so the following ones cannot
		 * overlap either.
		 */
		if (map__start(pos->map) >= map__end(map))
			break;

		if (verbose >= 2) {
			if (use_browser) {
				pr_debug("overlapping maps in %s (disable tui for more info)\n",
					 map__dso(map)->name);
			} else {
				fputs("overlapping maps:\n", fp);
				map__fprintf(map, fp);
				map__fprintf(pos->map, fp);
			}
		}

		rb_erase_init(&pos->rb_node, root);
		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(map) > map__start(pos->map)) {
			struct map *before = map__clone(pos->map);

			if (before == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			before->end = map__start(map);
			err = __maps__insert(maps, before);
			if (err)
				goto put_map;

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
			map__put(before);
		}

		if (map__end(map) < map__end(pos->map)) {
			struct map *after = map__clone(pos->map);

			if (after == NULL) {
				err = -ENOMEM;
				goto put_map;
			}

			after->start = map__end(map);
			after->pgoff += map__end(map) - map__start(pos->map);
			assert(pos->map->map_ip(pos->map, map__end(map)) ==
				after->map_ip(after, map__end(map)));
			err = __maps__insert(maps, after);
			if (err)
				goto put_map;

			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
			map__put(after);
		}
put_map:
		map__put(pos->map);

		if (err)
			goto out;
	}

	err = 0;
out:
	up_write(maps__lock(maps));
	return err;
}

/*
 * XXX This should not really _copy_ the maps, but refcount them.
 */
int maps__clone(struct thread *thread, struct maps *parent)
{
	struct maps *maps = thread->maps;
	int err;
	struct map_rb_node *rb_node;

	down_read(maps__lock(parent));

	maps__for_each_entry(parent, rb_node) {
		struct map *new = map__clone(rb_node->map);

		if (new == NULL) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = unwind__prepare_access(maps, new, NULL);
		if (err)
			goto out_unlock;

		err = maps__insert(maps, new);
		if (err)
			goto out_unlock;

		map__put(new);
	}

	err = 0;
out_unlock:
	up_read(maps__lock(parent));
	return err;
}

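/* Linear search for the rbtree node wrapping 'map', NULL if it isn't there. */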
struct map_rb_node *maps__find_node(struct maps *maps, struct map *map)
{
	struct map_rb_node *rb_node;

	maps__for_each_entry(maps, rb_node) {
		if (rb_node->map == map)
			return rb_node;
	}
	return NULL;
}

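/*
 * Binary search the rbtree for the map whose [start, end) range contains
 * 'ip', taking the read lock for the duration of the walk.
 */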
struct map *maps__find(struct maps *maps, u64 ip)
{
	struct rb_node *p;
	struct map_rb_node *m;

	down_read(maps__lock(maps));

	p = maps__entries(maps)->rb_node;
	while (p != NULL) {
		m = rb_entry(p, struct map_rb_node, rb_node);
		if (ip < map__start(m->map))
			p = p->rb_left;
		else if (ip >= map__end(m->map))
			p = p->rb_right;
		else
			goto out;
	}

	m = NULL;
out:
	up_read(maps__lock(maps));
	return m ? m->map : NULL;
}

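/*
 * In-order traversal helpers over the rbtree of maps: the first node, and
 * the successor of a given node.
 */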
struct map_rb_node *maps__first(struct maps *maps)
{
	struct rb_node *first = rb_first(maps__entries(maps));

	if (first)
		return rb_entry(first, struct map_rb_node, rb_node);
	return NULL;
}

struct map_rb_node *map_rb_node__next(struct map_rb_node *node)
{
	struct rb_node *next;

	if (!node)
		return NULL;

	next = rb_next(&node->rb_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct map_rb_node, rb_node);
}
470