xref: /linux/tools/perf/util/dso.c (revision 9f3926e08c26607a0dd5b1bc8a8aa1d03f72fcdc)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <asm/bug.h>
3 #include <linux/kernel.h>
4 #include <sys/time.h>
5 #include <sys/resource.h>
6 #include <sys/types.h>
7 #include <sys/stat.h>
8 #include <unistd.h>
9 #include <errno.h>
10 #include <fcntl.h>
11 #include <libgen.h>
12 #include <bpf/libbpf.h>
13 #include "bpf-event.h"
14 #include "compress.h"
15 #include "namespaces.h"
16 #include "path.h"
17 #include "map.h"
18 #include "symbol.h"
19 #include "srcline.h"
20 #include "dso.h"
21 #include "machine.h"
22 #include "auxtrace.h"
23 #include "util.h"
24 #include "debug.h"
25 #include "string2.h"
26 #include "vdso.h"
27 
/*
 * Candidate locations of the debug object named by a .gnu_debuglink
 * section, tried in order by dso__read_binary_type_filename().  Each
 * entry is a printf format applied to (dso_dir, symfile):
 *   "%.0s%s"              - symfile alone ("%.0s" swallows dso_dir)
 *   "%s/%s"               - next to the DSO
 *   "%s/.debug/%s"        - in a .debug subdirectory of the DSO's dir
 *   "/usr/lib/debug%s/%s" - under the global debug root
 */
static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};
34 
/*
 * Return a single-character tag describing where @dso's symbol table
 * came from (used in verbose output), or '!' when @dso is NULL or no
 * symbol table was found.
 */
char dso__symtab_origin(const struct dso *dso)
{
	/* Indexed by enum dso_binary_type; unlisted entries are 0. */
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
	};

	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso->symtab_type];
}
61 
/*
 * Build into @filename (a buffer of @size bytes) the on-disk path
 * where the symbol/debug data of the given binary @type for @dso
 * should live.  @root_dir is the (guest) root directory prefix used
 * for guest kernel modules.
 *
 * Returns 0 on success, -1 when the type has no backing file or the
 * lookup failed.
 */
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		/* Start from the symfs-prefixed path of the DSO itself. */
		len = __symbol__join_symfs(filename, size, dso->long_name);
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		/* Directory part of the DSO path, fed to debuglink_paths. */
		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash-filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		/* Debug file name recorded in the .gnu_debuglink section. */
		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
					debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size, false) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		if (dso__build_id_filename(dso, filename, size, true) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		/* /usr/lib/debug/<long_name>.debug */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		/* /usr/lib/debug/<long_name> (no extra suffix) */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso->long_name);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		/* <dir>/.debug/<base>: insert ".debug" before the basename. */
		const char *last_slash;
		size_t dir_size;

		last_slash = dso->long_name + dso->long_name_len;
		while (last_slash != dso->long_name && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		/* +2: keep the trailing '/' and leave room for the NUL. */
		dir_size = last_slash - dso->long_name + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s",  dso->long_name);
		len += scnprintf(filename + len , size - len, ".debug%s",
								last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso->has_build_id) {
			ret = -1;
			break;
		}

		/* .build-id/<first 2 hex chars>/<rest>.debug layout. */
		build_id__sprintf(dso->build_id,
				  sizeof(dso->build_id),
				  build_id_hex);
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		/* Guest modules live under the guest's root_dir. */
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso->long_name);
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso->long_name);
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		/* kcore paths are used as-is, no symfs prefix. */
		snprintf(filename, size, "%s", dso->long_name);
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__NOT_FOUND:
		/* These types have no regular backing file. */
		ret = -1;
		break;
	}

	return ret;
}
197 
/* Index 0 of the compressions[] table means "not compressed". */
enum {
	COMP_ID__NONE = 0,
};

/*
 * Table of supported kernel-module compression formats, indexed by
 * the comp id stored in dso->comp / kmod_path.comp.  Terminated by a
 * NULL fmt entry; actual entries depend on build-time support.
 */
static const struct {
	const char *fmt;				/* file extension, e.g. "gz" */
	int (*decompress)(const char *input, int output); /* decompress to fd */
	bool (*is_compressed)(const char *input);	/* sniff file contents */
} compressions[] = {
	[COMP_ID__NONE] = { .fmt = NULL, },
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
#endif
	{ NULL, NULL, NULL },
};
216 
217 static int is_supported_compression(const char *ext)
218 {
219 	unsigned i;
220 
221 	for (i = 1; compressions[i].fmt; i++) {
222 		if (!strcmp(ext, compressions[i].fmt))
223 			return i;
224 	}
225 	return COMP_ID__NONE;
226 }
227 
228 bool is_kernel_module(const char *pathname, int cpumode)
229 {
230 	struct kmod_path m;
231 	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
232 
233 	WARN_ONCE(mode != cpumode,
234 		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
235 		  cpumode);
236 
237 	switch (mode) {
238 	case PERF_RECORD_MISC_USER:
239 	case PERF_RECORD_MISC_HYPERVISOR:
240 	case PERF_RECORD_MISC_GUEST_USER:
241 		return false;
242 	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
243 	default:
244 		if (kmod_path__parse(&m, pathname)) {
245 			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
246 					pathname);
247 			return true;
248 		}
249 	}
250 
251 	return m.kmod;
252 }
253 
254 bool dso__needs_decompress(struct dso *dso)
255 {
256 	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
257 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
258 }
259 
/*
 * Decompress the module file @name into an unlinked temporary file
 * and return an open fd to it, or -1 on error (dso->load_errno is
 * set).  When @pathname is non-NULL and decompression succeeds, the
 * temp file's path is copied there (up to @len bytes) and the file is
 * kept on disk; otherwise it is unlinked immediately so only the fd
 * keeps it alive.
 */
static int decompress_kmodule(struct dso *dso, const char *name,
			      char *pathname, size_t len)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd = -1;

	if (!dso__needs_decompress(dso))
		return -1;

	if (dso->comp == COMP_ID__NONE)
		return -1;

	/*
	 * We have proper compression id for DSO and yet the file
	 * behind the 'name' can still be plain uncompressed object.
	 *
	 * The reason is behind the logic we open the DSO object files,
	 * when we try all possible 'debug' objects until we find the
	 * data. So even if the DSO is represented by 'krava.xz' module,
	 * we can end up here opening ~/.debug/....23432432/debug' file
	 * which is not compressed.
	 *
	 * To keep this transparent, we detect this and return the file
	 * descriptor to the uncompressed file.
	 */
	if (!compressions[dso->comp].is_compressed(name))
		return open(name, O_RDONLY);

	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		dso->load_errno = errno;
		return -1;
	}

	if (compressions[dso->comp].decompress(name, fd)) {
		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	/* Nobody needs the file on disk: drop it, the fd stays valid. */
	if (!pathname || (fd < 0))
		unlink(tmpbuf);

	if (pathname && (fd >= 0))
		strlcpy(pathname, tmpbuf, len);

	return fd;
}
308 
/*
 * Decompress module @name and return an fd to the (unlinked)
 * decompressed data, or -1 on error.  Caller owns the fd.
 */
int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
	return decompress_kmodule(dso, name, NULL, 0);
}
313 
/*
 * Decompress module @name to a temporary file whose path is stored in
 * @pathname (up to @len bytes).  Returns 0 on success, -1 on error.
 * The caller is responsible for unlinking the file.
 */
int dso__decompress_kmodule_path(struct dso *dso, const char *name,
				 char *pathname, size_t len)
{
	int fd = decompress_kmodule(dso, name, pathname, len);

	/* Don't close(-1): it fails with EBADF and clobbers errno. */
	if (fd < 0)
		return -1;

	close(fd);
	return 0;
}
322 
/*
 * Parses kernel module specified in @path and updates
 * @m argument like:
 *
 *    @comp - id of the supported compression format if @path has a
 *            compression suffix, COMP_ID__NONE otherwise
 *    @kmod - true if @path contains '.ko' suffix in right position,
 *            false otherwise
 *    @name - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *            of the kernel module without suffixes, otherwise strdup-ed
 *            base name of @path
 *
 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	/* Base name: everything after the last '/', or the whole path. */
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * name should belong to kernel itself, not kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vdso32]", 8) == 0) ||
		    (strncmp(name, "[vdsox32]", 9) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	m->comp = is_supported_compression(ext + 1);
	if (m->comp > COMP_ID__NONE)
		/* Step back over ".ko" in names like "foo.ko.gz". */
		ext -= 3;

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			/* Module name in brackets, suffixes stripped. */
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		/* Module names use '_' where the file name has '-'. */
		strxfrchar(m->name, '-', '_');
	}

	return 0;
}
402 
403 void dso__set_module_info(struct dso *dso, struct kmod_path *m,
404 			  struct machine *machine)
405 {
406 	if (machine__is_host(machine))
407 		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
408 	else
409 		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
410 
411 	/* _KMODULE_COMP should be next to _KMODULE */
412 	if (m->kmod && m->comp) {
413 		dso->symtab_type++;
414 		dso->comp = m->comp;
415 	}
416 
417 	dso__set_short_name(dso, strdup(m->name), true);
418 }
419 
/*
 * Global list of open DSOs and the counter.
 * list_add_tail() in dso__list_add() puts new entries at the tail,
 * so the head holds the least recently opened DSO; all three are
 * protected by dso__data_open_lock.
 */
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt;
static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
426 
/* Track @dso as having an open data fd (caller holds dso__data_open_lock). */
static void dso__list_add(struct dso *dso)
{
	list_add_tail(&dso->data.open_entry, &dso__data_open);
	dso__data_open_cnt++;
}
432 
/* Remove @dso from the open-fd list (caller holds dso__data_open_lock). */
static void dso__list_del(struct dso *dso)
{
	list_del(&dso->data.open_entry);
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}
440 
441 static void close_first_dso(void);
442 
443 static int do_open(char *name)
444 {
445 	int fd;
446 	char sbuf[STRERR_BUFSIZE];
447 
448 	do {
449 		fd = open(name, O_RDONLY|O_CLOEXEC);
450 		if (fd >= 0)
451 			return fd;
452 
453 		pr_debug("dso open failed: %s\n",
454 			 str_error_r(errno, sbuf, sizeof(sbuf)));
455 		if (!dso__data_open_cnt || errno != EMFILE)
456 			break;
457 
458 		close_first_dso();
459 	} while (1);
460 
461 	return -1;
462 }
463 
/*
 * Resolve @dso's backing file for its binary_type and open it.
 * Compressed kernel modules are decompressed to a temporary file
 * first, which is unlinked again once opened.  Returns an fd >= 0 on
 * success or a negative error value.
 */
static int __open_dso(struct dso *dso, struct machine *machine)
{
	int fd = -EINVAL;
	char *root_dir = (char *)"";
	char *name = malloc(PATH_MAX);
	bool decomp = false;

	if (!name)
		return -ENOMEM;

	if (machine)
		root_dir = machine->root_dir;

	if (dso__read_binary_type_filename(dso, dso->binary_type,
					    root_dir, name, PATH_MAX))
		goto out;

	if (!is_regular_file(name))
		goto out;

	if (dso__needs_decompress(dso)) {
		char newpath[KMOD_DECOMP_LEN];
		size_t len = sizeof(newpath);

		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
			fd = -dso->load_errno;
			goto out;
		}

		/* From here on open the decompressed temp file instead. */
		decomp = true;
		strcpy(name, newpath);
	}

	fd = do_open(name);

	/* The open fd keeps the temp file's data alive; drop the path. */
	if (decomp)
		unlink(name);

out:
	free(name);
	return fd;
}
506 
507 static void check_data_close(void);
508 
/**
 * open_dso - Open DSO data file
 * @dso: dso object
 *
 * Open @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
{
	int fd;
	struct nscookie nsc;

	/*
	 * Enter the dso's mount namespace unless the file lives in the
	 * build-id cache, which is on the host filesystem.
	 */
	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_enter(dso->nsinfo, &nsc);
	fd = __open_dso(dso, machine);
	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_exit(&nsc);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}
538 
539 static void close_data_fd(struct dso *dso)
540 {
541 	if (dso->data.fd >= 0) {
542 		close(dso->data.fd);
543 		dso->data.fd = -1;
544 		dso->data.file_size = 0;
545 		dso__list_del(dso);
546 	}
547 }
548 
/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
static void close_dso(struct dso *dso)
{
	close_data_fd(dso);
}
560 
/* Close the least recently opened DSO (head of dso__data_open). */
static void close_first_dso(void)
{
	struct dso *dso;

	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
	close_dso(dso);
}
568 
/*
 * Number of fds this code may cache: half of RLIMIT_NOFILE (or the
 * full limit when it is unlimited).  Falls back to 1 if getrlimit()
 * fails so at most one fd stays cached.
 */
static rlim_t get_fd_limit(void)
{
	struct rlimit l;

	if (getrlimit(RLIMIT_NOFILE, &l) != 0) {
		pr_err("failed to get fd limit\n");
		return 1;
	}

	/* Allow half of the current open fd limit. */
	return l.rlim_cur == RLIM_INFINITY ? l.rlim_cur : l.rlim_cur / 2;
}
587 
/* Cached result of get_fd_limit(); 0 means "not computed yet". */
static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I dont expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}
599 
600 static bool may_cache_fd(void)
601 {
602 	if (!fd_limit)
603 		fd_limit = get_fd_limit();
604 
605 	if (fd_limit == RLIM_INFINITY)
606 		return true;
607 
608 	return fd_limit > (rlim_t) dso__data_open_cnt;
609 }
610 
/*
 * Check and close LRU dso if we crossed allowed limit
 * for opened dso file descriptors. The limit is half
 * of the RLIMIT_NOFILE files opened.
 */
static void check_data_close(void)
{
	if (!may_cache_fd())
		close_first_dso();
}
623 
/**
 * dso__data_close - Close DSO data file
 * @dso: dso object
 *
 * External interface to close @dso's data file descriptor.
 * Takes dso__data_open_lock, so must not be called with it held.
 */
void dso__data_close(struct dso *dso)
{
	pthread_mutex_lock(&dso__data_open_lock);
	close_dso(dso);
	pthread_mutex_unlock(&dso__data_open_lock);
}
636 
/*
 * Make sure dso->data.fd is open, trying the already-known
 * binary_type first, or otherwise probing the build-id cache and then
 * the system path.  Updates dso->data.status to OK/ERROR accordingly.
 * Caller must hold dso__data_open_lock.
 */
static void try_to_open_dso(struct dso *dso, struct machine *machine)
{
	/* Fallback probe order when the binary type is still unknown. */
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;

	if (dso->data.fd >= 0)
		return;

	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
		dso->data.fd = open_dso(dso, machine);
		goto out;
	}

	do {
		/* Remember the type that worked for future reopens. */
		dso->binary_type = binary_type_data[i++];

		dso->data.fd = open_dso(dso, machine);
		if (dso->data.fd >= 0)
			goto out;

	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso->data.fd >= 0)
		dso->data.status = DSO_DATA_STATUS_OK;
	else
		dso->data.status = DSO_DATA_STATUS_ERROR;
}
668 
669 /**
670  * dso__data_get_fd - Get dso's data file descriptor
671  * @dso: dso object
672  * @machine: machine object
673  *
674  * External interface to find dso's file, open it and
675  * returns file descriptor.  It should be paired with
676  * dso__data_put_fd() if it returns non-negative value.
677  */
678 int dso__data_get_fd(struct dso *dso, struct machine *machine)
679 {
680 	if (dso->data.status == DSO_DATA_STATUS_ERROR)
681 		return -1;
682 
683 	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
684 		return -1;
685 
686 	try_to_open_dso(dso, machine);
687 
688 	if (dso->data.fd < 0)
689 		pthread_mutex_unlock(&dso__data_open_lock);
690 
691 	return dso->data.fd;
692 }
693 
/* Release the lock taken by a successful dso__data_get_fd(). */
void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	pthread_mutex_unlock(&dso__data_open_lock);
}
698 
699 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
700 {
701 	u32 flag = 1 << by;
702 
703 	if (dso->data.status_seen & flag)
704 		return true;
705 
706 	dso->data.status_seen |= flag;
707 
708 	return false;
709 }
710 
/*
 * Serve a read of a BPF-program DSO from the jited instructions saved
 * in the perf_env, instead of from a file.  Copies up to
 * DSO__DATA_CACHE_SIZE bytes starting at @offset into @data; returns
 * the number of bytes copied or -1 on error/end.
 */
static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
	struct bpf_prog_info_node *node;
	ssize_t size = DSO__DATA_CACHE_SIZE;
	u64 len;
	u8 *buf;

	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
	if (!node || !node->info_linear) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	len = node->info_linear->info.jited_prog_len;
	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;

	if (offset >= len)
		return -1;

	/* Clamp to what is left of the jited program. */
	size = (ssize_t)min(len - offset, (u64)size);
	memcpy(data, buf + offset, size);
	return size;
}
734 
735 static int bpf_size(struct dso *dso)
736 {
737 	struct bpf_prog_info_node *node;
738 
739 	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
740 	if (!node || !node->info_linear) {
741 		dso->data.status = DSO_DATA_STATUS_ERROR;
742 		return -1;
743 	}
744 
745 	dso->data.file_size = node->info_linear->info.jited_prog_len;
746 	return 0;
747 }
748 
749 static void
750 dso_cache__free(struct dso *dso)
751 {
752 	struct rb_root *root = &dso->data.cache;
753 	struct rb_node *next = rb_first(root);
754 
755 	pthread_mutex_lock(&dso->lock);
756 	while (next) {
757 		struct dso_cache *cache;
758 
759 		cache = rb_entry(next, struct dso_cache, rb_node);
760 		next = rb_next(&cache->rb_node);
761 		rb_erase(&cache->rb_node, root);
762 		free(cache);
763 	}
764 	pthread_mutex_unlock(&dso->lock);
765 }
766 
767 static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
768 {
769 	const struct rb_root *root = &dso->data.cache;
770 	struct rb_node * const *p = &root->rb_node;
771 	const struct rb_node *parent = NULL;
772 	struct dso_cache *cache;
773 
774 	while (*p != NULL) {
775 		u64 end;
776 
777 		parent = *p;
778 		cache = rb_entry(parent, struct dso_cache, rb_node);
779 		end = cache->offset + DSO__DATA_CACHE_SIZE;
780 
781 		if (offset < cache->offset)
782 			p = &(*p)->rb_left;
783 		else if (offset >= end)
784 			p = &(*p)->rb_right;
785 		else
786 			return cache;
787 	}
788 
789 	return NULL;
790 }
791 
/*
 * Insert chunk @new into @dso's cache rb-tree under dso->lock.
 * Returns NULL when @new was inserted, or the already-present chunk
 * covering the same offset when another thread won the race (in which
 * case @new is NOT inserted and the caller must free it).
 */
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso->data.cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	pthread_mutex_lock(&dso->lock);
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			/* Existing chunk covers this offset: report it. */
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	pthread_mutex_unlock(&dso->lock);
	return cache;
}
825 
826 static ssize_t
827 dso_cache__memcpy(struct dso_cache *cache, u64 offset,
828 		  u8 *data, u64 size)
829 {
830 	u64 cache_offset = offset - cache->offset;
831 	u64 cache_size   = min(cache->size - cache_offset, size);
832 
833 	memcpy(data, cache->data + cache_offset, cache_size);
834 	return cache_size;
835 }
836 
/*
 * Read one DSO__DATA_CACHE_SIZE chunk at @offset from @dso's data
 * file into @data, (re)opening the file first if needed.  Returns the
 * pread() result or a negative errno.
 */
static ssize_t file_read(struct dso *dso, struct machine *machine,
			 u64 offset, char *data)
{
	ssize_t ret;

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		dso->data.status = DSO_DATA_STATUS_ERROR;
		ret = -errno;
		goto out;
	}

	ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}
861 
/*
 * Slow path of dso_cache_read(): read the aligned chunk containing
 * @offset (from the BPF prog info or the data file), insert it into
 * the cache, then serve the request from it.  If another thread
 * cached the same chunk first, use theirs and free ours.  Returns
 * bytes copied, 0 at EOF, or a negative error.
 */
static ssize_t
dso_cache__read(struct dso *dso, struct machine *machine,
		u64 offset, u8 *data, ssize_t size)
{
	/* Align down to the chunk boundary containing @offset. */
	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
	struct dso_cache *cache;
	struct dso_cache *old;
	ssize_t ret;

	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
	if (!cache)
		return -ENOMEM;

	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		ret = bpf_read(dso, cache_offset, cache->data);
	else
		ret = file_read(dso, machine, cache_offset, cache->data);

	if (ret > 0) {
		cache->offset = cache_offset;
		cache->size   = ret;

		old = dso_cache__insert(dso, cache);
		if (old) {
			/* we lose the race */
			free(cache);
			cache = old;
		}

		ret = dso_cache__memcpy(cache, offset, data, size);
	}

	/* Nothing was cached: the chunk we allocated is still ours. */
	if (ret <= 0)
		free(cache);

	return ret;
}
899 
900 static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
901 			      u64 offset, u8 *data, ssize_t size)
902 {
903 	struct dso_cache *cache;
904 
905 	cache = dso_cache__find(dso, offset);
906 	if (cache)
907 		return dso_cache__memcpy(cache, offset, data, size);
908 	else
909 		return dso_cache__read(dso, machine, offset, data, size);
910 }
911 
/*
 * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks
 * in the rb_tree. Any read to already cached data is served
 * by cached data.
 *
 * Loops over chunk-sized partial reads until @size bytes are
 * gathered or EOF is hit; returns bytes read or a negative error.
 */
static ssize_t cached_read(struct dso *dso, struct machine *machine,
			   u64 offset, u8 *data, ssize_t size)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_read(dso, machine, offset, p, size);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r      += ret;
		p      += ret;
		offset += ret;
		size   -= ret;

	} while (size);

	return r;
}
945 
/*
 * fstat() @dso's data file (opening it first if needed) and cache the
 * size in dso->data.file_size.  Returns 0 on success or a negative
 * errno, flagging the dso's data status as ERROR on failure.
 */
static int file_size(struct dso *dso, struct machine *machine)
{
	int ret = 0;
	struct stat st;
	char sbuf[STRERR_BUFSIZE];

	pthread_mutex_lock(&dso__data_open_lock);

	/*
	 * dso->data.fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso->data.fd < 0) {
		ret = -errno;
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}

	if (fstat(dso->data.fd, &st) < 0) {
		ret = -errno;
		pr_err("dso cache fstat failed: %s\n",
		       str_error_r(errno, sbuf, sizeof(sbuf)));
		dso->data.status = DSO_DATA_STATUS_ERROR;
		goto out;
	}
	dso->data.file_size = st.st_size;

out:
	pthread_mutex_unlock(&dso__data_open_lock);
	return ret;
}
979 
/*
 * Make sure dso->data.file_size is populated (from the BPF prog info
 * or by stat'ing the file).  Returns 0 on success, negative on error.
 */
int dso__data_file_size(struct dso *dso, struct machine *machine)
{
	/* Already known (non-zero) - nothing to do. */
	if (dso->data.file_size)
		return 0;

	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
		return bpf_size(dso);

	return file_size(dso, machine);
}
993 
/**
 * dso__data_size - Return dso data size
 * @dso: dso object
 * @machine: machine object
 *
 * Return: dso data size, or -1 when the size cannot be determined
 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso->data.file_size;
}
1009 
/*
 * Validate @offset/@size against the DSO's file size and delegate to
 * cached_read().  Returns bytes read or -1 on error.
 */
static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
				u64 offset, u8 *data, ssize_t size)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* Check the offset sanity. */
	if (offset > dso->data.file_size)
		return -1;

	/* Reject requests where offset + size wraps around. */
	if (offset + size < offset)
		return -1;

	return cached_read(dso, machine, offset, data, size);
}
1025 
/**
 * dso__data_read_offset - Read data from dso file offset
 * @dso: dso object
 * @machine: machine object
 * @offset: file offset
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso file offset. Open
 * dso data file and use cached_read to get the data.
 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	if (dso->data.status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_offset(dso, machine, offset, data, size);
}
1045 
/**
 * dso__data_read_addr - Read data from dso address
 * @dso: dso object
 * @map: map used to translate the address to a file offset
 * @machine: machine object
 * @addr: virtual memory address
 * @data: buffer to store data
 * @size: size of the @data buffer
 *
 * External interface to read data from dso address.
 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	u64 offset = map->map_ip(map, addr);
	return dso__data_read_offset(dso, machine, offset, data, size);
}
1063 
/*
 * Create a new dso named @name and a fresh map starting at 0 for it.
 * Returns the map, or NULL on failure.
 *
 * NOTE(review): when map__new2() fails, the freshly created dso is
 * not released here — verify against dso__new()/map__new2() reference
 * counting whether that leaks a reference.
 */
struct map *dso__new_map(const char *name)
{
	struct map *map = NULL;
	struct dso *dso = dso__new(name);

	if (dso)
		map = map__new2(0, dso);

	return map;
}
1074 
/*
 * Find or create the kernel dso @name on @machine, forcing its short
 * name and kernel type even if the dso already existed.
 */
struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
				    const char *short_name, int dso_type)
{
	/*
	 * The kernel dso could be created by build_id processing.
	 */
	struct dso *dso = machine__findnew_dso(machine, name);

	/*
	 * We need to run this in all cases, since during the build_id
	 * processing we had no idea this was the kernel dso.
	 */
	if (dso != NULL) {
		dso__set_short_name(dso, short_name, false);
		dso->kernel = dso_type;
	}

	return dso;
}
1094 
/*
 * Find a matching entry and/or link current entry to RB tree.
 * Either one of the dso or name parameter must be non-NULL or the
 * function will not work.
 *
 * With @dso == NULL this is a pure lookup by @name.  With @dso set,
 * a matching entry is returned if found; otherwise @dso is inserted
 * (duplicated long names are ordered by short name) and NULL is
 * returned.
 */
static struct dso *__dso__findlink_by_longname(struct rb_root *root,
					       struct dso *dso, const char *name)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node  *parent = NULL;

	if (!name)
		name = dso->long_name;
	/*
	 * Find node with the matching name
	 */
	while (*p) {
		struct dso *this = rb_entry(*p, struct dso, rb_node);
		int rc = strcmp(name, this->long_name);

		parent = *p;
		if (rc == 0) {
			/*
			 * In case the new DSO is a duplicate of an existing
			 * one, print a one-time warning & put the new entry
			 * at the end of the list of duplicates.
			 */
			if (!dso || (dso == this))
				return this;	/* Find matching dso */
			/*
			 * The core kernel DSOs may have duplicated long name.
			 * In this case, the short name should be different.
			 * Comparing the short names to differentiate the DSOs.
			 */
			rc = strcmp(dso->short_name, this->short_name);
			if (rc == 0) {
				pr_err("Duplicated dso name: %s\n", name);
				return NULL;
			}
		}
		if (rc < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	if (dso) {
		/* Add new node and rebalance tree */
		rb_link_node(&dso->rb_node, parent, p);
		rb_insert_color(&dso->rb_node, root);
		dso->root = root;
	}
	return NULL;
}
1148 
/* Pure lookup of a dso by long name (no insertion). */
static inline struct dso *__dso__find_by_longname(struct rb_root *root,
						  const char *name)
{
	return __dso__findlink_by_longname(root, NULL, name);
}
1154 
/*
 * Replace @dso's long name with @name; @name_allocated tells whether
 * the dso now owns (and must eventually free) the string.  Since the
 * long name is the rb-tree key, the dso is unlinked from its tree
 * first and re-linked under the new name afterwards.
 */
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct rb_root *root = dso->root;

	if (name == NULL)
		return;

	if (dso->long_name_allocated)
		free((char *)dso->long_name);

	if (root) {
		rb_erase(&dso->rb_node, root);
		/*
		 * __dso__findlink_by_longname() isn't guaranteed to add it
		 * back, so a clean removal is required here.
		 */
		RB_CLEAR_NODE(&dso->rb_node);
		dso->root = NULL;
	}

	dso->long_name		 = name;
	dso->long_name_len	 = strlen(name);
	dso->long_name_allocated = name_allocated;

	if (root)
		__dso__findlink_by_longname(root, dso, NULL);
}
1182 
1183 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1184 {
1185 	if (name == NULL)
1186 		return;
1187 
1188 	if (dso->short_name_allocated)
1189 		free((char *)dso->short_name);
1190 
1191 	dso->short_name		  = name;
1192 	dso->short_name_len	  = strlen(name);
1193 	dso->short_name_allocated = name_allocated;
1194 }
1195 
1196 static void dso__set_basename(struct dso *dso)
1197 {
1198 	char *base, *lname;
1199 	int tid;
1200 
1201 	if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
1202 		if (asprintf(&base, "[JIT] tid %d", tid) < 0)
1203 			return;
1204 	} else {
1205 	      /*
1206 	       * basename() may modify path buffer, so we must pass
1207                * a copy.
1208                */
1209 		lname = strdup(dso->long_name);
1210 		if (!lname)
1211 			return;
1212 
1213 		/*
1214 		 * basename() may return a pointer to internal
1215 		 * storage which is reused in subsequent calls
1216 		 * so copy the result.
1217 		 */
1218 		base = strdup(basename(lname));
1219 
1220 		free(lname);
1221 
1222 		if (!base)
1223 			return;
1224 	}
1225 	dso__set_short_name(dso, base, true);
1226 }
1227 
1228 int dso__name_len(const struct dso *dso)
1229 {
1230 	if (!dso)
1231 		return strlen("[unknown]");
1232 	if (verbose > 0)
1233 		return dso->long_name_len;
1234 
1235 	return dso->short_name_len;
1236 }
1237 
/* Has a symbol table already been loaded for this DSO? */
bool dso__loaded(const struct dso *dso)
{
	return dso->loaded;
}
1242 
/* Is the DSO's symbol-names tree known to be sorted? */
bool dso__sorted_by_name(const struct dso *dso)
{
	return dso->sorted_by_name;
}
1247 
/* Mark the DSO's symbol names as sorted. */
void dso__set_sorted_by_name(struct dso *dso)
{
	dso->sorted_by_name = true;
}
1252 
1253 struct dso *dso__new(const char *name)
1254 {
1255 	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1256 
1257 	if (dso != NULL) {
1258 		strcpy(dso->name, name);
1259 		dso__set_long_name(dso, dso->name, false);
1260 		dso__set_short_name(dso, dso->name, false);
1261 		dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
1262 		dso->data.cache = RB_ROOT;
1263 		dso->inlined_nodes = RB_ROOT_CACHED;
1264 		dso->srclines = RB_ROOT_CACHED;
1265 		dso->data.fd = -1;
1266 		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1267 		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1268 		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1269 		dso->is_64_bit = (sizeof(void *) == 8);
1270 		dso->loaded = 0;
1271 		dso->rel = 0;
1272 		dso->sorted_by_name = 0;
1273 		dso->has_build_id = 0;
1274 		dso->has_srcline = 1;
1275 		dso->a2l_fails = 1;
1276 		dso->kernel = DSO_TYPE_USER;
1277 		dso->needs_swap = DSO_SWAP__UNSET;
1278 		dso->comp = COMP_ID__NONE;
1279 		RB_CLEAR_NODE(&dso->rb_node);
1280 		dso->root = NULL;
1281 		INIT_LIST_HEAD(&dso->node);
1282 		INIT_LIST_HEAD(&dso->data.open_entry);
1283 		pthread_mutex_init(&dso->lock, NULL);
1284 		refcount_set(&dso->refcnt, 1);
1285 	}
1286 
1287 	return dso;
1288 }
1289 
/*
 * Tear down a DSO once its last reference is dropped (see dso__put()).
 * By this point it must already be unlinked from any dsos rbtree.
 */
void dso__delete(struct dso *dso)
{
	if (!RB_EMPTY_NODE(&dso->rb_node))
		pr_err("DSO %s is still in rbtree when being deleted!\n",
		       dso->long_name);

	/* free inlines first, as they reference symbols */
	inlines__tree_delete(&dso->inlined_nodes);
	srcline__tree_delete(&dso->srclines);
	symbols__delete(&dso->symbols);

	/* Free the names only if this DSO owns them. */
	if (dso->short_name_allocated) {
		zfree((char **)&dso->short_name);
		dso->short_name_allocated = false;
	}

	if (dso->long_name_allocated) {
		zfree((char **)&dso->long_name);
		dso->long_name_allocated = false;
	}

	dso__data_close(dso);
	auxtrace_cache__free(dso->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	zfree(&dso->symsrc_filename);
	nsinfo__zput(dso->nsinfo);
	pthread_mutex_destroy(&dso->lock);
	free(dso);
}
1320 
1321 struct dso *dso__get(struct dso *dso)
1322 {
1323 	if (dso)
1324 		refcount_inc(&dso->refcnt);
1325 	return dso;
1326 }
1327 
1328 void dso__put(struct dso *dso)
1329 {
1330 	if (dso && refcount_dec_and_test(&dso->refcnt))
1331 		dso__delete(dso);
1332 }
1333 
/* Copy a build id into the DSO and mark it as present. */
void dso__set_build_id(struct dso *dso, void *build_id)
{
	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
	dso->has_build_id = 1;
}
1339 
/* Does the given build id match the one stored in the DSO? */
bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
{
	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
}
1344 
1345 void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1346 {
1347 	char path[PATH_MAX];
1348 
1349 	if (machine__is_default_guest(machine))
1350 		return;
1351 	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1352 	if (sysfs__read_build_id(path, dso->build_id,
1353 				 sizeof(dso->build_id)) == 0)
1354 		dso->has_build_id = true;
1355 }
1356 
/*
 * Read a kernel module's build id from its sysfs notes file
 * ("<root>/sys/module/<name>/notes/.note.gnu.build-id") and mark the DSO
 * when successful.  Always returns 0.
 */
int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso->short_name + 1;

	/* The %.*s precision of strlen(name) - 1 drops the trailing ']'. */
	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	if (sysfs__read_build_id(filename, dso->build_id,
				 sizeof(dso->build_id)) == 0)
		dso->has_build_id = true;

	return 0;
}
1377 
1378 bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1379 {
1380 	bool have_build_id = false;
1381 	struct dso *pos;
1382 	struct nscookie nsc;
1383 
1384 	list_for_each_entry(pos, head, node) {
1385 		if (with_hits && !pos->hit && !dso__is_vdso(pos))
1386 			continue;
1387 		if (pos->has_build_id) {
1388 			have_build_id = true;
1389 			continue;
1390 		}
1391 		nsinfo__mountns_enter(pos->nsinfo, &nsc);
1392 		if (filename__read_build_id(pos->long_name, pos->build_id,
1393 					    sizeof(pos->build_id)) > 0) {
1394 			have_build_id	  = true;
1395 			pos->has_build_id = true;
1396 		}
1397 		nsinfo__mountns_exit(&nsc);
1398 	}
1399 
1400 	return have_build_id;
1401 }
1402 
1403 void __dsos__add(struct dsos *dsos, struct dso *dso)
1404 {
1405 	list_add_tail(&dso->node, &dsos->head);
1406 	__dso__findlink_by_longname(&dsos->root, dso, NULL);
1407 	/*
1408 	 * It is now in the linked list, grab a reference, then garbage collect
1409 	 * this when needing memory, by looking at LRU dso instances in the
1410 	 * list with atomic_read(&dso->refcnt) == 1, i.e. no references
1411 	 * anywhere besides the one for the list, do, under a lock for the
1412 	 * list: remove it from the list, then a dso__put(), that probably will
1413 	 * be the last and will then call dso__delete(), end of life.
1414 	 *
1415 	 * That, or at the end of the 'struct machine' lifetime, when all
1416 	 * 'struct dso' instances will be removed from the list, in
1417 	 * dsos__exit(), if they have no other reference from some other data
1418 	 * structure.
1419 	 *
1420 	 * E.g.: after processing a 'perf.data' file and storing references
1421 	 * to objects instantiated while processing events, we will have
1422 	 * references to the 'thread', 'map', 'dso' structs all from 'struct
1423 	 * hist_entry' instances, but we may not need anything not referenced,
1424 	 * so we might as well call machines__exit()/machines__delete() and
1425 	 * garbage collect it.
1426 	 */
1427 	dso__get(dso);
1428 }
1429 
/* Locked wrapper around __dsos__add(). */
void dsos__add(struct dsos *dsos, struct dso *dso)
{
	down_write(&dsos->lock);
	__dsos__add(dsos, dso);
	up_write(&dsos->lock);
}
1436 
1437 struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1438 {
1439 	struct dso *pos;
1440 
1441 	if (cmp_short) {
1442 		list_for_each_entry(pos, &dsos->head, node)
1443 			if (strcmp(pos->short_name, name) == 0)
1444 				return pos;
1445 		return NULL;
1446 	}
1447 	return __dso__find_by_longname(&dsos->root, name);
1448 }
1449 
/*
 * Locked wrapper around __dsos__find().  NOTE(review): no reference is
 * taken on the returned dso here, unlike dsos__findnew() — callers appear
 * to rely on the list's own reference keeping it alive.
 */
struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
{
	struct dso *dso;
	down_read(&dsos->lock);
	dso = __dsos__find(dsos, name, cmp_short);
	up_read(&dsos->lock);
	return dso;
}
1458 
1459 struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
1460 {
1461 	struct dso *dso = dso__new(name);
1462 
1463 	if (dso != NULL) {
1464 		__dsos__add(dsos, dso);
1465 		dso__set_basename(dso);
1466 		/* Put dso here because __dsos_add already got it */
1467 		dso__put(dso);
1468 	}
1469 	return dso;
1470 }
1471 
1472 struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
1473 {
1474 	struct dso *dso = __dsos__find(dsos, name, false);
1475 
1476 	return dso ? dso : __dsos__addnew(dsos, name);
1477 }
1478 
/*
 * Locked find-or-create.  Unlike dsos__find(), this grabs a reference
 * for the caller, who must dso__put() it when done.
 */
struct dso *dsos__findnew(struct dsos *dsos, const char *name)
{
	struct dso *dso;
	down_write(&dsos->lock);
	dso = dso__get(__dsos__findnew(dsos, name));
	up_write(&dsos->lock);
	return dso;
}
1487 
1488 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
1489 			       bool (skip)(struct dso *dso, int parm), int parm)
1490 {
1491 	struct dso *pos;
1492 	size_t ret = 0;
1493 
1494 	list_for_each_entry(pos, head, node) {
1495 		if (skip && skip(pos, parm))
1496 			continue;
1497 		ret += dso__fprintf_buildid(pos, fp);
1498 		ret += fprintf(fp, " %s\n", pos->long_name);
1499 	}
1500 	return ret;
1501 }
1502 
1503 size_t __dsos__fprintf(struct list_head *head, FILE *fp)
1504 {
1505 	struct dso *pos;
1506 	size_t ret = 0;
1507 
1508 	list_for_each_entry(pos, head, node) {
1509 		ret += dso__fprintf(pos, fp);
1510 	}
1511 
1512 	return ret;
1513 }
1514 
/* Print the DSO's build id as a hex string; returns bytes printed. */
size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
	return fprintf(fp, "%s", sbuild_id);
}
1522 
1523 size_t dso__fprintf(struct dso *dso, FILE *fp)
1524 {
1525 	struct rb_node *nd;
1526 	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1527 
1528 	if (dso->short_name != dso->long_name)
1529 		ret += fprintf(fp, "%s, ", dso->long_name);
1530 	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1531 	ret += dso__fprintf_buildid(dso, fp);
1532 	ret += fprintf(fp, ")\n");
1533 	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
1534 		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1535 		ret += symbol__fprintf(pos, fp);
1536 	}
1537 
1538 	return ret;
1539 }
1540 
1541 enum dso_type dso__type(struct dso *dso, struct machine *machine)
1542 {
1543 	int fd;
1544 	enum dso_type type = DSO__TYPE_UNKNOWN;
1545 
1546 	fd = dso__data_get_fd(dso, machine);
1547 	if (fd >= 0) {
1548 		type = dso__type_fd(fd);
1549 		dso__data_put_fd(dso);
1550 	}
1551 
1552 	return type;
1553 }
1554 
1555 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1556 {
1557 	int idx, errnum = dso->load_errno;
1558 	/*
1559 	 * This must have a same ordering as the enum dso_load_errno.
1560 	 */
1561 	static const char *dso_load__error_str[] = {
1562 	"Internal tools/perf/ library error",
1563 	"Invalid ELF file",
1564 	"Can not read build id",
1565 	"Mismatching build id",
1566 	"Decompression failure",
1567 	};
1568 
1569 	BUG_ON(buflen == 0);
1570 
1571 	if (errnum >= 0) {
1572 		const char *err = str_error_r(errnum, buf, buflen);
1573 
1574 		if (err != buf)
1575 			scnprintf(buf, buflen, "%s", err);
1576 
1577 		return 0;
1578 	}
1579 
1580 	if (errnum <  __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1581 		return -1;
1582 
1583 	idx = errnum - __DSO_LOAD_ERRNO__START;
1584 	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1585 	return 0;
1586 }
1587