xref: /linux/tools/perf/util/dso.c (revision 2f5947dfcaecb99f2dd559156eecbeb7b95e4c02)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <asm/bug.h>
3 #include <linux/kernel.h>
4 #include <linux/string.h>
5 #include <sys/time.h>
6 #include <sys/resource.h>
7 #include <sys/types.h>
8 #include <sys/stat.h>
9 #include <unistd.h>
10 #include <errno.h>
11 #include <fcntl.h>
12 #include <libgen.h>
13 #include <bpf/libbpf.h>
14 #include "bpf-event.h"
15 #include "compress.h"
16 #include "namespaces.h"
17 #include "path.h"
18 #include "map.h"
19 #include "symbol.h"
20 #include "srcline.h"
21 #include "dso.h"
22 #include "machine.h"
23 #include "auxtrace.h"
24 #include "util.h"
25 #include "debug.h"
26 #include "string2.h"
27 #include "vdso.h"
28 
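/*
 * Each entry below is an snprintf() format taking two arguments,
 * (dso_dir, debuglink file name), as used in the DEBUGLINK case of
 * dso__read_binary_type_filename(): the first entry discards the
 * directory and tries the debuglink value as-is, the others try the
 * DSO's own directory, its .debug subdirectory and /usr/lib/debug.
 */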
29 static const char * const debuglink_paths[] = {
30 	"%.0s%s",
31 	"%s/%s",
32 	"%s/.debug/%s",
33 	"/usr/lib/debug%s/%s"
34 };
35 
36 char dso__symtab_origin(const struct dso *dso)
37 {
38 	static const char origin[] = {
39 		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
40 		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
41 		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
42 		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
43 		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
44 		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
45 		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
46 		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
47 		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
48 		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
49 		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
50 		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
51 		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
52 		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
53 		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
54 		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
55 		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
56 	};
57 
58 	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
59 		return '!';
60 	return origin[dso->symtab_type];
61 }
62 
63 int dso__read_binary_type_filename(const struct dso *dso,
64 				   enum dso_binary_type type,
65 				   char *root_dir, char *filename, size_t size)
66 {
67 	char build_id_hex[SBUILD_ID_SIZE];
68 	int ret = 0;
69 	size_t len;
70 
71 	switch (type) {
72 	case DSO_BINARY_TYPE__DEBUGLINK:
73 	{
74 		const char *last_slash;
75 		char dso_dir[PATH_MAX];
76 		char symfile[PATH_MAX];
77 		unsigned int i;
78 
79 		len = __symbol__join_symfs(filename, size, dso->long_name);
80 		last_slash = filename + len;
81 		while (last_slash != filename && *last_slash != '/')
82 			last_slash--;
83 
84 		strncpy(dso_dir, filename, last_slash - filename);
85 		dso_dir[last_slash-filename] = '\0';
86 
87 		if (!is_regular_file(filename)) {
88 			ret = -1;
89 			break;
90 		}
91 
92 		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
93 		if (ret)
94 			break;
95 
96 		/* Check predefined locations where debug file might reside */
97 		ret = -1;
98 		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
99 			snprintf(filename, size,
100 					debuglink_paths[i], dso_dir, symfile);
101 			if (is_regular_file(filename)) {
102 				ret = 0;
103 				break;
104 			}
105 		}
106 
107 		break;
108 	}
109 	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
110 		if (dso__build_id_filename(dso, filename, size, false) == NULL)
111 			ret = -1;
112 		break;
113 
114 	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
115 		if (dso__build_id_filename(dso, filename, size, true) == NULL)
116 			ret = -1;
117 		break;
118 
119 	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
120 		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
121 		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
122 		break;
123 
124 	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
125 		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
126 		snprintf(filename + len, size - len, "%s", dso->long_name);
127 		break;
128 
129 	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
130 	{
131 		const char *last_slash;
132 		size_t dir_size;
133 
134 		last_slash = dso->long_name + dso->long_name_len;
135 		while (last_slash != dso->long_name && *last_slash != '/')
136 			last_slash--;
137 
138 		len = __symbol__join_symfs(filename, size, "");
139 		dir_size = last_slash - dso->long_name + 2;
140 		if (dir_size > (size - len)) {
141 			ret = -1;
142 			break;
143 		}
144 		len += scnprintf(filename + len, dir_size, "%s",  dso->long_name);
145 		len += scnprintf(filename + len , size - len, ".debug%s",
146 								last_slash);
147 		break;
148 	}
149 
150 	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
151 		if (!dso->has_build_id) {
152 			ret = -1;
153 			break;
154 		}
155 
156 		build_id__sprintf(dso->build_id,
157 				  sizeof(dso->build_id),
158 				  build_id_hex);
159 		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
160 		snprintf(filename + len, size - len, "%.2s/%s.debug",
161 			 build_id_hex, build_id_hex + 2);
162 		break;
163 
164 	case DSO_BINARY_TYPE__VMLINUX:
165 	case DSO_BINARY_TYPE__GUEST_VMLINUX:
166 	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
167 		__symbol__join_symfs(filename, size, dso->long_name);
168 		break;
169 
170 	case DSO_BINARY_TYPE__GUEST_KMODULE:
171 	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
172 		path__join3(filename, size, symbol_conf.symfs,
173 			    root_dir, dso->long_name);
174 		break;
175 
176 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
177 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
178 		__symbol__join_symfs(filename, size, dso->long_name);
179 		break;
180 
181 	case DSO_BINARY_TYPE__KCORE:
182 	case DSO_BINARY_TYPE__GUEST_KCORE:
183 		snprintf(filename, size, "%s", dso->long_name);
184 		break;
185 
186 	default:
187 	case DSO_BINARY_TYPE__KALLSYMS:
188 	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
189 	case DSO_BINARY_TYPE__JAVA_JIT:
190 	case DSO_BINARY_TYPE__BPF_PROG_INFO:
191 	case DSO_BINARY_TYPE__NOT_FOUND:
192 		ret = -1;
193 		break;
194 	}
195 
196 	return ret;
197 }
198 
199 enum {
200 	COMP_ID__NONE = 0,
201 };
202 
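/*
 * Decompression table: index 0 is reserved for COMP_ID__NONE and the
 * array is terminated by a NULL .fmt entry.  dso->comp stores an index
 * into this table; which entries exist depends on the build
 * (HAVE_ZLIB_SUPPORT / HAVE_LZMA_SUPPORT).
 */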
203 static const struct {
204 	const char *fmt;
205 	int (*decompress)(const char *input, int output);
206 	bool (*is_compressed)(const char *input);
207 } compressions[] = {
208 	[COMP_ID__NONE] = { .fmt = NULL, },
209 #ifdef HAVE_ZLIB_SUPPORT
210 	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
211 #endif
212 #ifdef HAVE_LZMA_SUPPORT
213 	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
214 #endif
215 	{ NULL, NULL, NULL },
216 };
217 
218 static int is_supported_compression(const char *ext)
219 {
220 	unsigned i;
221 
222 	for (i = 1; compressions[i].fmt; i++) {
223 		if (!strcmp(ext, compressions[i].fmt))
224 			return i;
225 	}
226 	return COMP_ID__NONE;
227 }
228 
229 bool is_kernel_module(const char *pathname, int cpumode)
230 {
231 	struct kmod_path m;
232 	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
233 
234 	WARN_ONCE(mode != cpumode,
235 		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
236 		  cpumode);
237 
238 	switch (mode) {
239 	case PERF_RECORD_MISC_USER:
240 	case PERF_RECORD_MISC_HYPERVISOR:
241 	case PERF_RECORD_MISC_GUEST_USER:
242 		return false;
243 	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
244 	default:
245 		if (kmod_path__parse(&m, pathname)) {
246 			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
247 					pathname);
248 			return true;
249 		}
250 	}
251 
252 	return m.kmod;
253 }
254 
255 bool dso__needs_decompress(struct dso *dso)
256 {
257 	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
258 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
259 }
260 
261 static int decompress_kmodule(struct dso *dso, const char *name,
262 			      char *pathname, size_t len)
263 {
264 	char tmpbuf[] = KMOD_DECOMP_NAME;
265 	int fd = -1;
266 
267 	if (!dso__needs_decompress(dso))
268 		return -1;
269 
270 	if (dso->comp == COMP_ID__NONE)
271 		return -1;
272 
273 	/*
274 	 * We have a proper compression id for the DSO, and yet the file
275 	 * behind 'name' can still be a plain, uncompressed object.
276 	 *
277 	 * The reason lies in the way we open DSO object files: we try
278 	 * all possible 'debug' objects until we find the data. So even
279 	 * if the DSO is represented by the 'krava.xz' module, we can
280 	 * end up here opening a '~/.debug/....23432432/debug' file
281 	 * which is not compressed.
282 	 *
283 	 * To keep this transparent, we detect this case and return a
284 	 * file descriptor to the uncompressed file.
285 	 */
286 	if (!compressions[dso->comp].is_compressed(name))
287 		return open(name, O_RDONLY);
288 
289 	fd = mkstemp(tmpbuf);
290 	if (fd < 0) {
291 		dso->load_errno = errno;
292 		return -1;
293 	}
294 
295 	if (compressions[dso->comp].decompress(name, fd)) {
296 		dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
297 		close(fd);
298 		fd = -1;
299 	}
300 
301 	if (!pathname || (fd < 0))
302 		unlink(tmpbuf);
303 
304 	if (pathname && (fd >= 0))
305 		strlcpy(pathname, tmpbuf, len);
306 
307 	return fd;
308 }
309 
310 int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
311 {
312 	return decompress_kmodule(dso, name, NULL, 0);
313 }
314 
315 int dso__decompress_kmodule_path(struct dso *dso, const char *name,
316 				 char *pathname, size_t len)
317 {
318 	int fd = decompress_kmodule(dso, name, pathname, len);
319 
320 	close(fd);
321 	return fd >= 0 ? 0 : -1;
322 }
323 
324 /*
325  * Parses the kernel module specified in @path and updates
326  * the @m argument as follows:
327  *
328  *    @comp - supported compression id if @path has a supported
329  *            compression suffix (an index into the compressions
330  *            table), COMP_ID__NONE otherwise
331  *    @kmod - true if @path contains the '.ko' suffix in the right
332  *            position, false otherwise
333  *    @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed
334  *            base name of the kernel module without suffixes, otherwise
335  *            the strdup-ed base name of @path; dashes in the name are
336  *            replaced with underscores
337  *
338  * Returns 0 if there's no strdup error, -ENOMEM otherwise.
339  */
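/*
 * Illustrative example (assuming lzma support is compiled in, i.e.
 * HAVE_LZMA_SUPPORT, so that "xz" is a known compression suffix):
 *
 *   struct kmod_path m;
 *
 *   __kmod_path__parse(&m, "/lib/modules/.../nf-nat.ko.xz", true);
 *   // m.kmod == true, m.comp != COMP_ID__NONE, m.name == "[nf_nat]"
 *
 * Without lzma support, "xz" is not recognized, so m.comp stays
 * COMP_ID__NONE and m.kmod ends up false for the same path.
 */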
340 int __kmod_path__parse(struct kmod_path *m, const char *path,
341 		       bool alloc_name)
342 {
343 	const char *name = strrchr(path, '/');
344 	const char *ext  = strrchr(path, '.');
345 	bool is_simple_name = false;
346 
347 	memset(m, 0x0, sizeof(*m));
348 	name = name ? name + 1 : path;
349 
350 	/*
351 	 * '.' is also a valid character in a module name. For example,
352 	 * [aaa.bbb] is a valid module name. '[' should take priority
353 	 * over the '.ko' suffix.
354 	 *
355 	 * The kernel names come from machine__mmap_name. Such names
356 	 * belong to the kernel itself, not to a kernel module.
357 	 */
358 	if (name[0] == '[') {
359 		is_simple_name = true;
360 		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
361 		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
362 		    (strncmp(name, "[vdso]", 6) == 0) ||
363 		    (strncmp(name, "[vdso32]", 8) == 0) ||
364 		    (strncmp(name, "[vdsox32]", 9) == 0) ||
365 		    (strncmp(name, "[vsyscall]", 10) == 0)) {
366 			m->kmod = false;
367 
368 		} else
369 			m->kmod = true;
370 	}
371 
372 	/* No extension, just return name. */
373 	if ((ext == NULL) || is_simple_name) {
374 		if (alloc_name) {
375 			m->name = strdup(name);
376 			return m->name ? 0 : -ENOMEM;
377 		}
378 		return 0;
379 	}
380 
381 	m->comp = is_supported_compression(ext + 1);
382 	if (m->comp > COMP_ID__NONE)
383 		ext -= 3;
384 
385 	/* Check .ko extension only if there's enough name left. */
386 	if (ext > name)
387 		m->kmod = !strncmp(ext, ".ko", 3);
388 
389 	if (alloc_name) {
390 		if (m->kmod) {
391 			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
392 				return -ENOMEM;
393 		} else {
394 			if (asprintf(&m->name, "%s", name) == -1)
395 				return -ENOMEM;
396 		}
397 
398 		strreplace(m->name, '-', '_');
399 	}
400 
401 	return 0;
402 }
403 
404 void dso__set_module_info(struct dso *dso, struct kmod_path *m,
405 			  struct machine *machine)
406 {
407 	if (machine__is_host(machine))
408 		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
409 	else
410 		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
411 
412 	/* _KMODULE_COMP should be next to _KMODULE */
413 	if (m->kmod && m->comp) {
414 		dso->symtab_type++;
415 		dso->comp = m->comp;
416 	}
417 
418 	dso__set_short_name(dso, strdup(m->name), true);
419 }
420 
421 /*
422  * Global list of open DSOs and the counter.
423  */
424 static LIST_HEAD(dso__data_open);
425 static long dso__data_open_cnt;
426 static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
427 
428 static void dso__list_add(struct dso *dso)
429 {
430 	list_add_tail(&dso->data.open_entry, &dso__data_open);
431 	dso__data_open_cnt++;
432 }
433 
434 static void dso__list_del(struct dso *dso)
435 {
436 	list_del(&dso->data.open_entry);
437 	WARN_ONCE(dso__data_open_cnt <= 0,
438 		  "DSO data fd counter out of bounds.");
439 	dso__data_open_cnt--;
440 }
441 
442 static void close_first_dso(void);
443 
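/*
 * Open 'name' read-only.  If the open fails with EMFILE (the process
 * hit its open file limit) and we still have cached DSO fds on the
 * dso__data_open list, close the oldest one and retry.
 */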
444 static int do_open(char *name)
445 {
446 	int fd;
447 	char sbuf[STRERR_BUFSIZE];
448 
449 	do {
450 		fd = open(name, O_RDONLY|O_CLOEXEC);
451 		if (fd >= 0)
452 			return fd;
453 
454 		pr_debug("dso open failed: %s\n",
455 			 str_error_r(errno, sbuf, sizeof(sbuf)));
456 		if (!dso__data_open_cnt || errno != EMFILE)
457 			break;
458 
459 		close_first_dso();
460 	} while (1);
461 
462 	return -1;
463 }
464 
465 static int __open_dso(struct dso *dso, struct machine *machine)
466 {
467 	int fd = -EINVAL;
468 	char *root_dir = (char *)"";
469 	char *name = malloc(PATH_MAX);
470 	bool decomp = false;
471 
472 	if (!name)
473 		return -ENOMEM;
474 
475 	if (machine)
476 		root_dir = machine->root_dir;
477 
478 	if (dso__read_binary_type_filename(dso, dso->binary_type,
479 					    root_dir, name, PATH_MAX))
480 		goto out;
481 
482 	if (!is_regular_file(name))
483 		goto out;
484 
485 	if (dso__needs_decompress(dso)) {
486 		char newpath[KMOD_DECOMP_LEN];
487 		size_t len = sizeof(newpath);
488 
489 		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
490 			fd = -dso->load_errno;
491 			goto out;
492 		}
493 
494 		decomp = true;
495 		strcpy(name, newpath);
496 	}
497 
498 	fd = do_open(name);
499 
500 	if (decomp)
501 		unlink(name);
502 
503 out:
504 	free(name);
505 	return fd;
506 }
507 
508 static void check_data_close(void);
509 
510 /**
511  * open_dso - Open DSO data file
512  * @dso: dso object
513  * @machine: machine object
514  *
515  * Open @dso's data file descriptor and update the list/count of open DSO objects.
516  */
517 static int open_dso(struct dso *dso, struct machine *machine)
518 {
519 	int fd;
520 	struct nscookie nsc;
521 
522 	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
523 		nsinfo__mountns_enter(dso->nsinfo, &nsc);
524 	fd = __open_dso(dso, machine);
525 	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
526 		nsinfo__mountns_exit(&nsc);
527 
528 	if (fd >= 0) {
529 		dso__list_add(dso);
530 		/*
531 		 * Check if we crossed the allowed number
532 		 * of opened DSOs and close one if needed.
533 		 */
534 		check_data_close();
535 	}
536 
537 	return fd;
538 }
539 
540 static void close_data_fd(struct dso *dso)
541 {
542 	if (dso->data.fd >= 0) {
543 		close(dso->data.fd);
544 		dso->data.fd = -1;
545 		dso->data.file_size = 0;
546 		dso__list_del(dso);
547 	}
548 }
549 
550 /**
551  * close_dso - Close DSO data file
552  * @dso: dso object
553  *
554  * Close @dso's data file descriptor and update the
555  * list/count of open DSO objects.
556  */
557 static void close_dso(struct dso *dso)
558 {
559 	close_data_fd(dso);
560 }
561 
562 static void close_first_dso(void)
563 {
564 	struct dso *dso;
565 
566 	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
567 	close_dso(dso);
568 }
569 
570 static rlim_t get_fd_limit(void)
571 {
572 	struct rlimit l;
573 	rlim_t limit = 0;
574 
575 	/* Allow half of the current open fd limit. */
576 	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
577 		if (l.rlim_cur == RLIM_INFINITY)
578 			limit = l.rlim_cur;
579 		else
580 			limit = l.rlim_cur / 2;
581 	} else {
582 		pr_err("failed to get fd limit\n");
583 		limit = 1;
584 	}
585 
586 	return limit;
587 }
588 
589 static rlim_t fd_limit;
590 
591 /*
592  * Used only by tests/dso-data.c to reset the environment
593  * for tests. I don't expect we should need to change this during
594  * a standard run.
595  */
596 void reset_fd_limit(void)
597 {
598 	fd_limit = 0;
599 }
600 
601 static bool may_cache_fd(void)
602 {
603 	if (!fd_limit)
604 		fd_limit = get_fd_limit();
605 
606 	if (fd_limit == RLIM_INFINITY)
607 		return true;
608 
609 	return fd_limit > (rlim_t) dso__data_open_cnt;
610 }
611 
612 /*
613  * Check and close the LRU dso if we crossed the allowed limit
614  * of opened dso file descriptors. The limit is half of the
615  * RLIMIT_NOFILE soft limit.
616  */
617 static void check_data_close(void)
618 {
619 	bool cache_fd = may_cache_fd();
620 
621 	if (!cache_fd)
622 		close_first_dso();
623 }
624 
625 /**
626  * dso__data_close - Close DSO data file
627  * @dso: dso object
628  *
629  * External interface to close @dso's data file descriptor.
630  */
631 void dso__data_close(struct dso *dso)
632 {
633 	pthread_mutex_lock(&dso__data_open_lock);
634 	close_dso(dso);
635 	pthread_mutex_unlock(&dso__data_open_lock);
636 }
637 
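/*
 * Try to open the DSO's data file.  If dso->binary_type is already
 * known, use it directly; otherwise probe the build-id cache and then
 * the system path.  dso->data.status is set to OK or ERROR depending
 * on the outcome.
 */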
638 static void try_to_open_dso(struct dso *dso, struct machine *machine)
639 {
640 	enum dso_binary_type binary_type_data[] = {
641 		DSO_BINARY_TYPE__BUILD_ID_CACHE,
642 		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
643 		DSO_BINARY_TYPE__NOT_FOUND,
644 	};
645 	int i = 0;
646 
647 	if (dso->data.fd >= 0)
648 		return;
649 
650 	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
651 		dso->data.fd = open_dso(dso, machine);
652 		goto out;
653 	}
654 
655 	do {
656 		dso->binary_type = binary_type_data[i++];
657 
658 		dso->data.fd = open_dso(dso, machine);
659 		if (dso->data.fd >= 0)
660 			goto out;
661 
662 	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
663 out:
664 	if (dso->data.fd >= 0)
665 		dso->data.status = DSO_DATA_STATUS_OK;
666 	else
667 		dso->data.status = DSO_DATA_STATUS_ERROR;
668 }
669 
670 /**
671  * dso__data_get_fd - Get dso's data file descriptor
672  * @dso: dso object
673  * @machine: machine object
674  *
675  * External interface to find dso's file, open it and
676  * return its file descriptor.  It should be paired with
677  * dso__data_put_fd() if it returns a non-negative value.
678  */
679 int dso__data_get_fd(struct dso *dso, struct machine *machine)
680 {
681 	if (dso->data.status == DSO_DATA_STATUS_ERROR)
682 		return -1;
683 
684 	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
685 		return -1;
686 
687 	try_to_open_dso(dso, machine);
688 
689 	if (dso->data.fd < 0)
690 		pthread_mutex_unlock(&dso__data_open_lock);
691 
692 	return dso->data.fd;
693 }
694 
695 void dso__data_put_fd(struct dso *dso __maybe_unused)
696 {
697 	pthread_mutex_unlock(&dso__data_open_lock);
698 }
699 
700 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
701 {
702 	u32 flag = 1 << by;
703 
704 	if (dso->data.status_seen & flag)
705 		return true;
706 
707 	dso->data.status_seen |= flag;
708 
709 	return false;
710 }
711 
712 static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
713 {
714 	struct bpf_prog_info_node *node;
715 	ssize_t size = DSO__DATA_CACHE_SIZE;
716 	u64 len;
717 	u8 *buf;
718 
719 	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
720 	if (!node || !node->info_linear) {
721 		dso->data.status = DSO_DATA_STATUS_ERROR;
722 		return -1;
723 	}
724 
725 	len = node->info_linear->info.jited_prog_len;
726 	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;
727 
728 	if (offset >= len)
729 		return -1;
730 
731 	size = (ssize_t)min(len - offset, (u64)size);
732 	memcpy(data, buf + offset, size);
733 	return size;
734 }
735 
736 static int bpf_size(struct dso *dso)
737 {
738 	struct bpf_prog_info_node *node;
739 
740 	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
741 	if (!node || !node->info_linear) {
742 		dso->data.status = DSO_DATA_STATUS_ERROR;
743 		return -1;
744 	}
745 
746 	dso->data.file_size = node->info_linear->info.jited_prog_len;
747 	return 0;
748 }
749 
750 static void
751 dso_cache__free(struct dso *dso)
752 {
753 	struct rb_root *root = &dso->data.cache;
754 	struct rb_node *next = rb_first(root);
755 
756 	pthread_mutex_lock(&dso->lock);
757 	while (next) {
758 		struct dso_cache *cache;
759 
760 		cache = rb_entry(next, struct dso_cache, rb_node);
761 		next = rb_next(&cache->rb_node);
762 		rb_erase(&cache->rb_node, root);
763 		free(cache);
764 	}
765 	pthread_mutex_unlock(&dso->lock);
766 }
767 
768 static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
769 {
770 	const struct rb_root *root = &dso->data.cache;
771 	struct rb_node * const *p = &root->rb_node;
772 	const struct rb_node *parent = NULL;
773 	struct dso_cache *cache;
774 
775 	while (*p != NULL) {
776 		u64 end;
777 
778 		parent = *p;
779 		cache = rb_entry(parent, struct dso_cache, rb_node);
780 		end = cache->offset + DSO__DATA_CACHE_SIZE;
781 
782 		if (offset < cache->offset)
783 			p = &(*p)->rb_left;
784 		else if (offset >= end)
785 			p = &(*p)->rb_right;
786 		else
787 			return cache;
788 	}
789 
790 	return NULL;
791 }
792 
793 static struct dso_cache *
794 dso_cache__insert(struct dso *dso, struct dso_cache *new)
795 {
796 	struct rb_root *root = &dso->data.cache;
797 	struct rb_node **p = &root->rb_node;
798 	struct rb_node *parent = NULL;
799 	struct dso_cache *cache;
800 	u64 offset = new->offset;
801 
802 	pthread_mutex_lock(&dso->lock);
803 	while (*p != NULL) {
804 		u64 end;
805 
806 		parent = *p;
807 		cache = rb_entry(parent, struct dso_cache, rb_node);
808 		end = cache->offset + DSO__DATA_CACHE_SIZE;
809 
810 		if (offset < cache->offset)
811 			p = &(*p)->rb_left;
812 		else if (offset >= end)
813 			p = &(*p)->rb_right;
814 		else
815 			goto out;
816 	}
817 
818 	rb_link_node(&new->rb_node, parent, p);
819 	rb_insert_color(&new->rb_node, root);
820 
821 	cache = NULL;
822 out:
823 	pthread_mutex_unlock(&dso->lock);
824 	return cache;
825 }
826 
827 static ssize_t
828 dso_cache__memcpy(struct dso_cache *cache, u64 offset,
829 		  u8 *data, u64 size)
830 {
831 	u64 cache_offset = offset - cache->offset;
832 	u64 cache_size   = min(cache->size - cache_offset, size);
833 
834 	memcpy(data, cache->data + cache_offset, cache_size);
835 	return cache_size;
836 }
837 
838 static ssize_t file_read(struct dso *dso, struct machine *machine,
839 			 u64 offset, char *data)
840 {
841 	ssize_t ret;
842 
843 	pthread_mutex_lock(&dso__data_open_lock);
844 
845 	/*
846 	 * dso->data.fd might be closed if another thread opened another
847 	 * file (dso) due to open file limit (RLIMIT_NOFILE).
848 	 */
849 	try_to_open_dso(dso, machine);
850 
851 	if (dso->data.fd < 0) {
852 		dso->data.status = DSO_DATA_STATUS_ERROR;
853 		ret = -errno;
854 		goto out;
855 	}
856 
857 	ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
858 out:
859 	pthread_mutex_unlock(&dso__data_open_lock);
860 	return ret;
861 }
862 
863 static ssize_t
864 dso_cache__read(struct dso *dso, struct machine *machine,
865 		u64 offset, u8 *data, ssize_t size)
866 {
867 	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
868 	struct dso_cache *cache;
869 	struct dso_cache *old;
870 	ssize_t ret;
871 
872 	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
873 	if (!cache)
874 		return -ENOMEM;
875 
876 	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
877 		ret = bpf_read(dso, cache_offset, cache->data);
878 	else
879 		ret = file_read(dso, machine, cache_offset, cache->data);
880 
881 	if (ret > 0) {
882 		cache->offset = cache_offset;
883 		cache->size   = ret;
884 
885 		old = dso_cache__insert(dso, cache);
886 		if (old) {
887 			/* we lose the race */
888 			free(cache);
889 			cache = old;
890 		}
891 
892 		ret = dso_cache__memcpy(cache, offset, data, size);
893 	}
894 
895 	if (ret <= 0)
896 		free(cache);
897 
898 	return ret;
899 }
900 
901 static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
902 			      u64 offset, u8 *data, ssize_t size)
903 {
904 	struct dso_cache *cache;
905 
906 	cache = dso_cache__find(dso, offset);
907 	if (cache)
908 		return dso_cache__memcpy(cache, offset, data, size);
909 	else
910 		return dso_cache__read(dso, machine, offset, data, size);
911 }
912 
913 /*
914  * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
915  * in the rb_tree. Any read of already cached data is served
916  * from the cache.
917  */
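/*
 * Note that dso_cache__read() aligns each read down to a
 * DSO__DATA_CACHE_SIZE boundary (via DSO__DATA_CACHE_MASK), so a
 * single cached_read() may be satisfied from several cache entries.
 */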
918 static ssize_t cached_read(struct dso *dso, struct machine *machine,
919 			   u64 offset, u8 *data, ssize_t size)
920 {
921 	ssize_t r = 0;
922 	u8 *p = data;
923 
924 	do {
925 		ssize_t ret;
926 
927 		ret = dso_cache_read(dso, machine, offset, p, size);
928 		if (ret < 0)
929 			return ret;
930 
931 		/* Reached EOF, return what we have. */
932 		if (!ret)
933 			break;
934 
935 		BUG_ON(ret > size);
936 
937 		r      += ret;
938 		p      += ret;
939 		offset += ret;
940 		size   -= ret;
941 
942 	} while (size);
943 
944 	return r;
945 }
946 
947 static int file_size(struct dso *dso, struct machine *machine)
948 {
949 	int ret = 0;
950 	struct stat st;
951 	char sbuf[STRERR_BUFSIZE];
952 
953 	pthread_mutex_lock(&dso__data_open_lock);
954 
955 	/*
956 	 * dso->data.fd might be closed if another thread opened another
957 	 * file (dso) due to open file limit (RLIMIT_NOFILE).
958 	 */
959 	try_to_open_dso(dso, machine);
960 
961 	if (dso->data.fd < 0) {
962 		ret = -errno;
963 		dso->data.status = DSO_DATA_STATUS_ERROR;
964 		goto out;
965 	}
966 
967 	if (fstat(dso->data.fd, &st) < 0) {
968 		ret = -errno;
969 		pr_err("dso cache fstat failed: %s\n",
970 		       str_error_r(errno, sbuf, sizeof(sbuf)));
971 		dso->data.status = DSO_DATA_STATUS_ERROR;
972 		goto out;
973 	}
974 	dso->data.file_size = st.st_size;
975 
976 out:
977 	pthread_mutex_unlock(&dso__data_open_lock);
978 	return ret;
979 }
980 
981 int dso__data_file_size(struct dso *dso, struct machine *machine)
982 {
983 	if (dso->data.file_size)
984 		return 0;
985 
986 	if (dso->data.status == DSO_DATA_STATUS_ERROR)
987 		return -1;
988 
989 	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
990 		return bpf_size(dso);
991 
992 	return file_size(dso, machine);
993 }
994 
995 /**
996  * dso__data_size - Return dso data size
997  * @dso: dso object
998  * @machine: machine object
999  *
1000  * Return: dso data size
1001  */
1002 off_t dso__data_size(struct dso *dso, struct machine *machine)
1003 {
1004 	if (dso__data_file_size(dso, machine))
1005 		return -1;
1006 
1007 	/* For now just estimate that the dso data size is close to the file size */
1008 	return dso->data.file_size;
1009 }
1010 
1011 static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
1012 				u64 offset, u8 *data, ssize_t size)
1013 {
1014 	if (dso__data_file_size(dso, machine))
1015 		return -1;
1016 
1017 	/* Check the offset sanity. */
1018 	if (offset > dso->data.file_size)
1019 		return -1;
1020 
1021 	if (offset + size < offset)
1022 		return -1;
1023 
1024 	return cached_read(dso, machine, offset, data, size);
1025 }
1026 
1027 /**
1028  * dso__data_read_offset - Read data from dso file offset
1029  * @dso: dso object
1030  * @machine: machine object
1031  * @offset: file offset
1032  * @data: buffer to store data
1033  * @size: size of the @data buffer
1034  *
1035  * External interface to read data from a dso file offset. Opens the
1036  * dso data file and uses cached_read() to get the data.
1037  */
1038 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
1039 			      u64 offset, u8 *data, ssize_t size)
1040 {
1041 	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1042 		return -1;
1043 
1044 	return data_read_offset(dso, machine, offset, data, size);
1045 }
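/*
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *   u8 buf[64];
 *   ssize_t n = dso__data_read_offset(dso, machine, 0, buf, sizeof(buf));
 *
 * On success 'n' holds the number of bytes copied into 'buf'; on
 * failure the function returns a negative value.
 */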
1046 
1047 /**
1048  * dso__data_read_addr - Read data from dso address
1049  * @dso: dso object
1050  * @machine: machine object
1051  * @addr: virtual memory address
1052  * @data: buffer to store data
1053  * @size: size of the @data buffer
1054  *
1055  * External interface to read data from a dso virtual address, using @map to translate @addr into a file offset.
1056  */
1057 ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
1058 			    struct machine *machine, u64 addr,
1059 			    u8 *data, ssize_t size)
1060 {
1061 	u64 offset = map->map_ip(map, addr);
1062 	return dso__data_read_offset(dso, machine, offset, data, size);
1063 }
1064 
1065 struct map *dso__new_map(const char *name)
1066 {
1067 	struct map *map = NULL;
1068 	struct dso *dso = dso__new(name);
1069 
1070 	if (dso)
1071 		map = map__new2(0, dso);
1072 
1073 	return map;
1074 }
1075 
1076 struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
1077 				    const char *short_name, int dso_type)
1078 {
1079 	/*
1080 	 * The kernel dso could be created by build_id processing.
1081 	 */
1082 	struct dso *dso = machine__findnew_dso(machine, name);
1083 
1084 	/*
1085 	 * We need to run this in all cases, since during the build_id
1086 	 * processing we had no idea this was the kernel dso.
1087 	 */
1088 	if (dso != NULL) {
1089 		dso__set_short_name(dso, short_name, false);
1090 		dso->kernel = dso_type;
1091 	}
1092 
1093 	return dso;
1094 }
1095 
1096 /*
1097  * Find a matching entry and/or link current entry to RB tree.
1098  * At least one of the dso or name parameters must be non-NULL
1099  * or the function will not work.
1100  */
1101 static struct dso *__dso__findlink_by_longname(struct rb_root *root,
1102 					       struct dso *dso, const char *name)
1103 {
1104 	struct rb_node **p = &root->rb_node;
1105 	struct rb_node  *parent = NULL;
1106 
1107 	if (!name)
1108 		name = dso->long_name;
1109 	/*
1110 	 * Find node with the matching name
1111 	 */
1112 	while (*p) {
1113 		struct dso *this = rb_entry(*p, struct dso, rb_node);
1114 		int rc = strcmp(name, this->long_name);
1115 
1116 		parent = *p;
1117 		if (rc == 0) {
1118 			/*
1119 			 * In case the new DSO is a duplicate of an existing
1120 			 * one, print a one-time warning & put the new entry
1121 			 * at the end of the list of duplicates.
1122 			 */
1123 			if (!dso || (dso == this))
1124 				return this;	/* Find matching dso */
1125 			/*
1126 			 * The core kernel DSOs may have duplicated long names.
1127 			 * In this case, the short names should be different, so
1128 			 * compare the short names to differentiate the DSOs.
1129 			 */
1130 			rc = strcmp(dso->short_name, this->short_name);
1131 			if (rc == 0) {
1132 				pr_err("Duplicated dso name: %s\n", name);
1133 				return NULL;
1134 			}
1135 		}
1136 		if (rc < 0)
1137 			p = &parent->rb_left;
1138 		else
1139 			p = &parent->rb_right;
1140 	}
1141 	if (dso) {
1142 		/* Add new node and rebalance tree */
1143 		rb_link_node(&dso->rb_node, parent, p);
1144 		rb_insert_color(&dso->rb_node, root);
1145 		dso->root = root;
1146 	}
1147 	return NULL;
1148 }
1149 
1150 static inline struct dso *__dso__find_by_longname(struct rb_root *root,
1151 						  const char *name)
1152 {
1153 	return __dso__findlink_by_longname(root, NULL, name);
1154 }
1155 
1156 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
1157 {
1158 	struct rb_root *root = dso->root;
1159 
1160 	if (name == NULL)
1161 		return;
1162 
1163 	if (dso->long_name_allocated)
1164 		free((char *)dso->long_name);
1165 
1166 	if (root) {
1167 		rb_erase(&dso->rb_node, root);
1168 		/*
1169 		 * __dso__findlink_by_longname() isn't guaranteed to add it
1170 		 * back, so a clean removal is required here.
1171 		 */
1172 		RB_CLEAR_NODE(&dso->rb_node);
1173 		dso->root = NULL;
1174 	}
1175 
1176 	dso->long_name		 = name;
1177 	dso->long_name_len	 = strlen(name);
1178 	dso->long_name_allocated = name_allocated;
1179 
1180 	if (root)
1181 		__dso__findlink_by_longname(root, dso, NULL);
1182 }
1183 
1184 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1185 {
1186 	if (name == NULL)
1187 		return;
1188 
1189 	if (dso->short_name_allocated)
1190 		free((char *)dso->short_name);
1191 
1192 	dso->short_name		  = name;
1193 	dso->short_name_len	  = strlen(name);
1194 	dso->short_name_allocated = name_allocated;
1195 }
1196 
1197 static void dso__set_basename(struct dso *dso)
1198 {
1199 	char *base, *lname;
1200 	int tid;
1201 
1202 	if (sscanf(dso->long_name, "/tmp/perf-%d.map", &tid) == 1) {
1203 		if (asprintf(&base, "[JIT] tid %d", tid) < 0)
1204 			return;
1205 	} else {
1206 		/*
1207 		 * basename() may modify path buffer, so we must pass
1208 		 * a copy.
1209 		 */
1210 		lname = strdup(dso->long_name);
1211 		if (!lname)
1212 			return;
1213 
1214 		/*
1215 		 * basename() may return a pointer to internal
1216 		 * storage which is reused in subsequent calls
1217 		 * so copy the result.
1218 		 */
1219 		base = strdup(basename(lname));
1220 
1221 		free(lname);
1222 
1223 		if (!base)
1224 			return;
1225 	}
1226 	dso__set_short_name(dso, base, true);
1227 }
1228 
1229 int dso__name_len(const struct dso *dso)
1230 {
1231 	if (!dso)
1232 		return strlen("[unknown]");
1233 	if (verbose > 0)
1234 		return dso->long_name_len;
1235 
1236 	return dso->short_name_len;
1237 }
1238 
1239 bool dso__loaded(const struct dso *dso)
1240 {
1241 	return dso->loaded;
1242 }
1243 
1244 bool dso__sorted_by_name(const struct dso *dso)
1245 {
1246 	return dso->sorted_by_name;
1247 }
1248 
1249 void dso__set_sorted_by_name(struct dso *dso)
1250 {
1251 	dso->sorted_by_name = true;
1252 }
1253 
1254 struct dso *dso__new(const char *name)
1255 {
1256 	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1257 
1258 	if (dso != NULL) {
1259 		strcpy(dso->name, name);
1260 		dso__set_long_name(dso, dso->name, false);
1261 		dso__set_short_name(dso, dso->name, false);
1262 		dso->symbols = dso->symbol_names = RB_ROOT_CACHED;
1263 		dso->data.cache = RB_ROOT;
1264 		dso->inlined_nodes = RB_ROOT_CACHED;
1265 		dso->srclines = RB_ROOT_CACHED;
1266 		dso->data.fd = -1;
1267 		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1268 		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1269 		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1270 		dso->is_64_bit = (sizeof(void *) == 8);
1271 		dso->loaded = 0;
1272 		dso->rel = 0;
1273 		dso->sorted_by_name = 0;
1274 		dso->has_build_id = 0;
1275 		dso->has_srcline = 1;
1276 		dso->a2l_fails = 1;
1277 		dso->kernel = DSO_TYPE_USER;
1278 		dso->needs_swap = DSO_SWAP__UNSET;
1279 		dso->comp = COMP_ID__NONE;
1280 		RB_CLEAR_NODE(&dso->rb_node);
1281 		dso->root = NULL;
1282 		INIT_LIST_HEAD(&dso->node);
1283 		INIT_LIST_HEAD(&dso->data.open_entry);
1284 		pthread_mutex_init(&dso->lock, NULL);
1285 		refcount_set(&dso->refcnt, 1);
1286 	}
1287 
1288 	return dso;
1289 }
1290 
1291 void dso__delete(struct dso *dso)
1292 {
1293 	if (!RB_EMPTY_NODE(&dso->rb_node))
1294 		pr_err("DSO %s is still in rbtree when being deleted!\n",
1295 		       dso->long_name);
1296 
1297 	/* free inlines first, as they reference symbols */
1298 	inlines__tree_delete(&dso->inlined_nodes);
1299 	srcline__tree_delete(&dso->srclines);
1300 	symbols__delete(&dso->symbols);
1301 
1302 	if (dso->short_name_allocated) {
1303 		zfree((char **)&dso->short_name);
1304 		dso->short_name_allocated = false;
1305 	}
1306 
1307 	if (dso->long_name_allocated) {
1308 		zfree((char **)&dso->long_name);
1309 		dso->long_name_allocated = false;
1310 	}
1311 
1312 	dso__data_close(dso);
1313 	auxtrace_cache__free(dso->auxtrace_cache);
1314 	dso_cache__free(dso);
1315 	dso__free_a2l(dso);
1316 	zfree(&dso->symsrc_filename);
1317 	nsinfo__zput(dso->nsinfo);
1318 	pthread_mutex_destroy(&dso->lock);
1319 	free(dso);
1320 }
1321 
1322 struct dso *dso__get(struct dso *dso)
1323 {
1324 	if (dso)
1325 		refcount_inc(&dso->refcnt);
1326 	return dso;
1327 }
1328 
1329 void dso__put(struct dso *dso)
1330 {
1331 	if (dso && refcount_dec_and_test(&dso->refcnt))
1332 		dso__delete(dso);
1333 }
1334 
1335 void dso__set_build_id(struct dso *dso, void *build_id)
1336 {
1337 	memcpy(dso->build_id, build_id, sizeof(dso->build_id));
1338 	dso->has_build_id = 1;
1339 }
1340 
1341 bool dso__build_id_equal(const struct dso *dso, u8 *build_id)
1342 {
1343 	return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0;
1344 }
1345 
1346 void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1347 {
1348 	char path[PATH_MAX];
1349 
1350 	if (machine__is_default_guest(machine))
1351 		return;
1352 	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1353 	if (sysfs__read_build_id(path, dso->build_id,
1354 				 sizeof(dso->build_id)) == 0)
1355 		dso->has_build_id = true;
1356 }
1357 
1358 int dso__kernel_module_get_build_id(struct dso *dso,
1359 				    const char *root_dir)
1360 {
1361 	char filename[PATH_MAX];
1362 	/*
1363 	 * kernel module short names are of the form "[module]" and
1364 	 * we need just "module" here.
1365 	 */
1366 	const char *name = dso->short_name + 1;
1367 
1368 	snprintf(filename, sizeof(filename),
1369 		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1370 		 root_dir, (int)strlen(name) - 1, name);
1371 
1372 	if (sysfs__read_build_id(filename, dso->build_id,
1373 				 sizeof(dso->build_id)) == 0)
1374 		dso->has_build_id = true;
1375 
1376 	return 0;
1377 }
1378 
1379 bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
1380 {
1381 	bool have_build_id = false;
1382 	struct dso *pos;
1383 	struct nscookie nsc;
1384 
1385 	list_for_each_entry(pos, head, node) {
1386 		if (with_hits && !pos->hit && !dso__is_vdso(pos))
1387 			continue;
1388 		if (pos->has_build_id) {
1389 			have_build_id = true;
1390 			continue;
1391 		}
1392 		nsinfo__mountns_enter(pos->nsinfo, &nsc);
1393 		if (filename__read_build_id(pos->long_name, pos->build_id,
1394 					    sizeof(pos->build_id)) > 0) {
1395 			have_build_id	  = true;
1396 			pos->has_build_id = true;
1397 		}
1398 		nsinfo__mountns_exit(&nsc);
1399 	}
1400 
1401 	return have_build_id;
1402 }
1403 
1404 void __dsos__add(struct dsos *dsos, struct dso *dso)
1405 {
1406 	list_add_tail(&dso->node, &dsos->head);
1407 	__dso__findlink_by_longname(&dsos->root, dso, NULL);
1408 	/*
1409 	 * It is now in the linked list, so grab a reference. Then garbage
1410 	 * collect it when memory is needed, by looking at LRU dso instances
1411 	 * in the list with atomic_read(&dso->refcnt) == 1, i.e. no references
1412 	 * anywhere besides the one for the list. For those, under a lock for
1413 	 * the list: remove it from the list, then a dso__put(), which probably
1414 	 * will be the last one and will then call dso__delete(), end of life.
1415 	 *
1416 	 * That, or at the end of the 'struct machine' lifetime, when all
1417 	 * 'struct dso' instances will be removed from the list, in
1418 	 * dsos__exit(), if they have no other reference from some other data
1419 	 * structure.
1420 	 *
1421 	 * E.g.: after processing a 'perf.data' file and storing references
1422 	 * to objects instantiated while processing events, we will have
1423 	 * references to the 'thread', 'map', 'dso' structs all from 'struct
1424 	 * hist_entry' instances, but we may not need anything not referenced,
1425 	 * so we might as well call machines__exit()/machines__delete() and
1426 	 * garbage collect it.
1427 	 */
1428 	dso__get(dso);
1429 }
1430 
1431 void dsos__add(struct dsos *dsos, struct dso *dso)
1432 {
1433 	down_write(&dsos->lock);
1434 	__dsos__add(dsos, dso);
1435 	up_write(&dsos->lock);
1436 }
1437 
1438 struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1439 {
1440 	struct dso *pos;
1441 
1442 	if (cmp_short) {
1443 		list_for_each_entry(pos, &dsos->head, node)
1444 			if (strcmp(pos->short_name, name) == 0)
1445 				return pos;
1446 		return NULL;
1447 	}
1448 	return __dso__find_by_longname(&dsos->root, name);
1449 }
1450 
1451 struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
1452 {
1453 	struct dso *dso;
1454 	down_read(&dsos->lock);
1455 	dso = __dsos__find(dsos, name, cmp_short);
1456 	up_read(&dsos->lock);
1457 	return dso;
1458 }
1459 
1460 struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
1461 {
1462 	struct dso *dso = dso__new(name);
1463 
1464 	if (dso != NULL) {
1465 		__dsos__add(dsos, dso);
1466 		dso__set_basename(dso);
1467 		/* Put dso here because __dsos__add already got it */
1468 		dso__put(dso);
1469 	}
1470 	return dso;
1471 }
1472 
1473 struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
1474 {
1475 	struct dso *dso = __dsos__find(dsos, name, false);
1476 
1477 	return dso ? dso : __dsos__addnew(dsos, name);
1478 }
1479 
1480 struct dso *dsos__findnew(struct dsos *dsos, const char *name)
1481 {
1482 	struct dso *dso;
1483 	down_write(&dsos->lock);
1484 	dso = dso__get(__dsos__findnew(dsos, name));
1485 	up_write(&dsos->lock);
1486 	return dso;
1487 }
1488 
1489 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
1490 			       bool (skip)(struct dso *dso, int parm), int parm)
1491 {
1492 	struct dso *pos;
1493 	size_t ret = 0;
1494 
1495 	list_for_each_entry(pos, head, node) {
1496 		if (skip && skip(pos, parm))
1497 			continue;
1498 		ret += dso__fprintf_buildid(pos, fp);
1499 		ret += fprintf(fp, " %s\n", pos->long_name);
1500 	}
1501 	return ret;
1502 }
1503 
1504 size_t __dsos__fprintf(struct list_head *head, FILE *fp)
1505 {
1506 	struct dso *pos;
1507 	size_t ret = 0;
1508 
1509 	list_for_each_entry(pos, head, node) {
1510 		ret += dso__fprintf(pos, fp);
1511 	}
1512 
1513 	return ret;
1514 }
1515 
1516 size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1517 {
1518 	char sbuild_id[SBUILD_ID_SIZE];
1519 
1520 	build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
1521 	return fprintf(fp, "%s", sbuild_id);
1522 }
1523 
1524 size_t dso__fprintf(struct dso *dso, FILE *fp)
1525 {
1526 	struct rb_node *nd;
1527 	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1528 
1529 	if (dso->short_name != dso->long_name)
1530 		ret += fprintf(fp, "%s, ", dso->long_name);
1531 	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1532 	ret += dso__fprintf_buildid(dso, fp);
1533 	ret += fprintf(fp, ")\n");
1534 	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
1535 		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1536 		ret += symbol__fprintf(pos, fp);
1537 	}
1538 
1539 	return ret;
1540 }
1541 
1542 enum dso_type dso__type(struct dso *dso, struct machine *machine)
1543 {
1544 	int fd;
1545 	enum dso_type type = DSO__TYPE_UNKNOWN;
1546 
1547 	fd = dso__data_get_fd(dso, machine);
1548 	if (fd >= 0) {
1549 		type = dso__type_fd(fd);
1550 		dso__data_put_fd(dso);
1551 	}
1552 
1553 	return type;
1554 }
1555 
1556 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1557 {
1558 	int idx, errnum = dso->load_errno;
1559 	/*
1560 	 * This must have the same ordering as enum dso_load_errno.
1561 	 */
1562 	static const char *dso_load__error_str[] = {
1563 	"Internal tools/perf/ library error",
1564 	"Invalid ELF file",
1565 	"Can not read build id",
1566 	"Mismatching build id",
1567 	"Decompression failure",
1568 	};
1569 
1570 	BUG_ON(buflen == 0);
1571 
1572 	if (errnum >= 0) {
1573 		const char *err = str_error_r(errnum, buf, buflen);
1574 
1575 		if (err != buf)
1576 			scnprintf(buf, buflen, "%s", err);
1577 
1578 		return 0;
1579 	}
1580 
1581 	if (errnum <  __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1582 		return -1;
1583 
1584 	idx = errnum - __DSO_LOAD_ERRNO__START;
1585 	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1586 	return 0;
1587 }
1588