xref: /linux/tools/perf/util/dso.c (revision be239684b18e1cdcafcf8c7face4a2f562c745ad)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <asm/bug.h>
3 #include <linux/kernel.h>
4 #include <linux/string.h>
5 #include <linux/zalloc.h>
6 #include <sys/time.h>
7 #include <sys/resource.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <fcntl.h>
13 #include <stdlib.h>
14 #ifdef HAVE_LIBBPF_SUPPORT
15 #include <bpf/libbpf.h>
16 #include "bpf-event.h"
17 #include "bpf-utils.h"
18 #endif
19 #include "compress.h"
20 #include "env.h"
21 #include "namespaces.h"
22 #include "path.h"
23 #include "map.h"
24 #include "symbol.h"
25 #include "srcline.h"
26 #include "dso.h"
27 #include "dsos.h"
28 #include "machine.h"
29 #include "auxtrace.h"
30 #include "util.h" /* O_CLOEXEC for older systems */
31 #include "debug.h"
32 #include "string2.h"
33 #include "vdso.h"
34 #include "annotate-data.h"
35 
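/*
 * snprintf() formats taking (dso_dir, symfile); they expand to <symfile>,
 * <dso_dir>/<symfile>, <dso_dir>/.debug/<symfile> and
 * /usr/lib/debug<dso_dir>/<symfile>, the usual places a file named by a
 * .gnu_debuglink section may live.
 */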
36 static const char * const debuglink_paths[] = {
37 	"%.0s%s",
38 	"%s/%s",
39 	"%s/.debug/%s",
40 	"/usr/lib/debug%s/%s"
41 };
42 
43 char dso__symtab_origin(const struct dso *dso)
44 {
45 	static const char origin[] = {
46 		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
47 		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
48 		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
49 		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
50 		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
51 		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
52 		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
53 		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
54 		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO]	= 'x',
55 		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
56 		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
57 		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
58 		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
59 		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
60 		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
61 		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
62 		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
63 		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
64 	};
65 
66 	if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND)
67 		return '!';
68 	return origin[dso->symtab_type];
69 }
70 
71 bool dso__is_object_file(const struct dso *dso)
72 {
73 	switch (dso->binary_type) {
74 	case DSO_BINARY_TYPE__KALLSYMS:
75 	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
76 	case DSO_BINARY_TYPE__JAVA_JIT:
77 	case DSO_BINARY_TYPE__BPF_PROG_INFO:
78 	case DSO_BINARY_TYPE__BPF_IMAGE:
79 	case DSO_BINARY_TYPE__OOL:
80 		return false;
81 	case DSO_BINARY_TYPE__VMLINUX:
82 	case DSO_BINARY_TYPE__GUEST_VMLINUX:
83 	case DSO_BINARY_TYPE__DEBUGLINK:
84 	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
85 	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
86 	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
87 	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
88 	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
89 	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
90 	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
91 	case DSO_BINARY_TYPE__GUEST_KMODULE:
92 	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
93 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
94 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
95 	case DSO_BINARY_TYPE__KCORE:
96 	case DSO_BINARY_TYPE__GUEST_KCORE:
97 	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
98 	case DSO_BINARY_TYPE__NOT_FOUND:
99 	default:
100 		return true;
101 	}
102 }
103 
104 int dso__read_binary_type_filename(const struct dso *dso,
105 				   enum dso_binary_type type,
106 				   char *root_dir, char *filename, size_t size)
107 {
108 	char build_id_hex[SBUILD_ID_SIZE];
109 	int ret = 0;
110 	size_t len;
111 
112 	switch (type) {
113 	case DSO_BINARY_TYPE__DEBUGLINK:
114 	{
115 		const char *last_slash;
116 		char dso_dir[PATH_MAX];
117 		char symfile[PATH_MAX];
118 		unsigned int i;
119 
120 		len = __symbol__join_symfs(filename, size, dso->long_name);
121 		last_slash = filename + len;
122 		while (last_slash != filename && *last_slash != '/')
123 			last_slash--;
124 
125 		strncpy(dso_dir, filename, last_slash - filename);
126 		dso_dir[last_slash-filename] = '\0';
127 
128 		if (!is_regular_file(filename)) {
129 			ret = -1;
130 			break;
131 		}
132 
133 		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
134 		if (ret)
135 			break;
136 
137 		/* Check predefined locations where debug file might reside */
138 		ret = -1;
139 		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
140 			snprintf(filename, size,
141 					debuglink_paths[i], dso_dir, symfile);
142 			if (is_regular_file(filename)) {
143 				ret = 0;
144 				break;
145 			}
146 		}
147 
148 		break;
149 	}
150 	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
151 		if (dso__build_id_filename(dso, filename, size, false) == NULL)
152 			ret = -1;
153 		break;
154 
155 	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
156 		if (dso__build_id_filename(dso, filename, size, true) == NULL)
157 			ret = -1;
158 		break;
159 
160 	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
161 		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
162 		snprintf(filename + len, size - len, "%s.debug", dso->long_name);
163 		break;
164 
165 	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
166 		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
167 		snprintf(filename + len, size - len, "%s", dso->long_name);
168 		break;
169 
170 	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
171 		/*
172 		 * Ubuntu can mix up /usr/lib with /lib, putting debuginfo in
173 		 * /usr/lib/debug/lib when it is expected to be in
174 		 * /usr/lib/debug/usr/lib
175 		 */
176 		if (strlen(dso->long_name) < 9 ||
177 		    strncmp(dso->long_name, "/usr/lib/", 9)) {
178 			ret = -1;
179 			break;
180 		}
181 		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
182 		snprintf(filename + len, size - len, "%s", dso->long_name + 4);
183 		break;
184 
185 	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
186 	{
187 		const char *last_slash;
188 		size_t dir_size;
189 
190 		last_slash = dso->long_name + dso->long_name_len;
191 		while (last_slash != dso->long_name && *last_slash != '/')
192 			last_slash--;
193 
194 		len = __symbol__join_symfs(filename, size, "");
195 		dir_size = last_slash - dso->long_name + 2;
196 		if (dir_size > (size - len)) {
197 			ret = -1;
198 			break;
199 		}
200 		len += scnprintf(filename + len, dir_size, "%s",  dso->long_name);
201 		len += scnprintf(filename + len , size - len, ".debug%s",
202 								last_slash);
203 		break;
204 	}
205 
206 	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
207 		if (!dso->has_build_id) {
208 			ret = -1;
209 			break;
210 		}
211 
212 		build_id__sprintf(&dso->bid, build_id_hex);
213 		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
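		/* Layout is .build-id/<first two hex chars>/<remaining hex chars>.debug */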
214 		snprintf(filename + len, size - len, "%.2s/%s.debug",
215 			 build_id_hex, build_id_hex + 2);
216 		break;
217 
218 	case DSO_BINARY_TYPE__VMLINUX:
219 	case DSO_BINARY_TYPE__GUEST_VMLINUX:
220 	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
221 		__symbol__join_symfs(filename, size, dso->long_name);
222 		break;
223 
224 	case DSO_BINARY_TYPE__GUEST_KMODULE:
225 	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
226 		path__join3(filename, size, symbol_conf.symfs,
227 			    root_dir, dso->long_name);
228 		break;
229 
230 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
231 	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
232 		__symbol__join_symfs(filename, size, dso->long_name);
233 		break;
234 
235 	case DSO_BINARY_TYPE__KCORE:
236 	case DSO_BINARY_TYPE__GUEST_KCORE:
237 		snprintf(filename, size, "%s", dso->long_name);
238 		break;
239 
240 	default:
241 	case DSO_BINARY_TYPE__KALLSYMS:
242 	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
243 	case DSO_BINARY_TYPE__JAVA_JIT:
244 	case DSO_BINARY_TYPE__BPF_PROG_INFO:
245 	case DSO_BINARY_TYPE__BPF_IMAGE:
246 	case DSO_BINARY_TYPE__OOL:
247 	case DSO_BINARY_TYPE__NOT_FOUND:
248 		ret = -1;
249 		break;
250 	}
251 
252 	return ret;
253 }
254 
255 enum {
256 	COMP_ID__NONE = 0,
257 };
258 
259 static const struct {
260 	const char *fmt;
261 	int (*decompress)(const char *input, int output);
262 	bool (*is_compressed)(const char *input);
263 } compressions[] = {
264 	[COMP_ID__NONE] = { .fmt = NULL, },
265 #ifdef HAVE_ZLIB_SUPPORT
266 	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
267 #endif
268 #ifdef HAVE_LZMA_SUPPORT
269 	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
270 #endif
271 	{ NULL, NULL, NULL },
272 };
273 
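/*
 * Map a compression file name extension (without the dot, e.g. "xz") to its
 * index in compressions[]; index 0 is COMP_ID__NONE, so the scan starts at 1.
 */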
274 static int is_supported_compression(const char *ext)
275 {
276 	unsigned i;
277 
278 	for (i = 1; compressions[i].fmt; i++) {
279 		if (!strcmp(ext, compressions[i].fmt))
280 			return i;
281 	}
282 	return COMP_ID__NONE;
283 }
284 
285 bool is_kernel_module(const char *pathname, int cpumode)
286 {
287 	struct kmod_path m;
288 	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
289 
290 	WARN_ONCE(mode != cpumode,
291 		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
292 		  cpumode);
293 
294 	switch (mode) {
295 	case PERF_RECORD_MISC_USER:
296 	case PERF_RECORD_MISC_HYPERVISOR:
297 	case PERF_RECORD_MISC_GUEST_USER:
298 		return false;
299 	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
300 	default:
301 		if (kmod_path__parse(&m, pathname)) {
302 			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
303 					pathname);
304 			return true;
305 		}
306 	}
307 
308 	return m.kmod;
309 }
310 
311 bool dso__needs_decompress(struct dso *dso)
312 {
313 	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
314 		dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
315 }
316 
317 int filename__decompress(const char *name, char *pathname,
318 			 size_t len, int comp, int *err)
319 {
320 	char tmpbuf[] = KMOD_DECOMP_NAME;
321 	int fd = -1;
322 
323 	/*
324 	 * We have a proper compression id for the DSO, and yet the file
325 	 * behind 'name' can still be a plain uncompressed object.
326 	 *
327 	 * The reason lies in how we open DSO object files: we try all
328 	 * possible 'debug' objects until we find the data. So even if the
329 	 * DSO is represented by a 'krava.xz' module, we can end up here
330 	 * opening a '~/.debug/....23432432/debug' file, which is not
331 	 * compressed.
332 	 *
333 	 * To keep this transparent, we detect this case and return a file
334 	 * descriptor to the uncompressed file.
335 	 */
336 	if (!compressions[comp].is_compressed(name))
337 		return open(name, O_RDONLY);
338 
339 	fd = mkstemp(tmpbuf);
340 	if (fd < 0) {
341 		*err = errno;
342 		return -1;
343 	}
344 
345 	if (compressions[comp].decompress(name, fd)) {
346 		*err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
347 		close(fd);
348 		fd = -1;
349 	}
350 
351 	if (!pathname || (fd < 0))
352 		unlink(tmpbuf);
353 
354 	if (pathname && (fd >= 0))
355 		strlcpy(pathname, tmpbuf, len);
356 
357 	return fd;
358 }
359 
360 static int decompress_kmodule(struct dso *dso, const char *name,
361 			      char *pathname, size_t len)
362 {
363 	if (!dso__needs_decompress(dso))
364 		return -1;
365 
366 	if (dso->comp == COMP_ID__NONE)
367 		return -1;
368 
369 	return filename__decompress(name, pathname, len, dso->comp,
370 				    &dso->load_errno);
371 }
372 
373 int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
374 {
375 	return decompress_kmodule(dso, name, NULL, 0);
376 }
377 
378 int dso__decompress_kmodule_path(struct dso *dso, const char *name,
379 				 char *pathname, size_t len)
380 {
381 	int fd = decompress_kmodule(dso, name, pathname, len);
382 
383 	close(fd);
384 	return fd >= 0 ? 0 : -1;
385 }
386 
387 /*
388  * Parses the kernel module path given in @path and updates the
389  * @m argument as follows:
390  *
391  *    @comp - compression id if @path has a supported compression
392  *            suffix, COMP_ID__NONE otherwise
393  *    @kmod - true if @path contains a '.ko' suffix in the right
394  *            position, false otherwise
395  *    @name - if (@alloc_name && @kmod) is true, it contains the strdup-ed
396  *            base name of the kernel module without suffixes, otherwise
397  *            the strdup-ed base name of @path
400  *
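 * For example, assuming xz support is built in, parsing
 * "/lib/modules/<ver>/kernel/crypto/xor.ko.xz" sets @kmod to true, @comp
 * to the xz compression id and, with @alloc_name, @name to "[xor]".
 *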
401  * Returns 0 if there's no strdup error, -ENOMEM otherwise.
402  */
403 int __kmod_path__parse(struct kmod_path *m, const char *path,
404 		       bool alloc_name)
405 {
406 	const char *name = strrchr(path, '/');
407 	const char *ext  = strrchr(path, '.');
408 	bool is_simple_name = false;
409 
410 	memset(m, 0x0, sizeof(*m));
411 	name = name ? name + 1 : path;
412 
413 	/*
414 	 * '.' is also a valid character in a module name. For example,
415 	 * [aaa.bbb] is a valid module name, so '[' takes priority over
416 	 * the '.ko' suffix.
417 	 *
418 	 * The kernel names come from machine__mmap_name. Such names
419 	 * belong to the kernel itself, not to a kernel module.
420 	 */
421 	if (name[0] == '[') {
422 		is_simple_name = true;
423 		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
424 		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
425 		    (strncmp(name, "[vdso]", 6) == 0) ||
426 		    (strncmp(name, "[vdso32]", 8) == 0) ||
427 		    (strncmp(name, "[vdsox32]", 9) == 0) ||
428 		    (strncmp(name, "[vsyscall]", 10) == 0)) {
429 			m->kmod = false;
430 
431 		} else
432 			m->kmod = true;
433 	}
434 
435 	/* No extension, just return name. */
436 	if ((ext == NULL) || is_simple_name) {
437 		if (alloc_name) {
438 			m->name = strdup(name);
439 			return m->name ? 0 : -ENOMEM;
440 		}
441 		return 0;
442 	}
443 
444 	m->comp = is_supported_compression(ext + 1);
445 	if (m->comp > COMP_ID__NONE)
446 		ext -= 3;
447 
448 	/* Check .ko extension only if there's enough name left. */
449 	if (ext > name)
450 		m->kmod = !strncmp(ext, ".ko", 3);
451 
452 	if (alloc_name) {
453 		if (m->kmod) {
454 			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
455 				return -ENOMEM;
456 		} else {
457 			if (asprintf(&m->name, "%s", name) == -1)
458 				return -ENOMEM;
459 		}
460 
461 		strreplace(m->name, '-', '_');
462 	}
463 
464 	return 0;
465 }
466 
467 void dso__set_module_info(struct dso *dso, struct kmod_path *m,
468 			  struct machine *machine)
469 {
470 	if (machine__is_host(machine))
471 		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
472 	else
473 		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
474 
475 	/* _KMODULE_COMP should be next to _KMODULE */
476 	if (m->kmod && m->comp) {
477 		dso->symtab_type++;
478 		dso->comp = m->comp;
479 	}
480 
481 	dso->is_kmod = 1;
482 	dso__set_short_name(dso, strdup(m->name), true);
483 }
484 
485 /*
486  * Global list of open DSOs and the counter.
487  */
488 static LIST_HEAD(dso__data_open);
489 static long dso__data_open_cnt;
490 static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
491 
492 static void dso__list_add(struct dso *dso)
493 {
494 	list_add_tail(&dso->data.open_entry, &dso__data_open);
495 	dso__data_open_cnt++;
496 }
497 
498 static void dso__list_del(struct dso *dso)
499 {
500 	list_del_init(&dso->data.open_entry);
501 	WARN_ONCE(dso__data_open_cnt <= 0,
502 		  "DSO data fd counter out of bounds.");
503 	dso__data_open_cnt--;
504 }
505 
506 static void close_first_dso(void);
507 
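/*
 * Open 'name' read-only; if we hit EMFILE and there are cached dso fds,
 * close the oldest one and retry.
 */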
508 static int do_open(char *name)
509 {
510 	int fd;
511 	char sbuf[STRERR_BUFSIZE];
512 
513 	do {
514 		fd = open(name, O_RDONLY|O_CLOEXEC);
515 		if (fd >= 0)
516 			return fd;
517 
518 		pr_debug("dso open failed: %s\n",
519 			 str_error_r(errno, sbuf, sizeof(sbuf)));
520 		if (!dso__data_open_cnt || errno != EMFILE)
521 			break;
522 
523 		close_first_dso();
524 	} while (1);
525 
526 	return -1;
527 }
528 
529 char *dso__filename_with_chroot(const struct dso *dso, const char *filename)
530 {
531 	return filename_with_chroot(nsinfo__pid(dso->nsinfo), filename);
532 }
533 
534 static int __open_dso(struct dso *dso, struct machine *machine)
535 {
536 	int fd = -EINVAL;
537 	char *root_dir = (char *)"";
538 	char *name = malloc(PATH_MAX);
539 	bool decomp = false;
540 
541 	if (!name)
542 		return -ENOMEM;
543 
544 	mutex_lock(&dso->lock);
545 	if (machine)
546 		root_dir = machine->root_dir;
547 
548 	if (dso__read_binary_type_filename(dso, dso->binary_type,
549 					    root_dir, name, PATH_MAX))
550 		goto out;
551 
552 	if (!is_regular_file(name)) {
553 		char *new_name;
554 
555 		if (errno != ENOENT || dso->nsinfo == NULL)
556 			goto out;
557 
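		/*
		 * The file isn't visible in our mount namespace; retry with
		 * the path resolved relative to the dso owner's root
		 * directory.
		 */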
558 		new_name = dso__filename_with_chroot(dso, name);
559 		if (!new_name)
560 			goto out;
561 
562 		free(name);
563 		name = new_name;
564 	}
565 
566 	if (dso__needs_decompress(dso)) {
567 		char newpath[KMOD_DECOMP_LEN];
568 		size_t len = sizeof(newpath);
569 
570 		if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
571 			fd = -dso->load_errno;
572 			goto out;
573 		}
574 
575 		decomp = true;
576 		strcpy(name, newpath);
577 	}
578 
579 	fd = do_open(name);
580 
581 	if (decomp)
582 		unlink(name);
583 
584 out:
585 	mutex_unlock(&dso->lock);
586 	free(name);
587 	return fd;
588 }
589 
590 static void check_data_close(void);
591 
592 /**
593  * open_dso - Open DSO data file
594  * @dso: dso object
595  *
596  * Open @dso's data file descriptor and update the
597  * list/count of open DSO objects.
598  */
599 static int open_dso(struct dso *dso, struct machine *machine)
600 {
601 	int fd;
602 	struct nscookie nsc;
603 
604 	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
605 		mutex_lock(&dso->lock);
606 		nsinfo__mountns_enter(dso->nsinfo, &nsc);
607 		mutex_unlock(&dso->lock);
608 	}
609 	fd = __open_dso(dso, machine);
610 	if (dso->binary_type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
611 		nsinfo__mountns_exit(&nsc);
612 
613 	if (fd >= 0) {
614 		dso__list_add(dso);
615 		/*
616 		 * Check if we crossed the allowed number
617 		 * of opened DSOs and close one if needed.
618 		 */
619 		check_data_close();
620 	}
621 
622 	return fd;
623 }
624 
625 static void close_data_fd(struct dso *dso)
626 {
627 	if (dso->data.fd >= 0) {
628 		close(dso->data.fd);
629 		dso->data.fd = -1;
630 		dso->data.file_size = 0;
631 		dso__list_del(dso);
632 	}
633 }
634 
635 /**
636  * close_dso - Close DSO data file
637  * @dso: dso object
638  *
639  * Close @dso's data file descriptor and update the
640  * list/count of open DSO objects.
641  */
642 static void close_dso(struct dso *dso)
643 {
644 	close_data_fd(dso);
645 }
646 
647 static void close_first_dso(void)
648 {
649 	struct dso *dso;
650 
651 	dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
652 	close_dso(dso);
653 }
654 
655 static rlim_t get_fd_limit(void)
656 {
657 	struct rlimit l;
658 	rlim_t limit = 0;
659 
660 	/* Allow half of the current open fd limit. */
661 	if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
662 		if (l.rlim_cur == RLIM_INFINITY)
663 			limit = l.rlim_cur;
664 		else
665 			limit = l.rlim_cur / 2;
666 	} else {
667 		pr_err("failed to get fd limit\n");
668 		limit = 1;
669 	}
670 
671 	return limit;
672 }
673 
674 static rlim_t fd_limit;
675 
676 /*
677  * Used only by tests/dso-data.c to reset the environment
678  * for tests. I don't expect this to change during
679  * standard runtime.
680  */
681 void reset_fd_limit(void)
682 {
683 	fd_limit = 0;
684 }
685 
686 static bool may_cache_fd(void)
687 {
688 	if (!fd_limit)
689 		fd_limit = get_fd_limit();
690 
691 	if (fd_limit == RLIM_INFINITY)
692 		return true;
693 
694 	return fd_limit > (rlim_t) dso__data_open_cnt;
695 }
696 
697 /*
698  * Check and close the LRU dso if we crossed the allowed limit
699  * of open dso file descriptors. The limit is half of the
700  * RLIMIT_NOFILE limit.
701  */
702 static void check_data_close(void)
703 {
704 	bool cache_fd = may_cache_fd();
705 
706 	if (!cache_fd)
707 		close_first_dso();
708 }
709 
710 /**
711  * dso__data_close - Close DSO data file
712  * @dso: dso object
713  *
714  * External interface to close @dso's data file descriptor.
715  */
716 void dso__data_close(struct dso *dso)
717 {
718 	pthread_mutex_lock(&dso__data_open_lock);
719 	close_dso(dso);
720 	pthread_mutex_unlock(&dso__data_open_lock);
721 }
722 
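/*
 * If not already open, open the dso using its known binary type, or, when
 * the type is still unknown, probe the build-id cache and then the system
 * path; record the result in dso->data.status.
 */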
723 static void try_to_open_dso(struct dso *dso, struct machine *machine)
724 {
725 	enum dso_binary_type binary_type_data[] = {
726 		DSO_BINARY_TYPE__BUILD_ID_CACHE,
727 		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
728 		DSO_BINARY_TYPE__NOT_FOUND,
729 	};
730 	int i = 0;
731 
732 	if (dso->data.fd >= 0)
733 		return;
734 
735 	if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
736 		dso->data.fd = open_dso(dso, machine);
737 		goto out;
738 	}
739 
740 	do {
741 		dso->binary_type = binary_type_data[i++];
742 
743 		dso->data.fd = open_dso(dso, machine);
744 		if (dso->data.fd >= 0)
745 			goto out;
746 
747 	} while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
748 out:
749 	if (dso->data.fd >= 0)
750 		dso->data.status = DSO_DATA_STATUS_OK;
751 	else
752 		dso->data.status = DSO_DATA_STATUS_ERROR;
753 }
754 
755 /**
756  * dso__data_get_fd - Get dso's data file descriptor
757  * @dso: dso object
758  * @machine: machine object
759  *
760  * External interface to find the dso's file, open it and
761  * return its file descriptor.  It should be paired with
762  * dso__data_put_fd() if it returns a non-negative value.
763  */
764 int dso__data_get_fd(struct dso *dso, struct machine *machine)
765 {
766 	if (dso->data.status == DSO_DATA_STATUS_ERROR)
767 		return -1;
768 
769 	if (pthread_mutex_lock(&dso__data_open_lock) < 0)
770 		return -1;
771 
772 	try_to_open_dso(dso, machine);
773 
774 	if (dso->data.fd < 0)
775 		pthread_mutex_unlock(&dso__data_open_lock);
776 
777 	return dso->data.fd;
778 }
779 
780 void dso__data_put_fd(struct dso *dso __maybe_unused)
781 {
782 	pthread_mutex_unlock(&dso__data_open_lock);
783 }
784 
785 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
786 {
787 	u32 flag = 1 << by;
788 
789 	if (dso->data.status_seen & flag)
790 		return true;
791 
792 	dso->data.status_seen |= flag;
793 
794 	return false;
795 }
796 
797 #ifdef HAVE_LIBBPF_SUPPORT
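/*
 * Serve reads for a BPF program dso from the jited instructions saved in
 * perf_env's BPF prog info, copying at most DSO__DATA_CACHE_SIZE bytes
 * starting at @offset.
 */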
798 static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
799 {
800 	struct bpf_prog_info_node *node;
801 	ssize_t size = DSO__DATA_CACHE_SIZE;
802 	u64 len;
803 	u8 *buf;
804 
805 	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
806 	if (!node || !node->info_linear) {
807 		dso->data.status = DSO_DATA_STATUS_ERROR;
808 		return -1;
809 	}
810 
811 	len = node->info_linear->info.jited_prog_len;
812 	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;
813 
814 	if (offset >= len)
815 		return -1;
816 
817 	size = (ssize_t)min(len - offset, (u64)size);
818 	memcpy(data, buf + offset, size);
819 	return size;
820 }
821 
822 static int bpf_size(struct dso *dso)
823 {
824 	struct bpf_prog_info_node *node;
825 
826 	node = perf_env__find_bpf_prog_info(dso->bpf_prog.env, dso->bpf_prog.id);
827 	if (!node || !node->info_linear) {
828 		dso->data.status = DSO_DATA_STATUS_ERROR;
829 		return -1;
830 	}
831 
832 	dso->data.file_size = node->info_linear->info.jited_prog_len;
833 	return 0;
834 }
835 #endif // HAVE_LIBBPF_SUPPORT
836 
837 static void
838 dso_cache__free(struct dso *dso)
839 {
840 	struct rb_root *root = &dso->data.cache;
841 	struct rb_node *next = rb_first(root);
842 
843 	mutex_lock(&dso->lock);
844 	while (next) {
845 		struct dso_cache *cache;
846 
847 		cache = rb_entry(next, struct dso_cache, rb_node);
848 		next = rb_next(&cache->rb_node);
849 		rb_erase(&cache->rb_node, root);
850 		free(cache);
851 	}
852 	mutex_unlock(&dso->lock);
853 }
854 
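/*
 * The data cache is an rb-tree of DSO__DATA_CACHE_SIZE byte chunks keyed by
 * their starting file offset; a lookup for @offset matches the chunk
 * covering [cache->offset, cache->offset + DSO__DATA_CACHE_SIZE).
 */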
855 static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
856 {
857 	const struct rb_root *root = &dso->data.cache;
858 	struct rb_node * const *p = &root->rb_node;
859 	const struct rb_node *parent = NULL;
860 	struct dso_cache *cache;
861 
862 	while (*p != NULL) {
863 		u64 end;
864 
865 		parent = *p;
866 		cache = rb_entry(parent, struct dso_cache, rb_node);
867 		end = cache->offset + DSO__DATA_CACHE_SIZE;
868 
869 		if (offset < cache->offset)
870 			p = &(*p)->rb_left;
871 		else if (offset >= end)
872 			p = &(*p)->rb_right;
873 		else
874 			return cache;
875 	}
876 
877 	return NULL;
878 }
879 
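/*
 * Insert @new into the cache tree. Returns NULL on success, or the existing
 * entry if another thread already cached this offset range; the caller then
 * frees @new and uses the returned entry instead.
 */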
880 static struct dso_cache *
881 dso_cache__insert(struct dso *dso, struct dso_cache *new)
882 {
883 	struct rb_root *root = &dso->data.cache;
884 	struct rb_node **p = &root->rb_node;
885 	struct rb_node *parent = NULL;
886 	struct dso_cache *cache;
887 	u64 offset = new->offset;
888 
889 	mutex_lock(&dso->lock);
890 	while (*p != NULL) {
891 		u64 end;
892 
893 		parent = *p;
894 		cache = rb_entry(parent, struct dso_cache, rb_node);
895 		end = cache->offset + DSO__DATA_CACHE_SIZE;
896 
897 		if (offset < cache->offset)
898 			p = &(*p)->rb_left;
899 		else if (offset >= end)
900 			p = &(*p)->rb_right;
901 		else
902 			goto out;
903 	}
904 
905 	rb_link_node(&new->rb_node, parent, p);
906 	rb_insert_color(&new->rb_node, root);
907 
908 	cache = NULL;
909 out:
910 	mutex_unlock(&dso->lock);
911 	return cache;
912 }
913 
914 static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
915 				 u64 size, bool out)
916 {
917 	u64 cache_offset = offset - cache->offset;
918 	u64 cache_size   = min(cache->size - cache_offset, size);
919 
920 	if (out)
921 		memcpy(data, cache->data + cache_offset, cache_size);
922 	else
923 		memcpy(cache->data + cache_offset, data, cache_size);
924 	return cache_size;
925 }
926 
927 static ssize_t file_read(struct dso *dso, struct machine *machine,
928 			 u64 offset, char *data)
929 {
930 	ssize_t ret;
931 
932 	pthread_mutex_lock(&dso__data_open_lock);
933 
934 	/*
935 	 * dso->data.fd might be closed if another thread opened another
936 	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
937 	 */
938 	try_to_open_dso(dso, machine);
939 
940 	if (dso->data.fd < 0) {
941 		dso->data.status = DSO_DATA_STATUS_ERROR;
942 		ret = -errno;
943 		goto out;
944 	}
945 
946 	ret = pread(dso->data.fd, data, DSO__DATA_CACHE_SIZE, offset);
947 out:
948 	pthread_mutex_unlock(&dso__data_open_lock);
949 	return ret;
950 }
951 
952 static struct dso_cache *dso_cache__populate(struct dso *dso,
953 					     struct machine *machine,
954 					     u64 offset, ssize_t *ret)
955 {
956 	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
957 	struct dso_cache *cache;
958 	struct dso_cache *old;
959 
960 	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
961 	if (!cache) {
962 		*ret = -ENOMEM;
963 		return NULL;
964 	}
965 #ifdef HAVE_LIBBPF_SUPPORT
966 	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
967 		*ret = bpf_read(dso, cache_offset, cache->data);
968 	else
969 #endif
970 	if (dso->binary_type == DSO_BINARY_TYPE__OOL)
971 		*ret = DSO__DATA_CACHE_SIZE;
972 	else
973 		*ret = file_read(dso, machine, cache_offset, cache->data);
974 
975 	if (*ret <= 0) {
976 		free(cache);
977 		return NULL;
978 	}
979 
980 	cache->offset = cache_offset;
981 	cache->size   = *ret;
982 
983 	old = dso_cache__insert(dso, cache);
984 	if (old) {
985 		/* we lose the race */
986 		free(cache);
987 		cache = old;
988 	}
989 
990 	return cache;
991 }
992 
993 static struct dso_cache *dso_cache__find(struct dso *dso,
994 					 struct machine *machine,
995 					 u64 offset,
996 					 ssize_t *ret)
997 {
998 	struct dso_cache *cache = __dso_cache__find(dso, offset);
999 
1000 	return cache ? cache : dso_cache__populate(dso, machine, offset, ret);
1001 }
1002 
1003 static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
1004 			    u64 offset, u8 *data, ssize_t size, bool out)
1005 {
1006 	struct dso_cache *cache;
1007 	ssize_t ret = 0;
1008 
1009 	cache = dso_cache__find(dso, machine, offset, &ret);
1010 	if (!cache)
1011 		return ret;
1012 
1013 	return dso_cache__memcpy(cache, offset, data, size, out);
1014 }
1015 
1016 /*
1017  * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
1018  * kept in the rb_tree. Any read of already cached data is served
1019  * from the cache. Writes update the cache only, not the backing file.
1020  */
1021 static ssize_t cached_io(struct dso *dso, struct machine *machine,
1022 			 u64 offset, u8 *data, ssize_t size, bool out)
1023 {
1024 	ssize_t r = 0;
1025 	u8 *p = data;
1026 
1027 	do {
1028 		ssize_t ret;
1029 
1030 		ret = dso_cache_io(dso, machine, offset, p, size, out);
1031 		if (ret < 0)
1032 			return ret;
1033 
1034 		/* Reached EOF, return what we have. */
1035 		if (!ret)
1036 			break;
1037 
1038 		BUG_ON(ret > size);
1039 
1040 		r      += ret;
1041 		p      += ret;
1042 		offset += ret;
1043 		size   -= ret;
1044 
1045 	} while (size);
1046 
1047 	return r;
1048 }
1049 
1050 static int file_size(struct dso *dso, struct machine *machine)
1051 {
1052 	int ret = 0;
1053 	struct stat st;
1054 	char sbuf[STRERR_BUFSIZE];
1055 
1056 	pthread_mutex_lock(&dso__data_open_lock);
1057 
1058 	/*
1059 	 * dso->data.fd might be closed if another thread opened another
1060 	 * file (dso) due to the open file limit (RLIMIT_NOFILE).
1061 	 */
1062 	try_to_open_dso(dso, machine);
1063 
1064 	if (dso->data.fd < 0) {
1065 		ret = -errno;
1066 		dso->data.status = DSO_DATA_STATUS_ERROR;
1067 		goto out;
1068 	}
1069 
1070 	if (fstat(dso->data.fd, &st) < 0) {
1071 		ret = -errno;
1072 		pr_err("dso cache fstat failed: %s\n",
1073 		       str_error_r(errno, sbuf, sizeof(sbuf)));
1074 		dso->data.status = DSO_DATA_STATUS_ERROR;
1075 		goto out;
1076 	}
1077 	dso->data.file_size = st.st_size;
1078 
1079 out:
1080 	pthread_mutex_unlock(&dso__data_open_lock);
1081 	return ret;
1082 }
1083 
1084 int dso__data_file_size(struct dso *dso, struct machine *machine)
1085 {
1086 	if (dso->data.file_size)
1087 		return 0;
1088 
1089 	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1090 		return -1;
1091 #ifdef HAVE_LIBBPF_SUPPORT
1092 	if (dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO)
1093 		return bpf_size(dso);
1094 #endif
1095 	return file_size(dso, machine);
1096 }
1097 
1098 /**
1099  * dso__data_size - Return dso data size
1100  * @dso: dso object
1101  * @machine: machine object
1102  *
1103  * Return: dso data size
1104  */
1105 off_t dso__data_size(struct dso *dso, struct machine *machine)
1106 {
1107 	if (dso__data_file_size(dso, machine))
1108 		return -1;
1109 
1110 	/* For now just assume the dso data size is close to the file size */
1111 	return dso->data.file_size;
1112 }
1113 
1114 static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
1115 				      u64 offset, u8 *data, ssize_t size,
1116 				      bool out)
1117 {
1118 	if (dso__data_file_size(dso, machine))
1119 		return -1;
1120 
1121 	/* Check the offset sanity. */
1122 	if (offset > dso->data.file_size)
1123 		return -1;
1124 
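	/* Reject a u64 overflow of offset + size. */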
1125 	if (offset + size < offset)
1126 		return -1;
1127 
1128 	return cached_io(dso, machine, offset, data, size, out);
1129 }
1130 
1131 /**
1132  * dso__data_read_offset - Read data from dso file offset
1133  * @dso: dso object
1134  * @machine: machine object
1135  * @offset: file offset
1136  * @data: buffer to store data
1137  * @size: size of the @data buffer
1138  *
1139  * External interface to read data from a dso file offset. Opens the
1140  * dso data file and uses cached_io to get the data.
1141  */
1142 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
1143 			      u64 offset, u8 *data, ssize_t size)
1144 {
1145 	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1146 		return -1;
1147 
1148 	return data_read_write_offset(dso, machine, offset, data, size, true);
1149 }
1150 
1151 /**
1152  * dso__data_read_addr - Read data from dso address
1153  * @dso: dso object
1154  * @machine: machine object
1155  * @addr: virtual memory address
1156  * @data: buffer to store data
1157  * @size: size of the @data buffer
1158  *
1159  * External interface to read data from dso address.
1160  */
1161 ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
1162 			    struct machine *machine, u64 addr,
1163 			    u8 *data, ssize_t size)
1164 {
1165 	u64 offset = map__map_ip(map, addr);
1166 
1167 	return dso__data_read_offset(dso, machine, offset, data, size);
1168 }
1169 
1170 /**
1171  * dso__data_write_cache_offs - Write data to dso data cache at file offset
1172  * @dso: dso object
1173  * @machine: machine object
1174  * @offset: file offset
1175  * @data: buffer to write
1176  * @size: size of the @data buffer
1177  *
1178  * Write into the dso file data cache, but do not change the file itself.
1179  */
1180 ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
1181 				   u64 offset, const u8 *data_in, ssize_t size)
1182 {
1183 	u8 *data = (u8 *)data_in; /* cast away const to use same fns for r/w */
1184 
1185 	if (dso->data.status == DSO_DATA_STATUS_ERROR)
1186 		return -1;
1187 
1188 	return data_read_write_offset(dso, machine, offset, data, size, false);
1189 }
1190 
1191 /**
1192  * dso__data_write_cache_addr - Write data to dso data cache at dso address
1193  * @dso: dso object
1194  * @machine: machine object
1195  * @addr: virtual memory address
1196  * @data: buffer to write
1197  * @size: size of the @data buffer
1198  *
1199  * External interface to write into the dso file data cache, but do not change
1200  * the file itself.
1201  */
1202 ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
1203 				   struct machine *machine, u64 addr,
1204 				   const u8 *data, ssize_t size)
1205 {
1206 	u64 offset = map__map_ip(map, addr);
1207 
1208 	return dso__data_write_cache_offs(dso, machine, offset, data, size);
1209 }
1210 
1211 struct map *dso__new_map(const char *name)
1212 {
1213 	struct map *map = NULL;
1214 	struct dso *dso = dso__new(name);
1215 
1216 	if (dso) {
1217 		map = map__new2(0, dso);
1218 		dso__put(dso);
1219 	}
1220 
1221 	return map;
1222 }
1223 
1224 struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
1225 				    const char *short_name, int dso_type)
1226 {
1227 	/*
1228 	 * The kernel dso could be created by build_id processing.
1229 	 */
1230 	struct dso *dso = machine__findnew_dso(machine, name);
1231 
1232 	/*
1233 	 * We need to run this in all cases, since during the build_id
1234 	 * processing we had no idea this was the kernel dso.
1235 	 */
1236 	if (dso != NULL) {
1237 		dso__set_short_name(dso, short_name, false);
1238 		dso->kernel = dso_type;
1239 	}
1240 
1241 	return dso;
1242 }
1243 
1244 static void dso__set_long_name_id(struct dso *dso, const char *name, struct dso_id *id, bool name_allocated)
1245 {
1246 	struct rb_root *root = dso->root;
1247 
1248 	if (name == NULL)
1249 		return;
1250 
1251 	if (dso->long_name_allocated)
1252 		free((char *)dso->long_name);
1253 
1254 	if (root) {
1255 		rb_erase(&dso->rb_node, root);
1256 		/*
1257 		 * __dsos__findnew_link_by_longname_id() isn't guaranteed to
1258 		 * add it back, so a clean removal is required here.
1259 		 */
1260 		RB_CLEAR_NODE(&dso->rb_node);
1261 		dso->root = NULL;
1262 	}
1263 
1264 	dso->long_name		 = name;
1265 	dso->long_name_len	 = strlen(name);
1266 	dso->long_name_allocated = name_allocated;
1267 
1268 	if (root)
1269 		__dsos__findnew_link_by_longname_id(root, dso, NULL, id);
1270 }
1271 
1272 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
1273 {
1274 	dso__set_long_name_id(dso, name, NULL, name_allocated);
1275 }
1276 
1277 void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
1278 {
1279 	if (name == NULL)
1280 		return;
1281 
1282 	if (dso->short_name_allocated)
1283 		free((char *)dso->short_name);
1284 
1285 	dso->short_name		  = name;
1286 	dso->short_name_len	  = strlen(name);
1287 	dso->short_name_allocated = name_allocated;
1288 }
1289 
1290 int dso__name_len(const struct dso *dso)
1291 {
1292 	if (!dso)
1293 		return strlen("[unknown]");
1294 	if (verbose > 0)
1295 		return dso->long_name_len;
1296 
1297 	return dso->short_name_len;
1298 }
1299 
1300 bool dso__loaded(const struct dso *dso)
1301 {
1302 	return dso->loaded;
1303 }
1304 
1305 bool dso__sorted_by_name(const struct dso *dso)
1306 {
1307 	return dso->sorted_by_name;
1308 }
1309 
1310 void dso__set_sorted_by_name(struct dso *dso)
1311 {
1312 	dso->sorted_by_name = true;
1313 }
1314 
1315 struct dso *dso__new_id(const char *name, struct dso_id *id)
1316 {
1317 	struct dso *dso = calloc(1, sizeof(*dso) + strlen(name) + 1);
1318 
1319 	if (dso != NULL) {
1320 		strcpy(dso->name, name);
1321 		if (id)
1322 			dso->id = *id;
1323 		dso__set_long_name_id(dso, dso->name, id, false);
1324 		dso__set_short_name(dso, dso->name, false);
1325 		dso->symbols = RB_ROOT_CACHED;
1326 		dso->symbol_names = NULL;
1327 		dso->symbol_names_len = 0;
1328 		dso->data.cache = RB_ROOT;
1329 		dso->inlined_nodes = RB_ROOT_CACHED;
1330 		dso->srclines = RB_ROOT_CACHED;
1331 		dso->data_types = RB_ROOT;
1332 		dso->data.fd = -1;
1333 		dso->data.status = DSO_DATA_STATUS_UNKNOWN;
1334 		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
1335 		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
1336 		dso->is_64_bit = (sizeof(void *) == 8);
1337 		dso->loaded = 0;
1338 		dso->rel = 0;
1339 		dso->sorted_by_name = 0;
1340 		dso->has_build_id = 0;
1341 		dso->has_srcline = 1;
1342 		dso->a2l_fails = 1;
1343 		dso->kernel = DSO_SPACE__USER;
1344 		dso->is_kmod = 0;
1345 		dso->needs_swap = DSO_SWAP__UNSET;
1346 		dso->comp = COMP_ID__NONE;
1347 		RB_CLEAR_NODE(&dso->rb_node);
1348 		dso->root = NULL;
1349 		INIT_LIST_HEAD(&dso->node);
1350 		INIT_LIST_HEAD(&dso->data.open_entry);
1351 		mutex_init(&dso->lock);
1352 		refcount_set(&dso->refcnt, 1);
1353 	}
1354 
1355 	return dso;
1356 }
1357 
1358 struct dso *dso__new(const char *name)
1359 {
1360 	return dso__new_id(name, NULL);
1361 }
1362 
1363 void dso__delete(struct dso *dso)
1364 {
1365 	if (!RB_EMPTY_NODE(&dso->rb_node))
1366 		pr_err("DSO %s is still in rbtree when being deleted!\n",
1367 		       dso->long_name);
1368 
1369 	/* free inlines first, as they reference symbols */
1370 	inlines__tree_delete(&dso->inlined_nodes);
1371 	srcline__tree_delete(&dso->srclines);
1372 	symbols__delete(&dso->symbols);
1373 	dso->symbol_names_len = 0;
1374 	zfree(&dso->symbol_names);
1375 	annotated_data_type__tree_delete(&dso->data_types);
1376 
1377 	if (dso->short_name_allocated) {
1378 		zfree((char **)&dso->short_name);
1379 		dso->short_name_allocated = false;
1380 	}
1381 
1382 	if (dso->long_name_allocated) {
1383 		zfree((char **)&dso->long_name);
1384 		dso->long_name_allocated = false;
1385 	}
1386 
1387 	dso__data_close(dso);
1388 	auxtrace_cache__free(dso->auxtrace_cache);
1389 	dso_cache__free(dso);
1390 	dso__free_a2l(dso);
1391 	zfree(&dso->symsrc_filename);
1392 	nsinfo__zput(dso->nsinfo);
1393 	mutex_destroy(&dso->lock);
1394 	free(dso);
1395 }
1396 
1397 struct dso *dso__get(struct dso *dso)
1398 {
1399 	if (dso)
1400 		refcount_inc(&dso->refcnt);
1401 	return dso;
1402 }
1403 
1404 void dso__put(struct dso *dso)
1405 {
1406 	if (dso && refcount_dec_and_test(&dso->refcnt))
1407 		dso__delete(dso);
1408 }
1409 
1410 void dso__set_build_id(struct dso *dso, struct build_id *bid)
1411 {
1412 	dso->bid = *bid;
1413 	dso->has_build_id = 1;
1414 }
1415 
1416 bool dso__build_id_equal(const struct dso *dso, struct build_id *bid)
1417 {
1418 	if (dso->bid.size > bid->size && dso->bid.size == BUILD_ID_SIZE) {
1419 		/*
1420 		 * For backward compatibility, allow a build-id with
1421 		 * trailing zeros.
1422 		 */
1423 		return !memcmp(dso->bid.data, bid->data, bid->size) &&
1424 			!memchr_inv(&dso->bid.data[bid->size], 0,
1425 				    dso->bid.size - bid->size);
1426 	}
1427 
1428 	return dso->bid.size == bid->size &&
1429 	       memcmp(dso->bid.data, bid->data, dso->bid.size) == 0;
1430 }
1431 
1432 void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1433 {
1434 	char path[PATH_MAX];
1435 
1436 	if (machine__is_default_guest(machine))
1437 		return;
1438 	sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1439 	if (sysfs__read_build_id(path, &dso->bid) == 0)
1440 		dso->has_build_id = true;
1441 }
1442 
1443 int dso__kernel_module_get_build_id(struct dso *dso,
1444 				    const char *root_dir)
1445 {
1446 	char filename[PATH_MAX];
1447 	/*
1448 	 * kernel module short names are of the form "[module]" and
1449 	 * we need just "module" here.
1450 	 */
1451 	const char *name = dso->short_name + 1;
1452 
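	/* "%.*s" with strlen(name) - 1 drops the trailing ']' of "[module]" */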
1453 	snprintf(filename, sizeof(filename),
1454 		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
1455 		 root_dir, (int)strlen(name) - 1, name);
1456 
1457 	if (sysfs__read_build_id(filename, &dso->bid) == 0)
1458 		dso->has_build_id = true;
1459 
1460 	return 0;
1461 }
1462 
1463 static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
1464 {
1465 	char sbuild_id[SBUILD_ID_SIZE];
1466 
1467 	build_id__sprintf(&dso->bid, sbuild_id);
1468 	return fprintf(fp, "%s", sbuild_id);
1469 }
1470 
1471 size_t dso__fprintf(struct dso *dso, FILE *fp)
1472 {
1473 	struct rb_node *nd;
1474 	size_t ret = fprintf(fp, "dso: %s (", dso->short_name);
1475 
1476 	if (dso->short_name != dso->long_name)
1477 		ret += fprintf(fp, "%s, ", dso->long_name);
1478 	ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1479 	ret += dso__fprintf_buildid(dso, fp);
1480 	ret += fprintf(fp, ")\n");
1481 	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
1482 		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1483 		ret += symbol__fprintf(pos, fp);
1484 	}
1485 
1486 	return ret;
1487 }
1488 
1489 enum dso_type dso__type(struct dso *dso, struct machine *machine)
1490 {
1491 	int fd;
1492 	enum dso_type type = DSO__TYPE_UNKNOWN;
1493 
1494 	fd = dso__data_get_fd(dso, machine);
1495 	if (fd >= 0) {
1496 		type = dso__type_fd(fd);
1497 		dso__data_put_fd(dso);
1498 	}
1499 
1500 	return type;
1501 }
1502 
1503 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
1504 {
1505 	int idx, errnum = dso->load_errno;
1506 	/*
1507 	 * This must have the same ordering as enum dso_load_errno.
1508 	 */
1509 	static const char *dso_load__error_str[] = {
1510 	"Internal tools/perf/ library error",
1511 	"Invalid ELF file",
1512 	"Can not read build id",
1513 	"Mismatching build id",
1514 	"Decompression failure",
1515 	};
1516 
1517 	BUG_ON(buflen == 0);
1518 
1519 	if (errnum >= 0) {
1520 		const char *err = str_error_r(errnum, buf, buflen);
1521 
1522 		if (err != buf)
1523 			scnprintf(buf, buflen, "%s", err);
1524 
1525 		return 0;
1526 	}
1527 
1528 	if (errnum <  __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
1529 		return -1;
1530 
1531 	idx = errnum - __DSO_LOAD_ERRNO__START;
1532 	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
1533 	return 0;
1534 }
1535