1 // SPDX-License-Identifier: GPL-2.0
2 #include <asm/bug.h>
3 #include <linux/kernel.h>
4 #include <linux/string.h>
5 #include <linux/zalloc.h>
6 #include <sys/time.h>
7 #include <sys/resource.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11 #include <errno.h>
12 #include <fcntl.h>
13 #include <stdlib.h>
14 #ifdef HAVE_LIBBPF_SUPPORT
15 #include <bpf/libbpf.h>
16 #include "bpf-event.h"
17 #include "bpf-utils.h"
18 #endif
19 #include "compress.h"
20 #include "env.h"
21 #include "namespaces.h"
22 #include "path.h"
23 #include "map.h"
24 #include "symbol.h"
25 #include "srcline.h"
26 #include "dso.h"
27 #include "dsos.h"
28 #include "machine.h"
29 #include "auxtrace.h"
30 #include "util.h" /* O_CLOEXEC for older systems */
31 #include "debug.h"
32 #include "string2.h"
33 #include "vdso.h"
34 #include "annotate-data.h"
35 #include "libdw.h"
36
/*
 * printf templates tried, in order, when resolving a .gnu_debuglink
 * file. Each takes two string arguments: the dso's directory and the
 * debuglink filename. "%.0s" consumes (and discards) the directory so
 * the bare filename is tried first.
 */
static const char * const debuglink_paths[] = {
	"%.0s%s",
	"%s/%s",
	"%s/.debug/%s",
	"/usr/lib/debug%s/%s"
};
43
/*
 * Replace @dso's namespace info with @nsi, dropping the reference on
 * the previous nsinfo. Ownership of @nsi transfers to @dso.
 */
void dso__set_nsinfo(struct dso *dso, struct nsinfo *nsi)
{
	nsinfo__put(RC_CHK_ACCESS(dso)->nsinfo);
	RC_CHK_ACCESS(dso)->nsinfo = nsi;
}
49
/*
 * Return a single character identifying where @dso's symbol table came
 * from (shown e.g. in verbose report output), or '!' when the dso is
 * NULL or no symtab was found.
 */
char dso__symtab_origin(const struct dso *dso)
{
	static const char origin[] = {
		[DSO_BINARY_TYPE__KALLSYMS]			= 'k',
		[DSO_BINARY_TYPE__VMLINUX]			= 'v',
		[DSO_BINARY_TYPE__JAVA_JIT]			= 'j',
		[DSO_BINARY_TYPE__DEBUGLINK]			= 'l',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE]		= 'B',
		[DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO]	= 'D',
		[DSO_BINARY_TYPE__FEDORA_DEBUGINFO]		= 'f',
		[DSO_BINARY_TYPE__UBUNTU_DEBUGINFO]		= 'u',
		[DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO]	= 'x',
		[DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO]	= 'o',
		[DSO_BINARY_TYPE__BUILDID_DEBUGINFO]		= 'b',
		[DSO_BINARY_TYPE__SYSTEM_PATH_DSO]		= 'd',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE]		= 'K',
		[DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP]	= 'm',
		[DSO_BINARY_TYPE__GUEST_KALLSYMS]		= 'g',
		[DSO_BINARY_TYPE__GUEST_KMODULE]		= 'G',
		[DSO_BINARY_TYPE__GUEST_KMODULE_COMP]		= 'M',
		[DSO_BINARY_TYPE__GUEST_VMLINUX]		= 'V',
		[DSO_BINARY_TYPE__GNU_DEBUGDATA]		= 'n',
	};

	if (dso == NULL || dso__symtab_type(dso) == DSO_BINARY_TYPE__NOT_FOUND)
		return '!';
	return origin[dso__symtab_type(dso)];
}
78
/*
 * True when @dso is backed by an on-disk object file (ELF image,
 * debuginfo, kcore, ...). Synthetic sources such as kallsyms, JIT
 * maps and BPF programs return false. Unknown/NOT_FOUND types default
 * to true.
 */
bool dso__is_object_file(const struct dso *dso)
{
	switch (dso__binary_type(dso)) {
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
		return false;
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
	case DSO_BINARY_TYPE__GNU_DEBUGDATA:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		return true;
	}
}
112
/*
 * Build in @filename (of @size bytes) the path where the file for
 * @dso of the given binary @type should live. @root_dir is the guest
 * machine root (used for guest kmodules). Returns 0 when a candidate
 * path was produced (for DEBUGLINK only when the file also exists),
 * -1 when the type has no file or the path could not be built.
 */
int dso__read_binary_type_filename(const struct dso *dso,
				   enum dso_binary_type type,
				   const char *root_dir, char *filename, size_t size)
{
	char build_id_hex[SBUILD_ID_SIZE];
	int ret = 0;
	size_t len;

	switch (type) {
	case DSO_BINARY_TYPE__DEBUGLINK:
	{
		const char *last_slash;
		char dso_dir[PATH_MAX];
		char symfile[PATH_MAX];
		unsigned int i;

		/* Split the dso path into directory + basename. */
		len = __symbol__join_symfs(filename, size, dso__long_name(dso));
		last_slash = filename + len;
		while (last_slash != filename && *last_slash != '/')
			last_slash--;

		/*
		 * NOTE(review): if the joined path were truncated (len >= size)
		 * or longer than PATH_MAX, this copy could run past dso_dir —
		 * assumes paths fit in PATH_MAX; confirm against callers.
		 */
		strncpy(dso_dir, filename, last_slash - filename);
		dso_dir[last_slash-filename] = '\0';

		if (!is_regular_file(filename)) {
			ret = -1;
			break;
		}

		/* Read the .gnu_debuglink section to get the debug file name. */
		ret = filename__read_debuglink(filename, symfile, PATH_MAX);
		if (ret)
			break;

		/* Check predefined locations where debug file might reside */
		ret = -1;
		for (i = 0; i < ARRAY_SIZE(debuglink_paths); i++) {
			snprintf(filename, size,
				 debuglink_paths[i], dso_dir, symfile);
			if (is_regular_file(filename)) {
				ret = 0;
				break;
			}
		}

		break;
	}
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
		if (dso__build_id_filename(dso, filename, size, false) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
		if (dso__build_id_filename(dso, filename, size, true) == NULL)
			ret = -1;
		break;

	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
		/* /usr/lib/debug/<path>.debug */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s.debug", dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
		/* /usr/lib/debug/<path> (no .debug suffix) */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
		/*
		 * Ubuntu can mixup /usr/lib with /lib, putting debuginfo in
		 * /usr/lib/debug/lib when it is expected to be in
		 * /usr/lib/debug/usr/lib
		 */
		if (strlen(dso__long_name(dso)) < 9 ||
		    strncmp(dso__long_name(dso), "/usr/lib/", 9)) {
			ret = -1;
			break;
		}
		/* +4 skips "/usr", mapping /usr/lib/... to /usr/lib/debug/lib/... */
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug");
		snprintf(filename + len, size - len, "%s", dso__long_name(dso) + 4);
		break;

	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	{
		/* <dir>/.debug/<basename> next to the binary. */
		const char *last_slash;
		size_t dir_size;

		last_slash = dso__long_name(dso) + dso__long_name_len(dso);
		while (last_slash != dso__long_name(dso) && *last_slash != '/')
			last_slash--;

		len = __symbol__join_symfs(filename, size, "");
		/* +2: keep the trailing '/' and leave room for the NUL. */
		dir_size = last_slash - dso__long_name(dso) + 2;
		if (dir_size > (size - len)) {
			ret = -1;
			break;
		}
		len += scnprintf(filename + len, dir_size, "%s", dso__long_name(dso));
		len += scnprintf(filename + len , size - len, ".debug%s",
								last_slash);
		break;
	}

	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		if (!dso__has_build_id(dso)) {
			ret = -1;
			break;
		}

		/* /usr/lib/debug/.build-id/ab/cdef....debug */
		build_id__snprintf(dso__bid(dso), build_id_hex, sizeof(build_id_hex));
		len = __symbol__join_symfs(filename, size, "/usr/lib/debug/.build-id/");
		snprintf(filename + len, size - len, "%.2s/%s.debug",
			 build_id_hex, build_id_hex + 2);
		break;

	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__GNU_DEBUGDATA:
		__symbol__join_symfs(filename, size, dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
		/* Guest modules live under the guest's root_dir. */
		path__join3(filename, size, symbol_conf.symfs,
			    root_dir, dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
		__symbol__join_symfs(filename, size, dso__long_name(dso));
		break;

	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
		snprintf(filename, size, "%s", dso__long_name(dso));
		break;

	default:
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__JAVA_JIT:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__NOT_FOUND:
		/* These types have no backing file. */
		ret = -1;
		break;
	}

	return ret;
}
264
enum {
	/* Index 0 of compressions[] means "no/unknown compression". */
	COMP_ID__NONE = 0,
};

/*
 * Table of supported kmodule compression formats. Indexed by the
 * compression id stored in the dso (see is_supported_compression()).
 * Entries exist only for formats compiled in; the list is terminated
 * by a NULL fmt.
 */
static const struct {
	const char *fmt;					/* filename extension, e.g. "gz" */
	int (*decompress)(const char *input, int output);	/* decompress input path to output fd */
	bool (*is_compressed)(const char *input);		/* magic-byte check on input path */
} compressions[] = {
	[COMP_ID__NONE] = { .fmt = NULL, },
#ifdef HAVE_ZLIB_SUPPORT
	{ "gz", gzip_decompress_to_file, gzip_is_compressed },
#endif
#ifdef HAVE_LZMA_SUPPORT
	{ "xz", lzma_decompress_to_file, lzma_is_compressed },
#endif
	{ NULL, NULL, NULL },
};
283
is_supported_compression(const char * ext)284 static int is_supported_compression(const char *ext)
285 {
286 unsigned i;
287
288 for (i = 1; compressions[i].fmt; i++) {
289 if (!strcmp(ext, compressions[i].fmt))
290 return i;
291 }
292 return COMP_ID__NONE;
293 }
294
/*
 * Decide whether @pathname names a kernel module for the given
 * @cpumode. User, hypervisor and guest-user samples can never hit a
 * module; for kernel-side (and unknown) cpumodes the path is parsed
 * for a ".ko" suffix. On parse failure the path is conservatively
 * assumed to be a module.
 */
bool is_kernel_module(const char *pathname, int cpumode)
{
	struct kmod_path m;
	int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;

	/* Callers are expected to pass an already-masked cpumode. */
	WARN_ONCE(mode != cpumode,
		  "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
		  cpumode);

	switch (mode) {
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_HYPERVISOR:
	case PERF_RECORD_MISC_GUEST_USER:
		return false;
	/* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
	default:
		if (kmod_path__parse(&m, pathname)) {
			pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
					pathname);
			return true;
		}
	}

	return m.kmod;
}
320
/* True when @dso's symtab comes from a compressed kernel module. */
bool dso__needs_decompress(struct dso *dso)
{
	return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
		dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
}
326
/*
 * Decompress @name (compression id @comp in compressions[]) into a
 * temporary file and return an open fd to it; on failure -1 is
 * returned and *err is set. When @pathname is non-NULL the temporary
 * file is kept and its path copied there (up to @len bytes),
 * otherwise it is unlinked immediately and lives only via the fd.
 */
int filename__decompress(const char *name, char *pathname,
			 size_t len, int comp, int *err)
{
	char tmpbuf[] = KMOD_DECOMP_NAME;
	int fd = -1;

	/*
	 * We have proper compression id for DSO and yet the file
	 * behind the 'name' can still be plain uncompressed object.
	 *
	 * The reason is behind the logic we open the DSO object files,
	 * when we try all possible 'debug' objects until we find the
	 * data. So even if the DSO is represented by 'krava.xz' module,
	 * we can end up here opening ~/.debug/....23432432/debug' file
	 * which is not compressed.
	 *
	 * To keep this transparent, we detect this and return the file
	 * descriptor to the uncompressed file.
	 */
	if (!compressions[comp].is_compressed(name))
		return open(name, O_RDONLY);

	/* mkstemp() rewrites tmpbuf in place with the actual path. */
	fd = mkstemp(tmpbuf);
	if (fd < 0) {
		*err = errno;
		return -1;
	}

	if (compressions[comp].decompress(name, fd)) {
		*err = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE;
		close(fd);
		fd = -1;
	}

	/* Drop the temp file unless the caller asked for its path. */
	if (!pathname || (fd < 0))
		unlink(tmpbuf);

	if (pathname && (fd >= 0))
		strlcpy(pathname, tmpbuf, len);

	return fd;
}
369
/*
 * Decompress the module file @name for @dso. Fails fast with -1 when
 * the dso is not a compressed kmodule or has no known compression id;
 * otherwise forwards to filename__decompress() and returns its fd.
 */
static int decompress_kmodule(struct dso *dso, const char *name,
			      char *pathname, size_t len)
{
	if (!dso__needs_decompress(dso) || dso__comp(dso) == COMP_ID__NONE)
		return -1;

	return filename__decompress(name, pathname, len, dso__comp(dso), dso__load_errno(dso));
}
381
/*
 * Decompress @name to an anonymous temporary file and return its fd
 * (the file itself is unlinked), or -1 on failure.
 */
int dso__decompress_kmodule_fd(struct dso *dso, const char *name)
{
	return decompress_kmodule(dso, name, NULL, 0);
}
386
/*
 * Decompress the module @name into a temporary file and store its
 * path in @pathname (of size @len). Returns 0 on success, -1 on
 * failure. The descriptor is not kept open; callers work with the
 * returned path.
 */
int dso__decompress_kmodule_path(struct dso *dso, const char *name,
				 char *pathname, size_t len)
{
	int fd = decompress_kmodule(dso, name, pathname, len);

	/* Avoid close(-1) (spurious EBADF) when decompression failed. */
	if (fd < 0)
		return -1;

	close(fd);
	return 0;
}
395
396 /*
397 * Parses kernel module specified in @path and updates
398 * @m argument like:
399 *
400 * @comp - true if @path contains supported compression suffix,
401 * false otherwise
402 * @kmod - true if @path contains '.ko' suffix in right position,
403 * false otherwise
 * @name  - if (@alloc_name && @kmod) is true, it contains strdup-ed base name
 *          of the kernel module without suffixes, otherwise strdup-ed
 *          base name of @path
407 * @ext - if (@alloc_ext && @comp) is true, it contains strdup-ed string
408 * the compression suffix
409 *
410 * Returns 0 if there's no strdup error, -ENOMEM otherwise.
411 */
int __kmod_path__parse(struct kmod_path *m, const char *path,
		       bool alloc_name)
{
	const char *name = strrchr(path, '/');
	const char *ext  = strrchr(path, '.');
	bool is_simple_name = false;

	memset(m, 0x0, sizeof(*m));
	/* Work on the basename only. */
	name = name ? name + 1 : path;

	/*
	 * '.' is also a valid character for module name. For example:
	 * [aaa.bbb] is a valid module name. '[' should have higher
	 * priority than '.ko' suffix.
	 *
	 * The kernel names are from machine__mmap_name. Such
	 * name should belong to kernel itself, not kernel module.
	 */
	if (name[0] == '[') {
		is_simple_name = true;
		if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
		    (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
		    (strncmp(name, "[vdso]", 6) == 0) ||
		    (strncmp(name, "[vdso32]", 8) == 0) ||
		    (strncmp(name, "[vdsox32]", 9) == 0) ||
		    (strncmp(name, "[vsyscall]", 10) == 0)) {
			m->kmod = false;

		} else
			m->kmod = true;
	}

	/* No extension, just return name. */
	if ((ext == NULL) || is_simple_name) {
		if (alloc_name) {
			m->name = strdup(name);
			return m->name ? 0 : -ENOMEM;
		}
		return 0;
	}

	m->comp = is_supported_compression(ext + 1);
	if (m->comp > COMP_ID__NONE)
		/* Step back over ".ko" in e.g. "foo.ko.gz" (ext was at ".gz"). */
		ext -= 3;

	/* Check .ko extension only if there's enough name left. */
	if (ext > name)
		m->kmod = !strncmp(ext, ".ko", 3);

	if (alloc_name) {
		if (m->kmod) {
			/* Module names are reported bracketed: "[name]". */
			if (asprintf(&m->name, "[%.*s]", (int) (ext - name), name) == -1)
				return -ENOMEM;
		} else {
			if (asprintf(&m->name, "%s", name) == -1)
				return -ENOMEM;
		}

		/* Kernel reports module names with '_' instead of '-'. */
		strreplace(m->name, '-', '_');
	}

	return 0;
}
475
/*
 * Mark @dso as a kernel module based on parsed kmod_path @m: pick the
 * host/guest symtab type, bump it to the _COMP variant for compressed
 * modules, and take the module's bracketed name as the short name.
 */
void dso__set_module_info(struct dso *dso, struct kmod_path *m,
			  struct machine *machine)
{
	if (machine__is_host(machine))
		dso__set_symtab_type(dso, DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE);
	else
		dso__set_symtab_type(dso, DSO_BINARY_TYPE__GUEST_KMODULE);

	/* _KMODULE_COMP should be next to _KMODULE */
	if (m->kmod && m->comp) {
		dso__set_symtab_type(dso, dso__symtab_type(dso) + 1);
		dso__set_comp(dso, m->comp);
	}

	dso__set_is_kmod(dso);
	/* NOTE(review): strdup() result is not NULL-checked here — confirm
	 * dso__set_short_name() tolerates NULL on OOM. */
	dso__set_short_name(dso, strdup(m->name), true);
}
493
/*
 * Global list of open DSOs and the counter.
 * _dso__data_open_lock protects both the LRU-ordered list (oldest
 * first) and the count of cached descriptors; it is lazily
 * initialized via dso__data_open_lock().
 */
struct mutex _dso__data_open_lock;
static LIST_HEAD(dso__data_open);
static long dso__data_open_cnt GUARDED_BY(_dso__data_open_lock);
500
/* One-time initializer for _dso__data_open_lock (via pthread_once). */
static void dso__data_open_lock_init(void)
{
	mutex_init(&_dso__data_open_lock);
}
505
dso__data_open_lock(void)506 static struct mutex *dso__data_open_lock(void) LOCK_RETURNED(_dso__data_open_lock)
507 {
508 static pthread_once_t data_open_lock_once = PTHREAD_ONCE_INIT;
509
510 pthread_once(&data_open_lock_once, dso__data_open_lock_init);
511
512 return &_dso__data_open_lock;
513 }
514
/* Append @dso to the open-dso LRU list and bump the open counter. */
static void dso__list_add(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	list_add_tail(&dso__data(dso)->open_entry, &dso__data_open);
#ifdef REFCNT_CHECKING
	dso__data(dso)->dso = dso__get(dso);
#endif
	/* Assume the dso is part of dsos, hence the optional reference count above. */
	assert(dso__dsos(dso));
	dso__data_open_cnt++;
}
525
/* Unlink @dso from the open-dso list and decrement the open counter. */
static void dso__list_del(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	list_del_init(&dso__data(dso)->open_entry);
#ifdef REFCNT_CHECKING
	/*
	 * Drop the lock around dso__put(): releasing the last reference
	 * may re-enter dso data teardown which takes this lock.
	 */
	mutex_unlock(dso__data_open_lock());
	dso__put(dso__data(dso)->dso);
	mutex_lock(dso__data_open_lock());
#endif
	WARN_ONCE(dso__data_open_cnt <= 0,
		  "DSO data fd counter out of bounds.");
	dso__data_open_cnt--;
}
538
539 static void close_first_dso(void);
540
do_open(char * name)541 static int do_open(char *name) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
542 {
543 do {
544 int fd = open(name, O_RDONLY|O_CLOEXEC);
545
546 if (fd >= 0)
547 return fd;
548
549 pr_debug("dso open failed: %m\n");
550 if (!dso__data_open_cnt || errno != EMFILE)
551 break;
552
553 close_first_dso();
554 } while (1);
555
556 return -1;
557 }
558
/*
 * Rewrite @filename as seen through @dso's namespace via
 * /proc/<pid>/root. Returns a newly allocated string (caller frees).
 */
char *dso__filename_with_chroot(const struct dso *dso, const char *filename)
{
	return filename_with_chroot(nsinfo__pid(dso__nsinfo_const(dso)), filename);
}
563
dso__get_filename(struct dso * dso,const char * root_dir,bool * decomp)564 static char *dso__get_filename(struct dso *dso, const char *root_dir,
565 bool *decomp)
566 {
567 char *name = malloc(PATH_MAX);
568
569 *decomp = false;
570
571 if (name == NULL)
572 return NULL;
573
574 if (dso__read_binary_type_filename(dso, dso__binary_type(dso),
575 root_dir, name, PATH_MAX))
576 goto out;
577
578 if (!is_regular_file(name)) {
579 char *new_name;
580
581 if (errno != ENOENT || dso__nsinfo(dso) == NULL)
582 goto out;
583
584 new_name = dso__filename_with_chroot(dso, name);
585 if (!new_name)
586 goto out;
587
588 free(name);
589 name = new_name;
590 }
591
592 if (dso__needs_decompress(dso)) {
593 char newpath[KMOD_DECOMP_LEN];
594 size_t len = sizeof(newpath);
595
596 if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) {
597 errno = *dso__load_errno(dso);
598 goto out;
599 }
600
601 *decomp = true;
602 strcpy(name, newpath);
603 }
604 return name;
605
606 out:
607 free(name);
608 return NULL;
609 }
610
/*
 * Resolve @dso's file and open it. Returns the open fd, or a negative
 * errno on failure. Takes the per-dso lock around filename resolution
 * and the open; temp files from decompression are unlinked once open
 * (the fd keeps them alive).
 */
static int __open_dso(struct dso *dso, struct machine *machine)
	EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	int fd = -EINVAL;
	char *name;
	bool decomp = false;

	mutex_lock(dso__lock(dso));

	name = dso__get_filename(dso, machine ? machine->root_dir : "", &decomp);
	if (name)
		fd = do_open(name);
	else
		fd = -errno;

	if (decomp)
		unlink(name);

	mutex_unlock(dso__lock(dso));
	free(name);
	return fd;
}
633
634 static void check_data_close(void);
635
/**
 * open_dso - Open DSO data file
 * @dso: dso object
 * @machine: machine object
 *
 * Open @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
static int open_dso(struct dso *dso, struct machine *machine)
	EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	int fd;
	struct nscookie nsc;

	/*
	 * Build-id cache paths live on the host, so only enter the
	 * dso's mount namespace for the other binary types.
	 */
	if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE) {
		mutex_lock(dso__lock(dso));
		nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
		mutex_unlock(dso__lock(dso));
	}
	fd = __open_dso(dso, machine);
	if (dso__binary_type(dso) != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		nsinfo__mountns_exit(&nsc);

	if (fd >= 0) {
		dso__list_add(dso);
		/*
		 * Check if we crossed the allowed number
		 * of opened DSOs and close one if needed.
		 */
		check_data_close();
	}

	return fd;
}
669
close_data_fd(struct dso * dso)670 static void close_data_fd(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
671 {
672 if (dso__data(dso)->fd >= 0) {
673 close(dso__data(dso)->fd);
674 dso__data(dso)->fd = -1;
675 dso__data(dso)->file_size = 0;
676 dso__list_del(dso);
677 }
678 }
679
/**
 * close_dso - Close DSO data file
 * @dso: dso object
 *
 * Close @dso's data file descriptor and updates
 * list/count of open DSO objects.
 */
close_dso(struct dso * dso)687 static void close_dso(struct dso *dso) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
688 {
689 close_data_fd(dso);
690 }
691
/*
 * Evict the least recently opened dso (head of the LRU list).
 * Callers must ensure the list is non-empty (dso__data_open_cnt > 0).
 */
static void close_first_dso(void) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	struct dso_data *dso_data;
	struct dso *dso;

	dso_data = list_first_entry(&dso__data_open, struct dso_data, open_entry);
#ifdef REFCNT_CHECKING
	dso = dso_data->dso;
#else
	dso = container_of(dso_data, struct dso, data);
#endif
	close_dso(dso);
}
705
/*
 * Compute the budget of dso file descriptors we may keep cached:
 * half of RLIMIT_NOFILE, the full value when the limit is unlimited,
 * or 1 when the limit cannot be queried.
 */
static rlim_t get_fd_limit(void)
{
	struct rlimit l;

	if (getrlimit(RLIMIT_NOFILE, &l) != 0) {
		pr_err("failed to get fd limit\n");
		return 1;
	}

	if (l.rlim_cur == RLIM_INFINITY)
		return l.rlim_cur;

	/* Allow half of the current open fd limit. */
	return l.rlim_cur / 2;
}
724
/* Cached fd budget; 0 means "not computed yet" (see may_cache_fd()). */
static rlim_t fd_limit;

/*
 * Used only by tests/dso-data.c to reset the environment
 * for tests. I don't expect we should change this during
 * standard runtime.
 */
void reset_fd_limit(void)
{
	fd_limit = 0;
}
736
/*
 * True when we may keep another dso fd open without exceeding the
 * budget. The budget is computed lazily on first use.
 */
static bool may_cache_fd(void) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	if (!fd_limit)
		fd_limit = get_fd_limit();

	if (fd_limit == RLIM_INFINITY)
		return true;

	return fd_limit > (rlim_t) dso__data_open_cnt;
}
747
748 /*
749 * Check and close LRU dso if we crossed allowed limit
750 * for opened dso file descriptors. The limit is half
751 * of the RLIMIT_NOFILE files opened.
752 */
check_data_close(void)753 static void check_data_close(void) EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
754 {
755 bool cache_fd = may_cache_fd();
756
757 if (!cache_fd)
758 close_first_dso();
759 }
760
761 /**
762 * dso__data_close - Close DSO data file
763 * @dso: dso object
764 *
765 * External interface to close @dso's data file descriptor.
766 */
void dso__data_close(struct dso *dso)
{
	/* Take the global lock; close_dso() requires it held. */
	mutex_lock(dso__data_open_lock());
	close_dso(dso);
	mutex_unlock(dso__data_open_lock());
}
773
/*
 * Ensure @dso's data fd is open if at all possible. With a known
 * binary type only that type is tried; otherwise the build-id cache
 * and then the system path are probed in order. Sets the dso data
 * status to OK/ERROR accordingly.
 */
static void try_to_open_dso(struct dso *dso, struct machine *machine)
	EXCLUSIVE_LOCKS_REQUIRED(_dso__data_open_lock)
{
	enum dso_binary_type binary_type_data[] = {
		DSO_BINARY_TYPE__BUILD_ID_CACHE,
		DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
		DSO_BINARY_TYPE__NOT_FOUND,
	};
	int i = 0;
	struct dso_data *dso_data = dso__data(dso);

	/* Already open — nothing to do. */
	if (dso_data->fd >= 0)
		return;

	if (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND) {
		dso_data->fd = open_dso(dso, machine);
		goto out;
	}

	/* Probe candidate types; the last entry (NOT_FOUND) ends the loop. */
	do {
		dso__set_binary_type(dso, binary_type_data[i++]);

		dso_data->fd = open_dso(dso, machine);
		if (dso_data->fd >= 0)
			goto out;

	} while (dso__binary_type(dso) != DSO_BINARY_TYPE__NOT_FOUND);
out:
	if (dso_data->fd >= 0)
		dso_data->status = DSO_DATA_STATUS_OK;
	else
		dso_data->status = DSO_DATA_STATUS_ERROR;
}
807
808 /**
809 * dso__data_get_fd - Get dso's data file descriptor
810 * @dso: dso object
811 * @machine: machine object
812 *
813 * External interface to find dso's file, open it and
814 * returns file descriptor. It should be paired with
815 * dso__data_put_fd() if it returns non-negative value.
816 */
bool dso__data_get_fd(struct dso *dso, struct machine *machine, int *fd)
{
	*fd = -1;
	/* A previously failed open is sticky — don't retry. */
	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
		return false;

	mutex_lock(dso__data_open_lock());

	try_to_open_dso(dso, machine);

	*fd = dso__data(dso)->fd;
	/* On success the lock stays held until dso__data_put_fd(). */
	if (*fd >= 0)
		return true;

	mutex_unlock(dso__data_open_lock());
	return false;
}
834
/* Release the lock taken by a successful dso__data_get_fd(). */
void dso__data_put_fd(struct dso *dso __maybe_unused)
{
	mutex_unlock(dso__data_open_lock());
}
839
dso__data_status_seen(struct dso * dso,enum dso_data_status_seen by)840 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
841 {
842 u32 flag = 1 << by;
843
844 if (dso__data(dso)->status_seen & flag)
845 return true;
846
847 dso__data(dso)->status_seen |= flag;
848
849 return false;
850 }
851
852 #ifdef HAVE_LIBBPF_SUPPORT
/*
 * Read up to one cache chunk of a BPF program's jited instructions at
 * @offset into @data. Returns bytes copied, or -1 when the prog info
 * is missing or @offset is past the end (dso status set to ERROR in
 * the former case).
 */
static ssize_t bpf_read(struct dso *dso, u64 offset, char *data)
{
	struct bpf_prog_info_node *node;
	ssize_t size = DSO__DATA_CACHE_SIZE;
	struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);
	u64 len;
	u8 *buf;

	node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
	if (!node || !node->info_linear) {
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		return -1;
	}

	len = node->info_linear->info.jited_prog_len;
	buf = (u8 *)(uintptr_t)node->info_linear->info.jited_prog_insns;

	if (offset >= len)
		return -1;

	/* Clamp to what remains of the program. */
	size = (ssize_t)min(len - offset, (u64)size);
	memcpy(data, buf + offset, size);
	return size;
}
877
bpf_size(struct dso * dso)878 static int bpf_size(struct dso *dso)
879 {
880 struct bpf_prog_info_node *node;
881 struct dso_bpf_prog *dso_bpf_prog = dso__bpf_prog(dso);
882
883 node = perf_env__find_bpf_prog_info(dso_bpf_prog->env, dso_bpf_prog->id);
884 if (!node || !node->info_linear) {
885 dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
886 return -1;
887 }
888
889 dso__data(dso)->file_size = node->info_linear->info.jited_prog_len;
890 return 0;
891 }
892 #endif // HAVE_LIBBPF_SUPPORT
893
/* Free every chunk in @dso's data cache rbtree (under the dso lock). */
static void
dso_cache__free(struct dso *dso)
{
	struct rb_root *root = &dso__data(dso)->cache;
	struct rb_node *next = rb_first(root);

	mutex_lock(dso__lock(dso));
	while (next) {
		struct dso_cache *cache;

		cache = rb_entry(next, struct dso_cache, rb_node);
		/* Advance before erasing the current node. */
		next = rb_next(&cache->rb_node);
		rb_erase(&cache->rb_node, root);
		free(cache);
	}
	mutex_unlock(dso__lock(dso));
}
911
/*
 * Look up the cached chunk covering @offset, or NULL if not cached.
 * NOTE(review): the tree is read without taking the dso lock; appears
 * to rely on insert-only tree mutation under the lock — confirm.
 */
static struct dso_cache *__dso_cache__find(struct dso *dso, u64 offset)
{
	const struct rb_root *root = &dso__data(dso)->cache;
	struct rb_node * const *p = &root->rb_node;
	const struct rb_node *parent = NULL;
	struct dso_cache *cache;

	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			return cache;
	}

	return NULL;
}
936
/*
 * Insert chunk @new into @dso's cache tree under the dso lock.
 * Returns NULL when @new was inserted, or the already-present chunk
 * covering the same offset when another thread won the race (caller
 * then frees @new and uses the returned one).
 */
static struct dso_cache *
dso_cache__insert(struct dso *dso, struct dso_cache *new)
{
	struct rb_root *root = &dso__data(dso)->cache;
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct dso_cache *cache;
	u64 offset = new->offset;

	mutex_lock(dso__lock(dso));
	while (*p != NULL) {
		u64 end;

		parent = *p;
		cache = rb_entry(parent, struct dso_cache, rb_node);
		end = cache->offset + DSO__DATA_CACHE_SIZE;

		if (offset < cache->offset)
			p = &(*p)->rb_left;
		else if (offset >= end)
			p = &(*p)->rb_right;
		else
			/* Lost the race: an overlapping chunk exists. */
			goto out;
	}

	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);

	cache = NULL;
out:
	mutex_unlock(dso__lock(dso));
	return cache;
}
970
dso_cache__memcpy(struct dso_cache * cache,u64 offset,u8 * data,u64 size,bool out)971 static ssize_t dso_cache__memcpy(struct dso_cache *cache, u64 offset, u8 *data,
972 u64 size, bool out)
973 {
974 u64 cache_offset = offset - cache->offset;
975 u64 cache_size = min(cache->size - cache_offset, size);
976
977 if (out)
978 memcpy(data, cache->data + cache_offset, cache_size);
979 else
980 memcpy(cache->data + cache_offset, data, cache_size);
981 return cache_size;
982 }
983
/*
 * Read one cache chunk from @dso's backing file at @offset into
 * @data, (re)opening the fd first if needed. Returns bytes read or a
 * negative errno.
 */
static ssize_t file_read(struct dso *dso, struct machine *machine,
			 u64 offset, char *data)
{
	ssize_t ret;

	mutex_lock(dso__data_open_lock());

	/*
	 * dso__data(dso)->fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);

	if (dso__data(dso)->fd < 0) {
		dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
		ret = -errno;
		goto out;
	}

	ret = pread(dso__data(dso)->fd, data, DSO__DATA_CACHE_SIZE, offset);
out:
	mutex_unlock(dso__data_open_lock());
	return ret;
}
1008
/*
 * Allocate and fill the cache chunk containing @offset, reading from
 * the BPF prog info, synthesizing zeroes (OOL), or reading the file
 * as appropriate, then insert it into the cache tree. Returns the
 * resident chunk (possibly one another thread inserted first) or NULL
 * with *ret holding the error / EOF indication.
 */
static struct dso_cache *dso_cache__populate(struct dso *dso,
					     struct machine *machine,
					     u64 offset, ssize_t *ret)
{
	/* Round down to the chunk boundary. */
	u64 cache_offset = offset & DSO__DATA_CACHE_MASK;
	struct dso_cache *cache;
	struct dso_cache *old;

	cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
	if (!cache) {
		*ret = -ENOMEM;
		return NULL;
	}
#ifdef HAVE_LIBBPF_SUPPORT
	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
		*ret = bpf_read(dso, cache_offset, cache->data);
	else
#endif
	if (dso__binary_type(dso) == DSO_BINARY_TYPE__OOL)
		/* OOL chunks stay zero-filled (from zalloc). */
		*ret = DSO__DATA_CACHE_SIZE;
	else
		*ret = file_read(dso, machine, cache_offset, cache->data);

	if (*ret <= 0) {
		free(cache);
		return NULL;
	}

	cache->offset = cache_offset;
	cache->size   = *ret;

	old = dso_cache__insert(dso, cache);
	if (old) {
		/* we lose the race */
		free(cache);
		cache = old;
	}

	return cache;
}
1049
/*
 * Return the cache chunk covering @offset, populating it from the
 * backing data on a miss. NULL on failure, with *ret set.
 */
static struct dso_cache *dso_cache__find(struct dso *dso,
					 struct machine *machine,
					 u64 offset,
					 ssize_t *ret)
{
	struct dso_cache *hit = __dso_cache__find(dso, offset);

	if (hit)
		return hit;

	return dso_cache__populate(dso, machine, offset, ret);
}
1059
/*
 * Perform one read (@out) or write (!@out) against the cache chunk
 * covering @offset. Returns bytes transferred, 0 at EOF, or a
 * negative error when the chunk could not be populated.
 */
static ssize_t dso_cache_io(struct dso *dso, struct machine *machine,
			    u64 offset, u8 *data, ssize_t size, bool out)
{
	ssize_t err = 0;
	struct dso_cache *cache = dso_cache__find(dso, machine, offset, &err);

	if (!cache)
		return err;

	return dso_cache__memcpy(cache, offset, data, size, out);
}
1072
1073 /*
1074 * Reads and caches dso data DSO__DATA_CACHE_SIZE size chunks
1075 * in the rb_tree. Any read to already cached data is served
1076 * by cached data. Writes update the cache only, not the backing file.
1077 */
/*
 * Transfer @size bytes starting at @offset through the chunk cache,
 * chunk by chunk. Returns total bytes transferred (may be short at
 * EOF) or a negative error from the first failing chunk.
 */
static ssize_t cached_io(struct dso *dso, struct machine *machine,
			 u64 offset, u8 *data, ssize_t size, bool out)
{
	ssize_t r = 0;
	u8 *p = data;

	do {
		ssize_t ret;

		ret = dso_cache_io(dso, machine, offset, p, size, out);
		if (ret < 0)
			return ret;

		/* Reached EOF, return what we have. */
		if (!ret)
			break;

		BUG_ON(ret > size);

		r      += ret;
		p      += ret;
		offset += ret;
		size   -= ret;

	} while (size);

	return r;
}
1106
file_size(struct dso * dso,struct machine * machine)1107 static int file_size(struct dso *dso, struct machine *machine)
1108 {
1109 int ret = 0;
1110 struct stat st;
1111
1112 mutex_lock(dso__data_open_lock());
1113
1114 /*
1115 * dso__data(dso)->fd might be closed if other thread opened another
1116 * file (dso) due to open file limit (RLIMIT_NOFILE).
1117 */
1118 try_to_open_dso(dso, machine);
1119
1120 if (dso__data(dso)->fd < 0) {
1121 ret = -errno;
1122 dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
1123 goto out;
1124 }
1125
1126 if (fstat(dso__data(dso)->fd, &st) < 0) {
1127 ret = -errno;
1128 pr_err("dso cache fstat failed: %m\n");
1129 dso__data(dso)->status = DSO_DATA_STATUS_ERROR;
1130 goto out;
1131 }
1132 dso__data(dso)->file_size = st.st_size;
1133
1134 out:
1135 mutex_unlock(dso__data_open_lock());
1136 return ret;
1137 }
1138
dso__data_file_size(struct dso * dso,struct machine * machine)1139 int dso__data_file_size(struct dso *dso, struct machine *machine)
1140 {
1141 if (dso__data(dso)->file_size)
1142 return 0;
1143
1144 if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
1145 return -1;
1146 #ifdef HAVE_LIBBPF_SUPPORT
1147 if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO)
1148 return bpf_size(dso);
1149 #endif
1150 return file_size(dso, machine);
1151 }
1152
1153 /**
1154 * dso__data_size - Return dso data size
1155 * @dso: dso object
1156 * @machine: machine object
1157 *
1158 * Return: dso data size
1159 */
off_t dso__data_size(struct dso *dso, struct machine *machine)
{
	if (dso__data_file_size(dso, machine) != 0)
		return -1;

	/* For now just estimate dso data size is close to file size */
	return dso__data(dso)->file_size;
}
1168
/*
 * Common driver for cache reads/writes at a file offset: validates the
 * request against the file size, then delegates to cached_io().
 */
static ssize_t data_read_write_offset(struct dso *dso, struct machine *machine,
				      u64 offset, u8 *data, ssize_t size,
				      bool out)
{
	if (dso__data_file_size(dso, machine))
		return -1;

	/* Reject offsets past EOF and u64 wrap-around of offset + size. */
	if (offset > dso__data(dso)->file_size || offset + size < offset)
		return -1;

	return cached_io(dso, machine, offset, data, size, out);
}
1185
1186 /**
1187 * dso__data_read_offset - Read data from dso file offset
1188 * @dso: dso object
1189 * @machine: machine object
1190 * @offset: file offset
1191 * @data: buffer to store data
1192 * @size: size of the @data buffer
1193 *
 * External interface to read data from dso file offset. Open
 * dso data file and use cached_io to get the data.
1196 */
ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
			      u64 offset, u8 *data, ssize_t size)
{
	/* Fail fast if this dso's data was already found to be unreadable. */
	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size,
				      /*out=*/true);
}
1205
dso_swap_type__from_elf_data(unsigned char eidata)1206 static enum dso_swap_type dso_swap_type__from_elf_data(unsigned char eidata)
1207 {
1208 static const unsigned int endian = 1;
1209
1210 switch (eidata) {
1211 case ELFDATA2LSB:
1212 /* We are big endian, DSO is little endian. */
1213 return (*(unsigned char const *)&endian != 1) ? DSO_SWAP__YES : DSO_SWAP__NO;
1214 case ELFDATA2MSB:
1215 /* We are little endian, DSO is big endian. */
1216 return (*(unsigned char const *)&endian != 0) ? DSO_SWAP__YES : DSO_SWAP__NO;
1217 default:
1218 return DSO_SWAP__UNSET;
1219 }
1220 }
1221
1222 /* Reads e_machine from fd, optionally caching data in dso. */
uint16_t dso__read_e_machine(struct dso *optional_dso, int fd, uint32_t *e_flags)
{
	uint16_t e_machine = EM_NONE;
	unsigned char e_ident[EI_NIDENT];
	enum dso_swap_type swap_type;
	bool need_e_flags;

	/* Make the out-parameter well defined on every failure path. */
	if (e_flags)
		*e_flags = 0;

	/* e_ident sits at offset 0 in both 32-bit and 64-bit ELF headers. */
	{
		_Static_assert(offsetof(Elf32_Ehdr, e_ident) == 0, "Unexpected offset");
		_Static_assert(offsetof(Elf64_Ehdr, e_ident) == 0, "Unexpected offset");
	}
	if (pread(fd, &e_ident, sizeof(e_ident), 0) != sizeof(e_ident))
		return EM_NONE; // Read failed.

	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0)
		return EM_NONE; // Not an ELF file.

	if (e_ident[EI_CLASS] == ELFCLASSNONE || e_ident[EI_CLASS] >= ELFCLASSNUM)
		return EM_NONE; // Bad ELF class (32 or 64-bit objects).

	if (e_ident[EI_VERSION] != EV_CURRENT)
		return EM_NONE; // Bad ELF version.

	swap_type = dso_swap_type__from_elf_data(e_ident[EI_DATA]);
	if (swap_type == DSO_SWAP__UNSET)
		return EM_NONE; // Bad ELF data encoding.

	/* Cache the need for swapping. */
	if (optional_dso) {
		assert(dso__needs_swap(optional_dso) == DSO_SWAP__UNSET ||
		       dso__needs_swap(optional_dso) == swap_type);
		dso__set_needs_swap(optional_dso, swap_type);
	}

	/* e_machine is at the same offset (18) in both header layouts. */
	{
		_Static_assert(offsetof(Elf32_Ehdr, e_machine) == 18, "Unexpected offset");
		_Static_assert(offsetof(Elf64_Ehdr, e_machine) == 18, "Unexpected offset");
	}
	if (pread(fd, &e_machine, sizeof(e_machine), 18) != sizeof(e_machine))
		return EM_NONE; // e_machine read failed.

	/* e_machine is stored in the file's byte order; convert for the host. */
	e_machine = DSO_SWAP_TYPE__SWAP(swap_type, uint16_t, e_machine);
	if (e_machine >= EM_NUM)
		return EM_NONE; // Bad ELF machine number.

#ifdef NDEBUG
	/* In production code the e_flags are only needed on CSKY. */
	need_e_flags = e_flags && e_machine == EM_CSKY;
#else
	/* Debug code will always read the e_flags. */
	need_e_flags = e_flags != NULL;
#endif
	if (need_e_flags) {
		/* Unlike e_machine, the e_flags offset depends on the ELF class. */
		off_t offset = e_ident[EI_CLASS] == ELFCLASS32
			? offsetof(Elf32_Ehdr, e_flags)
			: offsetof(Elf64_Ehdr, e_flags);

		if (pread(fd, e_flags, sizeof(*e_flags), offset) != sizeof(*e_flags)) {
			*e_flags = 0;
			return EM_NONE; // e_flags read failed.
		}
	}
	return e_machine;
}
1290
/*
 * Determine the ELF e_machine (and optionally e_flags) for a dso,
 * reading them from the backing file where one exists.
 */
uint16_t dso__e_machine(struct dso *dso, struct machine *machine, uint32_t *e_flags)
{
	uint16_t e_machine = EM_NONE;
	int fd;

	switch (dso__binary_type(dso)) {
	/* These dso types have no ELF file to parse; report host values. */
	case DSO_BINARY_TYPE__KALLSYMS:
	case DSO_BINARY_TYPE__GUEST_KALLSYMS:
	case DSO_BINARY_TYPE__VMLINUX:
	case DSO_BINARY_TYPE__GUEST_VMLINUX:
	case DSO_BINARY_TYPE__GUEST_KMODULE:
	case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
	case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
	case DSO_BINARY_TYPE__KCORE:
	case DSO_BINARY_TYPE__GUEST_KCORE:
	case DSO_BINARY_TYPE__BPF_PROG_INFO:
	case DSO_BINARY_TYPE__BPF_IMAGE:
	case DSO_BINARY_TYPE__OOL:
	case DSO_BINARY_TYPE__JAVA_JIT:
		if (e_flags)
			*e_flags = EF_HOST;
		return EM_HOST;
	/* File-backed dso types: read the ELF header below. */
	case DSO_BINARY_TYPE__DEBUGLINK:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE:
	case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
	case DSO_BINARY_TYPE__GNU_DEBUGDATA:
	case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
	case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
	case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
	case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
	case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
		break;
	case DSO_BINARY_TYPE__NOT_FOUND:
	default:
		if (e_flags)
			*e_flags = 0;
		return EM_NONE;
	}

	mutex_lock(dso__data_open_lock());

	/*
	 * dso__data(dso)->fd might be closed if other thread opened another
	 * file (dso) due to open file limit (RLIMIT_NOFILE).
	 */
	try_to_open_dso(dso, machine);
	fd = dso__data(dso)->fd;
	if (fd >= 0)
		e_machine = dso__read_e_machine(dso, fd, e_flags);
	else if (e_flags)
		*e_flags = 0;

	mutex_unlock(dso__data_open_lock());
	return e_machine;
}
1348
1349 /**
1350 * dso__data_read_addr - Read data from dso address
1351 * @dso: dso object
1352 * @machine: machine object
 * @addr: virtual memory address
1354 * @data: buffer to store data
1355 * @size: size of the @data buffer
1356 *
1357 * External interface to read data from dso address.
1358 */
ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
			    struct machine *machine, u64 addr,
			    u8 *data, ssize_t size)
{
	/* Translate the virtual address to a file offset and read there. */
	return dso__data_read_offset(dso, machine, map__map_ip(map, addr),
				     data, size);
}
1367
1368 /**
1369 * dso__data_write_cache_offs - Write data to dso data cache at file offset
1370 * @dso: dso object
1371 * @machine: machine object
1372 * @offset: file offset
1373 * @data: buffer to write
1374 * @size: size of the @data buffer
1375 *
1376 * Write into the dso file data cache, but do not change the file itself.
1377 */
ssize_t dso__data_write_cache_offs(struct dso *dso, struct machine *machine,
				   u64 offset, const u8 *data_in, ssize_t size)
{
	/* cast away const to use same fns for r/w */
	u8 *data = (u8 *)data_in;

	/* Fail fast if this dso's data was already found to be unreadable. */
	if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR)
		return -1;

	return data_read_write_offset(dso, machine, offset, data, size,
				      /*out=*/false);
}
1388
1389 /**
1390 * dso__data_write_cache_addr - Write data to dso data cache at dso address
1391 * @dso: dso object
1392 * @machine: machine object
 * @addr: virtual memory address
1394 * @data: buffer to write
1395 * @size: size of the @data buffer
1396 *
1397 * External interface to write into the dso file data cache, but do not change
1398 * the file itself.
1399 */
ssize_t dso__data_write_cache_addr(struct dso *dso, struct map *map,
				   struct machine *machine, u64 addr,
				   const u8 *data, ssize_t size)
{
	/* Translate the virtual address to a file offset and delegate. */
	return dso__data_write_cache_offs(dso, machine,
					  map__map_ip(map, addr), data, size);
}
1408
dso__new_map(const char * name)1409 struct map *dso__new_map(const char *name)
1410 {
1411 struct map *map = NULL;
1412 struct dso *dso = dso__new(name);
1413
1414 if (dso) {
1415 map = map__new2(0, dso);
1416 dso__put(dso);
1417 }
1418
1419 return map;
1420 }
1421
machine__findnew_kernel(struct machine * machine,const char * name,const char * short_name,int dso_type)1422 struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
1423 const char *short_name, int dso_type)
1424 {
1425 /*
1426 * The kernel dso could be created by build_id processing.
1427 */
1428 struct dso *dso = machine__findnew_dso(machine, name);
1429
1430 /*
1431 * We need to run this in all cases, since during the build_id
1432 * processing we had no idea this was the kernel dso.
1433 */
1434 if (dso != NULL) {
1435 dso__set_short_name(dso, short_name, false);
1436 dso__set_kernel(dso, dso_type);
1437 }
1438
1439 return dso;
1440 }
1441
/*
 * Unlocked helper: free any previously owned long name, install @name and
 * record (via name_allocated) whether the dso now owns it.
 */
static void __dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
{
	if (dso__long_name_allocated(dso))
		free((char *)dso__long_name(dso));

	RC_CHK_ACCESS(dso)->long_name = name;
	RC_CHK_ACCESS(dso)->long_name_len = strlen(name);
	dso__set_long_name_allocated(dso, name_allocated);
}
1451
static void dso__set_long_name_id(struct dso *dso, const char *name, bool name_allocated)
{
	struct dsos *dsos = dso__dsos(dso);

	if (name == NULL)
		return;

	if (dsos == NULL) {
		__dso__set_long_name_id(dso, name, name_allocated);
		return;
	}

	/*
	 * Rename under the dsos write lock so the non-atomic rename
	 * cannot be observed half-done, and flag the collection as
	 * needing a re-sort since its key changed.
	 */
	down_write(&dsos->lock);
	__dso__set_long_name_id(dso, name, name_allocated);
	dsos->sorted = false;
	up_write(&dsos->lock);
}
1472
/*
 * Compare two dso ids field-group by field-group, only using groups that
 * are valid on both sides.  Note the reversed sense: the larger value
 * sorts first (returns -1).
 */
static int __dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
{
	if (a->mmap2_valid && b->mmap2_valid) {
		if (a->maj != b->maj)
			return a->maj > b->maj ? -1 : 1;
		if (a->min != b->min)
			return a->min > b->min ? -1 : 1;
		if (a->ino != b->ino)
			return a->ino > b->ino ? -1 : 1;
	}
	if (a->mmap2_ino_generation_valid && b->mmap2_ino_generation_valid) {
		if (a->ino_generation != b->ino_generation)
			return a->ino_generation > b->ino_generation ? -1 : 1;
	}
	if (build_id__is_defined(&a->build_id) && build_id__is_defined(&b->build_id)) {
		/* Shorter build ids sort first, then compare bytewise. */
		if (a->build_id.size != b->build_id.size)
			return a->build_id.size < b->build_id.size ? -1 : 1;
		return memcmp(a->build_id.data, b->build_id.data, a->build_id.size);
	}
	return 0;
}
1496
/*
 * An id with no valid fields; dso_id__cmp() treats it as comparing equal
 * to anything.
 */
const struct dso_id dso_id_empty = {
	{
		.maj = 0,
		.min = 0,
		.ino = 0,
		.ino_generation = 0,
	},
	.mmap2_valid = false,
	.mmap2_ino_generation_valid = false,
	{
		.size = 0,
	}
};
1510
__dso__improve_id(struct dso * dso,const struct dso_id * id)1511 void __dso__improve_id(struct dso *dso, const struct dso_id *id)
1512 {
1513 struct dsos *dsos = dso__dsos(dso);
1514 struct dso_id *dso_id = dso__id(dso);
1515 bool changed = false;
1516
1517 /* dsos write lock held by caller. */
1518
1519 if (id->mmap2_valid && !dso_id->mmap2_valid) {
1520 dso_id->maj = id->maj;
1521 dso_id->min = id->min;
1522 dso_id->ino = id->ino;
1523 dso_id->mmap2_valid = true;
1524 changed = true;
1525 }
1526 if (id->mmap2_ino_generation_valid && !dso_id->mmap2_ino_generation_valid) {
1527 dso_id->ino_generation = id->ino_generation;
1528 dso_id->mmap2_ino_generation_valid = true;
1529 changed = true;
1530 }
1531 if (build_id__is_defined(&id->build_id) && !build_id__is_defined(&dso_id->build_id)) {
1532 dso_id->build_id = id->build_id;
1533 changed = true;
1534 }
1535 if (changed && dsos)
1536 dsos->sorted = false;
1537 }
1538
dso_id__cmp(const struct dso_id * a,const struct dso_id * b)1539 int dso_id__cmp(const struct dso_id *a, const struct dso_id *b)
1540 {
1541 if (a == &dso_id_empty || b == &dso_id_empty) {
1542 /* There is no valid data to compare so the comparison always returns identical. */
1543 return 0;
1544 }
1545
1546 return __dso_id__cmp(a, b);
1547 }
1548
/* Compare two dsos by their id fields (see __dso_id__cmp for ordering). */
int dso__cmp_id(struct dso *a, struct dso *b)
{
	return __dso_id__cmp(dso__id(a), dso__id(b));
}
1553
/*
 * Replace the dso's long name.  When name_allocated is true the dso takes
 * ownership of @name and will free it later.
 */
void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
{
	dso__set_long_name_id(dso, name, name_allocated);
}
1558
/*
 * Unlocked helper: free any previously owned short name, install @name and
 * record (via name_allocated) whether the dso now owns it.
 */
static void __dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	if (dso__short_name_allocated(dso))
		free((char *)dso__short_name(dso));

	RC_CHK_ACCESS(dso)->short_name = name;
	RC_CHK_ACCESS(dso)->short_name_len = strlen(name);
	dso__set_short_name_allocated(dso, name_allocated);
}
1568
void dso__set_short_name(struct dso *dso, const char *name, bool name_allocated)
{
	struct dsos *dsos = dso__dsos(dso);

	if (name == NULL)
		return;

	if (dsos == NULL) {
		__dso__set_short_name(dso, name, name_allocated);
		return;
	}

	/*
	 * Rename under the dsos write lock so the non-atomic rename
	 * cannot be observed half-done, and flag the collection as
	 * needing a re-sort since its key changed.
	 */
	down_write(&dsos->lock);
	__dso__set_short_name(dso, name, name_allocated);
	dsos->sorted = false;
	up_write(&dsos->lock);
}
1589
/* Length of the name that would be printed for @dso (NULL-safe). */
int dso__name_len(const struct dso *dso)
{
	if (dso == NULL)
		return strlen("[unknown]");

	/* Verbose output uses the long name, so report its length. */
	return verbose > 0 ? dso__long_name_len(dso) : dso__short_name_len(dso);
}
1599
/* Accessor for the dso's 'loaded' flag. */
bool dso__loaded(const struct dso *dso)
{
	return RC_CHK_ACCESS(dso)->loaded;
}
1604
/* Accessor for the dso's 'sorted_by_name' flag. */
bool dso__sorted_by_name(const struct dso *dso)
{
	return RC_CHK_ACCESS(dso)->sorted_by_name;
}
1609
/* Mark the dso as having its symbol names sorted — TODO confirm which
 * structure the flag refers to; only the flag itself is set here. */
void dso__set_sorted_by_name(struct dso *dso)
{
	RC_CHK_ACCESS(dso)->sorted_by_name = true;
}
1614
/*
 * Allocate and initialize a dso named @name, optionally seeding its id
 * from @id.  The name is stored inline after the struct.  Returns a
 * reference-counted handle with refcount 1, or NULL on allocation failure.
 */
struct dso *dso__new_id(const char *name, const struct dso_id *id)
{
	RC_STRUCT(dso) *dso = zalloc(sizeof(*dso) + strlen(name) + 1);
	struct dso *res;
	struct dso_data *data;

	if (!dso)
		return NULL;

	if (ADD_RC_CHK(res, dso)) {
		strcpy(dso->name, name);
		if (id)
			dso->id = *id;
		dso__set_long_name_id(res, dso->name, false);
		dso__set_short_name(res, dso->name, false);
		dso->symbols = RB_ROOT_CACHED;
		dso->symbol_names = NULL;
		dso->symbol_names_len = 0;
		dso->inlined_nodes = RB_ROOT_CACHED;
		dso->srclines = RB_ROOT_CACHED;
		dso->data_types = RB_ROOT;
		dso->global_vars = RB_ROOT;
		dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
		dso->is_64_bit = (sizeof(void *) == 8);
		dso->loaded = 0;
		dso->rel = 0;
		dso->sorted_by_name = 0;
		dso->has_srcline = 1;
		dso->a2l_fails = 1;
		dso->kernel = DSO_SPACE__USER;
		dso->is_kmod = 0;
		dso->needs_swap = DSO_SWAP__UNSET;
		dso->comp = COMP_ID__NONE;
		mutex_init(&dso->lock);
		refcount_set(&dso->refcnt, 1);
		/*
		 * Initialize the data state exactly once here (fd/status
		 * used to be redundantly assigned twice).
		 */
		data = &dso->data;
		data->cache = RB_ROOT;
		data->fd = -1;
		data->status = DSO_DATA_STATUS_UNKNOWN;
		INIT_LIST_HEAD(&data->open_entry);
#ifdef REFCNT_CHECKING
		data->dso = NULL; /* Set when on the open_entry list. */
#endif
	}
	return res;
}
1664
/* Create a dso with the given name and no id (see dso__new_id). */
struct dso *dso__new(const char *name)
{
	return dso__new_id(name, NULL);
}
1669
/* Tear down a dso once its last reference is dropped (see dso__put). */
void dso__delete(struct dso *dso)
{
	/* A dso still linked into a dsos collection indicates a refcount bug. */
	if (dso__dsos(dso))
		pr_err("DSO %s is still in rbtree when being deleted!\n", dso__long_name(dso));

	/* free inlines first, as they reference symbols */
	inlines__tree_delete(&RC_CHK_ACCESS(dso)->inlined_nodes);
	srcline__tree_delete(&RC_CHK_ACCESS(dso)->srclines);
	symbols__delete(&RC_CHK_ACCESS(dso)->symbols);
	RC_CHK_ACCESS(dso)->symbol_names_len = 0;
	zfree(&RC_CHK_ACCESS(dso)->symbol_names);
	annotated_data_type__tree_delete(dso__data_types(dso));
	global_var_type__tree_delete(dso__global_vars(dso));

	/* Names are only owned (and freed) when flagged as allocated. */
	if (RC_CHK_ACCESS(dso)->short_name_allocated) {
		zfree((char **)&RC_CHK_ACCESS(dso)->short_name);
		RC_CHK_ACCESS(dso)->short_name_allocated = false;
	}

	if (RC_CHK_ACCESS(dso)->long_name_allocated) {
		zfree((char **)&RC_CHK_ACCESS(dso)->long_name);
		RC_CHK_ACCESS(dso)->long_name_allocated = false;
	}

	/* Release the data fd, caches and auxiliary helper state. */
	dso__data_close(dso);
	auxtrace_cache__free(RC_CHK_ACCESS(dso)->auxtrace_cache);
	dso_cache__free(dso);
	dso__free_a2l(dso);
	dso__free_libdw(dso);
	dso__free_symsrc_filename(dso);
	nsinfo__zput(RC_CHK_ACCESS(dso)->nsinfo);
	mutex_destroy(dso__lock(dso));
	RC_CHK_FREE(dso);
}
1704
/* Take a reference: bump the refcount and return a handle to the dso. */
struct dso *dso__get(struct dso *dso)
{
	struct dso *result;

	if (RC_CHK_GET(result, dso))
		refcount_inc(&RC_CHK_ACCESS(dso)->refcnt);

	return result;
}
1714
/* Drop a reference, deleting the dso when the last one goes away. */
void dso__put(struct dso *dso)
{
#ifdef REFCNT_CHECKING
	/*
	 * NOTE(review): at refcount 2 the open data fd presumably holds
	 * the other reference; closing it here lets the final put reach
	 * dso__delete() — confirm against dso__data_open/close.
	 */
	if (dso && dso__data(dso) && refcount_read(&RC_CHK_ACCESS(dso)->refcnt) == 2)
		dso__data_close(dso);
#endif
	if (dso && refcount_dec_and_test(&RC_CHK_ACCESS(dso)->refcnt))
		dso__delete(dso);
	else
		RC_CHK_PUT(dso);
}
1726
dso__swap_init(struct dso * dso,unsigned char eidata)1727 int dso__swap_init(struct dso *dso, unsigned char eidata)
1728 {
1729 enum dso_swap_type type = dso_swap_type__from_elf_data(eidata);
1730
1731 dso__set_needs_swap(dso, type);
1732 if (type == DSO_SWAP__UNSET) {
1733 pr_err("unrecognized DSO data encoding %d\n", eidata);
1734 return -EINVAL;
1735 }
1736 return 0;
1737 }
1738
/* Overwrite the dso's build id with @bid (struct copy). */
void dso__set_build_id(struct dso *dso, const struct build_id *bid)
{
	dso__id(dso)->build_id = *bid;
}
1743
/*
 * Compare the dso's build id with @bid.  A full-sized (BUILD_ID_SIZE)
 * dso build id may still match a shorter @bid when all its extra
 * trailing bytes are zero.
 */
bool dso__build_id_equal(const struct dso *dso, const struct build_id *bid)
{
	const struct build_id *dso_bid = dso__bid(dso);

	if (dso_bid->size > bid->size && dso_bid->size == BUILD_ID_SIZE) {
		/*
		 * For the backward compatibility, it allows a build-id has
		 * trailing zeros.
		 */
		return !memcmp(dso_bid->data, bid->data, bid->size) &&
			!memchr_inv(&dso_bid->data[bid->size], 0,
				    dso_bid->size - bid->size);
	}

	return dso_bid->size == bid->size &&
	       memcmp(dso_bid->data, bid->data, dso_bid->size) == 0;
}
1761
dso__read_running_kernel_build_id(struct dso * dso,struct machine * machine)1762 void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine)
1763 {
1764 char path[PATH_MAX];
1765 struct build_id bid = { .size = 0, };
1766
1767 if (machine__is_default_guest(machine))
1768 return;
1769 sprintf(path, "%s/sys/kernel/notes", machine->root_dir);
1770 sysfs__read_build_id(path, &bid);
1771 dso__set_build_id(dso, &bid);
1772 }
1773
/*
 * Read a kernel module's build id from its sysfs notes file and store it
 * in the dso.  Always returns 0; if the read does not fill @bid it keeps
 * its initialized empty state (size 0).
 */
int dso__kernel_module_get_build_id(struct dso *dso,
				    const char *root_dir)
{
	char filename[PATH_MAX];
	struct build_id bid = { .size = 0, };
	/*
	 * kernel module short names are of the form "[module]" and
	 * we need just "module" here.
	 */
	const char *name = dso__short_name(dso) + 1;

	/* "%.*s" with strlen(name) - 1 also strips the trailing ']'. */
	snprintf(filename, sizeof(filename),
		 "%s/sys/module/%.*s/notes/.note.gnu.build-id",
		 root_dir, (int)strlen(name) - 1, name);

	sysfs__read_build_id(filename, &bid);
	dso__set_build_id(dso, &bid);
	return 0;
}
1793
/* Format the dso's build id into a string and print it to @fp;
 * returns the number of characters written. */
static size_t dso__fprintf_buildid(struct dso *dso, FILE *fp)
{
	char sbuild_id[SBUILD_ID_SIZE];

	build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
	return fprintf(fp, "%s", sbuild_id);
}
1801
dso__fprintf(struct dso * dso,FILE * fp)1802 size_t dso__fprintf(struct dso *dso, FILE *fp)
1803 {
1804 struct rb_node *nd;
1805 size_t ret = fprintf(fp, "dso: %s (", dso__short_name(dso));
1806
1807 if (dso__short_name(dso) != dso__long_name(dso))
1808 ret += fprintf(fp, "%s, ", dso__long_name(dso));
1809 ret += fprintf(fp, "%sloaded, ", dso__loaded(dso) ? "" : "NOT ");
1810 ret += dso__fprintf_buildid(dso, fp);
1811 ret += fprintf(fp, ")\n");
1812 for (nd = rb_first_cached(dso__symbols(dso)); nd; nd = rb_next(nd)) {
1813 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
1814 ret += symbol__fprintf(pos, fp);
1815 }
1816
1817 return ret;
1818 }
1819
dso__type(struct dso * dso,struct machine * machine)1820 enum dso_type dso__type(struct dso *dso, struct machine *machine)
1821 {
1822 int fd = -1;
1823 enum dso_type type = DSO__TYPE_UNKNOWN;
1824
1825 if (dso__data_get_fd(dso, machine, &fd)) {
1826 type = dso__type_fd(fd);
1827 dso__data_put_fd(dso);
1828 }
1829
1830 return type;
1831 }
1832
/*
 * Format a human-readable message for the dso's load errno into @buf.
 * Non-negative values are plain errnos (rendered via %m); values in the
 * __DSO_LOAD_ERRNO__ range index dso_load__error_str.  Returns 0 on
 * success, -1 for an out-of-range error code.
 */
int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
{
	int idx, errnum = *dso__load_errno(dso);
	/*
	 * This must have a same ordering as the enum dso_load_errno.
	 */
	static const char *dso_load__error_str[] = {
		"Internal tools/perf/ library error",
		"Invalid ELF file",
		"Can not read build id",
		"Mismatching build id",
		"Decompression failure",
	};

	BUG_ON(buflen == 0);

	if (errnum >= 0) {
		/* A regular errno: let the libc render its message via %m. */
		errno = errnum;
		scnprintf(buf, buflen, "%m");

		return 0;
	}

	if (errnum < __DSO_LOAD_ERRNO__START || errnum >= __DSO_LOAD_ERRNO__END)
		return -1;

	idx = errnum - __DSO_LOAD_ERRNO__START;
	scnprintf(buf, buflen, "%s", dso_load__error_str[idx]);
	return 0;
}
1863
/*
 * Parse the tid out of a perf JIT map path of the form
 * "/tmp/perf-<tid>.map".  Returns true and stores the tid on a match.
 */
bool perf_pid_map_tid(const char *dso_name, int *tid)
{
	int matched = sscanf(dso_name, "/tmp/perf-%d.map", tid);

	return matched == 1;
}
1868
/* Does @dso_name look like a perf JIT map path ("/tmp/perf-<tid>.map")? */
bool is_perf_pid_map_name(const char *dso_name)
{
	int unused_tid;

	/* Only the match result matters; the parsed tid is discarded. */
	return perf_pid_map_tid(dso_name, &unused_tid);
}
1875
1876 struct find_file_offset_data {
1877 u64 ip;
1878 u64 offset;
1879 };
1880
1881 /* This will be called for each PHDR in an ELF binary */
find_file_offset(u64 start,u64 len,u64 pgoff,void * arg)1882 static int find_file_offset(u64 start, u64 len, u64 pgoff, void *arg)
1883 {
1884 struct find_file_offset_data *data = arg;
1885
1886 if (start <= data->ip && data->ip < start + len) {
1887 data->offset = pgoff + data->ip - start;
1888 return 1;
1889 }
1890 return 0;
1891 }
1892
/*
 * Read [start, start + len) of @symfs_filename into a freshly malloc'd
 * buffer, translating the address to a file offset via the file's
 * program headers (find_file_offset).  On success *out_buf owns the
 * allocation; on failure NULL is returned with errno set.
 */
static const u8 *__dso__read_symbol(struct dso *dso, const char *symfs_filename,
				    u64 start, size_t len,
				    u8 **out_buf, u64 *out_buf_len, bool *is_64bit)
{
	struct nscookie nsc;
	int fd;
	ssize_t count;
	struct find_file_offset_data data = {
		.ip = start,
	};
	u8 *code_buf = NULL;
	int saved_errno;

	/*
	 * Open inside the dso's mount namespace; preserve open()'s errno
	 * across the namespace switch back.
	 */
	nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
	fd = open(symfs_filename, O_RDONLY);
	saved_errno = errno;
	nsinfo__mountns_exit(&nsc);
	if (fd < 0) {
		errno = saved_errno;
		return NULL;
	}
	if (file__read_maps(fd, /*exe=*/true, find_file_offset, &data, is_64bit) <= 0) {
		close(fd);
		errno = ENOENT;
		return NULL;
	}
	code_buf = malloc(len);
	if (code_buf == NULL) {
		close(fd);
		errno = ENOMEM;
		return NULL;
	}
	/* A short read counts as failure: callers need the whole range. */
	count = pread(fd, code_buf, len, data.offset);
	saved_errno = errno;
	close(fd);
	if ((u64)count != len) {
		free(code_buf);
		errno = saved_errno;
		return NULL;
	}
	*out_buf = code_buf;
	*out_buf_len = len;
	return code_buf;
}
1937
1938 /*
 * Read a symbol into memory for disassembly by a library like capstone or
1940 * libLLVM. If memory is allocated out_buf holds it.
1941 */
const u8 *dso__read_symbol(struct dso *dso, const char *symfs_filename,
			   const struct map *map, const struct symbol *sym,
			   u8 **out_buf, u64 *out_buf_len, bool *is_64bit)
{
	/* Use objdump-style addresses (see map__rip_2objdump) for the range. */
	u64 start = map__rip_2objdump(map, sym->start);
	u64 end = map__rip_2objdump(map, sym->end);
	size_t len = end - start;

	/* Default the out-parameters for every failure path. */
	*out_buf = NULL;
	*out_buf_len = 0;
	*is_64bit = false;

	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_IMAGE) {
		/*
		 * Note, there is fallback BPF image disassembly in the objdump
		 * version but it currently does nothing.
		 */
		errno = EOPNOTSUPP;
		return NULL;
	}
	if (dso__binary_type(dso) == DSO_BINARY_TYPE__BPF_PROG_INFO) {
#ifdef HAVE_LIBBPF_SUPPORT
		struct bpf_prog_info_node *info_node;
		struct perf_bpil *info_linear;

		*is_64bit = sizeof(void *) == sizeof(u64);
		info_node = perf_env__find_bpf_prog_info(dso__bpf_prog(dso)->env,
							 dso__bpf_prog(dso)->id);
		if (!info_node) {
			errno = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
			return NULL;
		}
		info_linear = info_node->info_linear;
		assert(len <= info_linear->info.jited_prog_len);
		/*
		 * The jited program is returned directly; *out_buf stays
		 * NULL since no memory was allocated for the caller to free.
		 */
		*out_buf_len = len;
		return (const u8 *)(uintptr_t)(info_linear->info.jited_prog_insns);
#else
		pr_debug("No BPF program disassembly support\n");
		errno = EOPNOTSUPP;
		return NULL;
#endif
	}
	return __dso__read_symbol(dso, symfs_filename, start, len,
				  out_buf, out_buf_len, is_64bit);
}
1987
/*
 * Create a debuginfo handle for the dso.  dso__get_filename() may hand
 * back a temporary decompressed copy (signalled via 'decomp'), which is
 * unlinked again after debuginfo__new() has opened it.  Returns NULL
 * when no filename or debuginfo is available.
 */
struct debuginfo *dso__debuginfo(struct dso *dso)
{
	char *name;
	bool decomp = false;
	struct debuginfo *dinfo = NULL;

	mutex_lock(dso__lock(dso));

	name = dso__get_filename(dso, "", &decomp);
	if (name)
		dinfo = debuginfo__new(name);

	/* The decompressed copy is temporary: remove it from disk. */
	if (decomp)
		unlink(name);

	mutex_unlock(dso__lock(dso));
	free(name);
	return dinfo;
}
2007