1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include "string2.h"
5 #include <sys/param.h>
6 #include <sys/types.h>
7 #include <byteswap.h>
8 #include <unistd.h>
9 #include <regex.h>
10 #include <stdio.h>
11 #include <stdlib.h>
12 #include <linux/compiler.h>
13 #include <linux/list.h>
14 #include <linux/kernel.h>
15 #include <linux/bitops.h>
16 #include <linux/string.h>
17 #include <linux/stringify.h>
18 #include <linux/zalloc.h>
19 #include <sys/stat.h>
20 #include <sys/utsname.h>
21 #include <linux/time64.h>
22 #include <dirent.h>
23 #ifdef HAVE_LIBBPF_SUPPORT
24 #include <bpf/libbpf.h>
25 #endif
26 #include <perf/cpumap.h>
27 #include <tools/libc_compat.h> // reallocarray
28
29 #include "dso.h"
30 #include "evlist.h"
31 #include "evsel.h"
32 #include "util/evsel_fprintf.h"
33 #include "header.h"
34 #include "memswap.h"
35 #include "trace-event.h"
36 #include "session.h"
37 #include "symbol.h"
38 #include "debug.h"
39 #include "cpumap.h"
40 #include "pmu.h"
41 #include "pmus.h"
42 #include "vdso.h"
43 #include "strbuf.h"
44 #include "build-id.h"
45 #include "data.h"
46 #include <api/fs/fs.h>
47 #include <api/io_dir.h>
48 #include "asm/bug.h"
49 #include "tool.h"
50 #include "time-utils.h"
51 #include "units.h"
52 #include "util/util.h" // perf_exe()
53 #include "cputopo.h"
54 #include "bpf-event.h"
55 #include "bpf-utils.h"
56 #include "clockid.h"
57
58 #include <linux/ctype.h>
59 #include <internal/lib.h>
60
61 #ifdef HAVE_LIBTRACEEVENT
62 #include <event-parse.h>
63 #endif
64
65 /*
66 * magic2 = "PERFILE2"
67 * must be a numerical value to let the endianness
68 * determine the memory layout. That way we are able
69 * to detect endianness when reading the perf.data file
70 * back.
71 *
72 * we check for legacy (PERFFILE) format.
73 */
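/*
 * 0x32454c4946524550 is "PERFILE2" laid out as a little-endian u64;
 * __perf_magic2_sw is the same value with its bytes swapped.  A reader
 * that sees the swapped value knows the file was written on a host of
 * the opposite endianness and must byte-swap the header data.
 */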
74 static const char *__perf_magic1 = "PERFFILE";
75 static const u64 __perf_magic2 = 0x32454c4946524550ULL;
76 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
77
78 #define PERF_MAGIC __perf_magic2
79
80 const char perf_version_string[] = PERF_VERSION;
81
82 struct perf_file_attr {
83 struct perf_event_attr attr;
84 struct perf_file_section ids;
85 };
86
void perf_header__set_feat(struct perf_header *header, int feat)
88 {
89 __set_bit(feat, header->adds_features);
90 }
91
void perf_header__clear_feat(struct perf_header *header, int feat)
93 {
94 __clear_bit(feat, header->adds_features);
95 }
96
bool perf_header__has_feat(const struct perf_header *header, int feat)
98 {
99 return test_bit(feat, header->adds_features);
100 }
101
static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
103 {
104 ssize_t ret = writen(ff->fd, buf, size);
105
106 if (ret != (ssize_t)size)
107 return ret < 0 ? (int)ret : -1;
108 return 0;
109 }
110
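/*
 * Buffer variant of do_write(), used when building pipe-mode feature
 * events: grow ff->buf by doubling until the data fits, capped so that
 * the result still fits in the u16 perf_event_header::size.
 */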
static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
112 {
113 /* struct perf_event_header::size is u16 */
114 const size_t max_size = 0xffff - sizeof(struct perf_event_header);
115 size_t new_size = ff->size;
116 void *addr;
117
118 if (size + ff->offset > max_size)
119 return -E2BIG;
120
121 while (size > (new_size - ff->offset))
122 new_size <<= 1;
123 new_size = min(max_size, new_size);
124
125 if (ff->size < new_size) {
126 addr = realloc(ff->buf, new_size);
127 if (!addr)
128 return -ENOMEM;
129 ff->buf = addr;
130 ff->size = new_size;
131 }
132
133 memcpy(ff->buf + ff->offset, buf, size);
134 ff->offset += size;
135
136 return 0;
137 }
138
139 /* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
141 {
142 if (!ff->buf)
143 return __do_write_fd(ff, buf, size);
144 return __do_write_buf(ff, buf, size);
145 }
146
147 /* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
149 {
150 u64 *p = (u64 *) set;
151 int i, ret;
152
153 ret = do_write(ff, &size, sizeof(size));
154 if (ret < 0)
155 return ret;
156
157 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
158 ret = do_write(ff, p + i, sizeof(*p));
159 if (ret < 0)
160 return ret;
161 }
162
163 return 0;
164 }
165
166 /* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf, size_t count, size_t count_aligned)
169 {
170 static const char zero_buf[NAME_ALIGN];
171 int err = do_write(ff, bf, count);
172
173 if (!err)
174 err = do_write(ff, zero_buf, count_aligned - count);
175
176 return err;
177 }
178
179 #define string_size(str) \
180 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
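/*
 * On-disk strings are stored as a u32 length followed by the
 * NUL-terminated text, zero-padded up to NAME_ALIGN; string_size()
 * is the total number of bytes such a record occupies.
 */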
181
182 /* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
184 {
185 u32 len, olen;
186 int ret;
187
188 olen = strlen(str) + 1;
189 len = PERF_ALIGN(olen, NAME_ALIGN);
190
191 /* write len, incl. \0 */
192 ret = do_write(ff, &len, sizeof(len));
193 if (ret < 0)
194 return ret;
195
196 return write_padded(ff, str, olen, len);
197 }
198
static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
200 {
201 ssize_t ret = readn(ff->fd, addr, size);
202
203 if (ret != size)
204 return ret < 0 ? (int)ret : -1;
205 return 0;
206 }
207
static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
209 {
210 if (size > (ssize_t)ff->size - ff->offset)
211 return -1;
212
213 memcpy(addr, ff->buf + ff->offset, size);
214 ff->offset += size;
215
216 return 0;
217
218 }
219
static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
221 {
222 if (!ff->buf)
223 return __do_read_fd(ff, addr, size);
224 return __do_read_buf(ff, addr, size);
225 }
226
static int do_read_u32(struct feat_fd *ff, u32 *addr)
228 {
229 int ret;
230
231 ret = __do_read(ff, addr, sizeof(*addr));
232 if (ret)
233 return ret;
234
235 if (ff->ph->needs_swap)
236 *addr = bswap_32(*addr);
237 return 0;
238 }
239
static int do_read_u64(struct feat_fd *ff, u64 *addr)
241 {
242 int ret;
243
244 ret = __do_read(ff, addr, sizeof(*addr));
245 if (ret)
246 return ret;
247
248 if (ff->ph->needs_swap)
249 *addr = bswap_64(*addr);
250 return 0;
251 }
252
static char *do_read_string(struct feat_fd *ff)
254 {
255 u32 len;
256 char *buf;
257
258 if (do_read_u32(ff, &len))
259 return NULL;
260
261 buf = malloc(len);
262 if (!buf)
263 return NULL;
264
265 if (!__do_read(ff, buf, len)) {
266 /*
267 * strings are padded by zeroes
268 * thus the actual strlen of buf
269 * may be less than len
270 */
271 return buf;
272 }
273
274 free(buf);
275 return NULL;
276 }
277
278 /* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
280 {
281 unsigned long *set;
282 u64 size, *p;
283 int i, ret;
284
285 ret = do_read_u64(ff, &size);
286 if (ret)
287 return ret;
288
289 set = bitmap_zalloc(size);
290 if (!set)
291 return -ENOMEM;
292
293 p = (u64 *) set;
294
295 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
296 ret = do_read_u64(ff, p + i);
297 if (ret < 0) {
298 free(set);
299 return ret;
300 }
301 }
302
303 *pset = set;
304 *psize = size;
305 return 0;
306 }
307
308 #ifdef HAVE_LIBTRACEEVENT
static int write_tracing_data(struct feat_fd *ff, struct evlist *evlist)
311 {
312 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
313 return -1;
314
315 return read_tracing_data(ff->fd, &evlist->core.entries);
316 }
317 #endif
318
static int write_build_id(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
321 {
322 struct perf_session *session;
323 int err;
324
325 session = container_of(ff->ph, struct perf_session, header);
326
327 if (!perf_session__read_build_ids(session, true))
328 return -1;
329
330 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
331 return -1;
332
333 err = perf_session__write_buildid_table(session, ff);
334 if (err < 0) {
335 pr_debug("failed to write buildid table\n");
336 return err;
337 }
338 perf_session__cache_build_ids(session);
339
340 return 0;
341 }
342
static int write_hostname(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
345 {
346 struct utsname uts;
347 int ret;
348
349 ret = uname(&uts);
350 if (ret < 0)
351 return -1;
352
353 return do_write_string(ff, uts.nodename);
354 }
355
static int write_osrelease(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
358 {
359 struct utsname uts;
360 int ret;
361
362 ret = uname(&uts);
363 if (ret < 0)
364 return -1;
365
366 return do_write_string(ff, uts.release);
367 }
368
static int write_arch(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
371 {
372 struct utsname uts;
373 int ret;
374
375 ret = uname(&uts);
376 if (ret < 0)
377 return -1;
378
379 return do_write_string(ff, uts.machine);
380 }
381
static int write_version(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
384 {
385 return do_write_string(ff, perf_version_string);
386 }
387
static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
389 {
390 FILE *file;
391 char *buf = NULL;
392 char *s, *p;
393 const char *search = cpuinfo_proc;
394 size_t len = 0;
395 int ret = -1;
396
397 if (!search)
398 return -1;
399
400 file = fopen("/proc/cpuinfo", "r");
401 if (!file)
402 return -1;
403
404 while (getline(&buf, &len, file) > 0) {
405 ret = strncmp(buf, search, strlen(search));
406 if (!ret)
407 break;
408 }
409
410 if (ret) {
411 ret = -1;
412 goto done;
413 }
414
415 s = buf;
416
417 p = strchr(buf, ':');
418 if (p && *(p+1) == ' ' && *(p+2))
419 s = p + 2;
420 p = strchr(s, '\n');
421 if (p)
422 *p = '\0';
423
424 /* squash extra space characters (branding string) */
425 p = s;
426 while (*p) {
427 if (isspace(*p)) {
428 char *r = p + 1;
429 char *q = skip_spaces(r);
430 *p = ' ';
431 if (q != (p+1))
432 while ((*r++ = *q++));
433 }
434 p++;
435 }
436 ret = do_write_string(ff, s);
437 done:
438 free(buf);
439 fclose(file);
440 return ret;
441 }
442
static int write_cpudesc(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
445 {
446 #if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
447 #define CPUINFO_PROC { "cpu", }
448 #elif defined(__s390__)
449 #define CPUINFO_PROC { "vendor_id", }
450 #elif defined(__sh__)
451 #define CPUINFO_PROC { "cpu type", }
452 #elif defined(__alpha__) || defined(__mips__)
453 #define CPUINFO_PROC { "cpu model", }
454 #elif defined(__arm__)
455 #define CPUINFO_PROC { "model name", "Processor", }
456 #elif defined(__arc__)
457 #define CPUINFO_PROC { "Processor", }
458 #elif defined(__xtensa__)
459 #define CPUINFO_PROC { "core ID", }
460 #elif defined(__loongarch__)
461 #define CPUINFO_PROC { "Model Name", }
462 #else
463 #define CPUINFO_PROC { "model name", }
464 #endif
465 const char *cpuinfo_procs[] = CPUINFO_PROC;
466 #undef CPUINFO_PROC
467 unsigned int i;
468
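/*
 * Try each candidate /proc/cpuinfo field in order and write out the
 * first one that is present on this system.
 */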
469 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
470 int ret;
471 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
472 if (ret >= 0)
473 return ret;
474 }
475 return -1;
476 }
477
478
static int write_nrcpus(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
481 {
482 long nr;
483 u32 nrc, nra;
484 int ret;
485
486 nrc = cpu__max_present_cpu().cpu;
487
488 nr = sysconf(_SC_NPROCESSORS_ONLN);
489 if (nr < 0)
490 return -1;
491
492 nra = (u32)(nr & UINT_MAX);
493
494 ret = do_write(ff, &nrc, sizeof(nrc));
495 if (ret < 0)
496 return ret;
497
498 return do_write(ff, &nra, sizeof(nra));
499 }
500
static int write_event_desc(struct feat_fd *ff, struct evlist *evlist)
503 {
504 struct evsel *evsel;
505 u32 nre, nri, sz;
506 int ret;
507
508 nre = evlist->core.nr_entries;
509
510 /*
511 * write number of events
512 */
513 ret = do_write(ff, &nre, sizeof(nre));
514 if (ret < 0)
515 return ret;
516
517 /*
518 * size of perf_event_attr struct
519 */
520 sz = (u32)sizeof(evsel->core.attr);
521 ret = do_write(ff, &sz, sizeof(sz));
522 if (ret < 0)
523 return ret;
524
525 evlist__for_each_entry(evlist, evsel) {
526 ret = do_write(ff, &evsel->core.attr, sz);
527 if (ret < 0)
528 return ret;
/*
 * write the number of unique ids per event;
 * there is one id per instance of an event
 *
 * copy into nri to be independent of the
 * type used for ids
 */
536 nri = evsel->core.ids;
537 ret = do_write(ff, &nri, sizeof(nri));
538 if (ret < 0)
539 return ret;
540
541 /*
542 * write event string as passed on cmdline
543 */
544 ret = do_write_string(ff, evsel__name(evsel));
545 if (ret < 0)
546 return ret;
547 /*
548 * write unique ids for this event
549 */
550 ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
551 if (ret < 0)
552 return ret;
553 }
554 return 0;
555 }
556
static int write_cmdline(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
559 {
560 struct perf_env *env = &ff->ph->env;
561 char pbuf[MAXPATHLEN], *buf;
562 int i, ret, n;
563
564 /* actual path to perf binary */
565 buf = perf_exe(pbuf, MAXPATHLEN);
566
567 /* account for binary path */
568 n = env->nr_cmdline + 1;
569
570 ret = do_write(ff, &n, sizeof(n));
571 if (ret < 0)
572 return ret;
573
574 ret = do_write_string(ff, buf);
575 if (ret < 0)
576 return ret;
577
578 for (i = 0 ; i < env->nr_cmdline; i++) {
579 ret = do_write_string(ff, env->cmdline_argv[i]);
580 if (ret < 0)
581 return ret;
582 }
583 return 0;
584 }
585
586
static int write_cpu_topology(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
589 {
590 struct perf_env *env = &ff->ph->env;
591 struct cpu_topology *tp;
592 u32 i;
593 int ret, j;
594
595 tp = cpu_topology__new();
596 if (!tp)
597 return -1;
598
599 ret = do_write(ff, &tp->package_cpus_lists, sizeof(tp->package_cpus_lists));
600 if (ret < 0)
601 goto done;
602
603 for (i = 0; i < tp->package_cpus_lists; i++) {
604 ret = do_write_string(ff, tp->package_cpus_list[i]);
605 if (ret < 0)
606 goto done;
607 }
608 ret = do_write(ff, &tp->core_cpus_lists, sizeof(tp->core_cpus_lists));
609 if (ret < 0)
610 goto done;
611
612 for (i = 0; i < tp->core_cpus_lists; i++) {
613 ret = do_write_string(ff, tp->core_cpus_list[i]);
614 if (ret < 0)
615 break;
616 }
617
618 ret = perf_env__read_cpu_topology_map(env);
619 if (ret < 0)
620 goto done;
621
622 for (j = 0; j < env->nr_cpus_avail; j++) {
623 ret = do_write(ff, &env->cpu[j].core_id,
624 sizeof(env->cpu[j].core_id));
625 if (ret < 0)
626 return ret;
627 ret = do_write(ff, &env->cpu[j].socket_id,
628 sizeof(env->cpu[j].socket_id));
629 if (ret < 0)
630 return ret;
631 }
632
633 if (!tp->die_cpus_lists)
634 goto done;
635
636 ret = do_write(ff, &tp->die_cpus_lists, sizeof(tp->die_cpus_lists));
637 if (ret < 0)
638 goto done;
639
640 for (i = 0; i < tp->die_cpus_lists; i++) {
641 ret = do_write_string(ff, tp->die_cpus_list[i]);
642 if (ret < 0)
643 goto done;
644 }
645
646 for (j = 0; j < env->nr_cpus_avail; j++) {
647 ret = do_write(ff, &env->cpu[j].die_id,
648 sizeof(env->cpu[j].die_id));
649 if (ret < 0)
650 return ret;
651 }
652
653 done:
654 cpu_topology__delete(tp);
655 return ret;
656 }
657
658
659
static int write_total_mem(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
662 {
663 char *buf = NULL;
664 FILE *fp;
665 size_t len = 0;
666 int ret = -1, n;
667 uint64_t mem;
668
669 fp = fopen("/proc/meminfo", "r");
670 if (!fp)
671 return -1;
672
673 while (getline(&buf, &len, fp) > 0) {
674 ret = strncmp(buf, "MemTotal:", 9);
675 if (!ret)
676 break;
677 }
678 if (!ret) {
679 n = sscanf(buf, "%*s %"PRIu64, &mem);
680 if (n == 1)
681 ret = do_write(ff, &mem, sizeof(mem));
682 } else
683 ret = -1;
684 free(buf);
685 fclose(fp);
686 return ret;
687 }
688
static int write_numa_topology(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
691 {
692 struct numa_topology *tp;
693 int ret = -1;
694 u32 i;
695
696 tp = numa_topology__new();
697 if (!tp)
698 return -ENOMEM;
699
700 ret = do_write(ff, &tp->nr, sizeof(u32));
701 if (ret < 0)
702 goto err;
703
704 for (i = 0; i < tp->nr; i++) {
705 struct numa_topology_node *n = &tp->nodes[i];
706
707 ret = do_write(ff, &n->node, sizeof(u32));
708 if (ret < 0)
709 goto err;
710
711 ret = do_write(ff, &n->mem_total, sizeof(u64));
712 if (ret)
713 goto err;
714
715 ret = do_write(ff, &n->mem_free, sizeof(u64));
716 if (ret)
717 goto err;
718
719 ret = do_write_string(ff, n->cpus);
720 if (ret < 0)
721 goto err;
722 }
723
724 ret = 0;
725
726 err:
727 numa_topology__delete(tp);
728 return ret;
729 }
730
731 /*
732 * File format:
733 *
734 * struct pmu_mappings {
735 * u32 pmu_num;
736 * struct pmu_map {
737 * u32 type;
738 * char name[];
739 * }[pmu_num];
740 * };
741 */
742
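/*
 * The type written for each PMU is the kernel's event source type number
 * (the value used in perf_event_attr::type); the name is stored as a
 * do_write_string() record.
 */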
static int write_pmu_mappings(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
745 {
746 struct perf_pmu *pmu = NULL;
747 u32 pmu_num = 0;
748 int ret;
749
/*
 * Do a first pass to count the number of PMUs, so we avoid an lseek()
 * and this also works in pipe mode.
 */
754 while ((pmu = perf_pmus__scan(pmu)))
755 pmu_num++;
756
757 ret = do_write(ff, &pmu_num, sizeof(pmu_num));
758 if (ret < 0)
759 return ret;
760
761 while ((pmu = perf_pmus__scan(pmu))) {
762 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
763 if (ret < 0)
764 return ret;
765
766 ret = do_write_string(ff, pmu->name);
767 if (ret < 0)
768 return ret;
769 }
770
771 return 0;
772 }
773
774 /*
775 * File format:
776 *
777 * struct group_descs {
778 * u32 nr_groups;
779 * struct group_desc {
780 * char name[];
781 * u32 leader_idx;
782 * u32 nr_members;
783 * }[nr_groups];
784 * };
785 */
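/*
 * Only group leaders with more than one member get a group_desc entry;
 * ungrouped events and single-event groups are skipped.
 */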
static int write_group_desc(struct feat_fd *ff, struct evlist *evlist)
788 {
789 u32 nr_groups = evlist__nr_groups(evlist);
790 struct evsel *evsel;
791 int ret;
792
793 ret = do_write(ff, &nr_groups, sizeof(nr_groups));
794 if (ret < 0)
795 return ret;
796
797 evlist__for_each_entry(evlist, evsel) {
798 if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
799 const char *name = evsel->group_name ?: "{anon_group}";
800 u32 leader_idx = evsel->core.idx;
801 u32 nr_members = evsel->core.nr_members;
802
803 ret = do_write_string(ff, name);
804 if (ret < 0)
805 return ret;
806
807 ret = do_write(ff, &leader_idx, sizeof(leader_idx));
808 if (ret < 0)
809 return ret;
810
811 ret = do_write(ff, &nr_members, sizeof(nr_members));
812 if (ret < 0)
813 return ret;
814 }
815 }
816 return 0;
817 }
818
819 /*
820 * Return the CPU id as a raw string.
821 *
822 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
824 */
char * __weak get_cpuid_str(struct perf_cpu cpu __maybe_unused)
826 {
827 return NULL;
828 }
829
char *get_cpuid_allow_env_override(struct perf_cpu cpu)
831 {
832 char *cpuid;
833 static bool printed;
834
835 cpuid = getenv("PERF_CPUID");
836 if (cpuid)
837 cpuid = strdup(cpuid);
838 if (!cpuid)
839 cpuid = get_cpuid_str(cpu);
840 if (!cpuid)
841 return NULL;
842
843 if (!printed) {
844 pr_debug("Using CPUID %s\n", cpuid);
845 printed = true;
846 }
847 return cpuid;
848 }
849
850 /* Return zero when the cpuid from the mapfile.csv matches the
851 * cpuid string generated on this platform.
852 * Otherwise return non-zero.
853 */
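/*
 * For illustration (hypothetical mapfile entry): the pattern
 * "VendorX-6-2[AB]" matches the cpuid "VendorX-6-2A" but not
 * "VendorX-6-2A-1", because the regex must cover the entire string.
 */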
int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
855 {
856 regex_t re;
857 regmatch_t pmatch[1];
858 int match;
859
860 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
/* Warn that we were unable to build a regex to match this particular string. */
862 pr_info("Invalid regular expression %s\n", mapcpuid);
863 return 1;
864 }
865
866 match = !regexec(&re, cpuid, 1, pmatch, 0);
867 regfree(&re);
868 if (match) {
869 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
870
871 /* Verify the entire string matched. */
872 if (match_len == strlen(cpuid))
873 return 0;
874 }
875 return 1;
876 }
877
878 /*
879 * default get_cpuid(): nothing gets recorded
880 * actual implementation must be in arch/$(SRCARCH)/util/header.c
881 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused,
		     struct perf_cpu cpu __maybe_unused)
884 {
885 return ENOSYS; /* Not implemented */
886 }
887
static int write_cpuid(struct feat_fd *ff, struct evlist *evlist)
889 {
890 struct perf_cpu cpu = perf_cpu_map__min(evlist->core.all_cpus);
891 char buffer[64];
892 int ret;
893
894 ret = get_cpuid(buffer, sizeof(buffer), cpu);
895 if (ret)
896 return -1;
897
898 return do_write_string(ff, buffer);
899 }
900
static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
903 {
904 return 0;
905 }
906
static int write_auxtrace(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
909 {
910 struct perf_session *session;
911 int err;
912
913 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
914 return -1;
915
916 session = container_of(ff->ph, struct perf_session, header);
917
918 err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
919 if (err < 0)
920 pr_err("Failed to write auxtrace index\n");
921 return err;
922 }
923
static int write_clockid(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
926 {
927 return do_write(ff, &ff->ph->env.clock.clockid_res_ns,
928 sizeof(ff->ph->env.clock.clockid_res_ns));
929 }
930
static int write_clock_data(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
933 {
934 u64 *data64;
935 u32 data32;
936 int ret;
937
938 /* version */
939 data32 = 1;
940
941 ret = do_write(ff, &data32, sizeof(data32));
942 if (ret < 0)
943 return ret;
944
945 /* clockid */
946 data32 = ff->ph->env.clock.clockid;
947
948 ret = do_write(ff, &data32, sizeof(data32));
949 if (ret < 0)
950 return ret;
951
952 /* TOD ref time */
953 data64 = &ff->ph->env.clock.tod_ns;
954
955 ret = do_write(ff, data64, sizeof(*data64));
956 if (ret < 0)
957 return ret;
958
959 /* clockid ref time */
960 data64 = &ff->ph->env.clock.clockid_ns;
961
962 return do_write(ff, data64, sizeof(*data64));
963 }
964
static int write_hybrid_topology(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
967 {
968 struct hybrid_topology *tp;
969 int ret;
970 u32 i;
971
972 tp = hybrid_topology__new();
973 if (!tp)
974 return -ENOENT;
975
976 ret = do_write(ff, &tp->nr, sizeof(u32));
977 if (ret < 0)
978 goto err;
979
980 for (i = 0; i < tp->nr; i++) {
981 struct hybrid_topology_node *n = &tp->nodes[i];
982
983 ret = do_write_string(ff, n->pmu_name);
984 if (ret < 0)
985 goto err;
986
987 ret = do_write_string(ff, n->cpus);
988 if (ret < 0)
989 goto err;
990 }
991
992 ret = 0;
993
994 err:
995 hybrid_topology__delete(tp);
996 return ret;
997 }
998
static int write_dir_format(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
1001 {
1002 struct perf_session *session;
1003 struct perf_data *data;
1004
1005 session = container_of(ff->ph, struct perf_session, header);
1006 data = session->data;
1007
1008 if (WARN_ON(!perf_data__is_dir(data)))
1009 return -1;
1010
1011 return do_write(ff, &data->dir.version, sizeof(data->dir.version));
1012 }
1013
1014 #ifdef HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
1017 {
1018 struct perf_env *env = &ff->ph->env;
1019 struct rb_root *root;
1020 struct rb_node *next;
1021 int ret = 0;
1022
1023 down_read(&env->bpf_progs.lock);
1024
1025 ret = do_write(ff, &env->bpf_progs.infos_cnt,
1026 sizeof(env->bpf_progs.infos_cnt));
1027 if (ret < 0 || env->bpf_progs.infos_cnt == 0)
1028 goto out;
1029
1030 root = &env->bpf_progs.infos;
1031 next = rb_first(root);
1032 while (next) {
1033 struct bpf_prog_info_node *node;
1034 size_t len;
1035
1036 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1037 next = rb_next(&node->rb_node);
1038 len = sizeof(struct perf_bpil) +
1039 node->info_linear->data_len;
1040
1041 /* before writing to file, translate address to offset */
1042 bpil_addr_to_offs(node->info_linear);
1043 ret = do_write(ff, node->info_linear, len);
1044 /*
1045 * translate back to address even when do_write() fails,
1046 * so that this function never changes the data.
1047 */
1048 bpil_offs_to_addr(node->info_linear);
1049 if (ret < 0)
1050 goto out;
1051 }
1052 out:
1053 up_read(&env->bpf_progs.lock);
1054 return ret;
1055 }
1056
static int write_bpf_btf(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
1059 {
1060 struct perf_env *env = &ff->ph->env;
1061 struct rb_root *root;
1062 struct rb_node *next;
1063 int ret = 0;
1064
1065 down_read(&env->bpf_progs.lock);
1066
1067 ret = do_write(ff, &env->bpf_progs.btfs_cnt,
1068 sizeof(env->bpf_progs.btfs_cnt));
1069
1070 if (ret < 0 || env->bpf_progs.btfs_cnt == 0)
1071 goto out;
1072
1073 root = &env->bpf_progs.btfs;
1074 next = rb_first(root);
1075 while (next) {
1076 struct btf_node *node;
1077
1078 node = rb_entry(next, struct btf_node, rb_node);
1079 next = rb_next(&node->rb_node);
1080 ret = do_write(ff, &node->id,
1081 sizeof(u32) * 2 + node->data_size);
1082 if (ret < 0)
1083 goto out;
1084 }
1085 out:
1086 up_read(&env->bpf_progs.lock);
1087 return ret;
1088 }
1089 #endif // HAVE_LIBBPF_SUPPORT
1090
static int cpu_cache_level__sort(const void *a, const void *b)
1092 {
1093 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
1094 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
1095
1096 return cache_a->level - cache_b->level;
1097 }
1098
static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
1100 {
1101 if (a->level != b->level)
1102 return false;
1103
1104 if (a->line_size != b->line_size)
1105 return false;
1106
1107 if (a->sets != b->sets)
1108 return false;
1109
1110 if (a->ways != b->ways)
1111 return false;
1112
1113 if (strcmp(a->type, b->type))
1114 return false;
1115
1116 if (strcmp(a->size, b->size))
1117 return false;
1118
1119 if (strcmp(a->map, b->map))
1120 return false;
1121
1122 return true;
1123 }
1124
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
1126 {
1127 char path[PATH_MAX], file[PATH_MAX];
1128 struct stat st;
1129 size_t len;
1130
1131 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
1132 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
1133
1134 if (stat(file, &st))
1135 return 1;
1136
1137 scnprintf(file, PATH_MAX, "%s/level", path);
1138 if (sysfs__read_int(file, (int *) &cache->level))
1139 return -1;
1140
1141 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
1142 if (sysfs__read_int(file, (int *) &cache->line_size))
1143 return -1;
1144
1145 scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
1146 if (sysfs__read_int(file, (int *) &cache->sets))
1147 return -1;
1148
1149 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
1150 if (sysfs__read_int(file, (int *) &cache->ways))
1151 return -1;
1152
1153 scnprintf(file, PATH_MAX, "%s/type", path);
1154 if (sysfs__read_str(file, &cache->type, &len))
1155 return -1;
1156
1157 cache->type[len] = 0;
1158 cache->type = strim(cache->type);
1159
1160 scnprintf(file, PATH_MAX, "%s/size", path);
1161 if (sysfs__read_str(file, &cache->size, &len)) {
1162 zfree(&cache->type);
1163 return -1;
1164 }
1165
1166 cache->size[len] = 0;
1167 cache->size = strim(cache->size);
1168
1169 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
1170 if (sysfs__read_str(file, &cache->map, &len)) {
1171 zfree(&cache->size);
1172 zfree(&cache->type);
1173 return -1;
1174 }
1175
1176 cache->map[len] = 0;
1177 cache->map = strim(cache->map);
1178 return 0;
1179 }
1180
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
1182 {
1183 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
1184 }
1185
/*
 * Build the cache levels for a particular CPU from the data in
 * /sys/devices/system/cpu/cpu<cpu>/cache/
 * The cache level data is stored in caches[] starting at the index
 * *cntp.
 */
int build_caches_for_cpu(u32 cpu, struct cpu_cache_level caches[], u32 *cntp)
1193 {
1194 u16 level;
1195
1196 for (level = 0; level < MAX_CACHE_LVL; level++) {
1197 struct cpu_cache_level c;
1198 int err;
1199 u32 i;
1200
1201 err = cpu_cache_level__read(&c, cpu, level);
1202 if (err < 0)
1203 return err;
1204
1205 if (err == 1)
1206 break;
1207
1208 for (i = 0; i < *cntp; i++) {
1209 if (cpu_cache_level__cmp(&c, &caches[i]))
1210 break;
1211 }
1212
1213 if (i == *cntp) {
1214 caches[*cntp] = c;
1215 *cntp = *cntp + 1;
1216 } else
1217 cpu_cache_level__free(&c);
1218 }
1219
1220 return 0;
1221 }
1222
static int build_caches(struct cpu_cache_level caches[], u32 *cntp)
1224 {
1225 u32 nr, cpu, cnt = 0;
1226
1227 nr = cpu__max_cpu().cpu;
1228
1229 for (cpu = 0; cpu < nr; cpu++) {
1230 int ret = build_caches_for_cpu(cpu, caches, &cnt);
1231
1232 if (ret)
1233 return ret;
1234 }
1235 *cntp = cnt;
1236 return 0;
1237 }
1238
static int write_cache(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
1241 {
1242 u32 max_caches = cpu__max_cpu().cpu * MAX_CACHE_LVL;
1243 struct cpu_cache_level caches[max_caches];
1244 u32 cnt = 0, i, version = 1;
1245 int ret;
1246
1247 ret = build_caches(caches, &cnt);
1248 if (ret)
1249 goto out;
1250
1251 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1252
1253 ret = do_write(ff, &version, sizeof(u32));
1254 if (ret < 0)
1255 goto out;
1256
1257 ret = do_write(ff, &cnt, sizeof(u32));
1258 if (ret < 0)
1259 goto out;
1260
1261 for (i = 0; i < cnt; i++) {
1262 struct cpu_cache_level *c = &caches[i];
1263
1264 #define _W(v) \
1265 ret = do_write(ff, &c->v, sizeof(u32)); \
1266 if (ret < 0) \
1267 goto out;
1268
1269 _W(level)
1270 _W(line_size)
1271 _W(sets)
1272 _W(ways)
1273 #undef _W
1274
1275 #define _W(v) \
1276 ret = do_write_string(ff, (const char *) c->v); \
1277 if (ret < 0) \
1278 goto out;
1279
1280 _W(type)
1281 _W(size)
1282 _W(map)
1283 #undef _W
1284 }
1285
1286 out:
1287 for (i = 0; i < cnt; i++)
1288 cpu_cache_level__free(&caches[i]);
1289 return ret;
1290 }
1291
static int write_stat(struct feat_fd *ff __maybe_unused, struct evlist *evlist __maybe_unused)
1294 {
1295 return 0;
1296 }
1297
static int write_sample_time(struct feat_fd *ff, struct evlist *evlist)
1300 {
1301 int ret;
1302
1303 ret = do_write(ff, &evlist->first_sample_time,
1304 sizeof(evlist->first_sample_time));
1305 if (ret < 0)
1306 return ret;
1307
1308 return do_write(ff, &evlist->last_sample_time,
1309 sizeof(evlist->last_sample_time));
1310 }
1311
1312
static int memory_node__read(struct memory_node *n, unsigned long idx)
1314 {
1315 unsigned int phys, size = 0;
1316 char path[PATH_MAX];
1317 struct io_dirent64 *ent;
1318 struct io_dir dir;
1319
1320 #define for_each_memory(mem, dir) \
1321 while ((ent = io_dir__readdir(&dir)) != NULL) \
1322 if (strcmp(ent->d_name, ".") && \
1323 strcmp(ent->d_name, "..") && \
1324 sscanf(ent->d_name, "memory%u", &mem) == 1)
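/*
 * Iterate over the "memory<N>" entries of an already opened node
 * directory, with 'mem' set to the memory block index N.
 */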
1325
1326 scnprintf(path, PATH_MAX,
1327 "%s/devices/system/node/node%lu",
1328 sysfs__mountpoint(), idx);
1329
1330 io_dir__init(&dir, open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
1331 if (dir.dirfd < 0) {
1332 pr_warning("failed: can't open memory sysfs data '%s'\n", path);
1333 return -1;
1334 }
1335
1336 for_each_memory(phys, dir) {
1337 size = max(phys, size);
1338 }
1339
1340 size++;
1341
1342 n->set = bitmap_zalloc(size);
1343 if (!n->set) {
1344 close(dir.dirfd);
1345 return -ENOMEM;
1346 }
1347
1348 n->node = idx;
1349 n->size = size;
1350
1351 io_dir__rewinddir(&dir);
1352
1353 for_each_memory(phys, dir) {
1354 __set_bit(phys, n->set);
1355 }
1356
1357 close(dir.dirfd);
1358 return 0;
1359 }
1360
static void memory_node__delete_nodes(struct memory_node *nodesp, u64 cnt)
1362 {
1363 for (u64 i = 0; i < cnt; i++)
1364 bitmap_free(nodesp[i].set);
1365
1366 free(nodesp);
1367 }
1368
static int memory_node__sort(const void *a, const void *b)
1370 {
1371 const struct memory_node *na = a;
1372 const struct memory_node *nb = b;
1373
1374 return na->node - nb->node;
1375 }
1376
static int build_mem_topology(struct memory_node **nodesp, u64 *cntp)
1378 {
1379 char path[PATH_MAX];
1380 struct io_dirent64 *ent;
1381 struct io_dir dir;
1382 int ret = 0;
1383 size_t cnt = 0, size = 0;
1384 struct memory_node *nodes = NULL;
1385
1386 scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1387 sysfs__mountpoint());
1388
1389 io_dir__init(&dir, open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
1390 if (dir.dirfd < 0) {
1391 pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
1392 __func__, path);
1393 return -1;
1394 }
1395
1396 while (!ret && (ent = io_dir__readdir(&dir))) {
1397 unsigned int idx;
1398 int r;
1399
1400 if (!strcmp(ent->d_name, ".") ||
1401 !strcmp(ent->d_name, ".."))
1402 continue;
1403
1404 r = sscanf(ent->d_name, "node%u", &idx);
1405 if (r != 1)
1406 continue;
1407
1408 if (cnt >= size) {
1409 struct memory_node *new_nodes =
1410 reallocarray(nodes, cnt + 4, sizeof(*nodes));
1411
1412 if (!new_nodes) {
1413 pr_err("Failed to write MEM_TOPOLOGY, size %zd nodes\n", size);
1414 ret = -ENOMEM;
1415 goto out;
1416 }
1417 nodes = new_nodes;
1418 size += 4;
1419 }
1420 ret = memory_node__read(&nodes[cnt], idx);
1421 if (!ret)
1422 cnt += 1;
1423 }
1424 out:
1425 close(dir.dirfd);
1426 if (!ret) {
1427 *cntp = cnt;
1428 *nodesp = nodes;
1429 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1430 } else
1431 memory_node__delete_nodes(nodes, cnt);
1432
1433 return ret;
1434 }
1435
/*
 * The MEM_TOPOLOGY feature holds the physical memory map for every
 * node in the system. The format of the data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store a map of physical memory indexes:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belong to the node
 */
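/*
 * For illustration: with a 128 MB block size, a node backed by sysfs
 * entries memory0..memory7 is written with size = 8 and a bitmap that
 * has bits 0-7 set.
 */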
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
1453 {
1454 struct memory_node *nodes = NULL;
1455 u64 bsize, version = 1, i, nr = 0;
1456 int ret;
1457
1458 ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
1459 (unsigned long long *) &bsize);
1460 if (ret)
1461 return ret;
1462
1463 ret = build_mem_topology(&nodes, &nr);
1464 if (ret)
1465 return ret;
1466
1467 ret = do_write(ff, &version, sizeof(version));
1468 if (ret < 0)
1469 goto out;
1470
1471 ret = do_write(ff, &bsize, sizeof(bsize));
1472 if (ret < 0)
1473 goto out;
1474
1475 ret = do_write(ff, &nr, sizeof(nr));
1476 if (ret < 0)
1477 goto out;
1478
1479 for (i = 0; i < nr; i++) {
1480 struct memory_node *n = &nodes[i];
1481
1482 #define _W(v) \
1483 ret = do_write(ff, &n->v, sizeof(n->v)); \
1484 if (ret < 0) \
1485 goto out;
1486
1487 _W(node)
1488 _W(size)
1489
1490 #undef _W
1491
1492 ret = do_write_bitmap(ff, n->set, n->size);
1493 if (ret < 0)
1494 goto out;
1495 }
1496
1497 out:
1498 memory_node__delete_nodes(nodes, nr);
1499 return ret;
1500 }
1501
static int write_compressed(struct feat_fd *ff __maybe_unused,
			    struct evlist *evlist __maybe_unused)
1504 {
1505 int ret;
1506
1507 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
1508 if (ret)
1509 return ret;
1510
1511 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
1512 if (ret)
1513 return ret;
1514
1515 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
1516 if (ret)
1517 return ret;
1518
1519 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
1520 if (ret)
1521 return ret;
1522
1523 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
1524 }
1525
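/*
 * Capabilities are written as a u32 count followed by name/value string
 * pairs; when write_pmu is set the PMU name is appended so the reader
 * can attribute the capabilities to the right PMU.
 */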
static int __write_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu, bool write_pmu)
1528 {
1529 struct perf_pmu_caps *caps = NULL;
1530 int ret;
1531
1532 ret = do_write(ff, &pmu->nr_caps, sizeof(pmu->nr_caps));
1533 if (ret < 0)
1534 return ret;
1535
1536 list_for_each_entry(caps, &pmu->caps, list) {
1537 ret = do_write_string(ff, caps->name);
1538 if (ret < 0)
1539 return ret;
1540
1541 ret = do_write_string(ff, caps->value);
1542 if (ret < 0)
1543 return ret;
1544 }
1545
1546 if (write_pmu) {
1547 ret = do_write_string(ff, pmu->name);
1548 if (ret < 0)
1549 return ret;
1550 }
1551
1552 return ret;
1553 }
1554
static int write_cpu_pmu_caps(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
1557 {
1558 struct perf_pmu *cpu_pmu = perf_pmus__find("cpu");
1559 int ret;
1560
1561 if (!cpu_pmu)
1562 return -ENOENT;
1563
1564 ret = perf_pmu__caps_parse(cpu_pmu);
1565 if (ret < 0)
1566 return ret;
1567
1568 return __write_pmu_caps(ff, cpu_pmu, false);
1569 }
1570
static int write_pmu_caps(struct feat_fd *ff, struct evlist *evlist __maybe_unused)
1573 {
1574 struct perf_pmu *pmu = NULL;
1575 int nr_pmu = 0;
1576 int ret;
1577
1578 while ((pmu = perf_pmus__scan(pmu))) {
1579 if (!strcmp(pmu->name, "cpu")) {
1580 /*
1581 * The "cpu" PMU is special and covered by
1582 * HEADER_CPU_PMU_CAPS. Note, core PMUs are
1583 * counted/written here for ARM, s390 and Intel hybrid.
1584 */
1585 continue;
1586 }
1587 if (perf_pmu__caps_parse(pmu) <= 0)
1588 continue;
1589 nr_pmu++;
1590 }
1591
1592 ret = do_write(ff, &nr_pmu, sizeof(nr_pmu));
1593 if (ret < 0)
1594 return ret;
1595
1596 if (!nr_pmu)
1597 return 0;
1598
/*
 * Note: older perf tools assume core PMUs come first; this is a property
 * of perf_pmus__scan().
 */
1603 pmu = NULL;
1604 while ((pmu = perf_pmus__scan(pmu))) {
1605 if (!strcmp(pmu->name, "cpu")) {
1606 /* Skip as above. */
1607 continue;
1608 }
1609 if (perf_pmu__caps_parse(pmu) <= 0)
1610 continue;
1611 ret = __write_pmu_caps(ff, pmu, true);
1612 if (ret < 0)
1613 return ret;
1614 }
1615 return 0;
1616 }
1617
static void print_hostname(struct feat_fd *ff, FILE *fp)
1619 {
1620 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
1621 }
1622
static void print_osrelease(struct feat_fd *ff, FILE *fp)
1624 {
1625 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
1626 }
1627
static void print_arch(struct feat_fd *ff, FILE *fp)
1629 {
1630 fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
1631 }
1632
static void print_cpudesc(struct feat_fd *ff, FILE *fp)
1634 {
1635 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
1636 }
1637
static void print_nrcpus(struct feat_fd *ff, FILE *fp)
1639 {
1640 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1641 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
1642 }
1643
static void print_version(struct feat_fd *ff, FILE *fp)
1645 {
1646 fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
1647 }
1648
static void print_cmdline(struct feat_fd *ff, FILE *fp)
1650 {
1651 int nr, i;
1652
1653 nr = ff->ph->env.nr_cmdline;
1654
1655 fprintf(fp, "# cmdline : ");
1656
1657 for (i = 0; i < nr; i++) {
1658 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1659 if (!argv_i) {
1660 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1661 } else {
1662 char *mem = argv_i;
1663 do {
1664 char *quote = strchr(argv_i, '\'');
1665 if (!quote)
1666 break;
1667 *quote++ = '\0';
1668 fprintf(fp, "%s\\\'", argv_i);
1669 argv_i = quote;
1670 } while (1);
1671 fprintf(fp, "%s ", argv_i);
1672 free(mem);
1673 }
1674 }
1675 fputc('\n', fp);
1676 }
1677
static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
1679 {
1680 struct perf_header *ph = ff->ph;
1681 int cpu_nr = ph->env.nr_cpus_avail;
1682 int nr, i;
1683 char *str;
1684
1685 nr = ph->env.nr_sibling_cores;
1686 str = ph->env.sibling_cores;
1687
1688 for (i = 0; i < nr; i++) {
1689 fprintf(fp, "# sibling sockets : %s\n", str);
1690 str += strlen(str) + 1;
1691 }
1692
1693 if (ph->env.nr_sibling_dies) {
1694 nr = ph->env.nr_sibling_dies;
1695 str = ph->env.sibling_dies;
1696
1697 for (i = 0; i < nr; i++) {
1698 fprintf(fp, "# sibling dies : %s\n", str);
1699 str += strlen(str) + 1;
1700 }
1701 }
1702
1703 nr = ph->env.nr_sibling_threads;
1704 str = ph->env.sibling_threads;
1705
1706 for (i = 0; i < nr; i++) {
1707 fprintf(fp, "# sibling threads : %s\n", str);
1708 str += strlen(str) + 1;
1709 }
1710
1711 if (ph->env.nr_sibling_dies) {
1712 if (ph->env.cpu != NULL) {
1713 for (i = 0; i < cpu_nr; i++)
1714 fprintf(fp, "# CPU %d: Core ID %d, "
1715 "Die ID %d, Socket ID %d\n",
1716 i, ph->env.cpu[i].core_id,
1717 ph->env.cpu[i].die_id,
1718 ph->env.cpu[i].socket_id);
1719 } else
1720 fprintf(fp, "# Core ID, Die ID and Socket ID "
1721 "information is not available\n");
1722 } else {
1723 if (ph->env.cpu != NULL) {
1724 for (i = 0; i < cpu_nr; i++)
1725 fprintf(fp, "# CPU %d: Core ID %d, "
1726 "Socket ID %d\n",
1727 i, ph->env.cpu[i].core_id,
1728 ph->env.cpu[i].socket_id);
1729 } else
1730 fprintf(fp, "# Core ID and Socket ID "
1731 "information is not available\n");
1732 }
1733 }
1734
static void print_clockid(struct feat_fd *ff, FILE *fp)
1736 {
1737 fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
1738 ff->ph->env.clock.clockid_res_ns * 1000);
1739 }
1740
static void print_clock_data(struct feat_fd *ff, FILE *fp)
1742 {
1743 struct timespec clockid_ns;
1744 char tstr[64], date[64];
1745 struct timeval tod_ns;
1746 clockid_t clockid;
1747 struct tm ltime;
1748 u64 ref;
1749
1750 if (!ff->ph->env.clock.enabled) {
1751 fprintf(fp, "# reference time disabled\n");
1752 return;
1753 }
1754
1755 /* Compute TOD time. */
1756 ref = ff->ph->env.clock.tod_ns;
1757 tod_ns.tv_sec = ref / NSEC_PER_SEC;
1758 ref -= tod_ns.tv_sec * NSEC_PER_SEC;
1759 tod_ns.tv_usec = ref / NSEC_PER_USEC;
1760
1761 /* Compute clockid time. */
1762 ref = ff->ph->env.clock.clockid_ns;
1763 clockid_ns.tv_sec = ref / NSEC_PER_SEC;
1764 ref -= clockid_ns.tv_sec * NSEC_PER_SEC;
1765 clockid_ns.tv_nsec = ref;
1766
1767 clockid = ff->ph->env.clock.clockid;
1768
1769 if (localtime_r(&tod_ns.tv_sec, <ime) == NULL)
1770 snprintf(tstr, sizeof(tstr), "<error>");
1771 else {
1772 strftime(date, sizeof(date), "%F %T", <ime);
1773 scnprintf(tstr, sizeof(tstr), "%s.%06d",
1774 date, (int) tod_ns.tv_usec);
1775 }
1776
1777 fprintf(fp, "# clockid: %s (%u)\n", clockid_name(clockid), clockid);
1778 fprintf(fp, "# reference time: %s = %ld.%06d (TOD) = %ld.%09ld (%s)\n",
1779 tstr, (long) tod_ns.tv_sec, (int) tod_ns.tv_usec,
1780 (long) clockid_ns.tv_sec, clockid_ns.tv_nsec,
1781 clockid_name(clockid));
1782 }
1783
static void print_hybrid_topology(struct feat_fd *ff, FILE *fp)
1785 {
1786 int i;
1787 struct hybrid_node *n;
1788
1789 fprintf(fp, "# hybrid cpu system:\n");
1790 for (i = 0; i < ff->ph->env.nr_hybrid_nodes; i++) {
1791 n = &ff->ph->env.hybrid_nodes[i];
1792 fprintf(fp, "# %s cpu list : %s\n", n->pmu_name, n->cpus);
1793 }
1794 }
1795
static void print_dir_format(struct feat_fd *ff, FILE *fp)
1797 {
1798 struct perf_session *session;
1799 struct perf_data *data;
1800
1801 session = container_of(ff->ph, struct perf_session, header);
1802 data = session->data;
1803
1804 fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
1805 }
1806
1807 #ifdef HAVE_LIBBPF_SUPPORT
static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
1809 {
1810 struct perf_env *env = &ff->ph->env;
1811 struct rb_root *root;
1812 struct rb_node *next;
1813
1814 down_read(&env->bpf_progs.lock);
1815
1816 root = &env->bpf_progs.infos;
1817 next = rb_first(root);
1818
1819 if (!next)
1820 printf("# bpf_prog_info empty\n");
1821
1822 while (next) {
1823 struct bpf_prog_info_node *node;
1824
1825 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1826 next = rb_next(&node->rb_node);
1827
1828 __bpf_event__print_bpf_prog_info(&node->info_linear->info,
1829 env, fp);
1830 }
1831
1832 up_read(&env->bpf_progs.lock);
1833 }
1834
static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
1836 {
1837 struct perf_env *env = &ff->ph->env;
1838 struct rb_root *root;
1839 struct rb_node *next;
1840
1841 down_read(&env->bpf_progs.lock);
1842
1843 root = &env->bpf_progs.btfs;
1844 next = rb_first(root);
1845
1846 if (!next)
1847 printf("# btf info empty\n");
1848
1849 while (next) {
1850 struct btf_node *node;
1851
1852 node = rb_entry(next, struct btf_node, rb_node);
1853 next = rb_next(&node->rb_node);
1854 fprintf(fp, "# btf info of id %u\n", node->id);
1855 }
1856
1857 up_read(&env->bpf_progs.lock);
1858 }
1859 #endif // HAVE_LIBBPF_SUPPORT
1860
static void free_event_desc(struct evsel *events)
1862 {
1863 struct evsel *evsel;
1864
1865 if (!events)
1866 return;
1867
1868 for (evsel = events; evsel->core.attr.size; evsel++) {
1869 zfree(&evsel->name);
1870 zfree(&evsel->core.id);
1871 }
1872
1873 free(events);
1874 }
1875
static bool perf_attr_check(struct perf_event_attr *attr)
1877 {
1878 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) {
1879 pr_warning("Reserved bits are set unexpectedly. "
1880 "Please update perf tool.\n");
1881 return false;
1882 }
1883
1884 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) {
1885 pr_warning("Unknown sample type (0x%llx) is detected. "
1886 "Please update perf tool.\n",
1887 attr->sample_type);
1888 return false;
1889 }
1890
1891 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) {
1892 pr_warning("Unknown read format (0x%llx) is detected. "
1893 "Please update perf tool.\n",
1894 attr->read_format);
1895 return false;
1896 }
1897
1898 if ((attr->sample_type & PERF_SAMPLE_BRANCH_STACK) &&
1899 (attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX-1))) {
1900 pr_warning("Unknown branch sample type (0x%llx) is detected. "
1901 "Please update perf tool.\n",
1902 attr->branch_sample_type);
1903
1904 return false;
1905 }
1906
1907 return true;
1908 }
1909
static struct evsel *read_event_desc(struct feat_fd *ff)
1911 {
1912 struct evsel *evsel, *events = NULL;
1913 u64 *id;
1914 void *buf = NULL;
1915 u32 nre, sz, nr, i, j;
1916 size_t msz;
1917
1918 /* number of events */
1919 if (do_read_u32(ff, &nre))
1920 goto error;
1921
1922 if (do_read_u32(ff, &sz))
1923 goto error;
1924
/* buffer to hold the on-file attr struct */
1926 buf = malloc(sz);
1927 if (!buf)
1928 goto error;
1929
1930 /* the last event terminates with evsel->core.attr.size == 0: */
1931 events = calloc(nre + 1, sizeof(*events));
1932 if (!events)
1933 goto error;
1934
1935 msz = sizeof(evsel->core.attr);
1936 if (sz < msz)
1937 msz = sz;
1938
1939 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1940 evsel->core.idx = i;
1941
1942 /*
1943 * must read entire on-file attr struct to
1944 * sync up with layout.
1945 */
1946 if (__do_read(ff, buf, sz))
1947 goto error;
1948
1949 if (ff->ph->needs_swap)
1950 perf_event__attr_swap(buf);
1951
1952 memcpy(&evsel->core.attr, buf, msz);
1953
1954 if (!perf_attr_check(&evsel->core.attr))
1955 goto error;
1956
1957 if (do_read_u32(ff, &nr))
1958 goto error;
1959
1960 if (ff->ph->needs_swap)
1961 evsel->needs_swap = true;
1962
1963 evsel->name = do_read_string(ff);
1964 if (!evsel->name)
1965 goto error;
1966
1967 if (!nr)
1968 continue;
1969
1970 id = calloc(nr, sizeof(*id));
1971 if (!id)
1972 goto error;
1973 evsel->core.ids = nr;
1974 evsel->core.id = id;
1975
1976 for (j = 0 ; j < nr; j++) {
1977 if (do_read_u64(ff, id))
1978 goto error;
1979 id++;
1980 }
1981 }
1982 out:
1983 free(buf);
1984 return events;
1985 error:
1986 free_event_desc(events);
1987 events = NULL;
1988 goto out;
1989 }
1990
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
1993 {
1994 return fprintf(fp, ", %s = %s", name, val);
1995 }
1996
static void print_event_desc(struct feat_fd *ff, FILE *fp)
1998 {
1999 struct evsel *evsel, *events;
2000 u32 j;
2001 u64 *id;
2002
2003 if (ff->events)
2004 events = ff->events;
2005 else
2006 events = read_event_desc(ff);
2007
2008 if (!events) {
2009 fprintf(fp, "# event desc: not available or unable to read\n");
2010 return;
2011 }
2012
2013 for (evsel = events; evsel->core.attr.size; evsel++) {
2014 fprintf(fp, "# event : name = %s, ", evsel->name);
2015
2016 if (evsel->core.ids) {
2017 fprintf(fp, ", id = {");
2018 for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
2019 if (j)
2020 fputc(',', fp);
2021 fprintf(fp, " %"PRIu64, *id);
2022 }
2023 fprintf(fp, " }");
2024 }
2025
2026 perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);
2027
2028 fputc('\n', fp);
2029 }
2030
2031 free_event_desc(events);
2032 ff->events = NULL;
2033 }
2034
static void print_total_mem(struct feat_fd *ff, FILE *fp)
2036 {
2037 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
2038 }
2039
static void print_numa_topology(struct feat_fd *ff, FILE *fp)
2041 {
2042 int i;
2043 struct numa_node *n;
2044
2045 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
2046 n = &ff->ph->env.numa_nodes[i];
2047
2048 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
2049 " free = %"PRIu64" kB\n",
2050 n->node, n->mem_total, n->mem_free);
2051
2052 fprintf(fp, "# node%u cpu list : ", n->node);
2053 cpu_map__fprintf(n->map, fp);
2054 }
2055 }
2056
static void print_cpuid(struct feat_fd *ff, FILE *fp)
2058 {
2059 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
2060 }
2061
static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
2063 {
2064 fprintf(fp, "# contains samples with branch stack\n");
2065 }
2066
static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
2068 {
2069 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
2070 }
2071
static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
2073 {
2074 fprintf(fp, "# contains stat data\n");
2075 }
2076
static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
2078 {
2079 int i;
2080
2081 fprintf(fp, "# CPU cache info:\n");
2082 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
2083 fprintf(fp, "# ");
2084 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
2085 }
2086 }
2087
static void print_compressed(struct feat_fd *ff, FILE *fp)
2089 {
2090 fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
2091 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
2092 ff->ph->env.comp_level, ff->ph->env.comp_ratio);
2093 }
2094
static void __print_pmu_caps(FILE *fp, int nr_caps, char **caps, char *pmu_name)
2096 {
2097 const char *delimiter = "";
2098 int i;
2099
2100 if (!nr_caps) {
2101 fprintf(fp, "# %s pmu capabilities: not available\n", pmu_name);
2102 return;
2103 }
2104
2105 fprintf(fp, "# %s pmu capabilities: ", pmu_name);
2106 for (i = 0; i < nr_caps; i++) {
2107 fprintf(fp, "%s%s", delimiter, caps[i]);
2108 delimiter = ", ";
2109 }
2110
2111 fprintf(fp, "\n");
2112 }
2113
static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
2115 {
2116 __print_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps,
2117 ff->ph->env.cpu_pmu_caps, (char *)"cpu");
2118 }
2119
static void print_pmu_caps(struct feat_fd *ff, FILE *fp)
2121 {
2122 struct perf_env *env = &ff->ph->env;
2123 struct pmu_caps *pmu_caps;
2124
2125 for (int i = 0; i < env->nr_pmus_with_caps; i++) {
2126 pmu_caps = &env->pmu_caps[i];
2127 __print_pmu_caps(fp, pmu_caps->nr_caps, pmu_caps->caps,
2128 pmu_caps->pmu_name);
2129 }
2130
2131 if (strcmp(perf_env__arch(env), "x86") == 0 &&
2132 perf_env__has_pmu_mapping(env, "ibs_op")) {
2133 char *max_precise = perf_env__find_pmu_cap(env, "cpu", "max_precise");
2134
2135 if (max_precise != NULL && atoi(max_precise) == 0)
2136 fprintf(fp, "# AMD systems uses ibs_op// PMU for some precise events, e.g.: cycles:p, see the 'perf list' man page for further details.\n");
2137 }
2138 }
2139
2140 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
2141 {
2142 struct perf_env *env = &ff->ph->env;
2143 const char *delimiter = "# pmu mappings: ";
2144 char *str, *tmp;
2145 u32 pmu_num;
2146 u32 type;
2147
2148 pmu_num = env->nr_pmu_mappings;
2149 if (!pmu_num) {
2150 fprintf(fp, "# pmu mappings: not available\n");
2151 return;
2152 }
2153
2154 str = env->pmu_mappings;
2155
2156 while (pmu_num) {
2157 type = strtoul(str, &tmp, 0);
2158 if (*tmp != ':')
2159 goto error;
2160
2161 str = tmp + 1;
2162 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
2163
2164 delimiter = ", ";
2165 str += strlen(str) + 1;
2166 pmu_num--;
2167 }
2168
2169 fprintf(fp, "\n");
2170
2171 if (!pmu_num)
2172 return;
2173 error:
2174 fprintf(fp, "# pmu mappings: unable to read\n");
2175 }
2176
2177 static void print_group_desc(struct feat_fd *ff, FILE *fp)
2178 {
2179 struct perf_session *session;
2180 struct evsel *evsel;
2181 u32 nr = 0;
2182
2183 session = container_of(ff->ph, struct perf_session, header);
2184
2185 evlist__for_each_entry(session->evlist, evsel) {
2186 if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
2187 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", evsel__name(evsel));
2188
2189 nr = evsel->core.nr_members - 1;
2190 } else if (nr) {
2191 fprintf(fp, ",%s", evsel__name(evsel));
2192
2193 if (--nr == 0)
2194 fprintf(fp, "}\n");
2195 }
2196 }
2197 }
2198
2199 static void print_sample_time(struct feat_fd *ff, FILE *fp)
2200 {
2201 struct perf_session *session;
2202 char time_buf[32];
2203 double d;
2204
2205 session = container_of(ff->ph, struct perf_session, header);
2206
2207 timestamp__scnprintf_usec(session->evlist->first_sample_time,
2208 time_buf, sizeof(time_buf));
2209 fprintf(fp, "# time of first sample : %s\n", time_buf);
2210
2211 timestamp__scnprintf_usec(session->evlist->last_sample_time,
2212 time_buf, sizeof(time_buf));
2213 fprintf(fp, "# time of last sample : %s\n", time_buf);
2214
2215 d = (double)(session->evlist->last_sample_time -
2216 session->evlist->first_sample_time) / NSEC_PER_MSEC;
2217
2218 fprintf(fp, "# sample duration : %10.3f ms\n", d);
2219 }
2220
2221 static void memory_node__fprintf(struct memory_node *n,
2222 unsigned long long bsize, FILE *fp)
2223 {
2224 char buf_map[100], buf_size[50];
2225 unsigned long long size;
2226
2227 size = bsize * bitmap_weight(n->set, n->size);
2228 unit_number__scnprintf(buf_size, 50, size);
2229
2230 bitmap_scnprintf(n->set, n->size, buf_map, 100);
2231 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
2232 }
2233
2234 static void print_mem_topology(struct feat_fd *ff, FILE *fp)
2235 {
2236 struct perf_env *env = &ff->ph->env;
2237 struct memory_node *nodes;
2238 int i, nr;
2239
2240 nodes = env->memory_nodes;
2241 nr = env->nr_memory_nodes;
2242
2243 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
2244 nr, env->memory_bsize);
2245
2246 for (i = 0; i < nr; i++) {
2247 memory_node__fprintf(&nodes[i], env->memory_bsize, fp);
2248 }
2249 }
2250
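/*
 * Handle one build-id table entry: look up (or create) the DSO for 'filename'
 * on the machine identified by bev->pid, record the build-id carried in the
 * event and, for kernel/module entries, fill in module info and mark the DSO
 * as kernel space.
 */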
2251 static int __event_process_build_id(struct perf_record_header_build_id *bev,
2252 char *filename,
2253 struct perf_session *session)
2254 {
2255 int err = -1;
2256 struct machine *machine;
2257 u16 cpumode;
2258 struct dso *dso;
2259 enum dso_space_type dso_space;
2260
2261 machine = perf_session__findnew_machine(session, bev->pid);
2262 if (!machine)
2263 goto out;
2264
2265 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2266
2267 switch (cpumode) {
2268 case PERF_RECORD_MISC_KERNEL:
2269 dso_space = DSO_SPACE__KERNEL;
2270 break;
2271 case PERF_RECORD_MISC_GUEST_KERNEL:
2272 dso_space = DSO_SPACE__KERNEL_GUEST;
2273 break;
2274 case PERF_RECORD_MISC_USER:
2275 case PERF_RECORD_MISC_GUEST_USER:
2276 dso_space = DSO_SPACE__USER;
2277 break;
2278 default:
2279 goto out;
2280 }
2281
2282 dso = machine__findnew_dso(machine, filename);
2283 if (dso != NULL) {
2284 char sbuild_id[SBUILD_ID_SIZE];
2285 struct build_id bid;
2286 size_t size = BUILD_ID_SIZE;
2287
2288 if (bev->header.misc & PERF_RECORD_MISC_BUILD_ID_SIZE)
2289 size = bev->size;
2290
2291 build_id__init(&bid, bev->data, size);
2292 dso__set_build_id(dso, &bid);
2293 dso__set_header_build_id(dso, true);
2294
2295 if (dso_space != DSO_SPACE__USER) {
2296 struct kmod_path m = { .name = NULL, };
2297
2298 if (!kmod_path__parse_name(&m, filename) && m.kmod)
2299 dso__set_module_info(dso, &m, machine);
2300
2301 dso__set_kernel(dso, dso_space);
2302 free(m.name);
2303 }
2304
2305 build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
2306 pr_debug("build id event received for %s: %s [%zu]\n",
2307 dso__long_name(dso), sbuild_id, size);
2308 dso__put(dso);
2309 }
2310
2311 err = 0;
2312 out:
2313 return err;
2314 }
2315
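/*
 * Re-read the build-id table assuming the old, pre-a1645ce1 layout that had
 * no pid field.  The pid is reconstructed from header.misc (host vs. guest)
 * before each entry is handed to __event_process_build_id().
 */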
2316 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
2317 int input, u64 offset, u64 size)
2318 {
2319 struct perf_session *session = container_of(header, struct perf_session, header);
2320 struct {
2321 struct perf_event_header header;
2322 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
2323 char filename[0];
2324 } old_bev;
2325 struct perf_record_header_build_id bev;
2326 char filename[PATH_MAX];
2327 u64 limit = offset + size;
2328
2329 while (offset < limit) {
2330 ssize_t len;
2331
2332 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
2333 return -1;
2334
2335 if (header->needs_swap)
2336 perf_event_header__bswap(&old_bev.header);
2337
2338 len = old_bev.header.size - sizeof(old_bev);
2339 if (readn(input, filename, len) != len)
2340 return -1;
2341
2342 bev.header = old_bev.header;
2343
2344 /*
2345 * As the pid is the missing value, we need to fill
2346 * it properly. The header.misc value gives us a nice hint.
2347 */
2348 bev.pid = HOST_KERNEL_ID;
2349 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
2350 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
2351 bev.pid = DEFAULT_GUEST_KERNEL_ID;
2352
2353 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
2354 __event_process_build_id(&bev, filename, session);
2355
2356 offset += bev.header.size;
2357 }
2358
2359 return 0;
2360 }
2361
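/*
 * Process the HEADER_BUILD_ID feature section.  If the table turns out to use
 * the old pid-less layout (detected below via the truncated '[kernel.kallsyms]'
 * string), rewind and parse it with the ABI quirk reader instead.
 */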
2362 static int perf_header__read_build_ids(struct perf_header *header,
2363 int input, u64 offset, u64 size)
2364 {
2365 struct perf_session *session = container_of(header, struct perf_session, header);
2366 struct perf_record_header_build_id bev;
2367 char filename[PATH_MAX];
2368 u64 limit = offset + size, orig_offset = offset;
2369 int err = -1;
2370
2371 while (offset < limit) {
2372 ssize_t len;
2373
2374 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
2375 goto out;
2376
2377 if (header->needs_swap)
2378 perf_event_header__bswap(&bev.header);
2379
2380 len = bev.header.size - sizeof(bev);
2381 if (readn(input, filename, len) != len)
2382 goto out;
2383 /*
2384 * The a1645ce1 changeset:
2385 *
2386 * "perf: 'perf kvm' tool for monitoring guest performance from host"
2387 *
2388 * Added a field to struct perf_record_header_build_id that broke the file
2389 * format.
2390 *
2391 * Since the kernel build-id is the first entry, process the
2392 * table using the old format if the well known
2393 * '[kernel.kallsyms]' string for the kernel build-id has the
2394 * first 4 characters chopped off (where the pid_t sits).
2395 */
2396 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2397 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
2398 return -1;
2399 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
2400 }
2401
2402 __event_process_build_id(&bev, filename, session);
2403
2404 offset += bev.header.size;
2405 }
2406 err = 0;
2407 out:
2408 return err;
2409 }
2410
2411 /* Macro for features that simply need to read and store a string. */
2412 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
2413 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
2414 {\
2415 free(ff->ph->env.__feat_env); \
2416 ff->ph->env.__feat_env = do_read_string(ff); \
2417 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
2418 }
2419
2420 FEAT_PROCESS_STR_FUN(hostname, hostname);
2421 FEAT_PROCESS_STR_FUN(osrelease, os_release);
2422 FEAT_PROCESS_STR_FUN(version, version);
2423 FEAT_PROCESS_STR_FUN(arch, arch);
2424 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
2425 FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2426
2427 #ifdef HAVE_LIBTRACEEVENT
2428 static int process_tracing_data(struct feat_fd *ff, void *data)
2429 {
2430 ssize_t ret = trace_report(ff->fd, data, false);
2431
2432 return ret < 0 ? -1 : 0;
2433 }
2434 #endif
2435
2436 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
2437 {
2438 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
2439 pr_debug("Failed to read buildids, continuing...\n");
2440 return 0;
2441 }
2442
2443 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
2444 {
2445 struct perf_env *env = &ff->ph->env;
2446 int ret;
2447 u32 nr_cpus_avail, nr_cpus_online;
2448
2449 ret = do_read_u32(ff, &nr_cpus_avail);
2450 if (ret)
2451 return ret;
2452
2453 ret = do_read_u32(ff, &nr_cpus_online);
2454 if (ret)
2455 return ret;
2456 env->nr_cpus_avail = (int)nr_cpus_avail;
2457 env->nr_cpus_online = (int)nr_cpus_online;
2458 return 0;
2459 }
2460
2461 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
2462 {
2463 struct perf_env *env = &ff->ph->env;
2464 u64 total_mem;
2465 int ret;
2466
2467 ret = do_read_u64(ff, &total_mem);
2468 if (ret)
2469 return -1;
2470 env->total_mem = (unsigned long long)total_mem;
2471 return 0;
2472 }
2473
2474 static struct evsel *evlist__find_by_index(struct evlist *evlist, int idx)
2475 {
2476 struct evsel *evsel;
2477
2478 evlist__for_each_entry(evlist, evsel) {
2479 if (evsel->core.idx == idx)
2480 return evsel;
2481 }
2482
2483 return NULL;
2484 }
2485
2486 static void evlist__set_event_name(struct evlist *evlist, struct evsel *event)
2487 {
2488 struct evsel *evsel;
2489
2490 if (!event->name)
2491 return;
2492
2493 evsel = evlist__find_by_index(evlist, event->core.idx);
2494 if (!evsel)
2495 return;
2496
2497 if (evsel->name)
2498 return;
2499
2500 evsel->name = strdup(event->name);
2501 }
2502
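/*
 * Read the HEADER_EVENT_DESC feature and use the recorded names to name the
 * evsels in the session's evlist.  In pipe mode the parsed events are kept on
 * ff->events so print_event_desc() can use them later; otherwise they are
 * freed right away.
 */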
2503 static int
2504 process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
2505 {
2506 struct perf_session *session;
2507 struct evsel *evsel, *events = read_event_desc(ff);
2508
2509 if (!events)
2510 return 0;
2511
2512 session = container_of(ff->ph, struct perf_session, header);
2513
2514 if (session->data->is_pipe) {
2515 /* Save events for reading later by print_event_desc,
2516 * since they can't be read again in pipe mode. */
2517 ff->events = events;
2518 }
2519
2520 for (evsel = events; evsel->core.attr.size; evsel++)
2521 evlist__set_event_name(session->evlist, evsel);
2522
2523 if (!session->data->is_pipe)
2524 free_event_desc(events);
2525
2526 return 0;
2527 }
2528
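/*
 * Read the HEADER_CMDLINE feature: all arguments are packed back to back into
 * one buffer, while env->cmdline_argv keeps a pointer to the start of each of
 * them, giving both a flat command line and an argv-style view.
 */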
2529 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
2530 {
2531 struct perf_env *env = &ff->ph->env;
2532 char *str, *cmdline = NULL, **argv = NULL;
2533 u32 nr, i, len = 0;
2534
2535 if (do_read_u32(ff, &nr))
2536 return -1;
2537
2538 env->nr_cmdline = nr;
2539
2540 cmdline = zalloc(ff->size + nr + 1);
2541 if (!cmdline)
2542 return -1;
2543
2544 argv = zalloc(sizeof(char *) * (nr + 1));
2545 if (!argv)
2546 goto error;
2547
2548 for (i = 0; i < nr; i++) {
2549 str = do_read_string(ff);
2550 if (!str)
2551 goto error;
2552
2553 argv[i] = cmdline + len;
2554 memcpy(argv[i], str, strlen(str) + 1);
2555 len += strlen(str) + 1;
2556 free(str);
2557 }
2558 env->cmdline = cmdline;
2559 env->cmdline_argv = (const char **) argv;
2560 return 0;
2561
2562 error:
2563 free(argv);
2564 free(cmdline);
2565 return -1;
2566 }
2567
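/*
 * Read the HEADER_CPU_TOPOLOGY feature.  The section grew over time: sibling
 * core and thread lists came first, per-CPU core/socket ids were added later
 * and die information later still, so each newer part is read only if the
 * recorded section size says it is present.
 */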
2568 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
2569 {
2570 u32 nr, i;
2571 char *str = NULL;
2572 struct strbuf sb;
2573 struct perf_env *env = &ff->ph->env;
2574 int cpu_nr = env->nr_cpus_avail;
2575 u64 size = 0;
2576
2577 env->cpu = calloc(cpu_nr, sizeof(*env->cpu));
2578 if (!env->cpu)
2579 return -1;
2580
2581 if (do_read_u32(ff, &nr))
2582 goto free_cpu;
2583
2584 env->nr_sibling_cores = nr;
2585 size += sizeof(u32);
2586 if (strbuf_init(&sb, 128) < 0)
2587 goto free_cpu;
2588
2589 for (i = 0; i < nr; i++) {
2590 str = do_read_string(ff);
2591 if (!str)
2592 goto error;
2593
2594 /* include a NULL character at the end */
2595 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2596 goto error;
2597 size += string_size(str);
2598 zfree(&str);
2599 }
2600 env->sibling_cores = strbuf_detach(&sb, NULL);
2601
2602 if (do_read_u32(ff, &nr))
2603 return -1;
2604
2605 env->nr_sibling_threads = nr;
2606 size += sizeof(u32);
2607
2608 for (i = 0; i < nr; i++) {
2609 str = do_read_string(ff);
2610 if (!str)
2611 goto error;
2612
2613 /* include a NULL character at the end */
2614 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2615 goto error;
2616 size += string_size(str);
2617 zfree(&str);
2618 }
2619 env->sibling_threads = strbuf_detach(&sb, NULL);
2620
2621 /*
2622 * The header may be from old perf,
2623 * which doesn't include core id and socket id information.
2624 */
2625 if (ff->size <= size) {
2626 zfree(&env->cpu);
2627 return 0;
2628 }
2629
2630 for (i = 0; i < (u32)cpu_nr; i++) {
2631 if (do_read_u32(ff, &nr))
2632 goto free_cpu;
2633
2634 env->cpu[i].core_id = nr;
2635 size += sizeof(u32);
2636
2637 if (do_read_u32(ff, &nr))
2638 goto free_cpu;
2639
2640 env->cpu[i].socket_id = nr;
2641 size += sizeof(u32);
2642 }
2643
2644 /*
2645 * The header may be from old perf,
2646 * which doesn't include die information.
2647 */
2648 if (ff->size <= size)
2649 return 0;
2650
2651 if (do_read_u32(ff, &nr))
2652 return -1;
2653
2654 env->nr_sibling_dies = nr;
2655 size += sizeof(u32);
2656
2657 for (i = 0; i < nr; i++) {
2658 str = do_read_string(ff);
2659 if (!str)
2660 goto error;
2661
2662 /* include a NULL character at the end */
2663 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2664 goto error;
2665 size += string_size(str);
2666 zfree(&str);
2667 }
2668 env->sibling_dies = strbuf_detach(&sb, NULL);
2669
2670 for (i = 0; i < (u32)cpu_nr; i++) {
2671 if (do_read_u32(ff, &nr))
2672 goto free_cpu;
2673
2674 env->cpu[i].die_id = nr;
2675 }
2676
2677 return 0;
2678
2679 error:
2680 strbuf_release(&sb);
2681 zfree(&str);
2682 free_cpu:
2683 zfree(&env->cpu);
2684 return -1;
2685 }
2686
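/*
 * Read the HEADER_NUMA_TOPOLOGY feature: per-node memory totals plus the CPU
 * map describing which CPUs belong to each node.
 */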
2687 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
2688 {
2689 struct perf_env *env = &ff->ph->env;
2690 struct numa_node *nodes, *n;
2691 u32 nr, i;
2692 char *str;
2693
2694 /* nr nodes */
2695 if (do_read_u32(ff, &nr))
2696 return -1;
2697
2698 nodes = zalloc(sizeof(*nodes) * nr);
2699 if (!nodes)
2700 return -ENOMEM;
2701
2702 for (i = 0; i < nr; i++) {
2703 n = &nodes[i];
2704
2705 /* node number */
2706 if (do_read_u32(ff, &n->node))
2707 goto error;
2708
2709 if (do_read_u64(ff, &n->mem_total))
2710 goto error;
2711
2712 if (do_read_u64(ff, &n->mem_free))
2713 goto error;
2714
2715 str = do_read_string(ff);
2716 if (!str)
2717 goto error;
2718
2719 n->map = perf_cpu_map__new(str);
2720 free(str);
2721 if (!n->map)
2722 goto error;
2723 }
2724 env->nr_numa_nodes = nr;
2725 env->numa_nodes = nodes;
2726 return 0;
2727
2728 error:
2729 free(nodes);
2730 return -1;
2731 }
2732
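/*
 * Read the HEADER_PMU_MAPPINGS feature into the packed "type:name" string
 * format that print_pmu_mappings() expects, remembering the msr PMU type on
 * the way.
 */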
2733 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
2734 {
2735 struct perf_env *env = &ff->ph->env;
2736 char *name;
2737 u32 pmu_num;
2738 u32 type;
2739 struct strbuf sb;
2740
2741 if (do_read_u32(ff, &pmu_num))
2742 return -1;
2743
2744 if (!pmu_num) {
2745 pr_debug("pmu mappings not available\n");
2746 return 0;
2747 }
2748
2749 env->nr_pmu_mappings = pmu_num;
2750 if (strbuf_init(&sb, 128) < 0)
2751 return -1;
2752
2753 while (pmu_num) {
2754 if (do_read_u32(ff, &type))
2755 goto error;
2756
2757 name = do_read_string(ff);
2758 if (!name)
2759 goto error;
2760
2761 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2762 goto error;
2763 /* include a NULL character at the end */
2764 if (strbuf_add(&sb, "", 1) < 0)
2765 goto error;
2766
2767 if (!strcmp(name, "msr"))
2768 env->msr_pmu_type = type;
2769
2770 free(name);
2771 pmu_num--;
2772 }
2773 /* AMD may set it by evlist__has_amd_ibs() from perf_session__new() */
2774 free(env->pmu_mappings);
2775 env->pmu_mappings = strbuf_detach(&sb, NULL);
2776 return 0;
2777
2778 error:
2779 strbuf_release(&sb);
2780 return -1;
2781 }
2782
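/*
 * Read the HEADER_GROUP_DESC feature and rebuild the event grouping: the evsel
 * matching each recorded leader index becomes a group leader and the following
 * nr_members - 1 evsels are attached to it.
 */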
2783 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2784 {
2785 struct perf_env *env = &ff->ph->env;
2786 size_t ret = -1;
2787 u32 i, nr, nr_groups;
2788 struct perf_session *session;
2789 struct evsel *evsel, *leader = NULL;
2790 struct group_desc {
2791 char *name;
2792 u32 leader_idx;
2793 u32 nr_members;
2794 } *desc;
2795
2796 if (do_read_u32(ff, &nr_groups))
2797 return -1;
2798
2799 env->nr_groups = nr_groups;
2800 if (!nr_groups) {
2801 pr_debug("group desc not available\n");
2802 return 0;
2803 }
2804
2805 desc = calloc(nr_groups, sizeof(*desc));
2806 if (!desc)
2807 return -1;
2808
2809 for (i = 0; i < nr_groups; i++) {
2810 desc[i].name = do_read_string(ff);
2811 if (!desc[i].name)
2812 goto out_free;
2813
2814 if (do_read_u32(ff, &desc[i].leader_idx))
2815 goto out_free;
2816
2817 if (do_read_u32(ff, &desc[i].nr_members))
2818 goto out_free;
2819 }
2820
2821 /*
2822 * Rebuild group relationship based on the group_desc
2823 */
2824 session = container_of(ff->ph, struct perf_session, header);
2825
2826 i = nr = 0;
2827 evlist__for_each_entry(session->evlist, evsel) {
2828 if (i < nr_groups && evsel->core.idx == (int) desc[i].leader_idx) {
2829 evsel__set_leader(evsel, evsel);
2830 /* {anon_group} is a dummy name */
2831 if (strcmp(desc[i].name, "{anon_group}")) {
2832 evsel->group_name = desc[i].name;
2833 desc[i].name = NULL;
2834 }
2835 evsel->core.nr_members = desc[i].nr_members;
2836
2837 if (i >= nr_groups || nr > 0) {
2838 pr_debug("invalid group desc\n");
2839 goto out_free;
2840 }
2841
2842 leader = evsel;
2843 nr = evsel->core.nr_members - 1;
2844 i++;
2845 } else if (nr) {
2846 /* This is a group member */
2847 evsel__set_leader(evsel, leader);
2848
2849 nr--;
2850 }
2851 }
2852
2853 if (i != nr_groups || nr != 0) {
2854 pr_debug("invalid group desc\n");
2855 goto out_free;
2856 }
2857
2858 ret = 0;
2859 out_free:
2860 for (i = 0; i < nr_groups; i++)
2861 zfree(&desc[i].name);
2862 free(desc);
2863
2864 return ret;
2865 }
2866
2867 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2868 {
2869 struct perf_session *session;
2870 int err;
2871
2872 session = container_of(ff->ph, struct perf_session, header);
2873
2874 err = auxtrace_index__process(ff->fd, ff->size, session,
2875 ff->ph->needs_swap);
2876 if (err < 0)
2877 pr_err("Failed to process auxtrace index\n");
2878 return err;
2879 }
2880
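/*
 * Read the HEADER_CACHE feature (version 1 only): level/line_size/sets/ways as
 * u32s followed by the type, size and CPU map strings of each cache level.
 */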
2881 static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2882 {
2883 struct perf_env *env = &ff->ph->env;
2884 struct cpu_cache_level *caches;
2885 u32 cnt, i, version;
2886
2887 if (do_read_u32(ff, &version))
2888 return -1;
2889
2890 if (version != 1)
2891 return -1;
2892
2893 if (do_read_u32(ff, &cnt))
2894 return -1;
2895
2896 caches = zalloc(sizeof(*caches) * cnt);
2897 if (!caches)
2898 return -1;
2899
2900 for (i = 0; i < cnt; i++) {
2901 struct cpu_cache_level *c = &caches[i];
2902
2903 #define _R(v) \
2904 if (do_read_u32(ff, &c->v)) \
2905 goto out_free_caches; \
2906
2907 _R(level)
2908 _R(line_size)
2909 _R(sets)
2910 _R(ways)
2911 #undef _R
2912
2913 #define _R(v) \
2914 c->v = do_read_string(ff); \
2915 if (!c->v) \
2916 goto out_free_caches; \
2917
2918 _R(type)
2919 _R(size)
2920 _R(map)
2921 #undef _R
2922 }
2923
2924 env->caches = caches;
2925 env->caches_cnt = cnt;
2926 return 0;
2927 out_free_caches:
2928 for (i = 0; i < cnt; i++) {
2929 free(caches[i].type);
2930 free(caches[i].size);
2931 free(caches[i].map);
2932 }
2933 free(caches);
2934 return -1;
2935 }
2936
2937 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2938 {
2939 struct perf_session *session;
2940 u64 first_sample_time, last_sample_time;
2941 int ret;
2942
2943 session = container_of(ff->ph, struct perf_session, header);
2944
2945 ret = do_read_u64(ff, &first_sample_time);
2946 if (ret)
2947 return -1;
2948
2949 ret = do_read_u64(ff, &last_sample_time);
2950 if (ret)
2951 return -1;
2952
2953 session->evlist->first_sample_time = first_sample_time;
2954 session->evlist->last_sample_time = last_sample_time;
2955 return 0;
2956 }
2957
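/*
 * Read the HEADER_MEM_TOPOLOGY feature (version 1 only): the memory block size
 * followed by one node/size/bitmap triple per memory node.
 */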
2958 static int process_mem_topology(struct feat_fd *ff,
2959 void *data __maybe_unused)
2960 {
2961 struct perf_env *env = &ff->ph->env;
2962 struct memory_node *nodes;
2963 u64 version, i, nr, bsize;
2964 int ret = -1;
2965
2966 if (do_read_u64(ff, &version))
2967 return -1;
2968
2969 if (version != 1)
2970 return -1;
2971
2972 if (do_read_u64(ff, &bsize))
2973 return -1;
2974
2975 if (do_read_u64(ff, &nr))
2976 return -1;
2977
2978 nodes = zalloc(sizeof(*nodes) * nr);
2979 if (!nodes)
2980 return -1;
2981
2982 for (i = 0; i < nr; i++) {
2983 struct memory_node n;
2984
2985 #define _R(v) \
2986 if (do_read_u64(ff, &n.v)) \
2987 goto out; \
2988
2989 _R(node)
2990 _R(size)
2991
2992 #undef _R
2993
2994 if (do_read_bitmap(ff, &n.set, &n.size))
2995 goto out;
2996
2997 nodes[i] = n;
2998 }
2999
3000 env->memory_bsize = bsize;
3001 env->memory_nodes = nodes;
3002 env->nr_memory_nodes = nr;
3003 ret = 0;
3004
3005 out:
3006 if (ret)
3007 free(nodes);
3008 return ret;
3009 }
3010
3011 static int process_clockid(struct feat_fd *ff,
3012 void *data __maybe_unused)
3013 {
3014 struct perf_env *env = &ff->ph->env;
3015
3016 if (do_read_u64(ff, &env->clock.clockid_res_ns))
3017 return -1;
3018
3019 return 0;
3020 }
3021
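/*
 * Read the HEADER_CLOCK_DATA feature: the clockid the session was recorded
 * with plus a pair of reference timestamps (TOD and session clock) that allow
 * correlating perf timestamps with wall-clock time.
 */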
3022 static int process_clock_data(struct feat_fd *ff,
3023 void *_data __maybe_unused)
3024 {
3025 struct perf_env *env = &ff->ph->env;
3026 u32 data32;
3027 u64 data64;
3028
3029 /* version */
3030 if (do_read_u32(ff, &data32))
3031 return -1;
3032
3033 if (data32 != 1)
3034 return -1;
3035
3036 /* clockid */
3037 if (do_read_u32(ff, &data32))
3038 return -1;
3039
3040 env->clock.clockid = data32;
3041
3042 /* TOD ref time */
3043 if (do_read_u64(ff, &data64))
3044 return -1;
3045
3046 env->clock.tod_ns = data64;
3047
3048 /* clockid ref time */
3049 if (do_read_u64(ff, &data64))
3050 return -1;
3051
3052 env->clock.clockid_ns = data64;
3053 env->clock.enabled = true;
3054 return 0;
3055 }
3056
3057 static int process_hybrid_topology(struct feat_fd *ff,
3058 void *data __maybe_unused)
3059 {
3060 struct perf_env *env = &ff->ph->env;
3061 struct hybrid_node *nodes, *n;
3062 u32 nr, i;
3063
3064 /* nr nodes */
3065 if (do_read_u32(ff, &nr))
3066 return -1;
3067
3068 nodes = zalloc(sizeof(*nodes) * nr);
3069 if (!nodes)
3070 return -ENOMEM;
3071
3072 for (i = 0; i < nr; i++) {
3073 n = &nodes[i];
3074
3075 n->pmu_name = do_read_string(ff);
3076 if (!n->pmu_name)
3077 goto error;
3078
3079 n->cpus = do_read_string(ff);
3080 if (!n->cpus)
3081 goto error;
3082 }
3083
3084 env->nr_hybrid_nodes = nr;
3085 env->hybrid_nodes = nodes;
3086 return 0;
3087
3088 error:
3089 for (i = 0; i < nr; i++) {
3090 free(nodes[i].pmu_name);
3091 free(nodes[i].cpus);
3092 }
3093
3094 free(nodes);
3095 return -1;
3096 }
3097
3098 static int process_dir_format(struct feat_fd *ff,
3099 void *_data __maybe_unused)
3100 {
3101 struct perf_session *session;
3102 struct perf_data *data;
3103
3104 session = container_of(ff->ph, struct perf_session, header);
3105 data = session->data;
3106
3107 if (WARN_ON(!perf_data__is_dir(data)))
3108 return -1;
3109
3110 return do_read_u64(ff, &data->dir.version);
3111 }
3112
3113 #ifdef HAVE_LIBBPF_SUPPORT
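/*
 * Read the HEADER_BPF_PROG_INFO feature: each entry is a variable-sized
 * perf_bpil blob (bpf_prog_info plus its arrays) that is rebuilt in memory and
 * inserted into the env's BPF program table.  Cross-endian data is not handled
 * yet and is skipped with a warning.
 */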
3114 static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
3115 {
3116 struct bpf_prog_info_node *info_node;
3117 struct perf_env *env = &ff->ph->env;
3118 struct perf_bpil *info_linear;
3119 u32 count, i;
3120 int err = -1;
3121
3122 if (ff->ph->needs_swap) {
3123 pr_warning("interpreting bpf_prog_info from systems with endianness is not yet supported\n");
3124 return 0;
3125 }
3126
3127 if (do_read_u32(ff, &count))
3128 return -1;
3129
3130 down_write(&env->bpf_progs.lock);
3131
3132 for (i = 0; i < count; ++i) {
3133 u32 info_len, data_len;
3134
3135 info_linear = NULL;
3136 info_node = NULL;
3137 if (do_read_u32(ff, &info_len))
3138 goto out;
3139 if (do_read_u32(ff, &data_len))
3140 goto out;
3141
3142 if (info_len > sizeof(struct bpf_prog_info)) {
3143 pr_warning("detected invalid bpf_prog_info\n");
3144 goto out;
3145 }
3146
3147 info_linear = malloc(sizeof(struct perf_bpil) +
3148 data_len);
3149 if (!info_linear)
3150 goto out;
3151 info_linear->info_len = sizeof(struct bpf_prog_info);
3152 info_linear->data_len = data_len;
3153 if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
3154 goto out;
3155 if (__do_read(ff, &info_linear->info, info_len))
3156 goto out;
3157 if (info_len < sizeof(struct bpf_prog_info))
3158 memset(((void *)(&info_linear->info)) + info_len, 0,
3159 sizeof(struct bpf_prog_info) - info_len);
3160
3161 if (__do_read(ff, info_linear->data, data_len))
3162 goto out;
3163
3164 info_node = malloc(sizeof(struct bpf_prog_info_node));
3165 if (!info_node)
3166 goto out;
3167
3168 /* after reading from file, translate offset to address */
3169 bpil_offs_to_addr(info_linear);
3170 info_node->info_linear = info_linear;
3171 info_node->metadata = NULL;
3172 if (!__perf_env__insert_bpf_prog_info(env, info_node)) {
3173 free(info_linear);
3174 free(info_node);
3175 }
3176 }
3177
3178 up_write(&env->bpf_progs.lock);
3179 return 0;
3180 out:
3181 free(info_linear);
3182 free(info_node);
3183 up_write(&env->bpf_progs.lock);
3184 return err;
3185 }
3186
3187 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
3188 {
3189 struct perf_env *env = &ff->ph->env;
3190 struct btf_node *node = NULL;
3191 u32 count, i;
3192 int err = -1;
3193
3194 if (ff->ph->needs_swap) {
3195 pr_warning("interpreting btf from systems with endianness is not yet supported\n");
3196 return 0;
3197 }
3198
3199 if (do_read_u32(ff, &count))
3200 return -1;
3201
3202 down_write(&env->bpf_progs.lock);
3203
3204 for (i = 0; i < count; ++i) {
3205 u32 id, data_size;
3206
3207 if (do_read_u32(ff, &id))
3208 goto out;
3209 if (do_read_u32(ff, &data_size))
3210 goto out;
3211
3212 node = malloc(sizeof(struct btf_node) + data_size);
3213 if (!node)
3214 goto out;
3215
3216 node->id = id;
3217 node->data_size = data_size;
3218
3219 if (__do_read(ff, node->data, data_size))
3220 goto out;
3221
3222 if (!__perf_env__insert_btf(env, node))
3223 free(node);
3224 node = NULL;
3225 }
3226
3227 err = 0;
3228 out:
3229 up_write(&env->bpf_progs.lock);
3230 free(node);
3231 return err;
3232 }
3233 #endif // HAVE_LIBBPF_SUPPORT
3234
3235 static int process_compressed(struct feat_fd *ff,
3236 void *data __maybe_unused)
3237 {
3238 struct perf_env *env = &ff->ph->env;
3239
3240 if (do_read_u32(ff, &(env->comp_ver)))
3241 return -1;
3242
3243 if (do_read_u32(ff, &(env->comp_type)))
3244 return -1;
3245
3246 if (do_read_u32(ff, &(env->comp_level)))
3247 return -1;
3248
3249 if (do_read_u32(ff, &(env->comp_ratio)))
3250 return -1;
3251
3252 if (do_read_u32(ff, &(env->comp_mmap_len)))
3253 return -1;
3254
3255 return 0;
3256 }
3257
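/*
 * Read one block of PMU capabilities as "name=value" strings.  A few well-known
 * capabilities (branches, branch_counter_nr, branch_counter_width) are also
 * parsed into their dedicated fields.
 */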
3258 static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps,
3259 char ***caps, unsigned int *max_branches,
3260 unsigned int *br_cntr_nr,
3261 unsigned int *br_cntr_width)
3262 {
3263 char *name, *value, *ptr;
3264 u32 nr_pmu_caps, i;
3265
3266 *nr_caps = 0;
3267 *caps = NULL;
3268
3269 if (do_read_u32(ff, &nr_pmu_caps))
3270 return -1;
3271
3272 if (!nr_pmu_caps)
3273 return 0;
3274
3275 *caps = zalloc(sizeof(char *) * nr_pmu_caps);
3276 if (!*caps)
3277 return -1;
3278
3279 for (i = 0; i < nr_pmu_caps; i++) {
3280 name = do_read_string(ff);
3281 if (!name)
3282 goto error;
3283
3284 value = do_read_string(ff);
3285 if (!value)
3286 goto free_name;
3287
3288 if (asprintf(&ptr, "%s=%s", name, value) < 0)
3289 goto free_value;
3290
3291 (*caps)[i] = ptr;
3292
3293 if (!strcmp(name, "branches"))
3294 *max_branches = atoi(value);
3295
3296 if (!strcmp(name, "branch_counter_nr"))
3297 *br_cntr_nr = atoi(value);
3298
3299 if (!strcmp(name, "branch_counter_width"))
3300 *br_cntr_width = atoi(value);
3301
3302 free(value);
3303 free(name);
3304 }
3305 *nr_caps = nr_pmu_caps;
3306 return 0;
3307
3308 free_value:
3309 free(value);
3310 free_name:
3311 free(name);
3312 error:
3313 for (; i > 0; i--)
3314 free((*caps)[i - 1]);
3315 free(*caps);
3316 *caps = NULL;
3317 *nr_caps = 0;
3318 return -1;
3319 }
3320
3321 static int process_cpu_pmu_caps(struct feat_fd *ff,
3322 void *data __maybe_unused)
3323 {
3324 struct perf_env *env = &ff->ph->env;
3325 int ret = __process_pmu_caps(ff, &env->nr_cpu_pmu_caps,
3326 &env->cpu_pmu_caps,
3327 &env->max_branches,
3328 &env->br_cntr_nr,
3329 &env->br_cntr_width);
3330
3331 if (!ret && !env->cpu_pmu_caps)
3332 pr_debug("cpu pmu capabilities not available\n");
3333 return ret;
3334 }
3335
3336 static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused)
3337 {
3338 struct perf_env *env = &ff->ph->env;
3339 struct pmu_caps *pmu_caps;
3340 u32 nr_pmu, i;
3341 int ret;
3342 int j;
3343
3344 if (do_read_u32(ff, &nr_pmu))
3345 return -1;
3346
3347 if (!nr_pmu) {
3348 pr_debug("pmu capabilities not available\n");
3349 return 0;
3350 }
3351
3352 pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu);
3353 if (!pmu_caps)
3354 return -ENOMEM;
3355
3356 for (i = 0; i < nr_pmu; i++) {
3357 ret = __process_pmu_caps(ff, &pmu_caps[i].nr_caps,
3358 &pmu_caps[i].caps,
3359 &pmu_caps[i].max_branches,
3360 &pmu_caps[i].br_cntr_nr,
3361 &pmu_caps[i].br_cntr_width);
3362 if (ret)
3363 goto err;
3364
3365 pmu_caps[i].pmu_name = do_read_string(ff);
3366 if (!pmu_caps[i].pmu_name) {
3367 ret = -1;
3368 goto err;
3369 }
3370 if (!pmu_caps[i].nr_caps) {
3371 pr_debug("%s pmu capabilities not available\n",
3372 pmu_caps[i].pmu_name);
3373 }
3374 }
3375
3376 env->nr_pmus_with_caps = nr_pmu;
3377 env->pmu_caps = pmu_caps;
3378 return 0;
3379
3380 err:
3381 for (i = 0; i < nr_pmu; i++) {
3382 for (j = 0; j < pmu_caps[i].nr_caps; j++)
3383 free(pmu_caps[i].caps[j]);
3384 free(pmu_caps[i].caps);
3385 free(pmu_caps[i].pmu_name);
3386 }
3387
3388 free(pmu_caps);
3389 return ret;
3390 }
3391
3392 #define FEAT_OPR(n, func, __full_only) \
3393 [HEADER_##n] = { \
3394 .name = __stringify(n), \
3395 .write = write_##func, \
3396 .print = print_##func, \
3397 .full_only = __full_only, \
3398 .process = process_##func, \
3399 .synthesize = true \
3400 }
3401
3402 #define FEAT_OPN(n, func, __full_only) \
3403 [HEADER_##n] = { \
3404 .name = __stringify(n), \
3405 .write = write_##func, \
3406 .print = print_##func, \
3407 .full_only = __full_only, \
3408 .process = process_##func \
3409 }
3410
3411 /* feature_ops not implemented: */
3412 #define print_tracing_data NULL
3413 #define print_build_id NULL
3414
3415 #define process_branch_stack NULL
3416 #define process_stat NULL
3417
3418 // Only used in util/synthetic-events.c
3419 const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
3420
3421 const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
3422 #ifdef HAVE_LIBTRACEEVENT
3423 FEAT_OPN(TRACING_DATA, tracing_data, false),
3424 #endif
3425 FEAT_OPN(BUILD_ID, build_id, false),
3426 FEAT_OPR(HOSTNAME, hostname, false),
3427 FEAT_OPR(OSRELEASE, osrelease, false),
3428 FEAT_OPR(VERSION, version, false),
3429 FEAT_OPR(ARCH, arch, false),
3430 FEAT_OPR(NRCPUS, nrcpus, false),
3431 FEAT_OPR(CPUDESC, cpudesc, false),
3432 FEAT_OPR(CPUID, cpuid, false),
3433 FEAT_OPR(TOTAL_MEM, total_mem, false),
3434 FEAT_OPR(EVENT_DESC, event_desc, false),
3435 FEAT_OPR(CMDLINE, cmdline, false),
3436 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
3437 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
3438 FEAT_OPN(BRANCH_STACK, branch_stack, false),
3439 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
3440 FEAT_OPR(GROUP_DESC, group_desc, false),
3441 FEAT_OPN(AUXTRACE, auxtrace, false),
3442 FEAT_OPN(STAT, stat, false),
3443 FEAT_OPN(CACHE, cache, true),
3444 FEAT_OPR(SAMPLE_TIME, sample_time, false),
3445 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
3446 FEAT_OPR(CLOCKID, clockid, false),
3447 FEAT_OPN(DIR_FORMAT, dir_format, false),
3448 #ifdef HAVE_LIBBPF_SUPPORT
3449 FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
3450 FEAT_OPR(BPF_BTF, bpf_btf, false),
3451 #endif
3452 FEAT_OPR(COMPRESSED, compressed, false),
3453 FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false),
3454 FEAT_OPR(CLOCK_DATA, clock_data, false),
3455 FEAT_OPN(HYBRID_TOPOLOGY, hybrid_topology, true),
3456 FEAT_OPR(PMU_CAPS, pmu_caps, false),
3457 };
3458
3459 struct header_print_data {
3460 FILE *fp;
3461 bool full; /* extended list of headers */
3462 };
3463
3464 static int perf_file_section__fprintf_info(struct perf_file_section *section,
3465 struct perf_header *ph,
3466 int feat, int fd, void *data)
3467 {
3468 struct header_print_data *hd = data;
3469 struct feat_fd ff;
3470
3471 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
3472 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
3473 "%d, continuing...\n", section->offset, feat);
3474 return 0;
3475 }
3476 if (feat >= HEADER_LAST_FEATURE) {
3477 pr_warning("unknown feature %d\n", feat);
3478 return 0;
3479 }
3480 if (!feat_ops[feat].print)
3481 return 0;
3482
3483 ff = (struct feat_fd) {
3484 .fd = fd,
3485 .ph = ph,
3486 };
3487
3488 if (!feat_ops[feat].full_only || hd->full)
3489 feat_ops[feat].print(&ff, hd->fp);
3490 else
3491 fprintf(hd->fp, "# %s info available, use -I to display\n",
3492 feat_ops[feat].name);
3493
3494 return 0;
3495 }
3496
3497 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
3498 {
3499 struct header_print_data hd;
3500 struct perf_header *header = &session->header;
3501 int fd = perf_data__fd(session->data);
3502 struct stat st;
3503 time_t stctime;
3504 int ret, bit;
3505
3506 hd.fp = fp;
3507 hd.full = full;
3508
3509 ret = fstat(fd, &st);
3510 if (ret == -1)
3511 return -1;
3512
3513 stctime = st.st_mtime;
3514 fprintf(fp, "# captured on : %s", ctime(&stctime));
3515
3516 fprintf(fp, "# header version : %u\n", header->version);
3517 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
3518 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
3519 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
3520
3521 perf_header__process_sections(header, fd, &hd,
3522 perf_file_section__fprintf_info);
3523
3524 if (session->data->is_pipe)
3525 return 0;
3526
3527 fprintf(fp, "# missing features: ");
3528 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
3529 if (bit)
3530 fprintf(fp, "%s ", feat_ops[bit].name);
3531 }
3532
3533 fprintf(fp, "\n");
3534 return 0;
3535 }
3536
3537 struct header_fw {
3538 struct feat_writer fw;
3539 struct feat_fd *ff;
3540 };
3541
3542 static int feat_writer_cb(struct feat_writer *fw, void *buf, size_t sz)
3543 {
3544 struct header_fw *h = container_of(fw, struct header_fw, fw);
3545
3546 return do_write(h->ff, buf, sz);
3547 }
3548
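/*
 * Write a single feature section: record its file offset, let an optional
 * feat_copier supply the content (used by 'perf inject'), fall back to the
 * feature's own ->write() when nothing was copied, and store the resulting
 * offset/size pair in the section table entry.
 */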
3549 static int do_write_feat(struct feat_fd *ff, int type,
3550 struct perf_file_section **p,
3551 struct evlist *evlist,
3552 struct feat_copier *fc)
3553 {
3554 int err;
3555 int ret = 0;
3556
3557 if (perf_header__has_feat(ff->ph, type)) {
3558 if (!feat_ops[type].write)
3559 return -1;
3560
3561 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
3562 return -1;
3563
3564 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
3565
3566 /*
3567 * Hook to let 'perf inject' copy feature sections from the input
3568 * file.
3569 */
3570 if (fc && fc->copy) {
3571 struct header_fw h = {
3572 .fw.write = feat_writer_cb,
3573 .ff = ff,
3574 };
3575
3576 /* ->copy() returns 0 if the feature was not copied */
3577 err = fc->copy(fc, type, &h.fw);
3578 } else {
3579 err = 0;
3580 }
3581 if (!err)
3582 err = feat_ops[type].write(ff, evlist);
3583 if (err < 0) {
3584 pr_debug("failed to write feature %s\n", feat_ops[type].name);
3585
3586 /* undo anything written */
3587 lseek(ff->fd, (*p)->offset, SEEK_SET);
3588
3589 return -1;
3590 }
3591 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
3592 (*p)++;
3593 }
3594 return ret;
3595 }
3596
3597 static int perf_header__adds_write(struct perf_header *header,
3598 struct evlist *evlist, int fd,
3599 struct feat_copier *fc)
3600 {
3601 int nr_sections;
3602 struct feat_fd ff = {
3603 .fd = fd,
3604 .ph = header,
3605 };
3606 struct perf_file_section *feat_sec, *p;
3607 int sec_size;
3608 u64 sec_start;
3609 int feat;
3610 int err;
3611
3612 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3613 if (!nr_sections)
3614 return 0;
3615
3616 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
3617 if (feat_sec == NULL)
3618 return -ENOMEM;
3619
3620 sec_size = sizeof(*feat_sec) * nr_sections;
3621
3622 sec_start = header->feat_offset;
3623 lseek(fd, sec_start + sec_size, SEEK_SET);
3624
3625 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3626 if (do_write_feat(&ff, feat, &p, evlist, fc))
3627 perf_header__clear_feat(header, feat);
3628 }
3629
3630 lseek(fd, sec_start, SEEK_SET);
3631 /*
3632 * We may write more than needed due to dropped features, but
3633 * this is okay: the reader will skip the missing entries.
3634 */
3635 err = do_write(&ff, feat_sec, sec_size);
3636 if (err < 0)
3637 pr_debug("failed to write feature section\n");
3638 free(ff.buf); /* TODO: added to silence clang-tidy. */
3639 free(feat_sec);
3640 return err;
3641 }
3642
3643 int perf_header__write_pipe(int fd)
3644 {
3645 struct perf_pipe_file_header f_header;
3646 struct feat_fd ff = {
3647 .fd = fd,
3648 };
3649 int err;
3650
3651 f_header = (struct perf_pipe_file_header){
3652 .magic = PERF_MAGIC,
3653 .size = sizeof(f_header),
3654 };
3655
3656 err = do_write(&ff, &f_header, sizeof(f_header));
3657 if (err < 0) {
3658 pr_debug("failed to write perf pipe header\n");
3659 return err;
3660 }
3661 free(ff.buf);
3662 return 0;
3663 }
3664
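/*
 * Write the perf.data file header.  In the default layout the id arrays and
 * event attributes sit between the header and the data, with the feature
 * sections appended after the data at exit.  With write_attrs_after_data the
 * data starts right after the header and the feature sections and attributes
 * are only written out at exit, after the data.
 */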
3665 static int perf_session__do_write_header(struct perf_session *session,
3666 struct evlist *evlist,
3667 int fd, bool at_exit,
3668 struct feat_copier *fc,
3669 bool write_attrs_after_data)
3670 {
3671 struct perf_file_header f_header;
3672 struct perf_header *header = &session->header;
3673 struct evsel *evsel;
3674 struct feat_fd ff = {
3675 .ph = header,
3676 .fd = fd,
3677 };
3678 u64 attr_offset = sizeof(f_header), attr_size = 0;
3679 int err;
3680
3681 if (write_attrs_after_data && at_exit) {
3682 /*
3683 * Write features at the end of the file first so that
3684 * attributes may come after them.
3685 */
3686 if (!header->data_offset && header->data_size) {
3687 pr_err("File contains data but offset unknown\n");
3688 err = -1;
3689 goto err_out;
3690 }
3691 header->feat_offset = header->data_offset + header->data_size;
3692 err = perf_header__adds_write(header, evlist, fd, fc);
3693 if (err < 0)
3694 goto err_out;
3695 attr_offset = lseek(fd, 0, SEEK_CUR);
3696 } else {
3697 lseek(fd, attr_offset, SEEK_SET);
3698 }
3699
3700 evlist__for_each_entry(session->evlist, evsel) {
3701 evsel->id_offset = attr_offset;
3702 /* Avoid writing at the end of the file until the session is exiting. */
3703 if (!write_attrs_after_data || at_exit) {
3704 err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
3705 if (err < 0) {
3706 pr_debug("failed to write perf header\n");
3707 goto err_out;
3708 }
3709 }
3710 attr_offset += evsel->core.ids * sizeof(u64);
3711 }
3712
3713 evlist__for_each_entry(evlist, evsel) {
3714 if (evsel->core.attr.size < sizeof(evsel->core.attr)) {
3715 /*
3716 * We are likely in "perf inject" and have read
3717 * from an older file. Update the attr size so that the
3718 * reader gets the right offset to the ids.
3719 */
3720 evsel->core.attr.size = sizeof(evsel->core.attr);
3721 }
3722 /* Avoid writing at the end of the file until the session is exiting. */
3723 if (!write_attrs_after_data || at_exit) {
3724 struct perf_file_attr f_attr = {
3725 .attr = evsel->core.attr,
3726 .ids = {
3727 .offset = evsel->id_offset,
3728 .size = evsel->core.ids * sizeof(u64),
3729 }
3730 };
3731 err = do_write(&ff, &f_attr, sizeof(f_attr));
3732 if (err < 0) {
3733 pr_debug("failed to write perf header attribute\n");
3734 goto err_out;
3735 }
3736 }
3737 attr_size += sizeof(struct perf_file_attr);
3738 }
3739
3740 if (!header->data_offset) {
3741 if (write_attrs_after_data)
3742 header->data_offset = sizeof(f_header);
3743 else
3744 header->data_offset = attr_offset + attr_size;
3745 }
3746 header->feat_offset = header->data_offset + header->data_size;
3747
3748 if (!write_attrs_after_data && at_exit) {
3749 /* Write features now that feat_offset is known. */
3750 err = perf_header__adds_write(header, evlist, fd, fc);
3751 if (err < 0)
3752 goto err_out;
3753 }
3754
3755 f_header = (struct perf_file_header){
3756 .magic = PERF_MAGIC,
3757 .size = sizeof(f_header),
3758 .attr_size = sizeof(struct perf_file_attr),
3759 .attrs = {
3760 .offset = attr_offset,
3761 .size = attr_size,
3762 },
3763 .data = {
3764 .offset = header->data_offset,
3765 .size = header->data_size,
3766 },
3767 /* event_types is ignored, store zeros */
3768 };
3769
3770 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
3771
3772 lseek(fd, 0, SEEK_SET);
3773 err = do_write(&ff, &f_header, sizeof(f_header));
3774 if (err < 0) {
3775 pr_debug("failed to write perf header\n");
3776 goto err_out;
3777 } else {
3778 lseek(fd, 0, SEEK_END);
3779 err = 0;
3780 }
3781 err_out:
3782 free(ff.buf);
3783 return err;
3784 }
3785
3786 int perf_session__write_header(struct perf_session *session,
3787 struct evlist *evlist,
3788 int fd, bool at_exit)
3789 {
3790 return perf_session__do_write_header(session, evlist, fd, at_exit, /*fc=*/NULL,
3791 /*write_attrs_after_data=*/false);
3792 }
3793
3794 size_t perf_session__data_offset(const struct evlist *evlist)
3795 {
3796 struct evsel *evsel;
3797 size_t data_offset;
3798
3799 data_offset = sizeof(struct perf_file_header);
3800 evlist__for_each_entry(evlist, evsel) {
3801 data_offset += evsel->core.ids * sizeof(u64);
3802 }
3803 data_offset += evlist->core.nr_entries * sizeof(struct perf_file_attr);
3804
3805 return data_offset;
3806 }
3807
3808 int perf_session__inject_header(struct perf_session *session,
3809 struct evlist *evlist,
3810 int fd,
3811 struct feat_copier *fc,
3812 bool write_attrs_after_data)
3813 {
3814 return perf_session__do_write_header(session, evlist, fd, true, fc,
3815 write_attrs_after_data);
3816 }
3817
3818 static int perf_header__getbuffer64(struct perf_header *header,
3819 int fd, void *buf, size_t size)
3820 {
3821 if (readn(fd, buf, size) <= 0)
3822 return -1;
3823
3824 if (header->needs_swap)
3825 mem_bswap_64(buf, size);
3826
3827 return 0;
3828 }
3829
3830 int perf_header__process_sections(struct perf_header *header, int fd,
3831 void *data,
3832 int (*process)(struct perf_file_section *section,
3833 struct perf_header *ph,
3834 int feat, int fd, void *data))
3835 {
3836 struct perf_file_section *feat_sec, *sec;
3837 int nr_sections;
3838 int sec_size;
3839 int feat;
3840 int err;
3841
3842 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3843 if (!nr_sections)
3844 return 0;
3845
3846 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
3847 if (!feat_sec)
3848 return -1;
3849
3850 sec_size = sizeof(*feat_sec) * nr_sections;
3851
3852 lseek(fd, header->feat_offset, SEEK_SET);
3853
3854 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
3855 if (err < 0)
3856 goto out_free;
3857
3858 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
3859 err = process(sec++, header, feat, fd, data);
3860 if (err < 0)
3861 goto out_free;
3862 }
3863 err = 0;
3864 out_free:
3865 free(feat_sec);
3866 return err;
3867 }
3868
3869 static const int attr_file_abi_sizes[] = {
3870 [0] = PERF_ATTR_SIZE_VER0,
3871 [1] = PERF_ATTR_SIZE_VER1,
3872 [2] = PERF_ATTR_SIZE_VER2,
3873 [3] = PERF_ATTR_SIZE_VER3,
3874 [4] = PERF_ATTR_SIZE_VER4,
3875 0,
3876 };
3877
3878 /*
3879 * In the legacy file format, the magic number does not encode endianness;
3880 * hdr_sz was used for that instead. But given that hdr_sz can vary based
3881 * on ABI revisions, we need to try every known ABI size in both byte orders
3882 * to detect the endianness.
3883 */
3884 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
3885 {
3886 uint64_t ref_size, attr_size;
3887 int i;
3888
3889 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
3890 ref_size = attr_file_abi_sizes[i]
3891 + sizeof(struct perf_file_section);
3892 if (hdr_sz != ref_size) {
3893 attr_size = bswap_64(hdr_sz);
3894 if (attr_size != ref_size)
3895 continue;
3896
3897 ph->needs_swap = true;
3898 }
3899 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
3900 i,
3901 ph->needs_swap);
3902 return 0;
3903 }
3904 /* could not determine endianness */
3905 return -1;
3906 }
3907
3908 #define PERF_PIPE_HDR_VER0 16
3909
3910 static const size_t attr_pipe_abi_sizes[] = {
3911 [0] = PERF_PIPE_HDR_VER0,
3912 0,
3913 };
3914
3915 /*
3916 * In the legacy pipe format, there is an implicit assumption that the host
3917 * recording the samples and the host parsing them have the same endianness.
3918 * This is not always the case, since the pipe output can be redirected into a
3919 * file and analyzed on a different machine, possibly with a different
3920 * endianness and a different perf_event ABI revision in the perf tool itself.
3921 */
3922 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
3923 {
3924 u64 attr_size;
3925 int i;
3926
3927 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
3928 if (hdr_sz != attr_pipe_abi_sizes[i]) {
3929 attr_size = bswap_64(hdr_sz);
3930 if (attr_size != hdr_sz)
3931 continue;
3932
3933 ph->needs_swap = true;
3934 }
3935 pr_debug("Pipe ABI%d perf.data file detected\n", i);
3936 return 0;
3937 }
3938 return -1;
3939 }
3940
3941 bool is_perf_magic(u64 magic)
3942 {
3943 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3944 || magic == __perf_magic2
3945 || magic == __perf_magic2_sw)
3946 return true;
3947
3948 return false;
3949 }
3950
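/*
 * Work out the file version and endianness from the magic number.  The legacy
 * "PERFFILE" magic does not encode endianness, so fall back to probing the
 * header size against the known ABI sizes; the "PERFILE2" magic encodes it
 * directly, a byte-swapped match meaning the file needs swapping.
 */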
3951 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
3952 bool is_pipe, struct perf_header *ph)
3953 {
3954 int ret;
3955
3956 /* check for legacy format */
3957 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
3958 if (ret == 0) {
3959 ph->version = PERF_HEADER_VERSION_1;
3960 pr_debug("legacy perf.data format\n");
3961 if (is_pipe)
3962 return try_all_pipe_abis(hdr_sz, ph);
3963
3964 return try_all_file_abis(hdr_sz, ph);
3965 }
3966 /*
3967 * the new magic number serves two purposes:
3968 * - unique number to identify actual perf.data files
3969 * - encode endianness of file
3970 */
3971 ph->version = PERF_HEADER_VERSION_2;
3972
3973 /* check magic number with one endianness */
3974 if (magic == __perf_magic2)
3975 return 0;
3976
3977 /* check magic number with opposite endianness */
3978 if (magic != __perf_magic2_sw)
3979 return -1;
3980
3981 ph->needs_swap = true;
3982
3983 return 0;
3984 }
3985
3986 int perf_file_header__read(struct perf_file_header *header,
3987 struct perf_header *ph, int fd)
3988 {
3989 ssize_t ret;
3990
3991 lseek(fd, 0, SEEK_SET);
3992
3993 ret = readn(fd, header, sizeof(*header));
3994 if (ret <= 0)
3995 return -1;
3996
3997 if (check_magic_endian(header->magic,
3998 header->attr_size, false, ph) < 0) {
3999 pr_debug("magic/endian check failed\n");
4000 return -1;
4001 }
4002
4003 if (ph->needs_swap) {
4004 mem_bswap_64(header, offsetof(struct perf_file_header,
4005 adds_features));
4006 }
4007
4008 if (header->size > header->attrs.offset) {
4009 pr_err("Perf file header corrupt: header overlaps attrs\n");
4010 return -1;
4011 }
4012
4013 if (header->size > header->data.offset) {
4014 pr_err("Perf file header corrupt: header overlaps data\n");
4015 return -1;
4016 }
4017
4018 if ((header->attrs.offset <= header->data.offset &&
4019 header->attrs.offset + header->attrs.size > header->data.offset) ||
4020 (header->attrs.offset > header->data.offset &&
4021 header->data.offset + header->data.size > header->attrs.offset)) {
4022 pr_err("Perf file header corrupt: Attributes and data overlap\n");
4023 return -1;
4024 }
4025
4026 if (header->size != sizeof(*header)) {
4027 /* Support the previous format */
4028 if (header->size == offsetof(typeof(*header), adds_features))
4029 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
4030 else
4031 return -1;
4032 } else if (ph->needs_swap) {
4033 /*
4034 * feature bitmap is declared as an array of unsigned longs --
4035 * not good since its size can differ between the host that
4036 * generated the data file and the host analyzing the file.
4037 *
4038 * We need to handle endianness, but we don't know the size of
4039 * the unsigned long where the file was generated. Take a best
4040 * guess at determining it: try a 64-bit swap first (i.e., the file was
4041 * created on a 64-bit host), and check if the hostname feature
4042 * bit is set (this feature bit is forced on as of fbe96f2).
4043 * If the bit is not set, undo the 64-bit swap and try a 32-bit
4044 * swap. If the hostname bit is still not set (e.g., an older data
4045 * file), punt and fall back to the original behavior --
4046 * clearing all feature bits and setting buildid.
4047 */
4048 mem_bswap_64(&header->adds_features,
4049 BITS_TO_U64(HEADER_FEAT_BITS));
4050
4051 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
4052 /* unswap as u64 */
4053 mem_bswap_64(&header->adds_features,
4054 BITS_TO_U64(HEADER_FEAT_BITS));
4055
4056 /* unswap as u32 */
4057 mem_bswap_32(&header->adds_features,
4058 BITS_TO_U32(HEADER_FEAT_BITS));
4059 }
4060
4061 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
4062 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
4063 __set_bit(HEADER_BUILD_ID, header->adds_features);
4064 }
4065 }
4066
4067 memcpy(&ph->adds_features, &header->adds_features,
4068 sizeof(ph->adds_features));
4069
4070 ph->data_offset = header->data.offset;
4071 ph->data_size = header->data.size;
4072 ph->feat_offset = header->data.offset + header->data.size;
4073 return 0;
4074 }
4075
4076 static int perf_file_section__process(struct perf_file_section *section,
4077 struct perf_header *ph,
4078 int feat, int fd, void *data)
4079 {
4080 struct feat_fd fdd = {
4081 .fd = fd,
4082 .ph = ph,
4083 .size = section->size,
4084 .offset = section->offset,
4085 };
4086
4087 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
4088 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
4089 "%d, continuing...\n", section->offset, feat);
4090 return 0;
4091 }
4092
4093 if (feat >= HEADER_LAST_FEATURE) {
4094 pr_debug("unknown feature %d, continuing...\n", feat);
4095 return 0;
4096 }
4097
4098 if (!feat_ops[feat].process)
4099 return 0;
4100
4101 return feat_ops[feat].process(&fdd, data);
4102 }
4103
4104 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
4105 struct perf_header *ph,
4106 struct perf_data *data)
4107 {
4108 ssize_t ret;
4109
4110 ret = perf_data__read(data, header, sizeof(*header));
4111 if (ret <= 0)
4112 return -1;
4113
4114 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
4115 pr_debug("endian/magic failed\n");
4116 return -1;
4117 }
4118
4119 if (ph->needs_swap)
4120 header->size = bswap_64(header->size);
4121
4122 return 0;
4123 }
4124
4125 static int perf_header__read_pipe(struct perf_session *session)
4126 {
4127 struct perf_header *header = &session->header;
4128 struct perf_pipe_file_header f_header;
4129
4130 if (perf_file_header__read_pipe(&f_header, header, session->data) < 0) {
4131 pr_debug("incompatible file format\n");
4132 return -EINVAL;
4133 }
4134
4135 return f_header.size == sizeof(f_header) ? 0 : -1;
4136 }
4137
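/*
 * Read one on-disk perf_file_attr.  Older files may carry a smaller
 * perf_event_attr, so read the guaranteed VER0 part first, then whatever the
 * recorded size says is left, and finally the perf_file_section pointing at
 * the event's ids.
 */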
4138 static int read_attr(int fd, struct perf_header *ph,
4139 struct perf_file_attr *f_attr)
4140 {
4141 struct perf_event_attr *attr = &f_attr->attr;
4142 size_t sz, left;
4143 size_t our_sz = sizeof(f_attr->attr);
4144 ssize_t ret;
4145
4146 memset(f_attr, 0, sizeof(*f_attr));
4147
4148 /* read minimal guaranteed structure */
4149 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
4150 if (ret <= 0) {
4151 pr_debug("cannot read %d bytes of header attr\n",
4152 PERF_ATTR_SIZE_VER0);
4153 return -1;
4154 }
4155
4156 /* perf_event_attr size as recorded in the file */
4157 sz = attr->size;
4158
4159 if (ph->needs_swap)
4160 sz = bswap_32(sz);
4161
4162 if (sz == 0) {
4163 /* assume ABI0 */
4164 sz = PERF_ATTR_SIZE_VER0;
4165 } else if (sz > our_sz) {
4166 pr_debug("file uses a more recent and unsupported ABI"
4167 " (%zu bytes extra)\n", sz - our_sz);
4168 return -1;
4169 }
4170 /* what we have not yet read and that we know about */
4171 left = sz - PERF_ATTR_SIZE_VER0;
4172 if (left) {
4173 void *ptr = attr;
4174 ptr += PERF_ATTR_SIZE_VER0;
4175
4176 ret = readn(fd, ptr, left);
4177 }
4178 /* read perf_file_section, ids are read in caller */
4179 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
4180
4181 return ret <= 0 ? -1 : 0;
4182 }
4183
4184 #ifdef HAVE_LIBTRACEEVENT
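/*
 * For tracepoint evsels, attr.config holds the tracepoint id: look up the
 * matching format in the tep handle built from the file's tracing data and,
 * if the evsel has no name yet, synthesize the usual "system:name" form.
 */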
4185 static int evsel__prepare_tracepoint_event(struct evsel *evsel, struct tep_handle *pevent)
4186 {
4187 struct tep_event *event;
4188 char bf[128];
4189
4190 /* already prepared */
4191 if (evsel->tp_format)
4192 return 0;
4193
4194 if (pevent == NULL) {
4195 pr_debug("broken or missing trace data\n");
4196 return -1;
4197 }
4198
4199 event = tep_find_event(pevent, evsel->core.attr.config);
4200 if (event == NULL) {
4201 pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
4202 return -1;
4203 }
4204
4205 if (!evsel->name) {
4206 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
4207 evsel->name = strdup(bf);
4208 if (evsel->name == NULL)
4209 return -1;
4210 }
4211
4212 evsel->tp_format = event;
4213 return 0;
4214 }
4215
4216 static int evlist__prepare_tracepoint_events(struct evlist *evlist, struct tep_handle *pevent)
4217 {
4218 struct evsel *pos;
4219
4220 evlist__for_each_entry(evlist, pos) {
4221 if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
4222 evsel__prepare_tracepoint_event(pos, pevent))
4223 return -1;
4224 }
4225
4226 return 0;
4227 }
4228 #endif
4229
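/*
 * Read the header of a perf.data file or pipe into the session: evsels and
 * their sample ids are rebuilt from the attr section, then the optional
 * feature sections are processed. This is normally called while the session
 * is being set up (e.g. from perf_session__new()), not directly by tools.
 */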
4230 int perf_session__read_header(struct perf_session *session)
4231 {
4232 struct perf_data *data = session->data;
4233 struct perf_header *header = &session->header;
4234 struct perf_file_header f_header;
4235 struct perf_file_attr f_attr;
4236 u64 f_id;
4237 int nr_attrs, nr_ids, i, j, err;
4238 int fd = perf_data__fd(data);
4239
4240 session->evlist = evlist__new();
4241 if (session->evlist == NULL)
4242 return -ENOMEM;
4243
4244 session->evlist->session = session;
4245 session->machines.host.env = &header->env;
4246
4247 /*
4248 * We can read 'pipe' data events from a regular file too, so
4249 * check for the pipe header regardless of the source.
4250 */
4251 err = perf_header__read_pipe(session);
4252 if (!err || perf_data__is_pipe(data)) {
4253 data->is_pipe = true;
4254 return err;
4255 }
4256
4257 if (perf_file_header__read(&f_header, header, fd) < 0)
4258 return -EINVAL;
4259
4260 if (header->needs_swap && data->in_place_update) {
4261 pr_err("In-place update not supported when byte-swapping is required\n");
4262 return -EINVAL;
4263 }
4264
4265 /*
4266 * Sanity check that perf.data was written cleanly; data size is
4267 * initialized to 0 and updated only if the on_exit function is run.
4268 * If data size is still 0 then the file contains only partial
4269 * information. Just warn the user and process as much of it as possible.
4270 */
4271 if (f_header.data.size == 0) {
4272 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
4273 "Was the 'perf record' command properly terminated?\n",
4274 data->file.path);
4275 }
4276
4277 if (f_header.attr_size == 0) {
4278 pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
4279 "Was the 'perf record' command properly terminated?\n",
4280 data->file.path);
4281 return -EINVAL;
4282 }
4283
4284 nr_attrs = f_header.attrs.size / f_header.attr_size;
4285 lseek(fd, f_header.attrs.offset, SEEK_SET);
4286
4287 for (i = 0; i < nr_attrs; i++) {
4288 struct evsel *evsel;
4289 off_t tmp;
4290
4291 if (read_attr(fd, header, &f_attr) < 0)
4292 goto out_errno;
4293
4294 if (header->needs_swap) {
4295 f_attr.ids.size = bswap_64(f_attr.ids.size);
4296 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
4297 perf_event__attr_swap(&f_attr.attr);
4298 }
4299
4300 tmp = lseek(fd, 0, SEEK_CUR);
4301 evsel = evsel__new(&f_attr.attr);
4302
4303 if (evsel == NULL)
4304 goto out_delete_evlist;
4305
4306 evsel->needs_swap = header->needs_swap;
4307 /*
4308 * Add the evsel to the evlist first, so that if perf_evsel__alloc_id()
4309 * fails below, the entry still gets purged by evlist__delete().
4310 */
4311 evlist__add(session->evlist, evsel);
4312
4313 nr_ids = f_attr.ids.size / sizeof(u64);
4314 /*
4315 * We don't have the cpu and thread maps on the header, so
4316 * for allocating the perf_sample_id table we fake 1 cpu and
4317 * nr_ids threads.
4318 */
4319 if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids))
4320 goto out_delete_evlist;
4321
4322 lseek(fd, f_attr.ids.offset, SEEK_SET);
4323
4324 for (j = 0; j < nr_ids; j++) {
4325 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
4326 goto out_errno;
4327
4328 perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id);
4329 }
4330
4331 lseek(fd, tmp, SEEK_SET);
4332 }
4333
4334 #ifdef HAVE_LIBTRACEEVENT
4335 perf_header__process_sections(header, fd, &session->tevent,
4336 perf_file_section__process);
4337
4338 if (evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent))
4339 goto out_delete_evlist;
4340 #else
4341 perf_header__process_sections(header, fd, NULL, perf_file_section__process);
4342 #endif
4343
4344 return 0;
4345 out_errno:
4346 return -errno;
4347
4348 out_delete_evlist:
4349 evlist__delete(session->evlist);
4350 session->evlist = NULL;
4351 return -ENOMEM;
4352 }
4353
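/*
 * Pipe-mode counterpart of the feature sections: each feature arrives as a
 * synthesized PERF_RECORD_HEADER_FEATURE event whose payload is handed to
 * the same feat_ops[] process/print hooks via an in-memory feat_fd buffer.
 */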
4354 int perf_event__process_feature(struct perf_session *session,
4355 union perf_event *event)
4356 {
4357 struct feat_fd ff = { .fd = 0 };
4358 struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
4359 int type = fe->header.type;
4360 u64 feat = fe->feat_id;
4361 int ret = 0;
4362 bool print = dump_trace;
4363
4364 if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
4365 pr_warning("invalid record type %d in pipe-mode\n", type);
4366 return 0;
4367 }
4368 if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
4369 pr_warning("invalid feature id %" PRIu64 " in pipe-mode\n", feat);
4370 return -1;
4371 }
4372
4373 ff.buf = (void *)fe->data;
4374 ff.size = event->header.size - sizeof(*fe);
4375 ff.ph = &session->header;
4376
4377 if (feat_ops[feat].process && feat_ops[feat].process(&ff, NULL)) {
4378 ret = -1;
4379 goto out;
4380 }
4381
4382 if (session->tool->show_feat_hdr) {
4383 if (!feat_ops[feat].full_only ||
4384 session->tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
4385 print = true;
4386 } else {
4387 fprintf(stdout, "# %s info available, use -I to display\n",
4388 feat_ops[feat].name);
4389 }
4390 }
4391
4392 if (dump_trace)
4393 printf(", ");
4394
4395 if (print) {
4396 if (feat_ops[feat].print)
4397 feat_ops[feat].print(&ff, stdout);
4398 else
4399 printf("# %s", feat_ops[feat].name);
4400 }
4401
4402 out:
4403 free_event_desc(ff.events);
4404 return ret;
4405 }
4406
4407 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
4408 {
4409 struct perf_record_event_update *ev = &event->event_update;
4410 struct perf_cpu_map *map;
4411 size_t ret;
4412
4413 ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id);
4414
4415 switch (ev->type) {
4416 case PERF_EVENT_UPDATE__SCALE:
4417 ret += fprintf(fp, "... scale: %f\n", ev->scale.scale);
4418 break;
4419 case PERF_EVENT_UPDATE__UNIT:
4420 ret += fprintf(fp, "... unit: %s\n", ev->unit);
4421 break;
4422 case PERF_EVENT_UPDATE__NAME:
4423 ret += fprintf(fp, "... name: %s\n", ev->name);
4424 break;
4425 case PERF_EVENT_UPDATE__CPUS:
4426 ret += fprintf(fp, "... ");
4427
4428 map = cpu_map__new_data(&ev->cpus.cpus);
4429 if (map) {
4430 ret += cpu_map__fprintf(map, fp);
4431 perf_cpu_map__put(map);
4432 } else
4433 ret += fprintf(fp, "failed to get cpus\n");
4434 break;
4435 default:
4436 ret += fprintf(fp, "... unknown type\n");
4437 break;
4438 }
4439
4440 return ret;
4441 }
4442
4443 size_t perf_event__fprintf_attr(union perf_event *event, FILE *fp)
4444 {
4445 return perf_event_attr__fprintf(fp, &event->attr.attr, __desc_attr__fprintf, NULL);
4446 }
4447
4448 int perf_event__process_attr(const struct perf_tool *tool __maybe_unused,
4449 union perf_event *event,
4450 struct evlist **pevlist)
4451 {
4452 u32 i, n_ids;
4453 u64 *ids;
4454 struct evsel *evsel;
4455 struct evlist *evlist = *pevlist;
4456
4457 if (dump_trace)
4458 perf_event__fprintf_attr(event, stdout);
4459
4460 if (evlist == NULL) {
4461 *pevlist = evlist = evlist__new();
4462 if (evlist == NULL)
4463 return -ENOMEM;
4464 }
4465
4466 evsel = evsel__new(&event->attr.attr);
4467 if (evsel == NULL)
4468 return -ENOMEM;
4469
4470 evlist__add(evlist, evsel);
4471
4472 n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
4473 n_ids = n_ids / sizeof(u64);
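	/*
	 * Layout of the attr record: perf_event_header, the perf_event_attr
	 * itself (attr.size bytes), then n_ids u64 sample ids. For example
	 * (numbers purely illustrative), a 160-byte record carrying a
	 * 128-byte attr holds (160 - 8 - 128) / 8 = 3 ids.
	 */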
4474 /*
4475 * We don't have the cpu and thread maps on the header, so
4476 * for allocating the perf_sample_id table we fake 1 cpu and
4477 * n_ids threads.
4478 */
4479 if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
4480 return -ENOMEM;
4481
4482 ids = perf_record_header_attr_id(event);
4483 for (i = 0; i < n_ids; i++) {
4484 perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
4485 }
4486
4487 return 0;
4488 }
4489
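/*
 * PERF_RECORD_EVENT_UPDATE carries attribute metadata that is only known
 * after the attr itself was emitted (unit, scale, name, or the cpu map the
 * event was opened on); look the evsel up by sample id and patch that field.
 */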
4490 int perf_event__process_event_update(const struct perf_tool *tool __maybe_unused,
4491 union perf_event *event,
4492 struct evlist **pevlist)
4493 {
4494 struct perf_record_event_update *ev = &event->event_update;
4495 struct evlist *evlist;
4496 struct evsel *evsel;
4497 struct perf_cpu_map *map;
4498
4499 if (dump_trace)
4500 perf_event__fprintf_event_update(event, stdout);
4501
4502 if (!pevlist || *pevlist == NULL)
4503 return -EINVAL;
4504
4505 evlist = *pevlist;
4506
4507 evsel = evlist__id2evsel(evlist, ev->id);
4508 if (evsel == NULL)
4509 return -EINVAL;
4510
4511 switch (ev->type) {
4512 case PERF_EVENT_UPDATE__UNIT:
4513 free((char *)evsel->unit);
4514 evsel->unit = strdup(ev->unit);
4515 break;
4516 case PERF_EVENT_UPDATE__NAME:
4517 free(evsel->name);
4518 evsel->name = strdup(ev->name);
4519 break;
4520 case PERF_EVENT_UPDATE__SCALE:
4521 evsel->scale = ev->scale.scale;
4522 break;
4523 case PERF_EVENT_UPDATE__CPUS:
4524 map = cpu_map__new_data(&ev->cpus.cpus);
4525 if (map) {
4526 perf_cpu_map__put(evsel->core.pmu_cpus);
4527 evsel->core.pmu_cpus = map;
4528 } else
4529 pr_err("failed to get event_update cpus\n");
4530 default:
4531 break;
4532 }
4533
4534 return 0;
4535 }
4536
4537 #ifdef HAVE_LIBTRACEEVENT
4538 int perf_event__process_tracing_data(struct perf_session *session,
4539 union perf_event *event)
4540 {
4541 ssize_t size_read, padding, size = event->tracing_data.size;
4542 int fd = perf_data__fd(session->data);
4543 char buf[BUFSIZ];
4544
4545 /*
4546 * For a pipe the fd is already positioned correctly and cannot be
4547 * repositioned anyway; seeking would also break the case where we
4548 * read 'pipe' data from a regular file. trace_report() reads its
4549 * data from 'fd', so for a regular file we need to seek to just
4550 * behind this event, where the tracing data starts.
4551 */
4552 if (!perf_data__is_pipe(session->data)) {
4553 off_t offset = lseek(fd, 0, SEEK_CUR);
4554
4555 /* setup for reading amidst mmap */
4556 lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
4557 SEEK_SET);
4558 }
4559
4560 size_read = trace_report(fd, &session->tevent, session->trace_event_repipe);
4561 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
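	/*
	 * Tracing data is padded out to a u64 boundary in the stream. For
	 * example, if trace_report() consumed 4321 bytes, PERF_ALIGN(4321, 8)
	 * is 4328, so 7 padding bytes still need to be read (and re-piped
	 * below when in repipe mode).
	 */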
4562
4563 if (readn(fd, buf, padding) < 0) {
4564 pr_err("%s: reading input file\n", __func__);
4565 return -1;
4566 }
4567 if (session->trace_event_repipe) {
4568 int retw = write(STDOUT_FILENO, buf, padding);
4569 if (retw != padding) {
4570 pr_err("%s: repiping tracing data padding\n", __func__);
4571 return -1;
4572 }
4573 }
4574
4575 if (size_read + padding != size) {
4576 pr_err("%s: tracing data size mismatch\n", __func__);
4577 return -1;
4578 }
4579
4580 evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent);
4581
4582 return size_read + padding;
4583 }
4584 #endif
4585
4586 int perf_event__process_build_id(struct perf_session *session,
4587 union perf_event *event)
4588 {
4589 __event_process_build_id(&event->build_id,
4590 event->build_id.filename,
4591 session);
4592 return 0;
4593 }
4594