1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include "string2.h"
5 #include <sys/param.h>
6 #include <sys/types.h>
7 #include <byteswap.h>
8 #include <unistd.h>
9 #include <regex.h>
10 #include <stdio.h>
11 #include <stdlib.h>
12 #include <linux/compiler.h>
13 #include <linux/list.h>
14 #include <linux/kernel.h>
15 #include <linux/bitops.h>
16 #include <linux/string.h>
17 #include <linux/stringify.h>
18 #include <linux/zalloc.h>
19 #include <sys/stat.h>
20 #include <sys/utsname.h>
21 #include <linux/time64.h>
22 #include <dirent.h>
23 #ifdef HAVE_LIBBPF_SUPPORT
24 #include <bpf/libbpf.h>
25 #endif
26 #include <perf/cpumap.h>
27 #include <tools/libc_compat.h> // reallocarray
28
29 #include "dso.h"
30 #include "evlist.h"
31 #include "evsel.h"
32 #include "util/evsel_fprintf.h"
33 #include "header.h"
34 #include "memswap.h"
35 #include "trace-event.h"
36 #include "session.h"
37 #include "symbol.h"
38 #include "debug.h"
39 #include "cpumap.h"
40 #include "pmu.h"
41 #include "pmus.h"
42 #include "vdso.h"
43 #include "strbuf.h"
44 #include "build-id.h"
45 #include "data.h"
46 #include <api/fs/fs.h>
47 #include <api/io_dir.h>
48 #include "asm/bug.h"
49 #include "tool.h"
50 #include "time-utils.h"
51 #include "units.h"
52 #include "util/util.h" // perf_exe()
53 #include "cputopo.h"
54 #include "bpf-event.h"
55 #include "bpf-utils.h"
56 #include "clockid.h"
57
58 #include <linux/ctype.h>
59 #include <internal/lib.h>
60
61 #ifdef HAVE_LIBTRACEEVENT
62 #include <event-parse.h>
63 #endif
64
65 /*
66 * magic2 = "PERFILE2"
67 * must be a numerical value to let the endianness
68 * determine the memory layout. That way we are able
69 * to detect endianness when reading the perf.data file
70 * back.
71 *
72 * we check for legacy (PERFFILE) format.
73 */
74 static const char *__perf_magic1 = "PERFFILE";
75 static const u64 __perf_magic2 = 0x32454c4946524550ULL;
76 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
77
78 #define PERF_MAGIC __perf_magic2
79
80 const char perf_version_string[] = PERF_VERSION;
81
82 struct perf_file_attr {
83 struct perf_event_attr attr;
84 struct perf_file_section ids;
85 };
86
87 void perf_header__set_feat(struct perf_header *header, int feat)
88 {
89 __set_bit(feat, header->adds_features);
90 }
91
92 void perf_header__clear_feat(struct perf_header *header, int feat)
93 {
94 __clear_bit(feat, header->adds_features);
95 }
96
97 bool perf_header__has_feat(const struct perf_header *header, int feat)
98 {
99 return test_bit(feat, header->adds_features);
100 }
101
102 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
103 {
104 ssize_t ret = writen(ff->fd, buf, size);
105
106 if (ret != (ssize_t)size)
107 return ret < 0 ? (int)ret : -1;
108 return 0;
109 }
110
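/*
 * Pipe-mode variant of the write path: append to the growable in-memory
 * buffer instead of a file descriptor, doubling its size until the data
 * fits, capped at the maximum perf_event_header payload.
 */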
111 static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
112 {
113 /* struct perf_event_header::size is u16 */
114 const size_t max_size = 0xffff - sizeof(struct perf_event_header);
115 size_t new_size = ff->size;
116 void *addr;
117
118 if (size + ff->offset > max_size)
119 return -E2BIG;
120
121 while (size > (new_size - ff->offset))
122 new_size <<= 1;
123 new_size = min(max_size, new_size);
124
125 if (ff->size < new_size) {
126 addr = realloc(ff->buf, new_size);
127 if (!addr)
128 return -ENOMEM;
129 ff->buf = addr;
130 ff->size = new_size;
131 }
132
133 memcpy(ff->buf + ff->offset, buf, size);
134 ff->offset += size;
135
136 return 0;
137 }
138
139 /* Return: 0 if succeeded, -ERR if failed. */
140 int do_write(struct feat_fd *ff, const void *buf, size_t size)
141 {
142 if (!ff->buf)
143 return __do_write_fd(ff, buf, size);
144 return __do_write_buf(ff, buf, size);
145 }
146
147 /* Return: 0 if succeeded, -ERR if failed. */
148 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
149 {
150 u64 *p = (u64 *) set;
151 int i, ret;
152
153 ret = do_write(ff, &size, sizeof(size));
154 if (ret < 0)
155 return ret;
156
157 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
158 ret = do_write(ff, p + i, sizeof(*p));
159 if (ret < 0)
160 return ret;
161 }
162
163 return 0;
164 }
165
166 /* Return: 0 if succeeded, -ERR if failed. */
167 int write_padded(struct feat_fd *ff, const void *bf,
168 size_t count, size_t count_aligned)
169 {
170 static const char zero_buf[NAME_ALIGN];
171 int err = do_write(ff, bf, count);
172
173 if (!err)
174 err = do_write(ff, zero_buf, count_aligned - count);
175
176 return err;
177 }
178
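/* On-disk footprint of a string: the u32 length field plus the
 * NUL-terminated, NAME_ALIGN-padded payload that write_padded() emits.
 */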
179 #define string_size(str) \
180 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
181
182 /* Return: 0 if succeeded, -ERR if failed. */
183 static int do_write_string(struct feat_fd *ff, const char *str)
184 {
185 u32 len, olen;
186 int ret;
187
188 olen = strlen(str) + 1;
189 len = PERF_ALIGN(olen, NAME_ALIGN);
190
191 /* write len, incl. \0 */
192 ret = do_write(ff, &len, sizeof(len));
193 if (ret < 0)
194 return ret;
195
196 return write_padded(ff, str, olen, len);
197 }
198
199 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
200 {
201 ssize_t ret = readn(ff->fd, addr, size);
202
203 if (ret != size)
204 return ret < 0 ? (int)ret : -1;
205 return 0;
206 }
207
208 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
209 {
210 if (size > (ssize_t)ff->size - ff->offset)
211 return -1;
212
213 memcpy(addr, ff->buf + ff->offset, size);
214 ff->offset += size;
215
216 return 0;
217
218 }
219
220 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
221 {
222 if (!ff->buf)
223 return __do_read_fd(ff, addr, size);
224 return __do_read_buf(ff, addr, size);
225 }
226
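/* Read a u32 header field, byte-swapping it when the file was recorded on a
 * host with the opposite endianness.
 */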
227 static int do_read_u32(struct feat_fd *ff, u32 *addr)
228 {
229 int ret;
230
231 ret = __do_read(ff, addr, sizeof(*addr));
232 if (ret)
233 return ret;
234
235 if (ff->ph->needs_swap)
236 *addr = bswap_32(*addr);
237 return 0;
238 }
239
240 static int do_read_u64(struct feat_fd *ff, u64 *addr)
241 {
242 int ret;
243
244 ret = __do_read(ff, addr, sizeof(*addr));
245 if (ret)
246 return ret;
247
248 if (ff->ph->needs_swap)
249 *addr = bswap_64(*addr);
250 return 0;
251 }
252
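/* Read a length-prefixed, zero-padded string written by do_write_string().
 * Returns a malloc'd copy (caller frees) or NULL on error.
 */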
253 static char *do_read_string(struct feat_fd *ff)
254 {
255 u32 len;
256 char *buf;
257
258 if (do_read_u32(ff, &len))
259 return NULL;
260
261 buf = malloc(len);
262 if (!buf)
263 return NULL;
264
265 if (!__do_read(ff, buf, len)) {
266 /*
267 * strings are padded by zeroes
268 * thus the actual strlen of buf
269 * may be less than len
270 */
271 return buf;
272 }
273
274 free(buf);
275 return NULL;
276 }
277
278 /* Return: 0 if succeeded, -ERR if failed. */
279 static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
280 {
281 unsigned long *set;
282 u64 size, *p;
283 int i, ret;
284
285 ret = do_read_u64(ff, &size);
286 if (ret)
287 return ret;
288
289 set = bitmap_zalloc(size);
290 if (!set)
291 return -ENOMEM;
292
293 p = (u64 *) set;
294
295 for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
296 ret = do_read_u64(ff, p + i);
297 if (ret < 0) {
298 free(set);
299 return ret;
300 }
301 }
302
303 *pset = set;
304 *psize = size;
305 return 0;
306 }
307
308 #ifdef HAVE_LIBTRACEEVENT
309 static int write_tracing_data(struct feat_fd *ff,
310 struct evlist *evlist)
311 {
312 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
313 return -1;
314
315 return read_tracing_data(ff->fd, &evlist->core.entries);
316 }
317 #endif
318
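/* HEADER_BUILD_ID: collect the build-ids of DSOs hit by samples and write
 * the build-id table; only valid when writing to a file, not in pipe mode.
 */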
319 static int write_build_id(struct feat_fd *ff,
320 struct evlist *evlist __maybe_unused)
321 {
322 struct perf_session *session;
323 int err;
324
325 session = container_of(ff->ph, struct perf_session, header);
326
327 if (!perf_session__read_build_ids(session, true))
328 return -1;
329
330 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
331 return -1;
332
333 err = perf_session__write_buildid_table(session, ff);
334 if (err < 0) {
335 pr_debug("failed to write buildid table\n");
336 return err;
337 }
338
339 return 0;
340 }
341
342 static int write_hostname(struct feat_fd *ff,
343 struct evlist *evlist __maybe_unused)
344 {
345 struct utsname uts;
346 int ret;
347
348 ret = uname(&uts);
349 if (ret < 0)
350 return -1;
351
352 return do_write_string(ff, uts.nodename);
353 }
354
355 static int write_osrelease(struct feat_fd *ff,
356 struct evlist *evlist __maybe_unused)
357 {
358 struct utsname uts;
359 int ret;
360
361 ret = uname(&uts);
362 if (ret < 0)
363 return -1;
364
365 return do_write_string(ff, uts.release);
366 }
367
368 static int write_arch(struct feat_fd *ff,
369 struct evlist *evlist __maybe_unused)
370 {
371 struct utsname uts;
372 int ret;
373
374 ret = uname(&uts);
375 if (ret < 0)
376 return -1;
377
378 return do_write_string(ff, uts.machine);
379 }
380
381 static int write_version(struct feat_fd *ff,
382 struct evlist *evlist __maybe_unused)
383 {
384 return do_write_string(ff, perf_version_string);
385 }
386
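/* Find the first /proc/cpuinfo line whose key matches cpuinfo_proc, strip
 * the "key:" prefix, squash repeated whitespace and write the value string.
 */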
387 static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
388 {
389 FILE *file;
390 char *buf = NULL;
391 char *s, *p;
392 const char *search = cpuinfo_proc;
393 size_t len = 0;
394 int ret = -1;
395
396 if (!search)
397 return -1;
398
399 file = fopen("/proc/cpuinfo", "r");
400 if (!file)
401 return -1;
402
403 while (getline(&buf, &len, file) > 0) {
404 ret = strncmp(buf, search, strlen(search));
405 if (!ret)
406 break;
407 }
408
409 if (ret) {
410 ret = -1;
411 goto done;
412 }
413
414 s = buf;
415
416 p = strchr(buf, ':');
417 if (p && *(p+1) == ' ' && *(p+2))
418 s = p + 2;
419 p = strchr(s, '\n');
420 if (p)
421 *p = '\0';
422
423 /* squash extra space characters (branding string) */
424 p = s;
425 while (*p) {
426 if (isspace(*p)) {
427 char *r = p + 1;
428 char *q = skip_spaces(r);
429 *p = ' ';
430 if (q != (p+1))
431 while ((*r++ = *q++));
432 }
433 p++;
434 }
435 ret = do_write_string(ff, s);
436 done:
437 free(buf);
438 fclose(file);
439 return ret;
440 }
441
442 static int write_cpudesc(struct feat_fd *ff,
443 struct evlist *evlist __maybe_unused)
444 {
445 #if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
446 #define CPUINFO_PROC { "cpu", }
447 #elif defined(__s390__)
448 #define CPUINFO_PROC { "vendor_id", }
449 #elif defined(__sh__)
450 #define CPUINFO_PROC { "cpu type", }
451 #elif defined(__alpha__) || defined(__mips__)
452 #define CPUINFO_PROC { "cpu model", }
453 #elif defined(__arm__)
454 #define CPUINFO_PROC { "model name", "Processor", }
455 #elif defined(__arc__)
456 #define CPUINFO_PROC { "Processor", }
457 #elif defined(__xtensa__)
458 #define CPUINFO_PROC { "core ID", }
459 #elif defined(__loongarch__)
460 #define CPUINFO_PROC { "Model Name", }
461 #else
462 #define CPUINFO_PROC { "model name", }
463 #endif
464 const char *cpuinfo_procs[] = CPUINFO_PROC;
465 #undef CPUINFO_PROC
466 unsigned int i;
467
468 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
469 int ret;
470 ret = __write_cpudesc(ff, cpuinfo_procs[i]);
471 if (ret >= 0)
472 return ret;
473 }
474 return -1;
475 }
476
477
478 static int write_nrcpus(struct feat_fd *ff,
479 struct evlist *evlist __maybe_unused)
480 {
481 long nr;
482 u32 nrc, nra;
483 int ret;
484
485 nrc = cpu__max_present_cpu().cpu;
486
487 nr = sysconf(_SC_NPROCESSORS_ONLN);
488 if (nr < 0)
489 return -1;
490
491 nra = (u32)(nr & UINT_MAX);
492
493 ret = do_write(ff, &nrc, sizeof(nrc));
494 if (ret < 0)
495 return ret;
496
497 return do_write(ff, &nra, sizeof(nra));
498 }
499
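/*
 * HEADER_EVENT_DESC layout: number of events, sizeof(perf_event_attr), then
 * for each event its attr, the number of ids, the event name and the ids.
 */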
500 static int write_event_desc(struct feat_fd *ff,
501 struct evlist *evlist)
502 {
503 struct evsel *evsel;
504 u32 nre, nri, sz;
505 int ret;
506
507 nre = evlist->core.nr_entries;
508
509 /*
510 * write number of events
511 */
512 ret = do_write(ff, &nre, sizeof(nre));
513 if (ret < 0)
514 return ret;
515
516 /*
517 * size of perf_event_attr struct
518 */
519 sz = (u32)sizeof(evsel->core.attr);
520 ret = do_write(ff, &sz, sizeof(sz));
521 if (ret < 0)
522 return ret;
523
524 evlist__for_each_entry(evlist, evsel) {
525 ret = do_write(ff, &evsel->core.attr, sz);
526 if (ret < 0)
527 return ret;
528 /*
529 * write the number of unique ids per event
530 * there is one id per instance of an event
531 *
532 * copy into an nri to be independent of the
533 * type of ids,
534 */
535 nri = evsel->core.ids;
536 ret = do_write(ff, &nri, sizeof(nri));
537 if (ret < 0)
538 return ret;
539
540 /*
541 * write event string as passed on cmdline
542 */
543 ret = do_write_string(ff, evsel__name(evsel));
544 if (ret < 0)
545 return ret;
546 /*
547 * write unique ids for this event
548 */
549 ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
550 if (ret < 0)
551 return ret;
552 }
553 return 0;
554 }
555
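/* HEADER_CMDLINE: the argument count, the resolved path of the running perf
 * binary, then the original command-line arguments.
 */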
556 static int write_cmdline(struct feat_fd *ff,
557 struct evlist *evlist __maybe_unused)
558 {
559 struct perf_env *env = &ff->ph->env;
560 char pbuf[MAXPATHLEN], *buf;
561 int i, ret, n;
562
563 /* actual path to perf binary */
564 buf = perf_exe(pbuf, MAXPATHLEN);
565
566 /* account for binary path */
567 n = env->nr_cmdline + 1;
568
569 ret = do_write(ff, &n, sizeof(n));
570 if (ret < 0)
571 return ret;
572
573 ret = do_write_string(ff, buf);
574 if (ret < 0)
575 return ret;
576
577 for (i = 0 ; i < env->nr_cmdline; i++) {
578 ret = do_write_string(ff, env->cmdline_argv[i]);
579 if (ret < 0)
580 return ret;
581 }
582 return 0;
583 }
584
585
586 static int write_cpu_topology(struct feat_fd *ff,
587 struct evlist *evlist __maybe_unused)
588 {
589 struct perf_env *env = &ff->ph->env;
590 struct cpu_topology *tp;
591 u32 i;
592 int ret, j;
593
594 tp = cpu_topology__new();
595 if (!tp)
596 return -1;
597
598 ret = do_write(ff, &tp->package_cpus_lists, sizeof(tp->package_cpus_lists));
599 if (ret < 0)
600 goto done;
601
602 for (i = 0; i < tp->package_cpus_lists; i++) {
603 ret = do_write_string(ff, tp->package_cpus_list[i]);
604 if (ret < 0)
605 goto done;
606 }
607 ret = do_write(ff, &tp->core_cpus_lists, sizeof(tp->core_cpus_lists));
608 if (ret < 0)
609 goto done;
610
611 for (i = 0; i < tp->core_cpus_lists; i++) {
612 ret = do_write_string(ff, tp->core_cpus_list[i]);
613 if (ret < 0)
614 break;
615 }
616
617 ret = perf_env__read_cpu_topology_map(env);
618 if (ret < 0)
619 goto done;
620
621 for (j = 0; j < env->nr_cpus_avail; j++) {
622 ret = do_write(ff, &env->cpu[j].core_id,
623 sizeof(env->cpu[j].core_id));
624 if (ret < 0)
625 return ret;
626 ret = do_write(ff, &env->cpu[j].socket_id,
627 sizeof(env->cpu[j].socket_id));
628 if (ret < 0)
629 return ret;
630 }
631
632 if (!tp->die_cpus_lists)
633 goto done;
634
635 ret = do_write(ff, &tp->die_cpus_lists, sizeof(tp->die_cpus_lists));
636 if (ret < 0)
637 goto done;
638
639 for (i = 0; i < tp->die_cpus_lists; i++) {
640 ret = do_write_string(ff, tp->die_cpus_list[i]);
641 if (ret < 0)
642 goto done;
643 }
644
645 for (j = 0; j < env->nr_cpus_avail; j++) {
646 ret = do_write(ff, &env->cpu[j].die_id,
647 sizeof(env->cpu[j].die_id));
648 if (ret < 0)
649 return ret;
650 }
651
652 done:
653 cpu_topology__delete(tp);
654 return ret;
655 }
656
657
658
659 static int write_total_mem(struct feat_fd *ff,
660 struct evlist *evlist __maybe_unused)
661 {
662 char *buf = NULL;
663 FILE *fp;
664 size_t len = 0;
665 int ret = -1, n;
666 uint64_t mem;
667
668 fp = fopen("/proc/meminfo", "r");
669 if (!fp)
670 return -1;
671
672 while (getline(&buf, &len, fp) > 0) {
673 ret = strncmp(buf, "MemTotal:", 9);
674 if (!ret)
675 break;
676 }
677 if (!ret) {
678 n = sscanf(buf, "%*s %"PRIu64, &mem);
679 if (n == 1)
680 ret = do_write(ff, &mem, sizeof(mem));
681 } else
682 ret = -1;
683 free(buf);
684 fclose(fp);
685 return ret;
686 }
687
688 static int write_numa_topology(struct feat_fd *ff,
689 struct evlist *evlist __maybe_unused)
690 {
691 struct numa_topology *tp;
692 int ret = -1;
693 u32 i;
694
695 tp = numa_topology__new();
696 if (!tp)
697 return -ENOMEM;
698
699 ret = do_write(ff, &tp->nr, sizeof(u32));
700 if (ret < 0)
701 goto err;
702
703 for (i = 0; i < tp->nr; i++) {
704 struct numa_topology_node *n = &tp->nodes[i];
705
706 ret = do_write(ff, &n->node, sizeof(u32));
707 if (ret < 0)
708 goto err;
709
710 ret = do_write(ff, &n->mem_total, sizeof(u64));
711 if (ret)
712 goto err;
713
714 ret = do_write(ff, &n->mem_free, sizeof(u64));
715 if (ret)
716 goto err;
717
718 ret = do_write_string(ff, n->cpus);
719 if (ret < 0)
720 goto err;
721 }
722
723 ret = 0;
724
725 err:
726 numa_topology__delete(tp);
727 return ret;
728 }
729
730 /*
731 * File format:
732 *
733 * struct pmu_mappings {
734 * u32 pmu_num;
735 * struct pmu_map {
736 * u32 type;
737 * char name[];
738 * }[pmu_num];
739 * };
740 */
741
742 static int write_pmu_mappings(struct feat_fd *ff,
743 struct evlist *evlist __maybe_unused)
744 {
745 struct perf_pmu *pmu = NULL;
746 u32 pmu_num = 0;
747 int ret;
748
749 /*
750 * Do a first pass to count the number of PMUs to avoid lseek so this
751 * works in pipe mode as well.
752 */
753 while ((pmu = perf_pmus__scan(pmu)))
754 pmu_num++;
755
756 ret = do_write(ff, &pmu_num, sizeof(pmu_num));
757 if (ret < 0)
758 return ret;
759
760 while ((pmu = perf_pmus__scan(pmu))) {
761 ret = do_write(ff, &pmu->type, sizeof(pmu->type));
762 if (ret < 0)
763 return ret;
764
765 ret = do_write_string(ff, pmu->name);
766 if (ret < 0)
767 return ret;
768 }
769
770 return 0;
771 }
772
773 /*
774 * File format:
775 *
776 * struct group_descs {
777 * u32 nr_groups;
778 * struct group_desc {
779 * char name[];
780 * u32 leader_idx;
781 * u32 nr_members;
782 * }[nr_groups];
783 * };
784 */
785 static int write_group_desc(struct feat_fd *ff,
786 struct evlist *evlist)
787 {
788 u32 nr_groups = evlist__nr_groups(evlist);
789 struct evsel *evsel;
790 int ret;
791
792 ret = do_write(ff, &nr_groups, sizeof(nr_groups));
793 if (ret < 0)
794 return ret;
795
796 evlist__for_each_entry(evlist, evsel) {
797 if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
798 const char *name = evsel->group_name ?: "{anon_group}";
799 u32 leader_idx = evsel->core.idx;
800 u32 nr_members = evsel->core.nr_members;
801
802 ret = do_write_string(ff, name);
803 if (ret < 0)
804 return ret;
805
806 ret = do_write(ff, &leader_idx, sizeof(leader_idx));
807 if (ret < 0)
808 return ret;
809
810 ret = do_write(ff, &nr_members, sizeof(nr_members));
811 if (ret < 0)
812 return ret;
813 }
814 }
815 return 0;
816 }
817
818 /*
819 * Return the CPU id as a raw string.
820 *
821 * Each architecture should provide a more precise id string that
822 * can be use to match the architecture's "mapfile".
823 */
824 char * __weak get_cpuid_str(struct perf_cpu cpu __maybe_unused)
825 {
826 return NULL;
827 }
828
829 char *get_cpuid_allow_env_override(struct perf_cpu cpu)
830 {
831 char *cpuid;
832 static bool printed;
833
834 cpuid = getenv("PERF_CPUID");
835 if (cpuid)
836 cpuid = strdup(cpuid);
837 if (!cpuid)
838 cpuid = get_cpuid_str(cpu);
839 if (!cpuid)
840 return NULL;
841
842 if (!printed) {
843 pr_debug("Using CPUID %s\n", cpuid);
844 printed = true;
845 }
846 return cpuid;
847 }
848
849 /* Return zero when the cpuid from the mapfile.csv matches the
850 * cpuid string generated on this platform.
851 * Otherwise return non-zero.
852 */
853 int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
854 {
855 regex_t re;
856 regmatch_t pmatch[1];
857 int match;
858
859 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
860 /* Warn that we could not build a matcher for this particular string. */
861 pr_info("Invalid regular expression %s\n", mapcpuid);
862 return 1;
863 }
864
865 match = !regexec(&re, cpuid, 1, pmatch, 0);
866 regfree(&re);
867 if (match) {
868 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
869
870 /* Verify the entire string matched. */
871 if (match_len == strlen(cpuid))
872 return 0;
873 }
874 return 1;
875 }
876
877 /*
878 * default get_cpuid(): nothing gets recorded
879 * actual implementation must be in arch/$(SRCARCH)/util/header.c
880 */
881 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused,
882 struct perf_cpu cpu __maybe_unused)
883 {
884 return ENOSYS; /* Not implemented */
885 }
886
887 static int write_cpuid(struct feat_fd *ff, struct evlist *evlist)
888 {
889 struct perf_cpu cpu = perf_cpu_map__min(evlist->core.all_cpus);
890 char buffer[64];
891 int ret;
892
893 ret = get_cpuid(buffer, sizeof(buffer), cpu);
894 if (ret)
895 return -1;
896
897 return do_write_string(ff, buffer);
898 }
899
900 static int write_branch_stack(struct feat_fd *ff __maybe_unused,
901 struct evlist *evlist __maybe_unused)
902 {
903 return 0;
904 }
905
906 static int write_auxtrace(struct feat_fd *ff,
907 struct evlist *evlist __maybe_unused)
908 {
909 struct perf_session *session;
910 int err;
911
912 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
913 return -1;
914
915 session = container_of(ff->ph, struct perf_session, header);
916
917 err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
918 if (err < 0)
919 pr_err("Failed to write auxtrace index\n");
920 return err;
921 }
922
923 static int write_clockid(struct feat_fd *ff,
924 struct evlist *evlist __maybe_unused)
925 {
926 return do_write(ff, &ff->ph->env.clock.clockid_res_ns,
927 sizeof(ff->ph->env.clock.clockid_res_ns));
928 }
929
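/* HEADER_CLOCK_DATA: a version, the session clockid and a pair of reference
 * timestamps (TOD wall clock and clockid time) captured at record time.
 */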
930 static int write_clock_data(struct feat_fd *ff,
931 struct evlist *evlist __maybe_unused)
932 {
933 u64 *data64;
934 u32 data32;
935 int ret;
936
937 /* version */
938 data32 = 1;
939
940 ret = do_write(ff, &data32, sizeof(data32));
941 if (ret < 0)
942 return ret;
943
944 /* clockid */
945 data32 = ff->ph->env.clock.clockid;
946
947 ret = do_write(ff, &data32, sizeof(data32));
948 if (ret < 0)
949 return ret;
950
951 /* TOD ref time */
952 data64 = &ff->ph->env.clock.tod_ns;
953
954 ret = do_write(ff, data64, sizeof(*data64));
955 if (ret < 0)
956 return ret;
957
958 /* clockid ref time */
959 data64 = &ff->ph->env.clock.clockid_ns;
960
961 return do_write(ff, data64, sizeof(*data64));
962 }
963
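/* HEADER_HYBRID_TOPOLOGY: for each hybrid/core PMU, its name and the list of
 * CPUs it covers.
 */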
964 static int write_hybrid_topology(struct feat_fd *ff,
965 struct evlist *evlist __maybe_unused)
966 {
967 struct hybrid_topology *tp;
968 int ret;
969 u32 i;
970
971 tp = hybrid_topology__new();
972 if (!tp)
973 return -ENOENT;
974
975 ret = do_write(ff, &tp->nr, sizeof(u32));
976 if (ret < 0)
977 goto err;
978
979 for (i = 0; i < tp->nr; i++) {
980 struct hybrid_topology_node *n = &tp->nodes[i];
981
982 ret = do_write_string(ff, n->pmu_name);
983 if (ret < 0)
984 goto err;
985
986 ret = do_write_string(ff, n->cpus);
987 if (ret < 0)
988 goto err;
989 }
990
991 ret = 0;
992
993 err:
994 hybrid_topology__delete(tp);
995 return ret;
996 }
997
998 static int write_dir_format(struct feat_fd *ff,
999 struct evlist *evlist __maybe_unused)
1000 {
1001 struct perf_session *session;
1002 struct perf_data *data;
1003
1004 session = container_of(ff->ph, struct perf_session, header);
1005 data = session->data;
1006
1007 if (WARN_ON(!perf_data__is_dir(data)))
1008 return -1;
1009
1010 return do_write(ff, &data->dir.version, sizeof(data->dir.version));
1011 }
1012
1013 #ifdef HAVE_LIBBPF_SUPPORT
1014 static int write_bpf_prog_info(struct feat_fd *ff,
1015 struct evlist *evlist __maybe_unused)
1016 {
1017 struct perf_env *env = &ff->ph->env;
1018 struct rb_root *root;
1019 struct rb_node *next;
1020 int ret = 0;
1021
1022 down_read(&env->bpf_progs.lock);
1023
1024 ret = do_write(ff, &env->bpf_progs.infos_cnt,
1025 sizeof(env->bpf_progs.infos_cnt));
1026 if (ret < 0 || env->bpf_progs.infos_cnt == 0)
1027 goto out;
1028
1029 root = &env->bpf_progs.infos;
1030 next = rb_first(root);
1031 while (next) {
1032 struct bpf_prog_info_node *node;
1033 size_t len;
1034
1035 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1036 next = rb_next(&node->rb_node);
1037 len = sizeof(struct perf_bpil) +
1038 node->info_linear->data_len;
1039
1040 /* before writing to file, translate address to offset */
1041 bpil_addr_to_offs(node->info_linear);
1042 ret = do_write(ff, node->info_linear, len);
1043 /*
1044 * translate back to address even when do_write() fails,
1045 * so that this function never changes the data.
1046 */
1047 bpil_offs_to_addr(node->info_linear);
1048 if (ret < 0)
1049 goto out;
1050 }
1051 out:
1052 up_read(&env->bpf_progs.lock);
1053 return ret;
1054 }
1055
1056 static int write_bpf_btf(struct feat_fd *ff,
1057 struct evlist *evlist __maybe_unused)
1058 {
1059 struct perf_env *env = &ff->ph->env;
1060 struct rb_root *root;
1061 struct rb_node *next;
1062 int ret = 0;
1063
1064 down_read(&env->bpf_progs.lock);
1065
1066 ret = do_write(ff, &env->bpf_progs.btfs_cnt,
1067 sizeof(env->bpf_progs.btfs_cnt));
1068
1069 if (ret < 0 || env->bpf_progs.btfs_cnt == 0)
1070 goto out;
1071
1072 root = &env->bpf_progs.btfs;
1073 next = rb_first(root);
1074 while (next) {
1075 struct btf_node *node;
1076
1077 node = rb_entry(next, struct btf_node, rb_node);
1078 next = rb_next(&node->rb_node);
1079 ret = do_write(ff, &node->id,
1080 sizeof(u32) * 2 + node->data_size);
1081 if (ret < 0)
1082 goto out;
1083 }
1084 out:
1085 up_read(&env->bpf_progs.lock);
1086 return ret;
1087 }
1088 #endif // HAVE_LIBBPF_SUPPORT
1089
1090 static int cpu_cache_level__sort(const void *a, const void *b)
1091 {
1092 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
1093 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
1094
1095 return cache_a->level - cache_b->level;
1096 }
1097
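/* Two cache entries are considered identical when level, line size, sets,
 * ways, type, size and shared-CPU map all match; used for deduplication.
 */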
1098 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
1099 {
1100 if (a->level != b->level)
1101 return false;
1102
1103 if (a->line_size != b->line_size)
1104 return false;
1105
1106 if (a->sets != b->sets)
1107 return false;
1108
1109 if (a->ways != b->ways)
1110 return false;
1111
1112 if (strcmp(a->type, b->type))
1113 return false;
1114
1115 if (strcmp(a->size, b->size))
1116 return false;
1117
1118 if (strcmp(a->map, b->map))
1119 return false;
1120
1121 return true;
1122 }
1123
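/* Read one cache index of a CPU from sysfs. Returns 0 on success, 1 if the
 * index does not exist and -1 on read errors; string fields are allocated.
 */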
1124 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
1125 {
1126 char path[PATH_MAX], file[PATH_MAX];
1127 struct stat st;
1128 size_t len;
1129
1130 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
1131 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
1132
1133 if (stat(file, &st))
1134 return 1;
1135
1136 scnprintf(file, PATH_MAX, "%s/level", path);
1137 if (sysfs__read_int(file, (int *) &cache->level))
1138 return -1;
1139
1140 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
1141 if (sysfs__read_int(file, (int *) &cache->line_size))
1142 return -1;
1143
1144 scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
1145 if (sysfs__read_int(file, (int *) &cache->sets))
1146 return -1;
1147
1148 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
1149 if (sysfs__read_int(file, (int *) &cache->ways))
1150 return -1;
1151
1152 scnprintf(file, PATH_MAX, "%s/type", path);
1153 if (sysfs__read_str(file, &cache->type, &len))
1154 return -1;
1155
1156 cache->type[len] = 0;
1157 cache->type = strim(cache->type);
1158
1159 scnprintf(file, PATH_MAX, "%s/size", path);
1160 if (sysfs__read_str(file, &cache->size, &len)) {
1161 zfree(&cache->type);
1162 return -1;
1163 }
1164
1165 cache->size[len] = 0;
1166 cache->size = strim(cache->size);
1167
1168 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
1169 if (sysfs__read_str(file, &cache->map, &len)) {
1170 zfree(&cache->size);
1171 zfree(&cache->type);
1172 return -1;
1173 }
1174
1175 cache->map[len] = 0;
1176 cache->map = strim(cache->map);
1177 return 0;
1178 }
1179
1180 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
1181 {
1182 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
1183 }
1184
1185 /*
1186 * Build cache levels for a particular CPU from the data in
1187 * /sys/devices/system/cpu/cpu<cpu>/cache/
1188 * The cache level data is stored in caches[] from index at
1189 * *cntp.
1190 */
1191 int build_caches_for_cpu(u32 cpu, struct cpu_cache_level caches[], u32 *cntp)
1192 {
1193 u16 level;
1194
1195 for (level = 0; level < MAX_CACHE_LVL; level++) {
1196 struct cpu_cache_level c;
1197 int err;
1198 u32 i;
1199
1200 err = cpu_cache_level__read(&c, cpu, level);
1201 if (err < 0)
1202 return err;
1203
1204 if (err == 1)
1205 break;
1206
1207 for (i = 0; i < *cntp; i++) {
1208 if (cpu_cache_level__cmp(&c, &caches[i]))
1209 break;
1210 }
1211
1212 if (i == *cntp) {
1213 caches[*cntp] = c;
1214 *cntp = *cntp + 1;
1215 } else
1216 cpu_cache_level__free(&c);
1217 }
1218
1219 return 0;
1220 }
1221
1222 static int build_caches(struct cpu_cache_level caches[], u32 *cntp)
1223 {
1224 u32 nr, cpu, cnt = 0;
1225
1226 nr = cpu__max_cpu().cpu;
1227
1228 for (cpu = 0; cpu < nr; cpu++) {
1229 int ret = build_caches_for_cpu(cpu, caches, &cnt);
1230
1231 if (ret)
1232 return ret;
1233 }
1234 *cntp = cnt;
1235 return 0;
1236 }
1237
1238 static int write_cache(struct feat_fd *ff,
1239 struct evlist *evlist __maybe_unused)
1240 {
1241 u32 max_caches = cpu__max_cpu().cpu * MAX_CACHE_LVL;
1242 struct cpu_cache_level caches[max_caches];
1243 u32 cnt = 0, i, version = 1;
1244 int ret;
1245
1246 ret = build_caches(caches, &cnt);
1247 if (ret)
1248 goto out;
1249
1250 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1251
1252 ret = do_write(ff, &version, sizeof(u32));
1253 if (ret < 0)
1254 goto out;
1255
1256 ret = do_write(ff, &cnt, sizeof(u32));
1257 if (ret < 0)
1258 goto out;
1259
1260 for (i = 0; i < cnt; i++) {
1261 struct cpu_cache_level *c = &caches[i];
1262
1263 #define _W(v) \
1264 ret = do_write(ff, &c->v, sizeof(u32)); \
1265 if (ret < 0) \
1266 goto out;
1267
1268 _W(level)
1269 _W(line_size)
1270 _W(sets)
1271 _W(ways)
1272 #undef _W
1273
1274 #define _W(v) \
1275 ret = do_write_string(ff, (const char *) c->v); \
1276 if (ret < 0) \
1277 goto out;
1278
1279 _W(type)
1280 _W(size)
1281 _W(map)
1282 #undef _W
1283 }
1284
1285 out:
1286 for (i = 0; i < cnt; i++)
1287 cpu_cache_level__free(&caches[i]);
1288 return ret;
1289 }
1290
1291 static int write_stat(struct feat_fd *ff __maybe_unused,
1292 struct evlist *evlist __maybe_unused)
1293 {
1294 return 0;
1295 }
1296
1297 static int write_sample_time(struct feat_fd *ff,
1298 struct evlist *evlist)
1299 {
1300 int ret;
1301
1302 ret = do_write(ff, &evlist->first_sample_time,
1303 sizeof(evlist->first_sample_time));
1304 if (ret < 0)
1305 return ret;
1306
1307 return do_write(ff, &evlist->last_sample_time,
1308 sizeof(evlist->last_sample_time));
1309 }
1310
1311
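/* Record which memoryN blocks belong to NUMA node 'idx' in a bitmap sized
 * to the highest block index found under /sys/devices/system/node/node<idx>.
 */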
1312 static int memory_node__read(struct memory_node *n, unsigned long idx)
1313 {
1314 unsigned int phys, size = 0;
1315 char path[PATH_MAX];
1316 struct io_dirent64 *ent;
1317 struct io_dir dir;
1318
1319 #define for_each_memory(mem, dir) \
1320 while ((ent = io_dir__readdir(&dir)) != NULL) \
1321 if (strcmp(ent->d_name, ".") && \
1322 strcmp(ent->d_name, "..") && \
1323 sscanf(ent->d_name, "memory%u", &mem) == 1)
1324
1325 scnprintf(path, PATH_MAX,
1326 "%s/devices/system/node/node%lu",
1327 sysfs__mountpoint(), idx);
1328
1329 io_dir__init(&dir, open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
1330 if (dir.dirfd < 0) {
1331 pr_warning("failed: can't open memory sysfs data '%s'\n", path);
1332 return -1;
1333 }
1334
1335 for_each_memory(phys, dir) {
1336 size = max(phys, size);
1337 }
1338
1339 size++;
1340
1341 n->set = bitmap_zalloc(size);
1342 if (!n->set) {
1343 close(dir.dirfd);
1344 return -ENOMEM;
1345 }
1346
1347 n->node = idx;
1348 n->size = size;
1349
1350 io_dir__rewinddir(&dir);
1351
1352 for_each_memory(phys, dir) {
1353 __set_bit(phys, n->set);
1354 }
1355
1356 close(dir.dirfd);
1357 return 0;
1358 }
1359
1360 static void memory_node__delete_nodes(struct memory_node *nodesp, u64 cnt)
1361 {
1362 for (u64 i = 0; i < cnt; i++)
1363 bitmap_free(nodesp[i].set);
1364
1365 free(nodesp);
1366 }
1367
1368 static int memory_node__sort(const void *a, const void *b)
1369 {
1370 const struct memory_node *na = a;
1371 const struct memory_node *nb = b;
1372
1373 return na->node - nb->node;
1374 }
1375
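/* Enumerate /sys/devices/system/node/node* and read each node's memory block
 * bitmap; on success *nodesp gets a sorted, caller-owned array of *cntp nodes.
 */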
1376 static int build_mem_topology(struct memory_node **nodesp, u64 *cntp)
1377 {
1378 char path[PATH_MAX];
1379 struct io_dirent64 *ent;
1380 struct io_dir dir;
1381 int ret = 0;
1382 size_t cnt = 0, size = 0;
1383 struct memory_node *nodes = NULL;
1384
1385 scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1386 sysfs__mountpoint());
1387
1388 io_dir__init(&dir, open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
1389 if (dir.dirfd < 0) {
1390 pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
1391 __func__, path);
1392 return -1;
1393 }
1394
1395 while (!ret && (ent = io_dir__readdir(&dir))) {
1396 unsigned int idx;
1397 int r;
1398
1399 if (!strcmp(ent->d_name, ".") ||
1400 !strcmp(ent->d_name, ".."))
1401 continue;
1402
1403 r = sscanf(ent->d_name, "node%u", &idx);
1404 if (r != 1)
1405 continue;
1406
1407 if (cnt >= size) {
1408 struct memory_node *new_nodes =
1409 reallocarray(nodes, cnt + 4, sizeof(*nodes));
1410
1411 if (!new_nodes) {
1412 pr_err("Failed to write MEM_TOPOLOGY, size %zd nodes\n", size);
1413 ret = -ENOMEM;
1414 goto out;
1415 }
1416 nodes = new_nodes;
1417 size += 4;
1418 }
1419 ret = memory_node__read(&nodes[cnt], idx);
1420 if (!ret)
1421 cnt += 1;
1422 }
1423 out:
1424 close(dir.dirfd);
1425 if (!ret) {
1426 *cntp = cnt;
1427 *nodesp = nodes;
1428 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1429 } else
1430 memory_node__delete_nodes(nodes, cnt);
1431
1432 return ret;
1433 }
1434
1435 /*
1436 * The MEM_TOPOLOGY feature holds the physical memory map for every
1437 * node in the system. The format of the data is as follows:
1438 *
1439 * 0 - version | for future changes
1440 * 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
1441 * 16 - count | number of nodes
1442 *
1443 * For each node we store a map of the physical memory block indexes
1444 * that belong to that node:
1445 *
1446 * 32 - node id | node index
1447 * 40 - size | size of bitmap
1448 * 48 - bitmap | bitmap of memory indexes that belongs to node
1449 */
1450 static int write_mem_topology(struct feat_fd *ff __maybe_unused,
1451 struct evlist *evlist __maybe_unused)
1452 {
1453 struct memory_node *nodes = NULL;
1454 u64 bsize, version = 1, i, nr = 0;
1455 int ret;
1456
1457 ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
1458 (unsigned long long *) &bsize);
1459 if (ret)
1460 return ret;
1461
1462 ret = build_mem_topology(&nodes, &nr);
1463 if (ret)
1464 return ret;
1465
1466 ret = do_write(ff, &version, sizeof(version));
1467 if (ret < 0)
1468 goto out;
1469
1470 ret = do_write(ff, &bsize, sizeof(bsize));
1471 if (ret < 0)
1472 goto out;
1473
1474 ret = do_write(ff, &nr, sizeof(nr));
1475 if (ret < 0)
1476 goto out;
1477
1478 for (i = 0; i < nr; i++) {
1479 struct memory_node *n = &nodes[i];
1480
1481 #define _W(v) \
1482 ret = do_write(ff, &n->v, sizeof(n->v)); \
1483 if (ret < 0) \
1484 goto out;
1485
1486 _W(node)
1487 _W(size)
1488
1489 #undef _W
1490
1491 ret = do_write_bitmap(ff, n->set, n->size);
1492 if (ret < 0)
1493 goto out;
1494 }
1495
1496 out:
1497 memory_node__delete_nodes(nodes, nr);
1498 return ret;
1499 }
1500
1501 static int write_compressed(struct feat_fd *ff __maybe_unused,
1502 struct evlist *evlist __maybe_unused)
1503 {
1504 int ret;
1505
1506 ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
1507 if (ret)
1508 return ret;
1509
1510 ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
1511 if (ret)
1512 return ret;
1513
1514 ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
1515 if (ret)
1516 return ret;
1517
1518 ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
1519 if (ret)
1520 return ret;
1521
1522 return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
1523 }
1524
1525 static int __write_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu,
1526 bool write_pmu)
1527 {
1528 struct perf_pmu_caps *caps = NULL;
1529 int ret;
1530
1531 ret = do_write(ff, &pmu->nr_caps, sizeof(pmu->nr_caps));
1532 if (ret < 0)
1533 return ret;
1534
1535 list_for_each_entry(caps, &pmu->caps, list) {
1536 ret = do_write_string(ff, caps->name);
1537 if (ret < 0)
1538 return ret;
1539
1540 ret = do_write_string(ff, caps->value);
1541 if (ret < 0)
1542 return ret;
1543 }
1544
1545 if (write_pmu) {
1546 ret = do_write_string(ff, pmu->name);
1547 if (ret < 0)
1548 return ret;
1549 }
1550
1551 return ret;
1552 }
1553
1554 static int write_cpu_pmu_caps(struct feat_fd *ff,
1555 struct evlist *evlist __maybe_unused)
1556 {
1557 struct perf_pmu *cpu_pmu = perf_pmus__find_core_pmu();
1558 int ret;
1559
1560 if (!cpu_pmu)
1561 return -ENOENT;
1562
1563 ret = perf_pmu__caps_parse(cpu_pmu);
1564 if (ret < 0)
1565 return ret;
1566
1567 return __write_pmu_caps(ff, cpu_pmu, false);
1568 }
1569
1570 static int write_pmu_caps(struct feat_fd *ff,
1571 struct evlist *evlist __maybe_unused)
1572 {
1573 struct perf_pmu *pmu = NULL;
1574 int nr_pmu = 0;
1575 int ret;
1576
1577 while ((pmu = perf_pmus__scan(pmu))) {
1578 if (!strcmp(pmu->name, "cpu")) {
1579 /*
1580 * The "cpu" PMU is special and covered by
1581 * HEADER_CPU_PMU_CAPS. Note, core PMUs are
1582 * counted/written here for ARM, s390 and Intel hybrid.
1583 */
1584 continue;
1585 }
1586 if (perf_pmu__caps_parse(pmu) <= 0)
1587 continue;
1588 nr_pmu++;
1589 }
1590
1591 ret = do_write(ff, &nr_pmu, sizeof(nr_pmu));
1592 if (ret < 0)
1593 return ret;
1594
1595 if (!nr_pmu)
1596 return 0;
1597
1598 /*
1599 * Note older perf tools assume core PMUs come first, this is a property
1600 * of perf_pmus__scan.
1601 */
1602 pmu = NULL;
1603 while ((pmu = perf_pmus__scan(pmu))) {
1604 if (!strcmp(pmu->name, "cpu")) {
1605 /* Skip as above. */
1606 continue;
1607 }
1608 if (perf_pmu__caps_parse(pmu) <= 0)
1609 continue;
1610 ret = __write_pmu_caps(ff, pmu, true);
1611 if (ret < 0)
1612 return ret;
1613 }
1614 return 0;
1615 }
1616
1617 static void print_hostname(struct feat_fd *ff, FILE *fp)
1618 {
1619 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
1620 }
1621
1622 static void print_osrelease(struct feat_fd *ff, FILE *fp)
1623 {
1624 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
1625 }
1626
1627 static void print_arch(struct feat_fd *ff, FILE *fp)
1628 {
1629 fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
1630 }
1631
1632 static void print_cpudesc(struct feat_fd *ff, FILE *fp)
1633 {
1634 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
1635 }
1636
1637 static void print_nrcpus(struct feat_fd *ff, FILE *fp)
1638 {
1639 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1640 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
1641 }
1642
1643 static void print_version(struct feat_fd *ff, FILE *fp)
1644 {
1645 fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
1646 }
1647
1648 static void print_cmdline(struct feat_fd *ff, FILE *fp)
1649 {
1650 int nr, i;
1651
1652 nr = ff->ph->env.nr_cmdline;
1653
1654 fprintf(fp, "# cmdline : ");
1655
1656 for (i = 0; i < nr; i++) {
1657 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1658 if (!argv_i) {
1659 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1660 } else {
1661 char *mem = argv_i;
1662 do {
1663 char *quote = strchr(argv_i, '\'');
1664 if (!quote)
1665 break;
1666 *quote++ = '\0';
1667 fprintf(fp, "%s\\\'", argv_i);
1668 argv_i = quote;
1669 } while (1);
1670 fprintf(fp, "%s ", argv_i);
1671 free(mem);
1672 }
1673 }
1674 fputc('\n', fp);
1675 }
1676
1677 static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
1678 {
1679 struct perf_header *ph = ff->ph;
1680 int cpu_nr = ph->env.nr_cpus_avail;
1681 int nr, i;
1682 char *str;
1683
1684 nr = ph->env.nr_sibling_cores;
1685 str = ph->env.sibling_cores;
1686
1687 for (i = 0; i < nr; i++) {
1688 fprintf(fp, "# sibling sockets : %s\n", str);
1689 str += strlen(str) + 1;
1690 }
1691
1692 if (ph->env.nr_sibling_dies) {
1693 nr = ph->env.nr_sibling_dies;
1694 str = ph->env.sibling_dies;
1695
1696 for (i = 0; i < nr; i++) {
1697 fprintf(fp, "# sibling dies : %s\n", str);
1698 str += strlen(str) + 1;
1699 }
1700 }
1701
1702 nr = ph->env.nr_sibling_threads;
1703 str = ph->env.sibling_threads;
1704
1705 for (i = 0; i < nr; i++) {
1706 fprintf(fp, "# sibling threads : %s\n", str);
1707 str += strlen(str) + 1;
1708 }
1709
1710 if (ph->env.nr_sibling_dies) {
1711 if (ph->env.cpu != NULL) {
1712 for (i = 0; i < cpu_nr; i++)
1713 fprintf(fp, "# CPU %d: Core ID %d, "
1714 "Die ID %d, Socket ID %d\n",
1715 i, ph->env.cpu[i].core_id,
1716 ph->env.cpu[i].die_id,
1717 ph->env.cpu[i].socket_id);
1718 } else
1719 fprintf(fp, "# Core ID, Die ID and Socket ID "
1720 "information is not available\n");
1721 } else {
1722 if (ph->env.cpu != NULL) {
1723 for (i = 0; i < cpu_nr; i++)
1724 fprintf(fp, "# CPU %d: Core ID %d, "
1725 "Socket ID %d\n",
1726 i, ph->env.cpu[i].core_id,
1727 ph->env.cpu[i].socket_id);
1728 } else
1729 fprintf(fp, "# Core ID and Socket ID "
1730 "information is not available\n");
1731 }
1732 }
1733
1734 static void print_clockid(struct feat_fd *ff, FILE *fp)
1735 {
1736 fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
1737 ff->ph->env.clock.clockid_res_ns * 1000);
1738 }
1739
1740 static void print_clock_data(struct feat_fd *ff, FILE *fp)
1741 {
1742 struct timespec clockid_ns;
1743 char tstr[64], date[64];
1744 struct timeval tod_ns;
1745 clockid_t clockid;
1746 struct tm ltime;
1747 u64 ref;
1748
1749 if (!ff->ph->env.clock.enabled) {
1750 fprintf(fp, "# reference time disabled\n");
1751 return;
1752 }
1753
1754 /* Compute TOD time. */
1755 ref = ff->ph->env.clock.tod_ns;
1756 tod_ns.tv_sec = ref / NSEC_PER_SEC;
1757 ref -= tod_ns.tv_sec * NSEC_PER_SEC;
1758 tod_ns.tv_usec = ref / NSEC_PER_USEC;
1759
1760 /* Compute clockid time. */
1761 ref = ff->ph->env.clock.clockid_ns;
1762 clockid_ns.tv_sec = ref / NSEC_PER_SEC;
1763 ref -= clockid_ns.tv_sec * NSEC_PER_SEC;
1764 clockid_ns.tv_nsec = ref;
1765
1766 clockid = ff->ph->env.clock.clockid;
1767
1768 if (localtime_r(&tod_ns.tv_sec, <ime) == NULL)
1769 snprintf(tstr, sizeof(tstr), "<error>");
1770 else {
1771 strftime(date, sizeof(date), "%F %T", <ime);
1772 scnprintf(tstr, sizeof(tstr), "%s.%06d",
1773 date, (int) tod_ns.tv_usec);
1774 }
1775
1776 fprintf(fp, "# clockid: %s (%u)\n", clockid_name(clockid), clockid);
1777 fprintf(fp, "# reference time: %s = %ld.%06d (TOD) = %ld.%09ld (%s)\n",
1778 tstr, (long) tod_ns.tv_sec, (int) tod_ns.tv_usec,
1779 (long) clockid_ns.tv_sec, clockid_ns.tv_nsec,
1780 clockid_name(clockid));
1781 }
1782
1783 static void print_hybrid_topology(struct feat_fd *ff, FILE *fp)
1784 {
1785 int i;
1786 struct hybrid_node *n;
1787
1788 fprintf(fp, "# hybrid cpu system:\n");
1789 for (i = 0; i < ff->ph->env.nr_hybrid_nodes; i++) {
1790 n = &ff->ph->env.hybrid_nodes[i];
1791 fprintf(fp, "# %s cpu list : %s\n", n->pmu_name, n->cpus);
1792 }
1793 }
1794
1795 static void print_dir_format(struct feat_fd *ff, FILE *fp)
1796 {
1797 struct perf_session *session;
1798 struct perf_data *data;
1799
1800 session = container_of(ff->ph, struct perf_session, header);
1801 data = session->data;
1802
1803 fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
1804 }
1805
1806 #ifdef HAVE_LIBBPF_SUPPORT
1807 static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
1808 {
1809 struct perf_env *env = &ff->ph->env;
1810 struct rb_root *root;
1811 struct rb_node *next;
1812
1813 down_read(&env->bpf_progs.lock);
1814
1815 root = &env->bpf_progs.infos;
1816 next = rb_first(root);
1817
1818 if (!next)
1819 printf("# bpf_prog_info empty\n");
1820
1821 while (next) {
1822 struct bpf_prog_info_node *node;
1823
1824 node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1825 next = rb_next(&node->rb_node);
1826
1827 __bpf_event__print_bpf_prog_info(&node->info_linear->info,
1828 env, fp);
1829 }
1830
1831 up_read(&env->bpf_progs.lock);
1832 }
1833
1834 static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
1835 {
1836 struct perf_env *env = &ff->ph->env;
1837 struct rb_root *root;
1838 struct rb_node *next;
1839
1840 down_read(&env->bpf_progs.lock);
1841
1842 root = &env->bpf_progs.btfs;
1843 next = rb_first(root);
1844
1845 if (!next)
1846 printf("# btf info empty\n");
1847
1848 while (next) {
1849 struct btf_node *node;
1850
1851 node = rb_entry(next, struct btf_node, rb_node);
1852 next = rb_next(&node->rb_node);
1853 fprintf(fp, "# btf info of id %u\n", node->id);
1854 }
1855
1856 up_read(&env->bpf_progs.lock);
1857 }
1858 #endif // HAVE_LIBBPF_SUPPORT
1859
1860 static void free_event_desc(struct evsel *events)
1861 {
1862 struct evsel *evsel;
1863
1864 if (!events)
1865 return;
1866
1867 for (evsel = events; evsel->core.attr.size; evsel++) {
1868 zfree(&evsel->name);
1869 zfree(&evsel->core.id);
1870 }
1871
1872 free(events);
1873 }
1874
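/* Sanity-check an attr read from the file: reject reserved bits and any
 * sample, read-format or branch-sample flags this tool does not know about.
 */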
1875 static bool perf_attr_check(struct perf_event_attr *attr)
1876 {
1877 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) {
1878 pr_warning("Reserved bits are set unexpectedly. "
1879 "Please update perf tool.\n");
1880 return false;
1881 }
1882
1883 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) {
1884 pr_warning("Unknown sample type (0x%llx) is detected. "
1885 "Please update perf tool.\n",
1886 attr->sample_type);
1887 return false;
1888 }
1889
1890 if (attr->read_format & ~(PERF_FORMAT_MAX-1)) {
1891 pr_warning("Unknown read format (0x%llx) is detected. "
1892 "Please update perf tool.\n",
1893 attr->read_format);
1894 return false;
1895 }
1896
1897 if ((attr->sample_type & PERF_SAMPLE_BRANCH_STACK) &&
1898 (attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX-1))) {
1899 pr_warning("Unknown branch sample type (0x%llx) is detected. "
1900 "Please update perf tool.\n",
1901 attr->branch_sample_type);
1902
1903 return false;
1904 }
1905
1906 return true;
1907 }
1908
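/*
 * Parse HEADER_EVENT_DESC into an array of evsels terminated by an entry
 * whose core.attr.size is 0, coping with on-file attr sizes that differ from
 * the running tool's struct perf_event_attr and byte-swapping when needed.
 */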
1909 static struct evsel *read_event_desc(struct feat_fd *ff)
1910 {
1911 struct evsel *evsel, *events = NULL;
1912 u64 *id;
1913 void *buf = NULL;
1914 u32 nre, sz, nr, i, j;
1915 size_t msz;
1916
1917 /* number of events */
1918 if (do_read_u32(ff, &nre))
1919 goto error;
1920
1921 if (do_read_u32(ff, &sz))
1922 goto error;
1923
1924 /* buffer to hold on file attr struct */
1925 buf = malloc(sz);
1926 if (!buf)
1927 goto error;
1928
1929 /* the last event terminates with evsel->core.attr.size == 0: */
1930 events = calloc(nre + 1, sizeof(*events));
1931 if (!events)
1932 goto error;
1933
1934 msz = sizeof(evsel->core.attr);
1935 if (sz < msz)
1936 msz = sz;
1937
1938 for (i = 0, evsel = events; i < nre; evsel++, i++) {
1939 evsel->core.idx = i;
1940
1941 /*
1942 * must read entire on-file attr struct to
1943 * sync up with layout.
1944 */
1945 if (__do_read(ff, buf, sz))
1946 goto error;
1947
1948 if (ff->ph->needs_swap)
1949 perf_event__attr_swap(buf);
1950
1951 memcpy(&evsel->core.attr, buf, msz);
1952
1953 if (!perf_attr_check(&evsel->core.attr))
1954 goto error;
1955
1956 if (do_read_u32(ff, &nr))
1957 goto error;
1958
1959 if (ff->ph->needs_swap)
1960 evsel->needs_swap = true;
1961
1962 evsel->name = do_read_string(ff);
1963 if (!evsel->name)
1964 goto error;
1965
1966 if (!nr)
1967 continue;
1968
1969 id = calloc(nr, sizeof(*id));
1970 if (!id)
1971 goto error;
1972 evsel->core.ids = nr;
1973 evsel->core.id = id;
1974
1975 for (j = 0 ; j < nr; j++) {
1976 if (do_read_u64(ff, id))
1977 goto error;
1978 id++;
1979 }
1980 }
1981 out:
1982 free(buf);
1983 return events;
1984 error:
1985 free_event_desc(events);
1986 events = NULL;
1987 goto out;
1988 }
1989
1990 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1991 void *priv __maybe_unused)
1992 {
1993 return fprintf(fp, ", %s = %s", name, val);
1994 }
1995
1996 static void print_event_desc(struct feat_fd *ff, FILE *fp)
1997 {
1998 struct evsel *evsel, *events;
1999 u32 j;
2000 u64 *id;
2001
2002 if (ff->events)
2003 events = ff->events;
2004 else
2005 events = read_event_desc(ff);
2006
2007 if (!events) {
2008 fprintf(fp, "# event desc: not available or unable to read\n");
2009 return;
2010 }
2011
2012 for (evsel = events; evsel->core.attr.size; evsel++) {
2013 fprintf(fp, "# event : name = %s, ", evsel->name);
2014
2015 if (evsel->core.ids) {
2016 fprintf(fp, ", id = {");
2017 for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
2018 if (j)
2019 fputc(',', fp);
2020 fprintf(fp, " %"PRIu64, *id);
2021 }
2022 fprintf(fp, " }");
2023 }
2024
2025 perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);
2026
2027 fputc('\n', fp);
2028 }
2029
2030 free_event_desc(events);
2031 ff->events = NULL;
2032 }
2033
2034 static void print_total_mem(struct feat_fd *ff, FILE *fp)
2035 {
2036 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
2037 }
2038
2039 static void print_numa_topology(struct feat_fd *ff, FILE *fp)
2040 {
2041 int i;
2042 struct numa_node *n;
2043
2044 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
2045 n = &ff->ph->env.numa_nodes[i];
2046
2047 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
2048 " free = %"PRIu64" kB\n",
2049 n->node, n->mem_total, n->mem_free);
2050
2051 fprintf(fp, "# node%u cpu list : ", n->node);
2052 cpu_map__fprintf(n->map, fp);
2053 }
2054 }
2055
2056 static void print_cpuid(struct feat_fd *ff, FILE *fp)
2057 {
2058 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
2059 }
2060
2061 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
2062 {
2063 fprintf(fp, "# contains samples with branch stack\n");
2064 }
2065
2066 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
2067 {
2068 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
2069 }
2070
2071 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
2072 {
2073 fprintf(fp, "# contains stat data\n");
2074 }
2075
2076 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
2077 {
2078 int i;
2079
2080 fprintf(fp, "# CPU cache info:\n");
2081 for (i = 0; i < ff->ph->env.caches_cnt; i++) {
2082 fprintf(fp, "# ");
2083 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
2084 }
2085 }
2086
2087 static void print_compressed(struct feat_fd *ff, FILE *fp)
2088 {
2089 fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
2090 ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
2091 ff->ph->env.comp_level, ff->ph->env.comp_ratio);
2092 }
2093
2094 static void __print_pmu_caps(FILE *fp, int nr_caps, char **caps, char *pmu_name)
2095 {
2096 const char *delimiter = "";
2097 int i;
2098
2099 if (!nr_caps) {
2100 fprintf(fp, "# %s pmu capabilities: not available\n", pmu_name);
2101 return;
2102 }
2103
2104 fprintf(fp, "# %s pmu capabilities: ", pmu_name);
2105 for (i = 0; i < nr_caps; i++) {
2106 fprintf(fp, "%s%s", delimiter, caps[i]);
2107 delimiter = ", ";
2108 }
2109
2110 fprintf(fp, "\n");
2111 }
2112
2113 static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
2114 {
2115 __print_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps,
2116 ff->ph->env.cpu_pmu_caps, (char *)"cpu");
2117 }
2118
2119 static void print_pmu_caps(struct feat_fd *ff, FILE *fp)
2120 {
2121 struct perf_env *env = &ff->ph->env;
2122 struct pmu_caps *pmu_caps;
2123
2124 for (int i = 0; i < env->nr_pmus_with_caps; i++) {
2125 pmu_caps = &env->pmu_caps[i];
2126 __print_pmu_caps(fp, pmu_caps->nr_caps, pmu_caps->caps,
2127 pmu_caps->pmu_name);
2128 }
2129
2130 if (strcmp(perf_env__arch(env), "x86") == 0 &&
2131 perf_env__has_pmu_mapping(env, "ibs_op")) {
2132 char *max_precise = perf_env__find_pmu_cap(env, "cpu", "max_precise");
2133
2134 if (max_precise != NULL && atoi(max_precise) == 0)
2135 fprintf(fp, "# AMD systems use the ibs_op// PMU for some precise events, e.g.: cycles:p, see the 'perf list' man page for further details.\n");
2136 }
2137 }
2138
2139 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
2140 {
2141 struct perf_env *env = &ff->ph->env;
2142 const char *delimiter = "# pmu mappings: ";
2143 char *str, *tmp;
2144 u32 pmu_num;
2145 u32 type;
2146
2147 pmu_num = env->nr_pmu_mappings;
2148 if (!pmu_num) {
2149 fprintf(fp, "# pmu mappings: not available\n");
2150 return;
2151 }
2152
2153 str = env->pmu_mappings;
2154
2155 while (pmu_num) {
2156 type = strtoul(str, &tmp, 0);
2157 if (*tmp != ':')
2158 goto error;
2159
2160 str = tmp + 1;
2161 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
2162
2163 delimiter = ", ";
2164 str += strlen(str) + 1;
2165 pmu_num--;
2166 }
2167
2168 fprintf(fp, "\n");
2169
2170 if (!pmu_num)
2171 return;
2172 error:
2173 fprintf(fp, "# pmu mappings: unable to read\n");
2174 }
2175
2176 static void print_group_desc(struct feat_fd *ff, FILE *fp)
2177 {
2178 struct perf_session *session;
2179 struct evsel *evsel;
2180 u32 nr = 0;
2181
2182 session = container_of(ff->ph, struct perf_session, header);
2183
2184 evlist__for_each_entry(session->evlist, evsel) {
2185 if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
2186 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", evsel__name(evsel));
2187
2188 nr = evsel->core.nr_members - 1;
2189 } else if (nr) {
2190 fprintf(fp, ",%s", evsel__name(evsel));
2191
2192 if (--nr == 0)
2193 fprintf(fp, "}\n");
2194 }
2195 }
2196 }
2197
2198 static void print_sample_time(struct feat_fd *ff, FILE *fp)
2199 {
2200 struct perf_session *session;
2201 char time_buf[32];
2202 double d;
2203
2204 session = container_of(ff->ph, struct perf_session, header);
2205
2206 timestamp__scnprintf_usec(session->evlist->first_sample_time,
2207 time_buf, sizeof(time_buf));
2208 fprintf(fp, "# time of first sample : %s\n", time_buf);
2209
2210 timestamp__scnprintf_usec(session->evlist->last_sample_time,
2211 time_buf, sizeof(time_buf));
2212 fprintf(fp, "# time of last sample : %s\n", time_buf);
2213
2214 d = (double)(session->evlist->last_sample_time -
2215 session->evlist->first_sample_time) / NSEC_PER_MSEC;
2216
2217 fprintf(fp, "# sample duration : %10.3f ms\n", d);
2218 }
2219
2220 static void memory_node__fprintf(struct memory_node *n,
2221 unsigned long long bsize, FILE *fp)
2222 {
2223 char buf_map[100], buf_size[50];
2224 unsigned long long size;
2225
2226 size = bsize * bitmap_weight(n->set, n->size);
2227 unit_number__scnprintf(buf_size, 50, size);
2228
2229 bitmap_scnprintf(n->set, n->size, buf_map, 100);
2230 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
2231 }
2232
2233 static void print_mem_topology(struct feat_fd *ff, FILE *fp)
2234 {
2235 struct perf_env *env = &ff->ph->env;
2236 struct memory_node *nodes;
2237 int i, nr;
2238
2239 nodes = env->memory_nodes;
2240 nr = env->nr_memory_nodes;
2241
2242 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
2243 nr, env->memory_bsize);
2244
2245 for (i = 0; i < nr; i++) {
2246 memory_node__fprintf(&nodes[i], env->memory_bsize, fp);
2247 }
2248 }
2249
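/*
 * Apply one build-id table entry: find (or create) the machine and DSO that
 * the filename refers to, attach the build id carried in the event, and mark
 * kernel/module DSOs according to the event's cpumode.
 */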
2250 static int __event_process_build_id(struct perf_record_header_build_id *bev,
2251 char *filename,
2252 struct perf_session *session)
2253 {
2254 int err = -1;
2255 struct machine *machine;
2256 u16 cpumode;
2257 struct dso *dso;
2258 enum dso_space_type dso_space;
2259
2260 machine = perf_session__findnew_machine(session, bev->pid);
2261 if (!machine)
2262 goto out;
2263
2264 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2265
2266 switch (cpumode) {
2267 case PERF_RECORD_MISC_KERNEL:
2268 dso_space = DSO_SPACE__KERNEL;
2269 break;
2270 case PERF_RECORD_MISC_GUEST_KERNEL:
2271 dso_space = DSO_SPACE__KERNEL_GUEST;
2272 break;
2273 case PERF_RECORD_MISC_USER:
2274 case PERF_RECORD_MISC_GUEST_USER:
2275 dso_space = DSO_SPACE__USER;
2276 break;
2277 default:
2278 goto out;
2279 }
2280
2281 dso = machine__findnew_dso(machine, filename);
2282 if (dso != NULL) {
2283 char sbuild_id[SBUILD_ID_SIZE];
2284 struct build_id bid;
2285 size_t size = BUILD_ID_SIZE;
2286
2287 if (bev->header.misc & PERF_RECORD_MISC_BUILD_ID_SIZE)
2288 size = bev->size;
2289
2290 build_id__init(&bid, bev->data, size);
2291 dso__set_build_id(dso, &bid);
2292 dso__set_header_build_id(dso, true);
2293
2294 if (dso_space != DSO_SPACE__USER) {
2295 struct kmod_path m = { .name = NULL, };
2296
2297 if (!kmod_path__parse_name(&m, filename) && m.kmod)
2298 dso__set_module_info(dso, &m, machine);
2299
2300 dso__set_kernel(dso, dso_space);
2301 free(m.name);
2302 }
2303
2304 build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
2305 pr_debug("build id event received for %s: %s [%zu]\n",
2306 dso__long_name(dso), sbuild_id, size);
2307 dso__put(dso);
2308 }
2309
2310 err = 0;
2311 out:
2312 return err;
2313 }
2314
2315 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
2316 int input, u64 offset, u64 size)
2317 {
2318 struct perf_session *session = container_of(header, struct perf_session, header);
2319 struct {
2320 struct perf_event_header header;
2321 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
2322 char filename[0];
2323 } old_bev;
2324 struct perf_record_header_build_id bev;
2325 char filename[PATH_MAX];
2326 u64 limit = offset + size;
2327
2328 while (offset < limit) {
2329 ssize_t len;
2330
2331 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
2332 return -1;
2333
2334 if (header->needs_swap)
2335 perf_event_header__bswap(&old_bev.header);
2336
2337 len = old_bev.header.size - sizeof(old_bev);
2338 if (readn(input, filename, len) != len)
2339 return -1;
2340
2341 bev.header = old_bev.header;
2342
2343 /*
2344 * As the pid is the missing value, we need to fill
2345 * it in properly. The header.misc value gives us a nice hint.
2346 */
2347 bev.pid = HOST_KERNEL_ID;
2348 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
2349 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
2350 bev.pid = DEFAULT_GUEST_KERNEL_ID;
2351
2352 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
2353 __event_process_build_id(&bev, filename, session);
2354
2355 offset += bev.header.size;
2356 }
2357
2358 return 0;
2359 }
2360
2361 static int perf_header__read_build_ids(struct perf_header *header,
2362 int input, u64 offset, u64 size)
2363 {
2364 struct perf_session *session = container_of(header, struct perf_session, header);
2365 struct perf_record_header_build_id bev;
2366 char filename[PATH_MAX];
2367 u64 limit = offset + size, orig_offset = offset;
2368 int err = -1;
2369
2370 while (offset < limit) {
2371 ssize_t len;
2372
2373 if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
2374 goto out;
2375
2376 if (header->needs_swap)
2377 perf_event_header__bswap(&bev.header);
2378
2379 len = bev.header.size - sizeof(bev);
2380 if (readn(input, filename, len) != len)
2381 goto out;
2382 /*
2383 * The a1645ce1 changeset:
2384 *
2385 * "perf: 'perf kvm' tool for monitoring guest performance from host"
2386 *
2387 * Added a field to struct perf_record_header_build_id that broke the file
2388 * format.
2389 *
2390 * Since the kernel build-id is the first entry, process the
2391 * table using the old format if the well known
2392 * '[kernel.kallsyms]' string for the kernel build-id has the
2393 * first 4 characters chopped off (where the pid_t sits).
2394 */
2395 if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2396 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
2397 return -1;
2398 return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
2399 }
2400
2401 __event_process_build_id(&bev, filename, session);
2402
2403 offset += bev.header.size;
2404 }
2405 err = 0;
2406 out:
2407 return err;
2408 }
2409
2410 /* Macro for features that simply need to read and store a string. */
2411 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
2412 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
2413 {\
2414 free(ff->ph->env.__feat_env); \
2415 ff->ph->env.__feat_env = do_read_string(ff); \
2416 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
2417 }
2418
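/*
 * For example, FEAT_PROCESS_STR_FUN(osrelease, os_release) below expands to
 * a process_osrelease() handler that reads a single string from the feature
 * section into ff->ph->env.os_release and returns -ENOMEM on failure.
 */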
2419 FEAT_PROCESS_STR_FUN(hostname, hostname);
2420 FEAT_PROCESS_STR_FUN(osrelease, os_release);
2421 FEAT_PROCESS_STR_FUN(version, version);
2422 FEAT_PROCESS_STR_FUN(arch, arch);
2423 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
2424 FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2425
2426 #ifdef HAVE_LIBTRACEEVENT
2427 static int process_tracing_data(struct feat_fd *ff, void *data)
2428 {
2429 ssize_t ret = trace_report(ff->fd, data, false);
2430
2431 return ret < 0 ? -1 : 0;
2432 }
2433 #endif
2434
2435 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
2436 {
2437 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
2438 pr_debug("Failed to read buildids, continuing...\n");
2439 return 0;
2440 }
2441
2442 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
2443 {
2444 struct perf_env *env = &ff->ph->env;
2445 int ret;
2446 u32 nr_cpus_avail, nr_cpus_online;
2447
2448 ret = do_read_u32(ff, &nr_cpus_avail);
2449 if (ret)
2450 return ret;
2451
2452 ret = do_read_u32(ff, &nr_cpus_online);
2453 if (ret)
2454 return ret;
2455 env->nr_cpus_avail = (int)nr_cpus_avail;
2456 env->nr_cpus_online = (int)nr_cpus_online;
2457 return 0;
2458 }
2459
2460 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
2461 {
2462 struct perf_env *env = &ff->ph->env;
2463 u64 total_mem;
2464 int ret;
2465
2466 ret = do_read_u64(ff, &total_mem);
2467 if (ret)
2468 return -1;
2469 env->total_mem = (unsigned long long)total_mem;
2470 return 0;
2471 }
2472
2473 static struct evsel *evlist__find_by_index(struct evlist *evlist, int idx)
2474 {
2475 struct evsel *evsel;
2476
2477 evlist__for_each_entry(evlist, evsel) {
2478 if (evsel->core.idx == idx)
2479 return evsel;
2480 }
2481
2482 return NULL;
2483 }
2484
2485 static void evlist__set_event_name(struct evlist *evlist, struct evsel *event)
2486 {
2487 struct evsel *evsel;
2488
2489 if (!event->name)
2490 return;
2491
2492 evsel = evlist__find_by_index(evlist, event->core.idx);
2493 if (!evsel)
2494 return;
2495
2496 if (evsel->name)
2497 return;
2498
2499 evsel->name = strdup(event->name);
2500 }
2501
2502 static int
2503 process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
2504 {
2505 struct perf_session *session;
2506 struct evsel *evsel, *events = read_event_desc(ff);
2507
2508 if (!events)
2509 return 0;
2510
2511 session = container_of(ff->ph, struct perf_session, header);
2512
2513 if (session->data->is_pipe) {
2514 /* Save events for reading later by print_event_desc,
2515 * since they can't be read again in pipe mode. */
2516 ff->events = events;
2517 }
2518
2519 for (evsel = events; evsel->core.attr.size; evsel++)
2520 evlist__set_event_name(session->evlist, evsel);
2521
2522 if (!session->data->is_pipe)
2523 free_event_desc(events);
2524
2525 return 0;
2526 }
2527
2528 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
2529 {
2530 struct perf_env *env = &ff->ph->env;
2531 char *str, *cmdline = NULL, **argv = NULL;
2532 u32 nr, i, len = 0;
2533
2534 if (do_read_u32(ff, &nr))
2535 return -1;
2536
2537 env->nr_cmdline = nr;
2538
2539 cmdline = zalloc(ff->size + nr + 1);
2540 if (!cmdline)
2541 return -1;
2542
2543 argv = zalloc(sizeof(char *) * (nr + 1));
2544 if (!argv)
2545 goto error;
2546
2547 for (i = 0; i < nr; i++) {
2548 str = do_read_string(ff);
2549 if (!str)
2550 goto error;
2551
2552 argv[i] = cmdline + len;
2553 memcpy(argv[i], str, strlen(str) + 1);
2554 len += strlen(str) + 1;
2555 free(str);
2556 }
2557 env->cmdline = cmdline;
2558 env->cmdline_argv = (const char **) argv;
2559 return 0;
2560
2561 error:
2562 free(argv);
2563 free(cmdline);
2564 return -1;
2565 }
2566
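/*
 * CPU_TOPOLOGY layout, as read below: a u32 count of core-sibling strings
 * followed by the strings themselves, then a u32 count of thread-sibling
 * strings and those strings. Newer files append per-CPU core/socket ids and,
 * newer still, die-sibling strings plus per-CPU die ids; older headers stop
 * early, which is why ff->size is compared against the bytes consumed so far.
 */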
2567 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
2568 {
2569 u32 nr, i;
2570 char *str = NULL;
2571 struct strbuf sb;
2572 struct perf_env *env = &ff->ph->env;
2573 int cpu_nr = env->nr_cpus_avail;
2574 u64 size = 0;
2575
2576 env->cpu = calloc(cpu_nr, sizeof(*env->cpu));
2577 if (!env->cpu)
2578 return -1;
2579
2580 if (do_read_u32(ff, &nr))
2581 goto free_cpu;
2582
2583 env->nr_sibling_cores = nr;
2584 size += sizeof(u32);
2585 if (strbuf_init(&sb, 128) < 0)
2586 goto free_cpu;
2587
2588 for (i = 0; i < nr; i++) {
2589 str = do_read_string(ff);
2590 if (!str)
2591 goto error;
2592
2593 /* include a NULL character at the end */
2594 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2595 goto error;
2596 size += string_size(str);
2597 zfree(&str);
2598 }
2599 env->sibling_cores = strbuf_detach(&sb, NULL);
2600
2601 if (do_read_u32(ff, &nr))
2602 return -1;
2603
2604 env->nr_sibling_threads = nr;
2605 size += sizeof(u32);
2606
2607 for (i = 0; i < nr; i++) {
2608 str = do_read_string(ff);
2609 if (!str)
2610 goto error;
2611
2612 /* include a NULL character at the end */
2613 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2614 goto error;
2615 size += string_size(str);
2616 zfree(&str);
2617 }
2618 env->sibling_threads = strbuf_detach(&sb, NULL);
2619
2620 /*
2621 * The header may be from old perf,
2622 * which doesn't include core id and socket id information.
2623 */
2624 if (ff->size <= size) {
2625 zfree(&env->cpu);
2626 return 0;
2627 }
2628
2629 for (i = 0; i < (u32)cpu_nr; i++) {
2630 if (do_read_u32(ff, &nr))
2631 goto free_cpu;
2632
2633 env->cpu[i].core_id = nr;
2634 size += sizeof(u32);
2635
2636 if (do_read_u32(ff, &nr))
2637 goto free_cpu;
2638
2639 env->cpu[i].socket_id = nr;
2640 size += sizeof(u32);
2641 }
2642
2643 /*
2644 * The header may be from old perf,
2645 * which doesn't include die information.
2646 */
2647 if (ff->size <= size)
2648 return 0;
2649
2650 if (do_read_u32(ff, &nr))
2651 return -1;
2652
2653 env->nr_sibling_dies = nr;
2654 size += sizeof(u32);
2655
2656 for (i = 0; i < nr; i++) {
2657 str = do_read_string(ff);
2658 if (!str)
2659 goto error;
2660
2661 /* include a NULL character at the end */
2662 if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2663 goto error;
2664 size += string_size(str);
2665 zfree(&str);
2666 }
2667 env->sibling_dies = strbuf_detach(&sb, NULL);
2668
2669 for (i = 0; i < (u32)cpu_nr; i++) {
2670 if (do_read_u32(ff, &nr))
2671 goto free_cpu;
2672
2673 env->cpu[i].die_id = nr;
2674 }
2675
2676 return 0;
2677
2678 error:
2679 strbuf_release(&sb);
2680 zfree(&str);
2681 free_cpu:
2682 zfree(&env->cpu);
2683 return -1;
2684 }
2685
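/*
 * NUMA_TOPOLOGY layout, as read below: a u32 node count, then per node a u32
 * node number, u64 total and free memory (in kB, as printed above) and a CPU
 * list string that is parsed into a perf_cpu_map.
 */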
2686 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
2687 {
2688 struct perf_env *env = &ff->ph->env;
2689 struct numa_node *nodes, *n;
2690 u32 nr, i;
2691 char *str;
2692
2693 /* nr nodes */
2694 if (do_read_u32(ff, &nr))
2695 return -1;
2696
2697 nodes = zalloc(sizeof(*nodes) * nr);
2698 if (!nodes)
2699 return -ENOMEM;
2700
2701 for (i = 0; i < nr; i++) {
2702 n = &nodes[i];
2703
2704 /* node number */
2705 if (do_read_u32(ff, &n->node))
2706 goto error;
2707
2708 if (do_read_u64(ff, &n->mem_total))
2709 goto error;
2710
2711 if (do_read_u64(ff, &n->mem_free))
2712 goto error;
2713
2714 str = do_read_string(ff);
2715 if (!str)
2716 goto error;
2717
2718 n->map = perf_cpu_map__new(str);
2719 free(str);
2720 if (!n->map)
2721 goto error;
2722 }
2723 env->nr_numa_nodes = nr;
2724 env->numa_nodes = nodes;
2725 return 0;
2726
2727 error:
2728 free(nodes);
2729 return -1;
2730 }
2731
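/*
 * PMU_MAPPINGS layout, as read below: a u32 PMU count, then per PMU a u32
 * type followed by its name string. The pairs are flattened into
 * env->pmu_mappings as NUL-separated "type:name" entries, which is the form
 * print_pmu_mappings() walks above.
 */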
2732 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
2733 {
2734 struct perf_env *env = &ff->ph->env;
2735 char *name;
2736 u32 pmu_num;
2737 u32 type;
2738 struct strbuf sb;
2739
2740 if (do_read_u32(ff, &pmu_num))
2741 return -1;
2742
2743 if (!pmu_num) {
2744 pr_debug("pmu mappings not available\n");
2745 return 0;
2746 }
2747
2748 env->nr_pmu_mappings = pmu_num;
2749 if (strbuf_init(&sb, 128) < 0)
2750 return -1;
2751
2752 while (pmu_num) {
2753 if (do_read_u32(ff, &type))
2754 goto error;
2755
2756 name = do_read_string(ff);
2757 if (!name)
2758 goto error;
2759
2760 if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2761 goto error;
2762 /* include a NULL character at the end */
2763 if (strbuf_add(&sb, "", 1) < 0)
2764 goto error;
2765
2766 if (!strcmp(name, "msr"))
2767 env->msr_pmu_type = type;
2768
2769 free(name);
2770 pmu_num--;
2771 }
2772 /* AMD may set it by evlist__has_amd_ibs() from perf_session__new() */
2773 free(env->pmu_mappings);
2774 env->pmu_mappings = strbuf_detach(&sb, NULL);
2775 return 0;
2776
2777 error:
2778 strbuf_release(&sb);
2779 return -1;
2780 }
2781
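/*
 * GROUP_DESC layout, as read below: a u32 group count, then per group a name
 * string, the leader's event index and the member count. The descriptions are
 * then replayed over the evlist to restore leader/member links and group
 * names.
 */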
2782 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2783 {
2784 struct perf_env *env = &ff->ph->env;
2785 size_t ret = -1;
2786 u32 i, nr, nr_groups;
2787 struct perf_session *session;
2788 struct evsel *evsel, *leader = NULL;
2789 struct group_desc {
2790 char *name;
2791 u32 leader_idx;
2792 u32 nr_members;
2793 } *desc;
2794
2795 if (do_read_u32(ff, &nr_groups))
2796 return -1;
2797
2798 env->nr_groups = nr_groups;
2799 if (!nr_groups) {
2800 pr_debug("group desc not available\n");
2801 return 0;
2802 }
2803
2804 desc = calloc(nr_groups, sizeof(*desc));
2805 if (!desc)
2806 return -1;
2807
2808 for (i = 0; i < nr_groups; i++) {
2809 desc[i].name = do_read_string(ff);
2810 if (!desc[i].name)
2811 goto out_free;
2812
2813 if (do_read_u32(ff, &desc[i].leader_idx))
2814 goto out_free;
2815
2816 if (do_read_u32(ff, &desc[i].nr_members))
2817 goto out_free;
2818 }
2819
2820 /*
2821 * Rebuild group relationship based on the group_desc
2822 */
2823 session = container_of(ff->ph, struct perf_session, header);
2824
2825 i = nr = 0;
2826 evlist__for_each_entry(session->evlist, evsel) {
2827 if (i < nr_groups && evsel->core.idx == (int) desc[i].leader_idx) {
2828 evsel__set_leader(evsel, evsel);
2829 /* {anon_group} is a dummy name */
2830 if (strcmp(desc[i].name, "{anon_group}")) {
2831 evsel->group_name = desc[i].name;
2832 desc[i].name = NULL;
2833 }
2834 evsel->core.nr_members = desc[i].nr_members;
2835
2836 if (i >= nr_groups || nr > 0) {
2837 pr_debug("invalid group desc\n");
2838 goto out_free;
2839 }
2840
2841 leader = evsel;
2842 nr = evsel->core.nr_members - 1;
2843 i++;
2844 } else if (nr) {
2845 /* This is a group member */
2846 evsel__set_leader(evsel, leader);
2847
2848 nr--;
2849 }
2850 }
2851
2852 if (i != nr_groups || nr != 0) {
2853 pr_debug("invalid group desc\n");
2854 goto out_free;
2855 }
2856
2857 ret = 0;
2858 out_free:
2859 for (i = 0; i < nr_groups; i++)
2860 zfree(&desc[i].name);
2861 free(desc);
2862
2863 return ret;
2864 }
2865
2866 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2867 {
2868 struct perf_session *session;
2869 int err;
2870
2871 session = container_of(ff->ph, struct perf_session, header);
2872
2873 err = auxtrace_index__process(ff->fd, ff->size, session,
2874 ff->ph->needs_swap);
2875 if (err < 0)
2876 pr_err("Failed to process auxtrace index\n");
2877 return err;
2878 }
2879
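/*
 * CACHE layout, as read below: a u32 version (must be 1) and a u32 entry
 * count, then per cache level four u32s (level, line_size, sets, ways)
 * followed by three strings (type, size, map).
 */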
2880 static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2881 {
2882 struct perf_env *env = &ff->ph->env;
2883 struct cpu_cache_level *caches;
2884 u32 cnt, i, version;
2885
2886 if (do_read_u32(ff, &version))
2887 return -1;
2888
2889 if (version != 1)
2890 return -1;
2891
2892 if (do_read_u32(ff, &cnt))
2893 return -1;
2894
2895 caches = zalloc(sizeof(*caches) * cnt);
2896 if (!caches)
2897 return -1;
2898
2899 for (i = 0; i < cnt; i++) {
2900 struct cpu_cache_level *c = &caches[i];
2901
2902 #define _R(v) \
2903 if (do_read_u32(ff, &c->v)) \
2904 goto out_free_caches; \
2905
2906 _R(level)
2907 _R(line_size)
2908 _R(sets)
2909 _R(ways)
2910 #undef _R
2911
2912 #define _R(v) \
2913 c->v = do_read_string(ff); \
2914 if (!c->v) \
2915 goto out_free_caches; \
2916
2917 _R(type)
2918 _R(size)
2919 _R(map)
2920 #undef _R
2921 }
2922
2923 env->caches = caches;
2924 env->caches_cnt = cnt;
2925 return 0;
2926 out_free_caches:
2927 for (i = 0; i < cnt; i++) {
2928 free(caches[i].type);
2929 free(caches[i].size);
2930 free(caches[i].map);
2931 }
2932 free(caches);
2933 return -1;
2934 }
2935
2936 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2937 {
2938 struct perf_session *session;
2939 u64 first_sample_time, last_sample_time;
2940 int ret;
2941
2942 session = container_of(ff->ph, struct perf_session, header);
2943
2944 ret = do_read_u64(ff, &first_sample_time);
2945 if (ret)
2946 return -1;
2947
2948 ret = do_read_u64(ff, &last_sample_time);
2949 if (ret)
2950 return -1;
2951
2952 session->evlist->first_sample_time = first_sample_time;
2953 session->evlist->last_sample_time = last_sample_time;
2954 return 0;
2955 }
2956
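/*
 * MEM_TOPOLOGY layout, as read below: a u64 version (must be 1), the memory
 * block size and the node count, then per node its number and size followed
 * by the bitmap of present memory blocks.
 */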
2957 static int process_mem_topology(struct feat_fd *ff,
2958 void *data __maybe_unused)
2959 {
2960 struct perf_env *env = &ff->ph->env;
2961 struct memory_node *nodes;
2962 u64 version, i, nr, bsize;
2963 int ret = -1;
2964
2965 if (do_read_u64(ff, &version))
2966 return -1;
2967
2968 if (version != 1)
2969 return -1;
2970
2971 if (do_read_u64(ff, &bsize))
2972 return -1;
2973
2974 if (do_read_u64(ff, &nr))
2975 return -1;
2976
2977 nodes = zalloc(sizeof(*nodes) * nr);
2978 if (!nodes)
2979 return -1;
2980
2981 for (i = 0; i < nr; i++) {
2982 struct memory_node n;
2983
2984 #define _R(v) \
2985 if (do_read_u64(ff, &n.v)) \
2986 goto out; \
2987
2988 _R(node)
2989 _R(size)
2990
2991 #undef _R
2992
2993 if (do_read_bitmap(ff, &n.set, &n.size))
2994 goto out;
2995
2996 nodes[i] = n;
2997 }
2998
2999 env->memory_bsize = bsize;
3000 env->memory_nodes = nodes;
3001 env->nr_memory_nodes = nr;
3002 ret = 0;
3003
3004 out:
3005 if (ret)
3006 free(nodes);
3007 return ret;
3008 }
3009
3010 static int process_clockid(struct feat_fd *ff,
3011 void *data __maybe_unused)
3012 {
3013 struct perf_env *env = &ff->ph->env;
3014
3015 if (do_read_u64(ff, &env->clock.clockid_res_ns))
3016 return -1;
3017
3018 return 0;
3019 }
3020
3021 static int process_clock_data(struct feat_fd *ff,
3022 void *_data __maybe_unused)
3023 {
3024 struct perf_env *env = &ff->ph->env;
3025 u32 data32;
3026 u64 data64;
3027
3028 /* version */
3029 if (do_read_u32(ff, &data32))
3030 return -1;
3031
3032 if (data32 != 1)
3033 return -1;
3034
3035 /* clockid */
3036 if (do_read_u32(ff, &data32))
3037 return -1;
3038
3039 env->clock.clockid = data32;
3040
3041 /* TOD ref time */
3042 if (do_read_u64(ff, &data64))
3043 return -1;
3044
3045 env->clock.tod_ns = data64;
3046
3047 /* clockid ref time */
3048 if (do_read_u64(ff, &data64))
3049 return -1;
3050
3051 env->clock.clockid_ns = data64;
3052 env->clock.enabled = true;
3053 return 0;
3054 }
3055
3056 static int process_hybrid_topology(struct feat_fd *ff,
3057 void *data __maybe_unused)
3058 {
3059 struct perf_env *env = &ff->ph->env;
3060 struct hybrid_node *nodes, *n;
3061 u32 nr, i;
3062
3063 /* nr nodes */
3064 if (do_read_u32(ff, &nr))
3065 return -1;
3066
3067 nodes = zalloc(sizeof(*nodes) * nr);
3068 if (!nodes)
3069 return -ENOMEM;
3070
3071 for (i = 0; i < nr; i++) {
3072 n = &nodes[i];
3073
3074 n->pmu_name = do_read_string(ff);
3075 if (!n->pmu_name)
3076 goto error;
3077
3078 n->cpus = do_read_string(ff);
3079 if (!n->cpus)
3080 goto error;
3081 }
3082
3083 env->nr_hybrid_nodes = nr;
3084 env->hybrid_nodes = nodes;
3085 return 0;
3086
3087 error:
3088 for (i = 0; i < nr; i++) {
3089 free(nodes[i].pmu_name);
3090 free(nodes[i].cpus);
3091 }
3092
3093 free(nodes);
3094 return -1;
3095 }
3096
3097 static int process_dir_format(struct feat_fd *ff,
3098 void *_data __maybe_unused)
3099 {
3100 struct perf_session *session;
3101 struct perf_data *data;
3102
3103 session = container_of(ff->ph, struct perf_session, header);
3104 data = session->data;
3105
3106 if (WARN_ON(!perf_data__is_dir(data)))
3107 return -1;
3108
3109 return do_read_u64(ff, &data->dir.version);
3110 }
3111
3112 #ifdef HAVE_LIBBPF_SUPPORT
3113 static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
3114 {
3115 struct bpf_prog_info_node *info_node;
3116 struct perf_env *env = &ff->ph->env;
3117 struct perf_bpil *info_linear;
3118 u32 count, i;
3119 int err = -1;
3120
3121 if (ff->ph->needs_swap) {
3122 pr_warning("interpreting bpf_prog_info from systems with a different endianness is not yet supported\n");
3123 return 0;
3124 }
3125
3126 if (do_read_u32(ff, &count))
3127 return -1;
3128
3129 down_write(&env->bpf_progs.lock);
3130
3131 for (i = 0; i < count; ++i) {
3132 u32 info_len, data_len;
3133
3134 info_linear = NULL;
3135 info_node = NULL;
3136 if (do_read_u32(ff, &info_len))
3137 goto out;
3138 if (do_read_u32(ff, &data_len))
3139 goto out;
3140
3141 if (info_len > sizeof(struct bpf_prog_info)) {
3142 pr_warning("detected invalid bpf_prog_info\n");
3143 goto out;
3144 }
3145
3146 info_linear = malloc(sizeof(struct perf_bpil) +
3147 data_len);
3148 if (!info_linear)
3149 goto out;
3150 info_linear->info_len = sizeof(struct bpf_prog_info);
3151 info_linear->data_len = data_len;
3152 if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
3153 goto out;
3154 if (__do_read(ff, &info_linear->info, info_len))
3155 goto out;
3156 if (info_len < sizeof(struct bpf_prog_info))
3157 memset(((void *)(&info_linear->info)) + info_len, 0,
3158 sizeof(struct bpf_prog_info) - info_len);
3159
3160 if (__do_read(ff, info_linear->data, data_len))
3161 goto out;
3162
3163 info_node = malloc(sizeof(struct bpf_prog_info_node));
3164 if (!info_node)
3165 goto out;
3166
3167 /* after reading from file, translate offset to address */
3168 bpil_offs_to_addr(info_linear);
3169 info_node->info_linear = info_linear;
3170 info_node->metadata = NULL;
3171 if (!__perf_env__insert_bpf_prog_info(env, info_node)) {
3172 free(info_linear);
3173 free(info_node);
3174 }
3175 }
3176
3177 up_write(&env->bpf_progs.lock);
3178 return 0;
3179 out:
3180 free(info_linear);
3181 free(info_node);
3182 up_write(&env->bpf_progs.lock);
3183 return err;
3184 }
3185
3186 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
3187 {
3188 struct perf_env *env = &ff->ph->env;
3189 struct btf_node *node = NULL;
3190 u32 count, i;
3191 int err = -1;
3192
3193 if (ff->ph->needs_swap) {
3194 pr_warning("interpreting btf from systems with a different endianness is not yet supported\n");
3195 return 0;
3196 }
3197
3198 if (do_read_u32(ff, &count))
3199 return -1;
3200
3201 down_write(&env->bpf_progs.lock);
3202
3203 for (i = 0; i < count; ++i) {
3204 u32 id, data_size;
3205
3206 if (do_read_u32(ff, &id))
3207 goto out;
3208 if (do_read_u32(ff, &data_size))
3209 goto out;
3210
3211 node = malloc(sizeof(struct btf_node) + data_size);
3212 if (!node)
3213 goto out;
3214
3215 node->id = id;
3216 node->data_size = data_size;
3217
3218 if (__do_read(ff, node->data, data_size))
3219 goto out;
3220
3221 if (!__perf_env__insert_btf(env, node))
3222 free(node);
3223 node = NULL;
3224 }
3225
3226 err = 0;
3227 out:
3228 up_write(&env->bpf_progs.lock);
3229 free(node);
3230 return err;
3231 }
3232 #endif // HAVE_LIBBPF_SUPPORT
3233
3234 static int process_compressed(struct feat_fd *ff,
3235 void *data __maybe_unused)
3236 {
3237 struct perf_env *env = &ff->ph->env;
3238
3239 if (do_read_u32(ff, &(env->comp_ver)))
3240 return -1;
3241
3242 if (do_read_u32(ff, &(env->comp_type)))
3243 return -1;
3244
3245 if (do_read_u32(ff, &(env->comp_level)))
3246 return -1;
3247
3248 if (do_read_u32(ff, &(env->comp_ratio)))
3249 return -1;
3250
3251 if (do_read_u32(ff, &(env->comp_mmap_len)))
3252 return -1;
3253
3254 return 0;
3255 }
3256
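/*
 * Common reader for the CPU_PMU_CAPS and PMU_CAPS payloads: a u32 capability
 * count followed by name/value string pairs. Each pair is stored as a
 * "name=value" string, and the well-known branches, branch_counter_nr and
 * branch_counter_width capabilities are also parsed into dedicated fields.
 */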
3257 static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps,
3258 char ***caps, unsigned int *max_branches,
3259 unsigned int *br_cntr_nr,
3260 unsigned int *br_cntr_width)
3261 {
3262 char *name, *value, *ptr;
3263 u32 nr_pmu_caps, i;
3264
3265 *nr_caps = 0;
3266 *caps = NULL;
3267
3268 if (do_read_u32(ff, &nr_pmu_caps))
3269 return -1;
3270
3271 if (!nr_pmu_caps)
3272 return 0;
3273
3274 *caps = zalloc(sizeof(char *) * nr_pmu_caps);
3275 if (!*caps)
3276 return -1;
3277
3278 for (i = 0; i < nr_pmu_caps; i++) {
3279 name = do_read_string(ff);
3280 if (!name)
3281 goto error;
3282
3283 value = do_read_string(ff);
3284 if (!value)
3285 goto free_name;
3286
3287 if (asprintf(&ptr, "%s=%s", name, value) < 0)
3288 goto free_value;
3289
3290 (*caps)[i] = ptr;
3291
3292 if (!strcmp(name, "branches"))
3293 *max_branches = atoi(value);
3294
3295 if (!strcmp(name, "branch_counter_nr"))
3296 *br_cntr_nr = atoi(value);
3297
3298 if (!strcmp(name, "branch_counter_width"))
3299 *br_cntr_width = atoi(value);
3300
3301 free(value);
3302 free(name);
3303 }
3304 *nr_caps = nr_pmu_caps;
3305 return 0;
3306
3307 free_value:
3308 free(value);
3309 free_name:
3310 free(name);
3311 error:
3312 for (; i > 0; i--)
3313 free((*caps)[i - 1]);
3314 free(*caps);
3315 *caps = NULL;
3316 *nr_caps = 0;
3317 return -1;
3318 }
3319
3320 static int process_cpu_pmu_caps(struct feat_fd *ff,
3321 void *data __maybe_unused)
3322 {
3323 struct perf_env *env = &ff->ph->env;
3324 int ret = __process_pmu_caps(ff, &env->nr_cpu_pmu_caps,
3325 &env->cpu_pmu_caps,
3326 &env->max_branches,
3327 &env->br_cntr_nr,
3328 &env->br_cntr_width);
3329
3330 if (!ret && !env->cpu_pmu_caps)
3331 pr_debug("cpu pmu capabilities not available\n");
3332 return ret;
3333 }
3334
3335 static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused)
3336 {
3337 struct perf_env *env = &ff->ph->env;
3338 struct pmu_caps *pmu_caps;
3339 u32 nr_pmu, i;
3340 int ret;
3341 int j;
3342
3343 if (do_read_u32(ff, &nr_pmu))
3344 return -1;
3345
3346 if (!nr_pmu) {
3347 pr_debug("pmu capabilities not available\n");
3348 return 0;
3349 }
3350
3351 pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu);
3352 if (!pmu_caps)
3353 return -ENOMEM;
3354
3355 for (i = 0; i < nr_pmu; i++) {
3356 ret = __process_pmu_caps(ff, &pmu_caps[i].nr_caps,
3357 &pmu_caps[i].caps,
3358 &pmu_caps[i].max_branches,
3359 &pmu_caps[i].br_cntr_nr,
3360 &pmu_caps[i].br_cntr_width);
3361 if (ret)
3362 goto err;
3363
3364 pmu_caps[i].pmu_name = do_read_string(ff);
3365 if (!pmu_caps[i].pmu_name) {
3366 ret = -1;
3367 goto err;
3368 }
3369 if (!pmu_caps[i].nr_caps) {
3370 pr_debug("%s pmu capabilities not available\n",
3371 pmu_caps[i].pmu_name);
3372 }
3373 }
3374
3375 env->nr_pmus_with_caps = nr_pmu;
3376 env->pmu_caps = pmu_caps;
3377 return 0;
3378
3379 err:
3380 for (i = 0; i < nr_pmu; i++) {
3381 for (j = 0; j < pmu_caps[i].nr_caps; j++)
3382 free(pmu_caps[i].caps[j]);
3383 free(pmu_caps[i].caps);
3384 free(pmu_caps[i].pmu_name);
3385 }
3386
3387 free(pmu_caps);
3388 return ret;
3389 }
3390
3391 #define FEAT_OPR(n, func, __full_only) \
3392 [HEADER_##n] = { \
3393 .name = __stringify(n), \
3394 .write = write_##func, \
3395 .print = print_##func, \
3396 .full_only = __full_only, \
3397 .process = process_##func, \
3398 .synthesize = true \
3399 }
3400
3401 #define FEAT_OPN(n, func, __full_only) \
3402 [HEADER_##n] = { \
3403 .name = __stringify(n), \
3404 .write = write_##func, \
3405 .print = print_##func, \
3406 .full_only = __full_only, \
3407 .process = process_##func \
3408 }
3409
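/*
 * FEAT_OPR and FEAT_OPN are identical except that FEAT_OPR additionally
 * marks the feature as synthesizable (.synthesize = true), which is what
 * util/synthetic-events.c keys off when emitting feature events.
 */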
3410 /* feature_ops not implemented: */
3411 #define print_tracing_data NULL
3412 #define print_build_id NULL
3413
3414 #define process_branch_stack NULL
3415 #define process_stat NULL
3416
3417 // Only used in util/synthetic-events.c
3418 const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
3419
3420 const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
3421 #ifdef HAVE_LIBTRACEEVENT
3422 FEAT_OPN(TRACING_DATA, tracing_data, false),
3423 #endif
3424 FEAT_OPN(BUILD_ID, build_id, false),
3425 FEAT_OPR(HOSTNAME, hostname, false),
3426 FEAT_OPR(OSRELEASE, osrelease, false),
3427 FEAT_OPR(VERSION, version, false),
3428 FEAT_OPR(ARCH, arch, false),
3429 FEAT_OPR(NRCPUS, nrcpus, false),
3430 FEAT_OPR(CPUDESC, cpudesc, false),
3431 FEAT_OPR(CPUID, cpuid, false),
3432 FEAT_OPR(TOTAL_MEM, total_mem, false),
3433 FEAT_OPR(EVENT_DESC, event_desc, false),
3434 FEAT_OPR(CMDLINE, cmdline, false),
3435 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true),
3436 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true),
3437 FEAT_OPN(BRANCH_STACK, branch_stack, false),
3438 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false),
3439 FEAT_OPR(GROUP_DESC, group_desc, false),
3440 FEAT_OPN(AUXTRACE, auxtrace, false),
3441 FEAT_OPN(STAT, stat, false),
3442 FEAT_OPN(CACHE, cache, true),
3443 FEAT_OPR(SAMPLE_TIME, sample_time, false),
3444 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true),
3445 FEAT_OPR(CLOCKID, clockid, false),
3446 FEAT_OPN(DIR_FORMAT, dir_format, false),
3447 #ifdef HAVE_LIBBPF_SUPPORT
3448 FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
3449 FEAT_OPR(BPF_BTF, bpf_btf, false),
3450 #endif
3451 FEAT_OPR(COMPRESSED, compressed, false),
3452 FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false),
3453 FEAT_OPR(CLOCK_DATA, clock_data, false),
3454 FEAT_OPN(HYBRID_TOPOLOGY, hybrid_topology, true),
3455 FEAT_OPR(PMU_CAPS, pmu_caps, false),
3456 };
3457
3458 struct header_print_data {
3459 FILE *fp;
3460 bool full; /* extended list of headers */
3461 };
3462
3463 static int perf_file_section__fprintf_info(struct perf_file_section *section,
3464 struct perf_header *ph,
3465 int feat, int fd, void *data)
3466 {
3467 struct header_print_data *hd = data;
3468 struct feat_fd ff;
3469
3470 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
3471 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
3472 "%d, continuing...\n", section->offset, feat);
3473 return 0;
3474 }
3475 if (feat >= HEADER_LAST_FEATURE) {
3476 pr_warning("unknown feature %d\n", feat);
3477 return 0;
3478 }
3479 if (!feat_ops[feat].print)
3480 return 0;
3481
3482 ff = (struct feat_fd) {
3483 .fd = fd,
3484 .ph = ph,
3485 };
3486
3487 if (!feat_ops[feat].full_only || hd->full)
3488 feat_ops[feat].print(&ff, hd->fp);
3489 else
3490 fprintf(hd->fp, "# %s info available, use -I to display\n",
3491 feat_ops[feat].name);
3492
3493 return 0;
3494 }
3495
3496 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
3497 {
3498 struct header_print_data hd;
3499 struct perf_header *header = &session->header;
3500 int fd = perf_data__fd(session->data);
3501 struct stat st;
3502 time_t stctime;
3503 int ret, bit;
3504
3505 hd.fp = fp;
3506 hd.full = full;
3507
3508 ret = fstat(fd, &st);
3509 if (ret == -1)
3510 return -1;
3511
3512 stctime = st.st_mtime;
3513 fprintf(fp, "# captured on : %s", ctime(&stctime));
3514
3515 fprintf(fp, "# header version : %u\n", header->version);
3516 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
3517 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
3518 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);
3519
3520 perf_header__process_sections(header, fd, &hd,
3521 perf_file_section__fprintf_info);
3522
3523 if (session->data->is_pipe)
3524 return 0;
3525
3526 fprintf(fp, "# missing features: ");
3527 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
3528 if (bit)
3529 fprintf(fp, "%s ", feat_ops[bit].name);
3530 }
3531
3532 fprintf(fp, "\n");
3533 return 0;
3534 }
3535
3536 struct header_fw {
3537 struct feat_writer fw;
3538 struct feat_fd *ff;
3539 };
3540
3541 static int feat_writer_cb(struct feat_writer *fw, void *buf, size_t sz)
3542 {
3543 struct header_fw *h = container_of(fw, struct header_fw, fw);
3544
3545 return do_write(h->ff, buf, sz);
3546 }
3547
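/*
 * Write one feature section: record its start offset, let a perf inject
 * feature copier (fc) supply the content if it has any, otherwise call the
 * feature's ->write() handler, then record the section size. On error the
 * file offset is rewound and the caller clears the feature bit.
 */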
3548 static int do_write_feat(struct feat_fd *ff, int type,
3549 struct perf_file_section **p,
3550 struct evlist *evlist,
3551 struct feat_copier *fc)
3552 {
3553 int err;
3554 int ret = 0;
3555
3556 if (perf_header__has_feat(ff->ph, type)) {
3557 if (!feat_ops[type].write)
3558 return -1;
3559
3560 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
3561 return -1;
3562
3563 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
3564
3565 /*
3566 * Hook to let perf inject copy features sections from the input
3567 * file.
3568 */
3569 if (fc && fc->copy) {
3570 struct header_fw h = {
3571 .fw.write = feat_writer_cb,
3572 .ff = ff,
3573 };
3574
3575 /* ->copy() returns 0 if the feature was not copied */
3576 err = fc->copy(fc, type, &h.fw);
3577 } else {
3578 err = 0;
3579 }
3580 if (!err)
3581 err = feat_ops[type].write(ff, evlist);
3582 if (err < 0) {
3583 pr_debug("failed to write feature %s\n", feat_ops[type].name);
3584
3585 /* undo anything written */
3586 lseek(ff->fd, (*p)->offset, SEEK_SET);
3587
3588 return -1;
3589 }
3590 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
3591 (*p)++;
3592 }
3593 return ret;
3594 }
3595
3596 static int perf_header__adds_write(struct perf_header *header,
3597 struct evlist *evlist, int fd,
3598 struct feat_copier *fc)
3599 {
3600 int nr_sections;
3601 struct feat_fd ff = {
3602 .fd = fd,
3603 .ph = header,
3604 };
3605 struct perf_file_section *feat_sec, *p;
3606 int sec_size;
3607 u64 sec_start;
3608 int feat;
3609 int err;
3610
3611 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3612 if (!nr_sections)
3613 return 0;
3614
3615 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
3616 if (feat_sec == NULL)
3617 return -ENOMEM;
3618
3619 sec_size = sizeof(*feat_sec) * nr_sections;
3620
3621 sec_start = header->feat_offset;
3622 lseek(fd, sec_start + sec_size, SEEK_SET);
3623
3624 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3625 if (do_write_feat(&ff, feat, &p, evlist, fc))
3626 perf_header__clear_feat(header, feat);
3627 }
3628
3629 lseek(fd, sec_start, SEEK_SET);
3630 /*
3631 * may write more than needed due to dropped feature, but
3632 * this is okay, reader will skip the missing entries
3633 */
3634 err = do_write(&ff, feat_sec, sec_size);
3635 if (err < 0)
3636 pr_debug("failed to write feature section\n");
3637 free(ff.buf); /* TODO: added to silence clang-tidy. */
3638 free(feat_sec);
3639 return err;
3640 }
3641
3642 int perf_header__write_pipe(int fd)
3643 {
3644 struct perf_pipe_file_header f_header;
3645 struct feat_fd ff = {
3646 .fd = fd,
3647 };
3648 int err;
3649
3650 f_header = (struct perf_pipe_file_header){
3651 .magic = PERF_MAGIC,
3652 .size = sizeof(f_header),
3653 };
3654
3655 err = do_write(&ff, &f_header, sizeof(f_header));
3656 if (err < 0) {
3657 pr_debug("failed to write perf pipe header\n");
3658 return err;
3659 }
3660 free(ff.buf);
3661 return 0;
3662 }
3663
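/*
 * Write the perf.data file header: the fixed perf_file_header, the event
 * ids, the perf_file_attr array and, when the session is exiting, the
 * feature sections. With write_attrs_after_data the attributes and features
 * are instead placed after the data section and only written at exit.
 */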
3664 static int perf_session__do_write_header(struct perf_session *session,
3665 struct evlist *evlist,
3666 int fd, bool at_exit,
3667 struct feat_copier *fc,
3668 bool write_attrs_after_data)
3669 {
3670 struct perf_file_header f_header;
3671 struct perf_header *header = &session->header;
3672 struct evsel *evsel;
3673 struct feat_fd ff = {
3674 .ph = header,
3675 .fd = fd,
3676 };
3677 u64 attr_offset = sizeof(f_header), attr_size = 0;
3678 int err;
3679
3680 if (write_attrs_after_data && at_exit) {
3681 /*
3682 * Write features at the end of the file first so that
3683 * attributes may come after them.
3684 */
3685 if (!header->data_offset && header->data_size) {
3686 pr_err("File contains data but offset unknown\n");
3687 err = -1;
3688 goto err_out;
3689 }
3690 header->feat_offset = header->data_offset + header->data_size;
3691 err = perf_header__adds_write(header, evlist, fd, fc);
3692 if (err < 0)
3693 goto err_out;
3694 attr_offset = lseek(fd, 0, SEEK_CUR);
3695 } else {
3696 lseek(fd, attr_offset, SEEK_SET);
3697 }
3698
3699 evlist__for_each_entry(session->evlist, evsel) {
3700 evsel->id_offset = attr_offset;
3701 /* Avoid writing at the end of the file until the session is exiting. */
3702 if (!write_attrs_after_data || at_exit) {
3703 err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
3704 if (err < 0) {
3705 pr_debug("failed to write perf header\n");
3706 goto err_out;
3707 }
3708 }
3709 attr_offset += evsel->core.ids * sizeof(u64);
3710 }
3711
3712 evlist__for_each_entry(evlist, evsel) {
3713 if (evsel->core.attr.size < sizeof(evsel->core.attr)) {
3714 /*
3715 * We are likely in "perf inject" and have read
3716 * from an older file. Update attr size so that
3717 * reader gets the right offset to the ids.
3718 */
3719 evsel->core.attr.size = sizeof(evsel->core.attr);
3720 }
3721 /* Avoid writing at the end of the file until the session is exiting. */
3722 if (!write_attrs_after_data || at_exit) {
3723 struct perf_file_attr f_attr = {
3724 .attr = evsel->core.attr,
3725 .ids = {
3726 .offset = evsel->id_offset,
3727 .size = evsel->core.ids * sizeof(u64),
3728 }
3729 };
3730 err = do_write(&ff, &f_attr, sizeof(f_attr));
3731 if (err < 0) {
3732 pr_debug("failed to write perf header attribute\n");
3733 goto err_out;
3734 }
3735 }
3736 attr_size += sizeof(struct perf_file_attr);
3737 }
3738
3739 if (!header->data_offset) {
3740 if (write_attrs_after_data)
3741 header->data_offset = sizeof(f_header);
3742 else
3743 header->data_offset = attr_offset + attr_size;
3744 }
3745 header->feat_offset = header->data_offset + header->data_size;
3746
3747 if (!write_attrs_after_data && at_exit) {
3748 /* Write features now feat_offset is known. */
3749 err = perf_header__adds_write(header, evlist, fd, fc);
3750 if (err < 0)
3751 goto err_out;
3752 }
3753
3754 f_header = (struct perf_file_header){
3755 .magic = PERF_MAGIC,
3756 .size = sizeof(f_header),
3757 .attr_size = sizeof(struct perf_file_attr),
3758 .attrs = {
3759 .offset = attr_offset,
3760 .size = attr_size,
3761 },
3762 .data = {
3763 .offset = header->data_offset,
3764 .size = header->data_size,
3765 },
3766 /* event_types is ignored, store zeros */
3767 };
3768
3769 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
3770
3771 lseek(fd, 0, SEEK_SET);
3772 err = do_write(&ff, &f_header, sizeof(f_header));
3773 if (err < 0) {
3774 pr_debug("failed to write perf header\n");
3775 goto err_out;
3776 } else {
3777 lseek(fd, 0, SEEK_END);
3778 err = 0;
3779 }
3780 err_out:
3781 free(ff.buf);
3782 return err;
3783 }
3784
3785 int perf_session__write_header(struct perf_session *session,
3786 struct evlist *evlist,
3787 int fd, bool at_exit)
3788 {
3789 return perf_session__do_write_header(session, evlist, fd, at_exit, /*fc=*/NULL,
3790 /*write_attrs_after_data=*/false);
3791 }
3792
3793 size_t perf_session__data_offset(const struct evlist *evlist)
3794 {
3795 struct evsel *evsel;
3796 size_t data_offset;
3797
3798 data_offset = sizeof(struct perf_file_header);
3799 evlist__for_each_entry(evlist, evsel) {
3800 data_offset += evsel->core.ids * sizeof(u64);
3801 }
3802 data_offset += evlist->core.nr_entries * sizeof(struct perf_file_attr);
3803
3804 return data_offset;
3805 }
3806
3807 int perf_session__inject_header(struct perf_session *session,
3808 struct evlist *evlist,
3809 int fd,
3810 struct feat_copier *fc,
3811 bool write_attrs_after_data)
3812 {
3813 return perf_session__do_write_header(session, evlist, fd, true, fc,
3814 write_attrs_after_data);
3815 }
3816
3817 static int perf_header__getbuffer64(struct perf_header *header,
3818 int fd, void *buf, size_t size)
3819 {
3820 if (readn(fd, buf, size) <= 0)
3821 return -1;
3822
3823 if (header->needs_swap)
3824 mem_bswap_64(buf, size);
3825
3826 return 0;
3827 }
3828
3829 int perf_header__process_sections(struct perf_header *header, int fd,
3830 void *data,
3831 int (*process)(struct perf_file_section *section,
3832 struct perf_header *ph,
3833 int feat, int fd, void *data))
3834 {
3835 struct perf_file_section *feat_sec, *sec;
3836 int nr_sections;
3837 int sec_size;
3838 int feat;
3839 int err;
3840
3841 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3842 if (!nr_sections)
3843 return 0;
3844
3845 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
3846 if (!feat_sec)
3847 return -1;
3848
3849 sec_size = sizeof(*feat_sec) * nr_sections;
3850
3851 lseek(fd, header->feat_offset, SEEK_SET);
3852
3853 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
3854 if (err < 0)
3855 goto out_free;
3856
3857 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
3858 err = process(sec++, header, feat, fd, data);
3859 if (err < 0)
3860 goto out_free;
3861 }
3862 err = 0;
3863 out_free:
3864 free(feat_sec);
3865 return err;
3866 }
3867
3868 static const int attr_file_abi_sizes[] = {
3869 [0] = PERF_ATTR_SIZE_VER0,
3870 [1] = PERF_ATTR_SIZE_VER1,
3871 [2] = PERF_ATTR_SIZE_VER2,
3872 [3] = PERF_ATTR_SIZE_VER3,
3873 [4] = PERF_ATTR_SIZE_VER4,
3874 0,
3875 };
3876
3877 /*
3878 * In the legacy file format, the magic number does not encode endianness;
3879 * hdr_sz was used for that instead. But since hdr_sz can vary across ABI
3880 * revisions, we need to try all known sizes in both byte orders to detect
3881 * the endianness.
3882 */
3883 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
3884 {
3885 uint64_t ref_size, attr_size;
3886 int i;
3887
3888 for (i = 0 ; attr_file_abi_sizes[i]; i++) {
3889 ref_size = attr_file_abi_sizes[i]
3890 + sizeof(struct perf_file_section);
3891 if (hdr_sz != ref_size) {
3892 attr_size = bswap_64(hdr_sz);
3893 if (attr_size != ref_size)
3894 continue;
3895
3896 ph->needs_swap = true;
3897 }
3898 pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
3899 i,
3900 ph->needs_swap);
3901 return 0;
3902 }
3903 /* could not determine endianness */
3904 return -1;
3905 }
3906
3907 #define PERF_PIPE_HDR_VER0 16
3908
3909 static const size_t attr_pipe_abi_sizes[] = {
3910 [0] = PERF_PIPE_HDR_VER0,
3911 0,
3912 };
3913
3914 /*
3915 * In the legacy pipe format, there is an implicit assumption that the host
3916 * recording the samples and the host parsing them share the same endianness.
3917 * This is not always the case, since the pipe output can be redirected into a
3918 * file and analyzed on a different machine with a possibly different
3919 * endianness and perf_event ABI revision of the perf tool itself.
3920 */
3921 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
3922 {
3923 u64 attr_size;
3924 int i;
3925
3926 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
3927 if (hdr_sz != attr_pipe_abi_sizes[i]) {
3928 attr_size = bswap_64(hdr_sz);
3929 if (attr_size != hdr_sz)
3930 continue;
3931
3932 ph->needs_swap = true;
3933 }
3934 pr_debug("Pipe ABI%d perf.data file detected\n", i);
3935 return 0;
3936 }
3937 return -1;
3938 }
3939
3940 bool is_perf_magic(u64 magic)
3941 {
3942 if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3943 || magic == __perf_magic2
3944 || magic == __perf_magic2_sw)
3945 return true;
3946
3947 return false;
3948 }
3949
3950 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
3951 bool is_pipe, struct perf_header *ph)
3952 {
3953 int ret;
3954
3955 /* check for legacy format */
3956 ret = memcmp(&magic, __perf_magic1, sizeof(magic));
3957 if (ret == 0) {
3958 ph->version = PERF_HEADER_VERSION_1;
3959 pr_debug("legacy perf.data format\n");
3960 if (is_pipe)
3961 return try_all_pipe_abis(hdr_sz, ph);
3962
3963 return try_all_file_abis(hdr_sz, ph);
3964 }
3965 /*
3966 * the new magic number serves two purposes:
3967 * - unique number to identify actual perf.data files
3968 * - encode endianness of file
3969 */
3970 ph->version = PERF_HEADER_VERSION_2;
3971
3972 /* check magic number with one endianness */
3973 if (magic == __perf_magic2)
3974 return 0;
3975
3976 /* check magic number with opposite endianness */
3977 if (magic != __perf_magic2_sw)
3978 return -1;
3979
3980 ph->needs_swap = true;
3981
3982 return 0;
3983 }
3984
3985 int perf_file_header__read(struct perf_file_header *header,
3986 struct perf_header *ph, int fd)
3987 {
3988 ssize_t ret;
3989
3990 lseek(fd, 0, SEEK_SET);
3991
3992 ret = readn(fd, header, sizeof(*header));
3993 if (ret <= 0)
3994 return -1;
3995
3996 if (check_magic_endian(header->magic,
3997 header->attr_size, false, ph) < 0) {
3998 pr_debug("magic/endian check failed\n");
3999 return -1;
4000 }
4001
4002 if (ph->needs_swap) {
4003 mem_bswap_64(header, offsetof(struct perf_file_header,
4004 adds_features));
4005 }
4006
4007 if (header->size > header->attrs.offset) {
4008 pr_err("Perf file header corrupt: header overlaps attrs\n");
4009 return -1;
4010 }
4011
4012 if (header->size > header->data.offset) {
4013 pr_err("Perf file header corrupt: header overlaps data\n");
4014 return -1;
4015 }
4016
4017 if ((header->attrs.offset <= header->data.offset &&
4018 header->attrs.offset + header->attrs.size > header->data.offset) ||
4019 (header->attrs.offset > header->data.offset &&
4020 header->data.offset + header->data.size > header->attrs.offset)) {
4021 pr_err("Perf file header corrupt: Attributes and data overlap\n");
4022 return -1;
4023 }
4024
4025 if (header->size != sizeof(*header)) {
4026 /* Support the previous format */
4027 if (header->size == offsetof(typeof(*header), adds_features))
4028 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
4029 else
4030 return -1;
4031 } else if (ph->needs_swap) {
4032 /*
4033 * feature bitmap is declared as an array of unsigned longs --
4034 * not good since its size can differ between the host that
4035 * generated the data file and the host analyzing the file.
4036 *
4037 * We need to handle endianness, but we don't know the size of
4038 * the unsigned long where the file was generated. Take a best
4039 * guess at determining it: try 64-bit swap first (i.e., file
4040 * created on a 64-bit host), and check if the hostname feature
4041 * bit is set (this feature bit is forced on as of fbe96f2).
4042 * If the bit is not, undo the 64-bit swap and try a 32-bit
4043 * swap. If the hostname bit is still not set (e.g., older data
4044 * file), punt and fallback to the original behavior --
4045 * clearing all feature bits and setting buildid.
4046 */
4047 mem_bswap_64(&header->adds_features,
4048 BITS_TO_U64(HEADER_FEAT_BITS));
4049
4050 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
4051 /* unswap as u64 */
4052 mem_bswap_64(&header->adds_features,
4053 BITS_TO_U64(HEADER_FEAT_BITS));
4054
4055 /* unswap as u32 */
4056 mem_bswap_32(&header->adds_features,
4057 BITS_TO_U32(HEADER_FEAT_BITS));
4058 }
4059
4060 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
4061 bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
4062 __set_bit(HEADER_BUILD_ID, header->adds_features);
4063 }
4064 }
4065
4066 memcpy(&ph->adds_features, &header->adds_features,
4067 sizeof(ph->adds_features));
4068
4069 ph->data_offset = header->data.offset;
4070 ph->data_size = header->data.size;
4071 ph->feat_offset = header->data.offset + header->data.size;
4072 return 0;
4073 }
4074
4075 static int perf_file_section__process(struct perf_file_section *section,
4076 struct perf_header *ph,
4077 int feat, int fd, void *data)
4078 {
4079 struct feat_fd fdd = {
4080 .fd = fd,
4081 .ph = ph,
4082 .size = section->size,
4083 .offset = section->offset,
4084 };
4085
4086 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
4087 pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
4088 "%d, continuing...\n", section->offset, feat);
4089 return 0;
4090 }
4091
4092 if (feat >= HEADER_LAST_FEATURE) {
4093 pr_debug("unknown feature %d, continuing...\n", feat);
4094 return 0;
4095 }
4096
4097 if (!feat_ops[feat].process)
4098 return 0;
4099
4100 return feat_ops[feat].process(&fdd, data);
4101 }
4102
4103 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
4104 struct perf_header *ph,
4105 struct perf_data *data)
4106 {
4107 ssize_t ret;
4108
4109 ret = perf_data__read(data, header, sizeof(*header));
4110 if (ret <= 0)
4111 return -1;
4112
4113 if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
4114 pr_debug("endian/magic failed\n");
4115 return -1;
4116 }
4117
4118 if (ph->needs_swap)
4119 header->size = bswap_64(header->size);
4120
4121 return 0;
4122 }
4123
4124 static int perf_header__read_pipe(struct perf_session *session)
4125 {
4126 struct perf_header *header = &session->header;
4127 struct perf_pipe_file_header f_header;
4128
4129 if (perf_file_header__read_pipe(&f_header, header, session->data) < 0) {
4130 pr_debug("incompatible file format\n");
4131 return -EINVAL;
4132 }
4133
4134 return f_header.size == sizeof(f_header) ? 0 : -1;
4135 }
4136
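/*
 * Read one on-file perf_event_attr plus its ids perf_file_section. Only
 * PERF_ATTR_SIZE_VER0 bytes are guaranteed to be present; any extra bytes up
 * to the attr->size recorded in the file are read too, while files written
 * by a newer, larger ABI are rejected.
 */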
4137 static int read_attr(int fd, struct perf_header *ph,
4138 struct perf_file_attr *f_attr)
4139 {
4140 struct perf_event_attr *attr = &f_attr->attr;
4141 size_t sz, left;
4142 size_t our_sz = sizeof(f_attr->attr);
4143 ssize_t ret;
4144
4145 memset(f_attr, 0, sizeof(*f_attr));
4146
4147 /* read minimal guaranteed structure */
4148 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
4149 if (ret <= 0) {
4150 pr_debug("cannot read %d bytes of header attr\n",
4151 PERF_ATTR_SIZE_VER0);
4152 return -1;
4153 }
4154
4155 /* on file perf_event_attr size */
4156 sz = attr->size;
4157
4158 if (ph->needs_swap)
4159 sz = bswap_32(sz);
4160
4161 if (sz == 0) {
4162 /* assume ABI0 */
4163 sz = PERF_ATTR_SIZE_VER0;
4164 } else if (sz > our_sz) {
4165 pr_debug("file uses a more recent and unsupported ABI"
4166 " (%zu bytes extra)\n", sz - our_sz);
4167 return -1;
4168 }
4169 /* what we have not yet read and that we know about */
4170 left = sz - PERF_ATTR_SIZE_VER0;
4171 if (left) {
4172 void *ptr = attr;
4173 ptr += PERF_ATTR_SIZE_VER0;
4174
4175 ret = readn(fd, ptr, left);
4176 }
4177 /* read perf_file_section, ids are read in caller */
4178 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
4179
4180 return ret <= 0 ? -1 : 0;
4181 }
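/*
 * Sketch of the general pattern read_attr() follows, with hypothetical
 * names (read_versioned, MIN_SZ, struct versioned); not perf code.  A
 * versioned struct is read by fetching the smallest ABI-guaranteed
 * prefix, trusting the embedded size field for the remainder, and
 * rejecting files written by a newer ABI than we know about.  A real
 * reader would also loop on short reads and handle byte swapping.
 */
#if 0
#define MIN_SZ 64			/* size of the oldest known layout */

struct versioned {
	uint32_t size;			/* first field: size the writer used */
	/* ... fields appended over successive ABI versions ... */
	char payload[256];
};

static int read_versioned(int fd, struct versioned *v)
{
	memset(v, 0, sizeof(*v));

	if (read(fd, v, MIN_SZ) != MIN_SZ)	/* minimal guaranteed prefix */
		return -1;

	uint32_t sz = v->size ? v->size : MIN_SZ;	/* 0 means oldest layout */

	if (sz > sizeof(*v))
		return -1;			/* newer, unsupported ABI */

	if (sz > MIN_SZ &&
	    read(fd, (char *)v + MIN_SZ, sz - MIN_SZ) != (ssize_t)(sz - MIN_SZ))
		return -1;			/* remainder we know about */

	return 0;
}
#endif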
4182
4183 #ifdef HAVE_LIBTRACEEVENT
4184 static int evsel__prepare_tracepoint_event(struct evsel *evsel, struct tep_handle *pevent)
4185 {
4186 struct tep_event *event;
4187 char bf[128];
4188
4189 /* already prepared */
4190 if (evsel->tp_format)
4191 return 0;
4192
4193 if (pevent == NULL) {
4194 pr_debug("broken or missing trace data\n");
4195 return -1;
4196 }
4197
4198 event = tep_find_event(pevent, evsel->core.attr.config);
4199 if (event == NULL) {
4200 pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
4201 return -1;
4202 }
4203
4204 if (!evsel->name) {
4205 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
4206 evsel->name = strdup(bf);
4207 if (evsel->name == NULL)
4208 return -1;
4209 }
4210
4211 evsel->tp_format = event;
4212 return 0;
4213 }
4214
4215 static int evlist__prepare_tracepoint_events(struct evlist *evlist, struct tep_handle *pevent)
4216 {
4217 struct evsel *pos;
4218
4219 evlist__for_each_entry(evlist, pos) {
4220 if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
4221 evsel__prepare_tracepoint_event(pos, pevent))
4222 return -1;
4223 }
4224
4225 return 0;
4226 }
4227 #endif
4228
4229 int perf_session__read_header(struct perf_session *session)
4230 {
4231 struct perf_data *data = session->data;
4232 struct perf_header *header = &session->header;
4233 struct perf_file_header f_header;
4234 struct perf_file_attr f_attr;
4235 u64 f_id;
4236 int nr_attrs, nr_ids, i, j, err;
4237 int fd = perf_data__fd(data);
4238
4239 session->evlist = evlist__new();
4240 if (session->evlist == NULL)
4241 return -ENOMEM;
4242
4243 session->evlist->session = session;
4244 session->machines.host.env = &header->env;
4245
4246 /*
4247 * Pipe-mode data can also be stored in a regular file,
4248 * so check for the pipe header regardless of the source.
4249 */
4250 err = perf_header__read_pipe(session);
4251 if (!err || perf_data__is_pipe(data)) {
4252 data->is_pipe = true;
4253 return err;
4254 }
4255
4256 if (perf_file_header__read(&f_header, header, fd) < 0)
4257 return -EINVAL;
4258
4259 if (header->needs_swap && data->in_place_update) {
4260 pr_err("In-place update not supported when byte-swapping is required\n");
4261 return -EINVAL;
4262 }
4263
4264 /*
4265 * Sanity check that perf.data was written cleanly; data size is
4266 * initialized to 0 and updated only if the on_exit function is run.
4267 * If data size is still 0 then the file contains only partial
4268 * information. Just warn the user and process as much of it as possible.
4269 */
4270 if (f_header.data.size == 0) {
4271 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
4272 "Was the 'perf record' command properly terminated?\n",
4273 data->file.path);
4274 }
4275
4276 if (f_header.attr_size == 0) {
4277 pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
4278 "Was the 'perf record' command properly terminated?\n",
4279 data->file.path);
4280 return -EINVAL;
4281 }
4282
4283 nr_attrs = f_header.attrs.size / f_header.attr_size;
4284 lseek(fd, f_header.attrs.offset, SEEK_SET);
4285
4286 for (i = 0; i < nr_attrs; i++) {
4287 struct evsel *evsel;
4288 off_t tmp;
4289
4290 if (read_attr(fd, header, &f_attr) < 0)
4291 goto out_errno;
4292
4293 if (header->needs_swap) {
4294 f_attr.ids.size = bswap_64(f_attr.ids.size);
4295 f_attr.ids.offset = bswap_64(f_attr.ids.offset);
4296 perf_event__attr_swap(&f_attr.attr);
4297 }
4298
4299 tmp = lseek(fd, 0, SEEK_CUR);
4300 evsel = evsel__new(&f_attr.attr);
4301
4302 if (evsel == NULL)
4303 goto out_delete_evlist;
4304
4305 evsel->needs_swap = header->needs_swap;
4306 /*
4307 * Add the evsel first, so that if perf_evsel__alloc_id() fails
4308 * this entry also gets purged at evlist__delete().
4309 */
4310 evlist__add(session->evlist, evsel);
4311
4312 nr_ids = f_attr.ids.size / sizeof(u64);
4313 /*
4314 * The header carries no cpu and thread maps, so for
4315 * allocating the perf_sample_id table we fake 1 cpu and
4316 * nr_ids threads.
4317 */
4318 if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids))
4319 goto out_delete_evlist;
4320
4321 lseek(fd, f_attr.ids.offset, SEEK_SET);
4322
4323 for (j = 0; j < nr_ids; j++) {
4324 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
4325 goto out_errno;
4326
4327 perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id);
4328 }
4329
4330 lseek(fd, tmp, SEEK_SET);
4331 }
4332
4333 #ifdef HAVE_LIBTRACEEVENT
4334 perf_header__process_sections(header, fd, &session->tevent,
4335 perf_file_section__process);
4336
4337 if (evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent))
4338 goto out_delete_evlist;
4339 #else
4340 perf_header__process_sections(header, fd, NULL, perf_file_section__process);
4341 #endif
4342
4343 return 0;
4344 out_errno:
4345 return -errno;
4346
4347 out_delete_evlist:
4348 evlist__delete(session->evlist);
4349 session->evlist = NULL;
4350 return -ENOMEM;
4351 }
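/*
 * Rough sketch (not authoritative) of the perf.data pieces the function
 * above walks, derived from the arithmetic it performs:
 *
 *   +------------------------+  offset 0
 *   | perf_file_header       |  magic, size, attr_size, attrs{off,size},
 *   |                        |  data{off,size}, adds_features bitmap
 *   +------------------------+  attrs.offset
 *   | perf_file_attr[n]      |  n = attrs.size / attr_size; each entry is
 *   |                        |  a perf_event_attr plus an ids{off,size}
 *   +------------------------+  ids.offset (one such array per attr)
 *   | u64 ids[]              |  ids.size / sizeof(u64) sample ids
 *   +------------------------+  data.offset
 *   | event stream           |  data.size bytes
 *   +------------------------+  data.offset + data.size (feat_offset)
 *   | feature sections       |  one perf_file_section per set feature
 *   |                        |  bit, each pointing at its payload
 *   +------------------------+
 */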
4352
4353 int perf_event__process_feature(struct perf_session *session,
4354 union perf_event *event)
4355 {
4356 struct feat_fd ff = { .fd = 0 };
4357 struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
4358 int type = fe->header.type;
4359 u64 feat = fe->feat_id;
4360 int ret = 0;
4361 bool print = dump_trace;
4362
4363 if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
4364 pr_warning("invalid record type %d in pipe-mode\n", type);
4365 return 0;
4366 }
4367 if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
4368 pr_warning("invalid record type %d in pipe-mode\n", type);
4369 return -1;
4370 }
4371
4372 ff.buf = (void *)fe->data;
4373 ff.size = event->header.size - sizeof(*fe);
4374 ff.ph = &session->header;
4375
4376 if (feat_ops[feat].process && feat_ops[feat].process(&ff, NULL)) {
4377 ret = -1;
4378 goto out;
4379 }
4380
4381 if (session->tool->show_feat_hdr) {
4382 if (!feat_ops[feat].full_only ||
4383 session->tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
4384 print = true;
4385 } else {
4386 fprintf(stdout, "# %s info available, use -I to display\n",
4387 feat_ops[feat].name);
4388 }
4389 }
4390
4391 if (dump_trace)
4392 printf(", ");
4393
4394 if (print) {
4395 if (feat_ops[feat].print)
4396 feat_ops[feat].print(&ff, stdout);
4397 else
4398 printf("# %s", feat_ops[feat].name);
4399 }
4400
4401 out:
4402 free_event_desc(ff.events);
4403 return ret;
4404 }
4405
4406 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
4407 {
4408 struct perf_record_event_update *ev = &event->event_update;
4409 struct perf_cpu_map *map;
4410 size_t ret;
4411
4412 ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id);
4413
4414 switch (ev->type) {
4415 case PERF_EVENT_UPDATE__SCALE:
4416 ret += fprintf(fp, "... scale: %f\n", ev->scale.scale);
4417 break;
4418 case PERF_EVENT_UPDATE__UNIT:
4419 ret += fprintf(fp, "... unit: %s\n", ev->unit);
4420 break;
4421 case PERF_EVENT_UPDATE__NAME:
4422 ret += fprintf(fp, "... name: %s\n", ev->name);
4423 break;
4424 case PERF_EVENT_UPDATE__CPUS:
4425 ret += fprintf(fp, "... ");
4426
4427 map = cpu_map__new_data(&ev->cpus.cpus);
4428 if (map) {
4429 ret += cpu_map__fprintf(map, fp);
4430 perf_cpu_map__put(map);
4431 } else
4432 ret += fprintf(fp, "failed to get cpus\n");
4433 break;
4434 default:
4435 ret += fprintf(fp, "... unknown type\n");
4436 break;
4437 }
4438
4439 return ret;
4440 }
4441
4442 size_t perf_event__fprintf_attr(union perf_event *event, FILE *fp)
4443 {
4444 return perf_event_attr__fprintf(fp, &event->attr.attr, __desc_attr__fprintf, NULL);
4445 }
4446
4447 int perf_event__process_attr(const struct perf_tool *tool __maybe_unused,
4448 union perf_event *event,
4449 struct evlist **pevlist)
4450 {
4451 u32 i, n_ids;
4452 u64 *ids;
4453 struct evsel *evsel;
4454 struct evlist *evlist = *pevlist;
4455
4456 if (dump_trace)
4457 perf_event__fprintf_attr(event, stdout);
4458
4459 if (evlist == NULL) {
4460 *pevlist = evlist = evlist__new();
4461 if (evlist == NULL)
4462 return -ENOMEM;
4463 }
4464
4465 evsel = evsel__new(&event->attr.attr);
4466 if (evsel == NULL)
4467 return -ENOMEM;
4468
4469 evlist__add(evlist, evsel);
4470
4471 n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
4472 n_ids = n_ids / sizeof(u64);
4473 /*
4474 * The header carries no cpu and thread maps, so for
4475 * allocating the perf_sample_id table we fake 1 cpu and
4476 * n_ids threads.
4477 */
4478 if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
4479 return -ENOMEM;
4480
4481 ids = perf_record_header_attr_id(event);
4482 for (i = 0; i < n_ids; i++) {
4483 perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
4484 }
4485
4486 return 0;
4487 }
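/*
 * Sketch of the id-count arithmetic used above, with made-up numbers;
 * not perf code.  A pipe-mode attr event carries the perf_event_attr
 * followed by the sample ids, so everything after the event header and
 * the attr.size bytes of attr is ids.
 */
#if 0
static void n_ids_demo(void)
{
	/* e.g. header.size = 160, sizeof(header) = 8, attr.size = 136 */
	unsigned int payload = 160 - 8 - 136;		/* 16 bytes of ids */
	unsigned int n_ids = payload / sizeof(u64);	/* 2 sample ids */

	(void)n_ids;
}
#endif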
4488
4489 int perf_event__process_event_update(const struct perf_tool *tool __maybe_unused,
4490 union perf_event *event,
4491 struct evlist **pevlist)
4492 {
4493 struct perf_record_event_update *ev = &event->event_update;
4494 struct evlist *evlist;
4495 struct evsel *evsel;
4496 struct perf_cpu_map *map;
4497
4498 if (dump_trace)
4499 perf_event__fprintf_event_update(event, stdout);
4500
4501 if (!pevlist || *pevlist == NULL)
4502 return -EINVAL;
4503
4504 evlist = *pevlist;
4505
4506 evsel = evlist__id2evsel(evlist, ev->id);
4507 if (evsel == NULL)
4508 return -EINVAL;
4509
4510 switch (ev->type) {
4511 case PERF_EVENT_UPDATE__UNIT:
4512 free((char *)evsel->unit);
4513 evsel->unit = strdup(ev->unit);
4514 break;
4515 case PERF_EVENT_UPDATE__NAME:
4516 free(evsel->name);
4517 evsel->name = strdup(ev->name);
4518 break;
4519 case PERF_EVENT_UPDATE__SCALE:
4520 evsel->scale = ev->scale.scale;
4521 break;
4522 case PERF_EVENT_UPDATE__CPUS:
4523 map = cpu_map__new_data(&ev->cpus.cpus);
4524 if (map) {
4525 perf_cpu_map__put(evsel->core.pmu_cpus);
4526 evsel->core.pmu_cpus = map;
4527 } else
4528 pr_err("failed to get event_update cpus\n");
4529 default:
4530 break;
4531 }
4532
4533 return 0;
4534 }
4535
4536 #ifdef HAVE_LIBTRACEEVENT
4537 int perf_event__process_tracing_data(const struct perf_tool *tool __maybe_unused,
4538 struct perf_session *session,
4539 union perf_event *event)
4540 {
4541 ssize_t size_read, padding, size = event->tracing_data.size;
4542 int fd = perf_data__fd(session->data);
4543 char buf[BUFSIZ];
4544
4545 /*
4546 * The pipe fd is already in proper place and in any case
4547 * we can't move it, and we'd screw the case where we read
4548 * 'pipe' data from regular file. The trace_report reads
4549 * data from 'fd' so we need to set it directly behind the
4550 * event, where the tracing data starts.
4551 */
4552 if (!perf_data__is_pipe(session->data)) {
4553 off_t offset = lseek(fd, 0, SEEK_CUR);
4554
4555 /* setup for reading amidst mmap */
4556 lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
4557 SEEK_SET);
4558 }
4559
4560 size_read = trace_report(fd, &session->tevent, session->trace_event_repipe);
4561 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
4562
4563 if (readn(fd, buf, padding) < 0) {
4564 pr_err("%s: reading input file", __func__);
4565 return -1;
4566 }
4567 if (session->trace_event_repipe) {
4568 int retw = write(STDOUT_FILENO, buf, padding);
4569 if (retw <= 0 || retw != padding) {
4570 pr_err("%s: repiping tracing data padding", __func__);
4571 return -1;
4572 }
4573 }
4574
4575 if (size_read + padding != size) {
4576 pr_err("%s: tracing data size mismatch", __func__);
4577 return -1;
4578 }
4579
4580 evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent);
4581
4582 return size_read + padding;
4583 }
4584 #endif
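/*
 * Sketch of how the padding consumed above is computed, with made-up
 * numbers; not perf code.  The tracing data blob is padded up to a u64
 * boundary in the stream, so the reader must skip (and, when repiping,
 * forward) the same pad bytes.
 */
#if 0
static void padding_demo(void)
{
	ssize_t size_read = 4093;			/* bytes trace_report() consumed */
	ssize_t aligned = (size_read + 7) & ~7UL;	/* next multiple of sizeof(u64) */
	ssize_t padding = aligned - size_read;		/* 3 pad bytes to skip/forward */

	(void)padding;
}
#endif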
4585
4586 int perf_event__process_build_id(const struct perf_tool *tool __maybe_unused,
4587 struct perf_session *session,
4588 union perf_event *event)
4589 {
4590 __event_process_build_id(&event->build_id,
4591 event->build_id.filename,
4592 session);
4593 return 0;
4594 }
4595