xref: /linux/tools/perf/util/header.c (revision 8520a98dbab61e9e340cdfb72dd17ccc8a98961e)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include "string2.h"
5 #include <sys/param.h>
6 #include <sys/types.h>
7 #include <byteswap.h>
8 #include <unistd.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <linux/compiler.h>
12 #include <linux/list.h>
13 #include <linux/kernel.h>
14 #include <linux/bitops.h>
15 #include <linux/string.h>
16 #include <linux/stringify.h>
17 #include <linux/zalloc.h>
18 #include <sys/stat.h>
19 #include <sys/utsname.h>
20 #include <linux/time64.h>
21 #include <dirent.h>
22 #include <bpf/libbpf.h>
23 #include <perf/cpumap.h>
24 
25 #include "evlist.h"
26 #include "evsel.h"
27 #include "header.h"
28 #include "memswap.h"
29 #include "trace-event.h"
30 #include "session.h"
31 #include "symbol.h"
32 #include "debug.h"
33 #include "cpumap.h"
34 #include "pmu.h"
35 #include "vdso.h"
36 #include "strbuf.h"
37 #include "build-id.h"
38 #include "data.h"
39 #include <api/fs/fs.h>
40 #include "asm/bug.h"
41 #include "tool.h"
42 #include "time-utils.h"
43 #include "units.h"
44 #include "util.h"
45 #include "cputopo.h"
46 #include "bpf-event.h"
47 
48 #include <linux/ctype.h>
49 
50 /*
51  * magic2 = "PERFILE2"
52  * It must be stored as a numerical value so that the writer's
53  * endianness determines the memory layout. That way we are able
54  * to detect the endianness when reading the perf.data file
55  * back in.
56  *
57  * We also check for the legacy (PERFFILE) format.
58  */
59 static const char *__perf_magic1 = "PERFFILE";
60 static const u64 __perf_magic2    = 0x32454c4946524550ULL;
61 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
62 
63 #define PERF_MAGIC	__perf_magic2
64 
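/*
 * Illustrative sketch (not part of the original file): how the numeric
 * magic enables endianness detection on read.  A header whose magic
 * equals the byte-swapped constant was written on a machine of the
 * opposite endianness, so every header field must be bswap'ed on load.
 * The helper name is hypothetical.
 */
static inline int __maybe_unused perf_magic__needs_swap(u64 magic)
{
	if (magic == __perf_magic2)
		return 0;	/* same byte order as this host */
	if (magic == __perf_magic2_sw)
		return 1;	/* opposite byte order: swap on read */
	return -1;		/* not a PERFILE2 header (maybe legacy PERFFILE) */
}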
65 const char perf_version_string[] = PERF_VERSION;
66 
67 struct perf_file_attr {
68 	struct perf_event_attr	attr;
69 	struct perf_file_section	ids;
70 };
71 
72 struct feat_fd {
73 	struct perf_header	*ph;
74 	int			fd;
75 	void			*buf;	/* Either buf != NULL or fd >= 0 */
76 	ssize_t			offset;
77 	size_t			size;
78 	struct evsel	*events;
79 };
80 
81 void perf_header__set_feat(struct perf_header *header, int feat)
82 {
83 	set_bit(feat, header->adds_features);
84 }
85 
86 void perf_header__clear_feat(struct perf_header *header, int feat)
87 {
88 	clear_bit(feat, header->adds_features);
89 }
90 
91 bool perf_header__has_feat(const struct perf_header *header, int feat)
92 {
93 	return test_bit(feat, header->adds_features);
94 }
95 
96 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
97 {
98 	ssize_t ret = writen(ff->fd, buf, size);
99 
100 	if (ret != (ssize_t)size)
101 		return ret < 0 ? (int)ret : -1;
102 	return 0;
103 }
104 
105 static int __do_write_buf(struct feat_fd *ff,  const void *buf, size_t size)
106 {
107 	/* struct perf_event_header::size is u16 */
108 	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
109 	size_t new_size = ff->size;
110 	void *addr;
111 
112 	if (size + ff->offset > max_size)
113 		return -E2BIG;
114 
115 	while (size > (new_size - ff->offset))
116 		new_size <<= 1;
117 	new_size = min(max_size, new_size);
118 
119 	if (ff->size < new_size) {
120 		addr = realloc(ff->buf, new_size);
121 		if (!addr)
122 			return -ENOMEM;
123 		ff->buf = addr;
124 		ff->size = new_size;
125 	}
126 
127 	memcpy(ff->buf + ff->offset, buf, size);
128 	ff->offset += size;
129 
130 	return 0;
131 }
132 
133 /* Return: 0 if succeeded, -ERR if failed. */
134 int do_write(struct feat_fd *ff, const void *buf, size_t size)
135 {
136 	if (!ff->buf)
137 		return __do_write_fd(ff, buf, size);
138 	return __do_write_buf(ff, buf, size);
139 }
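/*
 * Usage sketch (hypothetical values): the same do_write() call serves
 * both backends, only the feat_fd setup differs.
 *
 *	struct feat_fd ff = { .ph = ph, .fd = fd, .buf = NULL };
 *	u32 val = 42;
 *	err = do_write(&ff, &val, sizeof(val));	// goes to __do_write_fd()
 *
 * Pipe mode instead sets ff.buf to a malloc'ed buffer (ff.size being
 * its length); __do_write_buf() then grows the buffer as needed, capped
 * so the record still fits in the u16 perf_event_header::size.
 */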
140 
141 /* Return: 0 if succeeded, -ERR if failed. */
142 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
143 {
144 	u64 *p = (u64 *) set;
145 	int i, ret;
146 
147 	ret = do_write(ff, &size, sizeof(size));
148 	if (ret < 0)
149 		return ret;
150 
151 	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
152 		ret = do_write(ff, p + i, sizeof(*p));
153 		if (ret < 0)
154 			return ret;
155 	}
156 
157 	return 0;
158 }
159 
160 /* Return: 0 if succeeded, -ERR if failed. */
161 int write_padded(struct feat_fd *ff, const void *bf,
162 		 size_t count, size_t count_aligned)
163 {
164 	static const char zero_buf[NAME_ALIGN];
165 	int err = do_write(ff, bf, count);
166 
167 	if (!err)
168 		err = do_write(ff, zero_buf, count_aligned - count);
169 
170 	return err;
171 }
172 
173 #define string_size(str)						\
174 	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
175 
176 /* Return: 0 if succeeded, -ERR if failed. */
177 static int do_write_string(struct feat_fd *ff, const char *str)
178 {
179 	u32 len, olen;
180 	int ret;
181 
182 	olen = strlen(str) + 1;
183 	len = PERF_ALIGN(olen, NAME_ALIGN);
184 
185 	/* write len, incl. \0 */
186 	ret = do_write(ff, &len, sizeof(len));
187 	if (ret < 0)
188 		return ret;
189 
190 	return write_padded(ff, str, olen, len);
191 }
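/*
 * Resulting on-file string layout (derived from the code above):
 *
 *	u32  len;	// strlen(str) + 1, rounded up to NAME_ALIGN
 *	char str[len];	// NUL-terminated, zero-padded up to len
 *
 * string_size() accounts for exactly these sizeof(u32) + len bytes.
 */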
192 
193 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
194 {
195 	ssize_t ret = readn(ff->fd, addr, size);
196 
197 	if (ret != size)
198 		return ret < 0 ? (int)ret : -1;
199 	return 0;
200 }
201 
202 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
203 {
204 	if (size > (ssize_t)ff->size - ff->offset)
205 		return -1;
206 
207 	memcpy(addr, ff->buf + ff->offset, size);
208 	ff->offset += size;
209 
210 	return 0;
211 
212 }
213 
214 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
215 {
216 	if (!ff->buf)
217 		return __do_read_fd(ff, addr, size);
218 	return __do_read_buf(ff, addr, size);
219 }
220 
221 static int do_read_u32(struct feat_fd *ff, u32 *addr)
222 {
223 	int ret;
224 
225 	ret = __do_read(ff, addr, sizeof(*addr));
226 	if (ret)
227 		return ret;
228 
229 	if (ff->ph->needs_swap)
230 		*addr = bswap_32(*addr);
231 	return 0;
232 }
233 
234 static int do_read_u64(struct feat_fd *ff, u64 *addr)
235 {
236 	int ret;
237 
238 	ret = __do_read(ff, addr, sizeof(*addr));
239 	if (ret)
240 		return ret;
241 
242 	if (ff->ph->needs_swap)
243 		*addr = bswap_64(*addr);
244 	return 0;
245 }
246 
247 static char *do_read_string(struct feat_fd *ff)
248 {
249 	u32 len;
250 	char *buf;
251 
252 	if (do_read_u32(ff, &len))
253 		return NULL;
254 
255 	buf = malloc(len);
256 	if (!buf)
257 		return NULL;
258 
259 	if (!__do_read(ff, buf, len)) {
260 		/*
261 		 * strings are padded by zeroes
262 		 * thus the actual strlen of buf
263 		 * may be less than len
264 		 */
265 		return buf;
266 	}
267 
268 	free(buf);
269 	return NULL;
270 }
271 
272 /* Return: 0 if succeeded, -ERR if failed. */
273 static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
274 {
275 	unsigned long *set;
276 	u64 size, *p;
277 	int i, ret;
278 
279 	ret = do_read_u64(ff, &size);
280 	if (ret)
281 		return ret;
282 
283 	set = bitmap_alloc(size);
284 	if (!set)
285 		return -ENOMEM;
286 
287 	p = (u64 *) set;
288 
289 	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
290 		ret = do_read_u64(ff, p + i);
291 		if (ret < 0) {
292 			free(set);
293 			return ret;
294 		}
295 	}
296 
297 	*pset  = set;
298 	*psize = size;
299 	return 0;
300 }
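/*
 * On-file bitmap layout, shared with do_write_bitmap() above:
 *
 *	u64 size;			// number of bits
 *	u64 words[BITS_TO_U64(size)];	// bitmap payload
 *
 * do_read_u64() byte-swaps each word when needs_swap is set, so the
 * write/read pair round-trips across endianness.
 */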
301 
302 static int write_tracing_data(struct feat_fd *ff,
303 			      struct evlist *evlist)
304 {
305 	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
306 		return -1;
307 
308 	return read_tracing_data(ff->fd, &evlist->core.entries);
309 }
310 
311 static int write_build_id(struct feat_fd *ff,
312 			  struct evlist *evlist __maybe_unused)
313 {
314 	struct perf_session *session;
315 	int err;
316 
317 	session = container_of(ff->ph, struct perf_session, header);
318 
319 	if (!perf_session__read_build_ids(session, true))
320 		return -1;
321 
322 	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
323 		return -1;
324 
325 	err = perf_session__write_buildid_table(session, ff);
326 	if (err < 0) {
327 		pr_debug("failed to write buildid table\n");
328 		return err;
329 	}
330 	perf_session__cache_build_ids(session);
331 
332 	return 0;
333 }
334 
335 static int write_hostname(struct feat_fd *ff,
336 			  struct evlist *evlist __maybe_unused)
337 {
338 	struct utsname uts;
339 	int ret;
340 
341 	ret = uname(&uts);
342 	if (ret < 0)
343 		return -1;
344 
345 	return do_write_string(ff, uts.nodename);
346 }
347 
348 static int write_osrelease(struct feat_fd *ff,
349 			   struct evlist *evlist __maybe_unused)
350 {
351 	struct utsname uts;
352 	int ret;
353 
354 	ret = uname(&uts);
355 	if (ret < 0)
356 		return -1;
357 
358 	return do_write_string(ff, uts.release);
359 }
360 
361 static int write_arch(struct feat_fd *ff,
362 		      struct evlist *evlist __maybe_unused)
363 {
364 	struct utsname uts;
365 	int ret;
366 
367 	ret = uname(&uts);
368 	if (ret < 0)
369 		return -1;
370 
371 	return do_write_string(ff, uts.machine);
372 }
373 
374 static int write_version(struct feat_fd *ff,
375 			 struct evlist *evlist __maybe_unused)
376 {
377 	return do_write_string(ff, perf_version_string);
378 }
379 
380 static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
381 {
382 	FILE *file;
383 	char *buf = NULL;
384 	char *s, *p;
385 	const char *search = cpuinfo_proc;
386 	size_t len = 0;
387 	int ret = -1;
388 
389 	if (!search)
390 		return -1;
391 
392 	file = fopen("/proc/cpuinfo", "r");
393 	if (!file)
394 		return -1;
395 
396 	while (getline(&buf, &len, file) > 0) {
397 		ret = strncmp(buf, search, strlen(search));
398 		if (!ret)
399 			break;
400 	}
401 
402 	if (ret) {
403 		ret = -1;
404 		goto done;
405 	}
406 
407 	s = buf;
408 
409 	p = strchr(buf, ':');
410 	if (p && *(p+1) == ' ' && *(p+2))
411 		s = p + 2;
412 	p = strchr(s, '\n');
413 	if (p)
414 		*p = '\0';
415 
416 	/* squash extra space characters (branding string) */
417 	p = s;
418 	while (*p) {
419 		if (isspace(*p)) {
420 			char *r = p + 1;
421 			char *q = skip_spaces(r);
422 			*p = ' ';
423 			if (q != (p+1))
424 				while ((*r++ = *q++));
425 		}
426 		p++;
427 	}
428 	ret = do_write_string(ff, s);
429 done:
430 	free(buf);
431 	fclose(file);
432 	return ret;
433 }
434 
435 static int write_cpudesc(struct feat_fd *ff,
436 		       struct evlist *evlist __maybe_unused)
437 {
438 #if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
439 #define CPUINFO_PROC	{ "cpu", }
440 #elif defined(__s390__)
441 #define CPUINFO_PROC	{ "vendor_id", }
442 #elif defined(__sh__)
443 #define CPUINFO_PROC	{ "cpu type", }
444 #elif defined(__alpha__) || defined(__mips__)
445 #define CPUINFO_PROC	{ "cpu model", }
446 #elif defined(__arm__)
447 #define CPUINFO_PROC	{ "model name", "Processor", }
448 #elif defined(__arc__)
449 #define CPUINFO_PROC	{ "Processor", }
450 #elif defined(__xtensa__)
451 #define CPUINFO_PROC	{ "core ID", }
452 #else
453 #define CPUINFO_PROC	{ "model name", }
454 #endif
455 	const char *cpuinfo_procs[] = CPUINFO_PROC;
456 #undef CPUINFO_PROC
457 	unsigned int i;
458 
459 	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
460 		int ret;
461 		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
462 		if (ret >= 0)
463 			return ret;
464 	}
465 	return -1;
466 }
467 
468 
469 static int write_nrcpus(struct feat_fd *ff,
470 			struct evlist *evlist __maybe_unused)
471 {
472 	long nr;
473 	u32 nrc, nra;
474 	int ret;
475 
476 	nrc = cpu__max_present_cpu();
477 
478 	nr = sysconf(_SC_NPROCESSORS_ONLN);
479 	if (nr < 0)
480 		return -1;
481 
482 	nra = (u32)(nr & UINT_MAX);
483 
484 	ret = do_write(ff, &nrc, sizeof(nrc));
485 	if (ret < 0)
486 		return ret;
487 
488 	return do_write(ff, &nra, sizeof(nra));
489 }
490 
491 static int write_event_desc(struct feat_fd *ff,
492 			    struct evlist *evlist)
493 {
494 	struct evsel *evsel;
495 	u32 nre, nri, sz;
496 	int ret;
497 
498 	nre = evlist->core.nr_entries;
499 
500 	/*
501 	 * write number of events
502 	 */
503 	ret = do_write(ff, &nre, sizeof(nre));
504 	if (ret < 0)
505 		return ret;
506 
507 	/*
508 	 * size of perf_event_attr struct
509 	 */
510 	sz = (u32)sizeof(evsel->core.attr);
511 	ret = do_write(ff, &sz, sizeof(sz));
512 	if (ret < 0)
513 		return ret;
514 
515 	evlist__for_each_entry(evlist, evsel) {
516 		ret = do_write(ff, &evsel->core.attr, sz);
517 		if (ret < 0)
518 			return ret;
519 		/*
520 		 * write the number of unique ids for this event;
521 		 * there is one id per instance of an event.
522 		 *
523 		 * copy the count into a u32 (nri) so the on-file
524 		 * size is independent of the in-memory type.
525 		 */
526 		nri = evsel->ids;
527 		ret = do_write(ff, &nri, sizeof(nri));
528 		if (ret < 0)
529 			return ret;
530 
531 		/*
532 		 * write event string as passed on cmdline
533 		 */
534 		ret = do_write_string(ff, perf_evsel__name(evsel));
535 		if (ret < 0)
536 			return ret;
537 		/*
538 		 * write unique ids for this event
539 		 */
540 		ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
541 		if (ret < 0)
542 			return ret;
543 	}
544 	return 0;
545 }
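/*
 * Resulting EVENT_DESC section layout (derived from the writes above):
 *
 *	u32 nre;				// number of events
 *	u32 sz;					// sizeof(perf_event_attr)
 *	struct {
 *		struct perf_event_attr attr;	// sz bytes
 *		u32 nr_ids;
 *		char name[];			// do_write_string() format
 *		u64 ids[nr_ids];
 *	} events[nre];
 */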
546 
547 static int write_cmdline(struct feat_fd *ff,
548 			 struct evlist *evlist __maybe_unused)
549 {
550 	char pbuf[MAXPATHLEN], *buf;
551 	int i, ret, n;
552 
553 	/* actual path to perf binary */
554 	buf = perf_exe(pbuf, MAXPATHLEN);
555 
556 	/* account for binary path */
557 	n = perf_env.nr_cmdline + 1;
558 
559 	ret = do_write(ff, &n, sizeof(n));
560 	if (ret < 0)
561 		return ret;
562 
563 	ret = do_write_string(ff, buf);
564 	if (ret < 0)
565 		return ret;
566 
567 	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
568 		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
569 		if (ret < 0)
570 			return ret;
571 	}
572 	return 0;
573 }
574 
575 
576 static int write_cpu_topology(struct feat_fd *ff,
577 			      struct evlist *evlist __maybe_unused)
578 {
579 	struct cpu_topology *tp;
580 	u32 i;
581 	int ret, j;
582 
583 	tp = cpu_topology__new();
584 	if (!tp)
585 		return -1;
586 
587 	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
588 	if (ret < 0)
589 		goto done;
590 
591 	for (i = 0; i < tp->core_sib; i++) {
592 		ret = do_write_string(ff, tp->core_siblings[i]);
593 		if (ret < 0)
594 			goto done;
595 	}
596 	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
597 	if (ret < 0)
598 		goto done;
599 
600 	for (i = 0; i < tp->thread_sib; i++) {
601 		ret = do_write_string(ff, tp->thread_siblings[i]);
602 		if (ret < 0)
603 			break;
604 	}
605 
606 	ret = perf_env__read_cpu_topology_map(&perf_env);
607 	if (ret < 0)
608 		goto done;
609 
610 	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
611 		ret = do_write(ff, &perf_env.cpu[j].core_id,
612 			       sizeof(perf_env.cpu[j].core_id));
613 		if (ret < 0)
614 			return ret;
615 		ret = do_write(ff, &perf_env.cpu[j].socket_id,
616 			       sizeof(perf_env.cpu[j].socket_id));
617 		if (ret < 0)
618 			return ret;
619 	}
620 
621 	if (!tp->die_sib)
622 		goto done;
623 
624 	ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
625 	if (ret < 0)
626 		goto done;
627 
628 	for (i = 0; i < tp->die_sib; i++) {
629 		ret = do_write_string(ff, tp->die_siblings[i]);
630 		if (ret < 0)
631 			goto done;
632 	}
633 
634 	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
635 		ret = do_write(ff, &perf_env.cpu[j].die_id,
636 			       sizeof(perf_env.cpu[j].die_id));
637 		if (ret < 0)
638 			return ret;
639 	}
640 
641 done:
642 	cpu_topology__delete(tp);
643 	return ret;
644 }
645 
646 
647 
648 static int write_total_mem(struct feat_fd *ff,
649 			   struct evlist *evlist __maybe_unused)
650 {
651 	char *buf = NULL;
652 	FILE *fp;
653 	size_t len = 0;
654 	int ret = -1, n;
655 	uint64_t mem;
656 
657 	fp = fopen("/proc/meminfo", "r");
658 	if (!fp)
659 		return -1;
660 
661 	while (getline(&buf, &len, fp) > 0) {
662 		ret = strncmp(buf, "MemTotal:", 9);
663 		if (!ret)
664 			break;
665 	}
666 	if (!ret) {
667 		n = sscanf(buf, "%*s %"PRIu64, &mem);
668 		if (n == 1)
669 			ret = do_write(ff, &mem, sizeof(mem));
670 	} else
671 		ret = -1;
672 	free(buf);
673 	fclose(fp);
674 	return ret;
675 }
676 
677 static int write_numa_topology(struct feat_fd *ff,
678 			       struct evlist *evlist __maybe_unused)
679 {
680 	struct numa_topology *tp;
681 	int ret = -1;
682 	u32 i;
683 
684 	tp = numa_topology__new();
685 	if (!tp)
686 		return -ENOMEM;
687 
688 	ret = do_write(ff, &tp->nr, sizeof(u32));
689 	if (ret < 0)
690 		goto err;
691 
692 	for (i = 0; i < tp->nr; i++) {
693 		struct numa_topology_node *n = &tp->nodes[i];
694 
695 		ret = do_write(ff, &n->node, sizeof(u32));
696 		if (ret < 0)
697 			goto err;
698 
699 		ret = do_write(ff, &n->mem_total, sizeof(u64));
700 		if (ret)
701 			goto err;
702 
703 		ret = do_write(ff, &n->mem_free, sizeof(u64));
704 		if (ret)
705 			goto err;
706 
707 		ret = do_write_string(ff, n->cpus);
708 		if (ret < 0)
709 			goto err;
710 	}
711 
712 	ret = 0;
713 
714 err:
715 	numa_topology__delete(tp);
716 	return ret;
717 }
718 
719 /*
720  * File format:
721  *
722  * struct pmu_mappings {
723  *	u32	pmu_num;
724  *	struct pmu_map {
725  *		u32	type;
726  *		char	name[];
727  *	}[pmu_num];
728  * };
729  */
730 
731 static int write_pmu_mappings(struct feat_fd *ff,
732 			      struct evlist *evlist __maybe_unused)
733 {
734 	struct perf_pmu *pmu = NULL;
735 	u32 pmu_num = 0;
736 	int ret;
737 
738 	/*
739 	 * Do a first pass to count the number of PMUs, avoiding an
740 	 * lseek() so this works in pipe mode as well.
741 	 */
742 	while ((pmu = perf_pmu__scan(pmu))) {
743 		if (!pmu->name)
744 			continue;
745 		pmu_num++;
746 	}
747 
748 	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
749 	if (ret < 0)
750 		return ret;
751 
752 	while ((pmu = perf_pmu__scan(pmu))) {
753 		if (!pmu->name)
754 			continue;
755 
756 		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
757 		if (ret < 0)
758 			return ret;
759 
760 		ret = do_write_string(ff, pmu->name);
761 		if (ret < 0)
762 			return ret;
763 	}
764 
765 	return 0;
766 }
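/*
 * Decoding sketch (illustrative, mirrors the writes above):
 *
 *	u32 pmu_num, type;
 *	char *name;
 *
 *	if (do_read_u32(ff, &pmu_num))
 *		return -1;
 *	while (pmu_num--) {
 *		if (do_read_u32(ff, &type))
 *			return -1;
 *		name = do_read_string(ff);	// length-prefixed, padded
 *		...
 *	}
 */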
767 
768 /*
769  * File format:
770  *
771  * struct group_descs {
772  *	u32	nr_groups;
773  *	struct group_desc {
774  *		char	name[];
775  *		u32	leader_idx;
776  *		u32	nr_members;
777  *	}[nr_groups];
778  * };
779  */
780 static int write_group_desc(struct feat_fd *ff,
781 			    struct evlist *evlist)
782 {
783 	u32 nr_groups = evlist->nr_groups;
784 	struct evsel *evsel;
785 	int ret;
786 
787 	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
788 	if (ret < 0)
789 		return ret;
790 
791 	evlist__for_each_entry(evlist, evsel) {
792 		if (perf_evsel__is_group_leader(evsel) &&
793 		    evsel->core.nr_members > 1) {
794 			const char *name = evsel->group_name ?: "{anon_group}";
795 			u32 leader_idx = evsel->idx;
796 			u32 nr_members = evsel->core.nr_members;
797 
798 			ret = do_write_string(ff, name);
799 			if (ret < 0)
800 				return ret;
801 
802 			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
803 			if (ret < 0)
804 				return ret;
805 
806 			ret = do_write(ff, &nr_members, sizeof(nr_members));
807 			if (ret < 0)
808 				return ret;
809 		}
810 	}
811 	return 0;
812 }
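/*
 * Reading the section back (sketch): nr_groups first, then per group a
 * do_read_string() name followed by two u32s, leader_idx and
 * nr_members.  leader_idx is the leader's position in the evlist,
 * which is what lets group membership be re-established on load.
 */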
813 
814 /*
815  * Return the CPU id as a raw string.
816  *
817  * Each architecture should provide a more precise id string that
818  * can be used to match the architecture's "mapfile".
819  */
820 char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
821 {
822 	return NULL;
823 }
824 
825 /* Return zero when the cpuid from the mapfile.csv matches the
826  * cpuid string generated on this platform.
827  * Otherwise return non-zero.
828  */
829 int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
830 {
831 	regex_t re;
832 	regmatch_t pmatch[1];
833 	int match;
834 
835 	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
836 		/* Warn that the mapfile regex failed to compile. */
837 		pr_info("Invalid regular expression %s\n", mapcpuid);
838 		return 1;
839 	}
840 
841 	match = !regexec(&re, cpuid, 1, pmatch, 0);
842 	regfree(&re);
843 	if (match) {
844 		size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);
845 
846 		/* Verify the entire string matched. */
847 		if (match_len == strlen(cpuid))
848 			return 0;
849 	}
850 	return 1;
851 }
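/*
 * Example (illustrative values): with the mapfile.csv pattern
 * "GenuineIntel-6-[45]E", strcmp_cpuid_str() returns 0 for the cpuid
 * "GenuineIntel-6-4E" because the regex matches the entire string, but
 * returns 1 for "GenuineIntel-6-4E-X" since the match would cover only
 * a prefix.
 */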
852 
853 /*
854  * default get_cpuid(): nothing gets recorded
855  * actual implementation must be in arch/$(SRCARCH)/util/header.c
856  */
857 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
858 {
859 	return -1;
860 }
861 
862 static int write_cpuid(struct feat_fd *ff,
863 		       struct evlist *evlist __maybe_unused)
864 {
865 	char buffer[64];
866 	int ret;
867 
868 	ret = get_cpuid(buffer, sizeof(buffer));
869 	if (ret)
870 		return -1;
871 
872 	return do_write_string(ff, buffer);
873 }
874 
875 static int write_branch_stack(struct feat_fd *ff __maybe_unused,
876 			      struct evlist *evlist __maybe_unused)
877 {
878 	return 0;
879 }
880 
881 static int write_auxtrace(struct feat_fd *ff,
882 			  struct evlist *evlist __maybe_unused)
883 {
884 	struct perf_session *session;
885 	int err;
886 
887 	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
888 		return -1;
889 
890 	session = container_of(ff->ph, struct perf_session, header);
891 
892 	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
893 	if (err < 0)
894 		pr_err("Failed to write auxtrace index\n");
895 	return err;
896 }
897 
898 static int write_clockid(struct feat_fd *ff,
899 			 struct evlist *evlist __maybe_unused)
900 {
901 	return do_write(ff, &ff->ph->env.clockid_res_ns,
902 			sizeof(ff->ph->env.clockid_res_ns));
903 }
904 
905 static int write_dir_format(struct feat_fd *ff,
906 			    struct evlist *evlist __maybe_unused)
907 {
908 	struct perf_session *session;
909 	struct perf_data *data;
910 
911 	session = container_of(ff->ph, struct perf_session, header);
912 	data = session->data;
913 
914 	if (WARN_ON(!perf_data__is_dir(data)))
915 		return -1;
916 
917 	return do_write(ff, &data->dir.version, sizeof(data->dir.version));
918 }
919 
920 #ifdef HAVE_LIBBPF_SUPPORT
921 static int write_bpf_prog_info(struct feat_fd *ff,
922 			       struct evlist *evlist __maybe_unused)
923 {
924 	struct perf_env *env = &ff->ph->env;
925 	struct rb_root *root;
926 	struct rb_node *next;
927 	int ret;
928 
929 	down_read(&env->bpf_progs.lock);
930 
931 	ret = do_write(ff, &env->bpf_progs.infos_cnt,
932 		       sizeof(env->bpf_progs.infos_cnt));
933 	if (ret < 0)
934 		goto out;
935 
936 	root = &env->bpf_progs.infos;
937 	next = rb_first(root);
938 	while (next) {
939 		struct bpf_prog_info_node *node;
940 		size_t len;
941 
942 		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
943 		next = rb_next(&node->rb_node);
944 		len = sizeof(struct bpf_prog_info_linear) +
945 			node->info_linear->data_len;
946 
947 		/* before writing to file, translate address to offset */
948 		bpf_program__bpil_addr_to_offs(node->info_linear);
949 		ret = do_write(ff, node->info_linear, len);
950 		/*
951 		 * translate back to address even when do_write() fails,
952 		 * so that this function never changes the data.
953 		 */
954 		bpf_program__bpil_offs_to_addr(node->info_linear);
955 		if (ret < 0)
956 			goto out;
957 	}
958 out:
959 	up_read(&env->bpf_progs.lock);
960 	return ret;
961 }
962 #else // HAVE_LIBBPF_SUPPORT
963 static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused,
964 			       struct evlist *evlist __maybe_unused)
965 {
966 	return 0;
967 }
968 #endif // HAVE_LIBBPF_SUPPORT
969 
970 static int write_bpf_btf(struct feat_fd *ff,
971 			 struct evlist *evlist __maybe_unused)
972 {
973 	struct perf_env *env = &ff->ph->env;
974 	struct rb_root *root;
975 	struct rb_node *next;
976 	int ret;
977 
978 	down_read(&env->bpf_progs.lock);
979 
980 	ret = do_write(ff, &env->bpf_progs.btfs_cnt,
981 		       sizeof(env->bpf_progs.btfs_cnt));
982 
983 	if (ret < 0)
984 		goto out;
985 
986 	root = &env->bpf_progs.btfs;
987 	next = rb_first(root);
988 	while (next) {
989 		struct btf_node *node;
990 
991 		node = rb_entry(next, struct btf_node, rb_node);
992 		next = rb_next(&node->rb_node);
993 		ret = do_write(ff, &node->id,
994 			       sizeof(u32) * 2 + node->data_size);
995 		if (ret < 0)
996 			goto out;
997 	}
998 out:
999 	up_read(&env->bpf_progs.lock);
1000 	return ret;
1001 }
1002 
1003 static int cpu_cache_level__sort(const void *a, const void *b)
1004 {
1005 	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
1006 	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
1007 
1008 	return cache_a->level - cache_b->level;
1009 }
1010 
1011 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
1012 {
1013 	if (a->level != b->level)
1014 		return false;
1015 
1016 	if (a->line_size != b->line_size)
1017 		return false;
1018 
1019 	if (a->sets != b->sets)
1020 		return false;
1021 
1022 	if (a->ways != b->ways)
1023 		return false;
1024 
1025 	if (strcmp(a->type, b->type))
1026 		return false;
1027 
1028 	if (strcmp(a->size, b->size))
1029 		return false;
1030 
1031 	if (strcmp(a->map, b->map))
1032 		return false;
1033 
1034 	return true;
1035 }
1036 
1037 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
1038 {
1039 	char path[PATH_MAX], file[PATH_MAX];
1040 	struct stat st;
1041 	size_t len;
1042 
1043 	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
1044 	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
1045 
1046 	if (stat(file, &st))
1047 		return 1;
1048 
1049 	scnprintf(file, PATH_MAX, "%s/level", path);
1050 	if (sysfs__read_int(file, (int *) &cache->level))
1051 		return -1;
1052 
1053 	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
1054 	if (sysfs__read_int(file, (int *) &cache->line_size))
1055 		return -1;
1056 
1057 	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
1058 	if (sysfs__read_int(file, (int *) &cache->sets))
1059 		return -1;
1060 
1061 	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
1062 	if (sysfs__read_int(file, (int *) &cache->ways))
1063 		return -1;
1064 
1065 	scnprintf(file, PATH_MAX, "%s/type", path);
1066 	if (sysfs__read_str(file, &cache->type, &len))
1067 		return -1;
1068 
1069 	cache->type[len] = 0;
1070 	cache->type = strim(cache->type);
1071 
1072 	scnprintf(file, PATH_MAX, "%s/size", path);
1073 	if (sysfs__read_str(file, &cache->size, &len)) {
1074 		zfree(&cache->type);
1075 		return -1;
1076 	}
1077 
1078 	cache->size[len] = 0;
1079 	cache->size = strim(cache->size);
1080 
1081 	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
1082 	if (sysfs__read_str(file, &cache->map, &len)) {
1083 		zfree(&cache->map);
1084 		zfree(&cache->type);
1085 		return -1;
1086 	}
1087 
1088 	cache->map[len] = 0;
1089 	cache->map = strim(cache->map);
1090 	return 0;
1091 }
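/*
 * Example (illustrative paths): for cpu 0, cache index 1 this reads
 *
 *	/sys/devices/system/cpu/cpu0/cache/index1/{level,
 *	    coherency_line_size, number_of_sets, ways_of_associativity,
 *	    type, size, shared_cpu_list}
 *
 * The return value of 1 when the index directory is absent is what
 * lets build_caches() below stop probing higher indexes for a cpu.
 */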
1092 
1093 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
1094 {
1095 	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
1096 }
1097 
1098 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
1099 {
1100 	u32 i, cnt = 0;
1101 	long ncpus;
1102 	u32 nr, cpu;
1103 	u16 level;
1104 
1105 	ncpus = sysconf(_SC_NPROCESSORS_CONF);
1106 	if (ncpus < 0)
1107 		return -1;
1108 
1109 	nr = (u32)(ncpus & UINT_MAX);
1110 
1111 	for (cpu = 0; cpu < nr; cpu++) {
1112 		for (level = 0; level < 10; level++) {
1113 			struct cpu_cache_level c;
1114 			int err;
1115 
1116 			err = cpu_cache_level__read(&c, cpu, level);
1117 			if (err < 0)
1118 				return err;
1119 
1120 			if (err == 1)
1121 				break;
1122 
1123 			for (i = 0; i < cnt; i++) {
1124 				if (cpu_cache_level__cmp(&c, &caches[i]))
1125 					break;
1126 			}
1127 
1128 			if (i == cnt)
1129 				caches[cnt++] = c;
1130 			else
1131 				cpu_cache_level__free(&c);
1132 
1133 			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
1134 				goto out;
1135 		}
1136 	}
1137  out:
1138 	*cntp = cnt;
1139 	return 0;
1140 }
1141 
1142 #define MAX_CACHE_LVL 4
1143 
1144 static int write_cache(struct feat_fd *ff,
1145 		       struct evlist *evlist __maybe_unused)
1146 {
1147 	u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL;
1148 	struct cpu_cache_level caches[max_caches];
1149 	u32 cnt = 0, i, version = 1;
1150 	int ret;
1151 
1152 	ret = build_caches(caches, max_caches, &cnt);
1153 	if (ret)
1154 		goto out;
1155 
1156 	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);
1157 
1158 	ret = do_write(ff, &version, sizeof(u32));
1159 	if (ret < 0)
1160 		goto out;
1161 
1162 	ret = do_write(ff, &cnt, sizeof(u32));
1163 	if (ret < 0)
1164 		goto out;
1165 
1166 	for (i = 0; i < cnt; i++) {
1167 		struct cpu_cache_level *c = &caches[i];
1168 
1169 		#define _W(v)					\
1170 			ret = do_write(ff, &c->v, sizeof(u32));	\
1171 			if (ret < 0)				\
1172 				goto out;
1173 
1174 		_W(level)
1175 		_W(line_size)
1176 		_W(sets)
1177 		_W(ways)
1178 		#undef _W
1179 
1180 		#define _W(v)						\
1181 			ret = do_write_string(ff, (const char *) c->v);	\
1182 			if (ret < 0)					\
1183 				goto out;
1184 
1185 		_W(type)
1186 		_W(size)
1187 		_W(map)
1188 		#undef _W
1189 	}
1190 
1191 out:
1192 	for (i = 0; i < cnt; i++)
1193 		cpu_cache_level__free(&caches[i]);
1194 	return ret;
1195 }
1196 
1197 static int write_stat(struct feat_fd *ff __maybe_unused,
1198 		      struct evlist *evlist __maybe_unused)
1199 {
1200 	return 0;
1201 }
1202 
1203 static int write_sample_time(struct feat_fd *ff,
1204 			     struct evlist *evlist)
1205 {
1206 	int ret;
1207 
1208 	ret = do_write(ff, &evlist->first_sample_time,
1209 		       sizeof(evlist->first_sample_time));
1210 	if (ret < 0)
1211 		return ret;
1212 
1213 	return do_write(ff, &evlist->last_sample_time,
1214 			sizeof(evlist->last_sample_time));
1215 }
1216 
1217 
1218 static int memory_node__read(struct memory_node *n, unsigned long idx)
1219 {
1220 	unsigned int phys, size = 0;
1221 	char path[PATH_MAX];
1222 	struct dirent *ent;
1223 	DIR *dir;
1224 
1225 #define for_each_memory(mem, dir)					\
1226 	while ((ent = readdir(dir)))					\
1227 		if (strcmp(ent->d_name, ".") &&				\
1228 		    strcmp(ent->d_name, "..") &&			\
1229 		    sscanf(ent->d_name, "memory%u", &mem) == 1)
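/*
 * The macro expands to a while/if pair, so the statement following a
 * for_each_memory() use runs only for directory entries named
 * "memory<N>", with <N> scanned into 'mem'; "." and ".." are skipped.
 */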
1230 
1231 	scnprintf(path, PATH_MAX,
1232 		  "%s/devices/system/node/node%lu",
1233 		  sysfs__mountpoint(), idx);
1234 
1235 	dir = opendir(path);
1236 	if (!dir) {
1237 		pr_warning("failed: cant' open memory sysfs data\n");
1238 		return -1;
1239 	}
1240 
1241 	for_each_memory(phys, dir) {
1242 		size = max(phys, size);
1243 	}
1244 
1245 	size++;
1246 
1247 	n->set = bitmap_alloc(size);
1248 	if (!n->set) {
1249 		closedir(dir);
1250 		return -ENOMEM;
1251 	}
1252 
1253 	n->node = idx;
1254 	n->size = size;
1255 
1256 	rewinddir(dir);
1257 
1258 	for_each_memory(phys, dir) {
1259 		set_bit(phys, n->set);
1260 	}
1261 
1262 	closedir(dir);
1263 	return 0;
1264 }
1265 
1266 static int memory_node__sort(const void *a, const void *b)
1267 {
1268 	const struct memory_node *na = a;
1269 	const struct memory_node *nb = b;
1270 
1271 	return na->node - nb->node;
1272 }
1273 
1274 static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
1275 {
1276 	char path[PATH_MAX];
1277 	struct dirent *ent;
1278 	DIR *dir;
1279 	u64 cnt = 0;
1280 	int ret = 0;
1281 
1282 	scnprintf(path, PATH_MAX, "%s/devices/system/node/",
1283 		  sysfs__mountpoint());
1284 
1285 	dir = opendir(path);
1286 	if (!dir) {
1287 		pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
1288 			  __func__, path);
1289 		return -1;
1290 	}
1291 
1292 	while (!ret && (ent = readdir(dir))) {
1293 		unsigned int idx;
1294 		int r;
1295 
1296 		if (!strcmp(ent->d_name, ".") ||
1297 		    !strcmp(ent->d_name, ".."))
1298 			continue;
1299 
1300 		r = sscanf(ent->d_name, "node%u", &idx);
1301 		if (r != 1)
1302 			continue;
1303 
1304 		if (WARN_ONCE(cnt >= size,
1305 			      "failed to write MEM_TOPOLOGY, way too many nodes\n"))
1306 			return -1;
1307 
1308 		ret = memory_node__read(&nodes[cnt++], idx);
1309 	}
1310 
1311 	*cntp = cnt;
1312 	closedir(dir);
1313 
1314 	if (!ret)
1315 		qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
1316 
1317 	return ret;
1318 }
1319 
1320 #define MAX_MEMORY_NODES 2000
1321 
1322 /*
1323  * The MEM_TOPOLOGY feature holds the physical memory map for every
1324  * node in the system. The format of the data is as follows:
1325  *
1326  *  0 - version          | for future changes
1327  *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
1328  * 16 - count            | number of nodes
1329  *
1330  * For each node we then store the map of physical
1331  * memory block indexes:
1332  *
1333  * 32 - node id          | node index
1334  * 40 - size             | size of bitmap
1335  * 48 - bitmap           | bitmap of memory block indexes that belong to the node
1336  */
1337 static int write_mem_topology(struct feat_fd *ff __maybe_unused,
1338 			      struct evlist *evlist __maybe_unused)
1339 {
1340 	static struct memory_node nodes[MAX_MEMORY_NODES];
1341 	u64 bsize, version = 1, i, nr;
1342 	int ret;
1343 
1344 	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
1345 			      (unsigned long long *) &bsize);
1346 	if (ret)
1347 		return ret;
1348 
1349 	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
1350 	if (ret)
1351 		return ret;
1352 
1353 	ret = do_write(ff, &version, sizeof(version));
1354 	if (ret < 0)
1355 		goto out;
1356 
1357 	ret = do_write(ff, &bsize, sizeof(bsize));
1358 	if (ret < 0)
1359 		goto out;
1360 
1361 	ret = do_write(ff, &nr, sizeof(nr));
1362 	if (ret < 0)
1363 		goto out;
1364 
1365 	for (i = 0; i < nr; i++) {
1366 		struct memory_node *n = &nodes[i];
1367 
1368 		#define _W(v)						\
1369 			ret = do_write(ff, &n->v, sizeof(n->v));	\
1370 			if (ret < 0)					\
1371 				goto out;
1372 
1373 		_W(node)
1374 		_W(size)
1375 
1376 		#undef _W
1377 
1378 		ret = do_write_bitmap(ff, n->set, n->size);
1379 		if (ret < 0)
1380 			goto out;
1381 	}
1382 
1383 out:
1384 	return ret;
1385 }
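/*
 * Decoding sketch (illustrative, mirrors the layout comment above):
 *
 *	u64 version, bsize, nr, i;
 *	struct memory_node n;
 *
 *	do_read_u64(ff, &version);	// currently 1
 *	do_read_u64(ff, &bsize);	// memory block size in bytes
 *	do_read_u64(ff, &nr);		// number of nodes
 *	for (i = 0; i < nr; i++) {
 *		do_read_u64(ff, &n.node);
 *		do_read_u64(ff, &n.size);
 *		do_read_bitmap(ff, &n.set, &n.size);	// re-reads size + words
 *	}
 */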
1386 
1387 static int write_compressed(struct feat_fd *ff __maybe_unused,
1388 			    struct evlist *evlist __maybe_unused)
1389 {
1390 	int ret;
1391 
1392 	ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
1393 	if (ret)
1394 		return ret;
1395 
1396 	ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
1397 	if (ret)
1398 		return ret;
1399 
1400 	ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
1401 	if (ret)
1402 		return ret;
1403 
1404 	ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
1405 	if (ret)
1406 		return ret;
1407 
1408 	return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
1409 }
1410 
1411 static void print_hostname(struct feat_fd *ff, FILE *fp)
1412 {
1413 	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
1414 }
1415 
1416 static void print_osrelease(struct feat_fd *ff, FILE *fp)
1417 {
1418 	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
1419 }
1420 
1421 static void print_arch(struct feat_fd *ff, FILE *fp)
1422 {
1423 	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
1424 }
1425 
1426 static void print_cpudesc(struct feat_fd *ff, FILE *fp)
1427 {
1428 	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
1429 }
1430 
1431 static void print_nrcpus(struct feat_fd *ff, FILE *fp)
1432 {
1433 	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
1434 	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
1435 }
1436 
1437 static void print_version(struct feat_fd *ff, FILE *fp)
1438 {
1439 	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
1440 }
1441 
1442 static void print_cmdline(struct feat_fd *ff, FILE *fp)
1443 {
1444 	int nr, i;
1445 
1446 	nr = ff->ph->env.nr_cmdline;
1447 
1448 	fprintf(fp, "# cmdline : ");
1449 
1450 	for (i = 0; i < nr; i++) {
1451 		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
1452 		if (!argv_i) {
1453 			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
1454 		} else {
1455 			char *mem = argv_i;
1456 			do {
1457 				char *quote = strchr(argv_i, '\'');
1458 				if (!quote)
1459 					break;
1460 				*quote++ = '\0';
1461 				fprintf(fp, "%s\\\'", argv_i);
1462 				argv_i = quote;
1463 			} while (1);
1464 			fprintf(fp, "%s ", argv_i);
1465 			free(mem);
1466 		}
1467 	}
1468 	fputc('\n', fp);
1469 }
1470 
1471 static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
1472 {
1473 	struct perf_header *ph = ff->ph;
1474 	int cpu_nr = ph->env.nr_cpus_avail;
1475 	int nr, i;
1476 	char *str;
1477 
1478 	nr = ph->env.nr_sibling_cores;
1479 	str = ph->env.sibling_cores;
1480 
1481 	for (i = 0; i < nr; i++) {
1482 		fprintf(fp, "# sibling sockets : %s\n", str);
1483 		str += strlen(str) + 1;
1484 	}
1485 
1486 	if (ph->env.nr_sibling_dies) {
1487 		nr = ph->env.nr_sibling_dies;
1488 		str = ph->env.sibling_dies;
1489 
1490 		for (i = 0; i < nr; i++) {
1491 			fprintf(fp, "# sibling dies    : %s\n", str);
1492 			str += strlen(str) + 1;
1493 		}
1494 	}
1495 
1496 	nr = ph->env.nr_sibling_threads;
1497 	str = ph->env.sibling_threads;
1498 
1499 	for (i = 0; i < nr; i++) {
1500 		fprintf(fp, "# sibling threads : %s\n", str);
1501 		str += strlen(str) + 1;
1502 	}
1503 
1504 	if (ph->env.nr_sibling_dies) {
1505 		if (ph->env.cpu != NULL) {
1506 			for (i = 0; i < cpu_nr; i++)
1507 				fprintf(fp, "# CPU %d: Core ID %d, "
1508 					    "Die ID %d, Socket ID %d\n",
1509 					    i, ph->env.cpu[i].core_id,
1510 					    ph->env.cpu[i].die_id,
1511 					    ph->env.cpu[i].socket_id);
1512 		} else
1513 			fprintf(fp, "# Core ID, Die ID and Socket ID "
1514 				    "information is not available\n");
1515 	} else {
1516 		if (ph->env.cpu != NULL) {
1517 			for (i = 0; i < cpu_nr; i++)
1518 				fprintf(fp, "# CPU %d: Core ID %d, "
1519 					    "Socket ID %d\n",
1520 					    i, ph->env.cpu[i].core_id,
1521 					    ph->env.cpu[i].socket_id);
1522 		} else
1523 			fprintf(fp, "# Core ID and Socket ID "
1524 				    "information is not available\n");
1525 	}
1526 }
1527 
1528 static void print_clockid(struct feat_fd *ff, FILE *fp)
1529 {
1530 	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
1531 		ff->ph->env.clockid_res_ns ? 1000 / ff->ph->env.clockid_res_ns : 0);
1532 }
1533 
1534 static void print_dir_format(struct feat_fd *ff, FILE *fp)
1535 {
1536 	struct perf_session *session;
1537 	struct perf_data *data;
1538 
1539 	session = container_of(ff->ph, struct perf_session, header);
1540 	data = session->data;
1541 
1542 	fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
1543 }
1544 
1545 static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
1546 {
1547 	struct perf_env *env = &ff->ph->env;
1548 	struct rb_root *root;
1549 	struct rb_node *next;
1550 
1551 	down_read(&env->bpf_progs.lock);
1552 
1553 	root = &env->bpf_progs.infos;
1554 	next = rb_first(root);
1555 
1556 	while (next) {
1557 		struct bpf_prog_info_node *node;
1558 
1559 		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
1560 		next = rb_next(&node->rb_node);
1561 
1562 		bpf_event__print_bpf_prog_info(&node->info_linear->info,
1563 					       env, fp);
1564 	}
1565 
1566 	up_read(&env->bpf_progs.lock);
1567 }
1568 
1569 static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
1570 {
1571 	struct perf_env *env = &ff->ph->env;
1572 	struct rb_root *root;
1573 	struct rb_node *next;
1574 
1575 	down_read(&env->bpf_progs.lock);
1576 
1577 	root = &env->bpf_progs.btfs;
1578 	next = rb_first(root);
1579 
1580 	while (next) {
1581 		struct btf_node *node;
1582 
1583 		node = rb_entry(next, struct btf_node, rb_node);
1584 		next = rb_next(&node->rb_node);
1585 		fprintf(fp, "# btf info of id %u\n", node->id);
1586 	}
1587 
1588 	up_read(&env->bpf_progs.lock);
1589 }
1590 
1591 static void free_event_desc(struct evsel *events)
1592 {
1593 	struct evsel *evsel;
1594 
1595 	if (!events)
1596 		return;
1597 
1598 	for (evsel = events; evsel->core.attr.size; evsel++) {
1599 		zfree(&evsel->name);
1600 		zfree(&evsel->id);
1601 	}
1602 
1603 	free(events);
1604 }
1605 
1606 static struct evsel *read_event_desc(struct feat_fd *ff)
1607 {
1608 	struct evsel *evsel, *events = NULL;
1609 	u64 *id;
1610 	void *buf = NULL;
1611 	u32 nre, sz, nr, i, j;
1612 	size_t msz;
1613 
1614 	/* number of events */
1615 	if (do_read_u32(ff, &nre))
1616 		goto error;
1617 
1618 	if (do_read_u32(ff, &sz))
1619 		goto error;
1620 
1621 	/* buffer to hold on file attr struct */
1622 	buf = malloc(sz);
1623 	if (!buf)
1624 		goto error;
1625 
1626 	/* the last event terminates with evsel->core.attr.size == 0: */
1627 	events = calloc(nre + 1, sizeof(*events));
1628 	if (!events)
1629 		goto error;
1630 
1631 	msz = sizeof(evsel->core.attr);
1632 	if (sz < msz)
1633 		msz = sz;
1634 
1635 	for (i = 0, evsel = events; i < nre; evsel++, i++) {
1636 		evsel->idx = i;
1637 
1638 		/*
1639 		 * must read entire on-file attr struct to
1640 		 * sync up with layout.
1641 		 */
1642 		if (__do_read(ff, buf, sz))
1643 			goto error;
1644 
1645 		if (ff->ph->needs_swap)
1646 			perf_event__attr_swap(buf);
1647 
1648 		memcpy(&evsel->core.attr, buf, msz);
1649 
1650 		if (do_read_u32(ff, &nr))
1651 			goto error;
1652 
1653 		if (ff->ph->needs_swap)
1654 			evsel->needs_swap = true;
1655 
1656 		evsel->name = do_read_string(ff);
1657 		if (!evsel->name)
1658 			goto error;
1659 
1660 		if (!nr)
1661 			continue;
1662 
1663 		id = calloc(nr, sizeof(*id));
1664 		if (!id)
1665 			goto error;
1666 		evsel->ids = nr;
1667 		evsel->id = id;
1668 
1669 		for (j = 0 ; j < nr; j++) {
1670 			if (do_read_u64(ff, id))
1671 				goto error;
1672 			id++;
1673 		}
1674 	}
1675 out:
1676 	free(buf);
1677 	return events;
1678 error:
1679 	free_event_desc(events);
1680 	events = NULL;
1681 	goto out;
1682 }
1683 
1684 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1685 				void *priv __maybe_unused)
1686 {
1687 	return fprintf(fp, ", %s = %s", name, val);
1688 }
1689 
1690 static void print_event_desc(struct feat_fd *ff, FILE *fp)
1691 {
1692 	struct evsel *evsel, *events;
1693 	u32 j;
1694 	u64 *id;
1695 
1696 	if (ff->events)
1697 		events = ff->events;
1698 	else
1699 		events = read_event_desc(ff);
1700 
1701 	if (!events) {
1702 		fprintf(fp, "# event desc: not available or unable to read\n");
1703 		return;
1704 	}
1705 
1706 	for (evsel = events; evsel->core.attr.size; evsel++) {
1707 		fprintf(fp, "# event : name = %s, ", evsel->name);
1708 
1709 		if (evsel->ids) {
1710 			fprintf(fp, ", id = {");
1711 			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
1712 				if (j)
1713 					fputc(',', fp);
1714 				fprintf(fp, " %"PRIu64, *id);
1715 			}
1716 			fprintf(fp, " }");
1717 		}
1718 
1719 		perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);
1720 
1721 		fputc('\n', fp);
1722 	}
1723 
1724 	free_event_desc(events);
1725 	ff->events = NULL;
1726 }
1727 
1728 static void print_total_mem(struct feat_fd *ff, FILE *fp)
1729 {
1730 	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
1731 }
1732 
1733 static void print_numa_topology(struct feat_fd *ff, FILE *fp)
1734 {
1735 	int i;
1736 	struct numa_node *n;
1737 
1738 	for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
1739 		n = &ff->ph->env.numa_nodes[i];
1740 
1741 		fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1742 			    " free = %"PRIu64" kB\n",
1743 			n->node, n->mem_total, n->mem_free);
1744 
1745 		fprintf(fp, "# node%u cpu list : ", n->node);
1746 		cpu_map__fprintf(n->map, fp);
1747 	}
1748 }
1749 
1750 static void print_cpuid(struct feat_fd *ff, FILE *fp)
1751 {
1752 	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
1753 }
1754 
1755 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
1756 {
1757 	fprintf(fp, "# contains samples with branch stack\n");
1758 }
1759 
1760 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
1761 {
1762 	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1763 }
1764 
1765 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
1766 {
1767 	fprintf(fp, "# contains stat data\n");
1768 }
1769 
1770 static void print_cache(struct feat_fd *ff, FILE *fp)
1771 {
1772 	int i;
1773 
1774 	fprintf(fp, "# CPU cache info:\n");
1775 	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
1776 		fprintf(fp, "#  ");
1777 		cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
1778 	}
1779 }
1780 
1781 static void print_compressed(struct feat_fd *ff, FILE *fp)
1782 {
1783 	fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
1784 		ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
1785 		ff->ph->env.comp_level, ff->ph->env.comp_ratio);
1786 }
1787 
1788 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
1789 {
1790 	const char *delimiter = "# pmu mappings: ";
1791 	char *str, *tmp;
1792 	u32 pmu_num;
1793 	u32 type;
1794 
1795 	pmu_num = ff->ph->env.nr_pmu_mappings;
1796 	if (!pmu_num) {
1797 		fprintf(fp, "# pmu mappings: not available\n");
1798 		return;
1799 	}
1800 
1801 	str = ff->ph->env.pmu_mappings;
1802 
1803 	while (pmu_num) {
1804 		type = strtoul(str, &tmp, 0);
1805 		if (*tmp != ':')
1806 			goto error;
1807 
1808 		str = tmp + 1;
1809 		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1810 
1811 		delimiter = ", ";
1812 		str += strlen(str) + 1;
1813 		pmu_num--;
1814 	}
1815 
1816 	fprintf(fp, "\n");
1817 
1818 	if (!pmu_num)
1819 		return;
1820 error:
1821 	fprintf(fp, "# pmu mappings: unable to read\n");
1822 }
1823 
1824 static void print_group_desc(struct feat_fd *ff, FILE *fp)
1825 {
1826 	struct perf_session *session;
1827 	struct evsel *evsel;
1828 	u32 nr = 0;
1829 
1830 	session = container_of(ff->ph, struct perf_session, header);
1831 
1832 	evlist__for_each_entry(session->evlist, evsel) {
1833 		if (perf_evsel__is_group_leader(evsel) &&
1834 		    evsel->core.nr_members > 1) {
1835 			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1836 				perf_evsel__name(evsel));
1837 
1838 			nr = evsel->core.nr_members - 1;
1839 		} else if (nr) {
1840 			fprintf(fp, ",%s", perf_evsel__name(evsel));
1841 
1842 			if (--nr == 0)
1843 				fprintf(fp, "}\n");
1844 		}
1845 	}
1846 }
1847 
1848 static void print_sample_time(struct feat_fd *ff, FILE *fp)
1849 {
1850 	struct perf_session *session;
1851 	char time_buf[32];
1852 	double d;
1853 
1854 	session = container_of(ff->ph, struct perf_session, header);
1855 
1856 	timestamp__scnprintf_usec(session->evlist->first_sample_time,
1857 				  time_buf, sizeof(time_buf));
1858 	fprintf(fp, "# time of first sample : %s\n", time_buf);
1859 
1860 	timestamp__scnprintf_usec(session->evlist->last_sample_time,
1861 				  time_buf, sizeof(time_buf));
1862 	fprintf(fp, "# time of last sample : %s\n", time_buf);
1863 
1864 	d = (double)(session->evlist->last_sample_time -
1865 		session->evlist->first_sample_time) / NSEC_PER_MSEC;
1866 
1867 	fprintf(fp, "# sample duration : %10.3f ms\n", d);
1868 }
1869 
1870 static void memory_node__fprintf(struct memory_node *n,
1871 				 unsigned long long bsize, FILE *fp)
1872 {
1873 	char buf_map[100], buf_size[50];
1874 	unsigned long long size;
1875 
1876 	size = bsize * bitmap_weight(n->set, n->size);
1877 	unit_number__scnprintf(buf_size, 50, size);
1878 
1879 	bitmap_scnprintf(n->set, n->size, buf_map, 100);
1880 	fprintf(fp, "#  %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
1881 }
1882 
1883 static void print_mem_topology(struct feat_fd *ff, FILE *fp)
1884 {
1885 	struct memory_node *nodes;
1886 	int i, nr;
1887 
1888 	nodes = ff->ph->env.memory_nodes;
1889 	nr    = ff->ph->env.nr_memory_nodes;
1890 
1891 	fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
1892 		nr, ff->ph->env.memory_bsize);
1893 
1894 	for (i = 0; i < nr; i++) {
1895 		memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
1896 	}
1897 }
1898 
1899 static int __event_process_build_id(struct perf_record_header_build_id *bev,
1900 				    char *filename,
1901 				    struct perf_session *session)
1902 {
1903 	int err = -1;
1904 	struct machine *machine;
1905 	u16 cpumode;
1906 	struct dso *dso;
1907 	enum dso_kernel_type dso_type;
1908 
1909 	machine = perf_session__findnew_machine(session, bev->pid);
1910 	if (!machine)
1911 		goto out;
1912 
1913 	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1914 
1915 	switch (cpumode) {
1916 	case PERF_RECORD_MISC_KERNEL:
1917 		dso_type = DSO_TYPE_KERNEL;
1918 		break;
1919 	case PERF_RECORD_MISC_GUEST_KERNEL:
1920 		dso_type = DSO_TYPE_GUEST_KERNEL;
1921 		break;
1922 	case PERF_RECORD_MISC_USER:
1923 	case PERF_RECORD_MISC_GUEST_USER:
1924 		dso_type = DSO_TYPE_USER;
1925 		break;
1926 	default:
1927 		goto out;
1928 	}
1929 
1930 	dso = machine__findnew_dso(machine, filename);
1931 	if (dso != NULL) {
1932 		char sbuild_id[SBUILD_ID_SIZE];
1933 
1934 		dso__set_build_id(dso, &bev->build_id);
1935 
1936 		if (dso_type != DSO_TYPE_USER) {
1937 			struct kmod_path m = { .name = NULL, };
1938 
1939 			if (!kmod_path__parse_name(&m, filename) && m.kmod)
1940 				dso__set_module_info(dso, &m, machine);
1941 			else
1942 				dso->kernel = dso_type;
1943 
1944 			free(m.name);
1945 		}
1946 
1947 		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1948 				  sbuild_id);
1949 		pr_debug("build id event received for %s: %s\n",
1950 			 dso->long_name, sbuild_id);
1951 		dso__put(dso);
1952 	}
1953 
1954 	err = 0;
1955 out:
1956 	return err;
1957 }
1958 
1959 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1960 						 int input, u64 offset, u64 size)
1961 {
1962 	struct perf_session *session = container_of(header, struct perf_session, header);
1963 	struct {
1964 		struct perf_event_header   header;
1965 		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1966 		char			   filename[0];
1967 	} old_bev;
1968 	struct perf_record_header_build_id bev;
1969 	char filename[PATH_MAX];
1970 	u64 limit = offset + size;
1971 
1972 	while (offset < limit) {
1973 		ssize_t len;
1974 
1975 		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1976 			return -1;
1977 
1978 		if (header->needs_swap)
1979 			perf_event_header__bswap(&old_bev.header);
1980 
1981 		len = old_bev.header.size - sizeof(old_bev);
1982 		if (readn(input, filename, len) != len)
1983 			return -1;
1984 
1985 		bev.header = old_bev.header;
1986 
1987 		/*
1988 		 * As the pid is the missing value, we need to fill
1989 		 * it in properly. The header.misc value gives us a nice hint.
1990 		 */
1991 		bev.pid	= HOST_KERNEL_ID;
1992 		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1993 		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1994 			bev.pid	= DEFAULT_GUEST_KERNEL_ID;
1995 
1996 		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1997 		__event_process_build_id(&bev, filename, session);
1998 
1999 		offset += bev.header.size;
2000 	}
2001 
2002 	return 0;
2003 }
2004 
2005 static int perf_header__read_build_ids(struct perf_header *header,
2006 				       int input, u64 offset, u64 size)
2007 {
2008 	struct perf_session *session = container_of(header, struct perf_session, header);
2009 	struct perf_record_header_build_id bev;
2010 	char filename[PATH_MAX];
2011 	u64 limit = offset + size, orig_offset = offset;
2012 	int err = -1;
2013 
2014 	while (offset < limit) {
2015 		ssize_t len;
2016 
2017 		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
2018 			goto out;
2019 
2020 		if (header->needs_swap)
2021 			perf_event_header__bswap(&bev.header);
2022 
2023 		len = bev.header.size - sizeof(bev);
2024 		if (readn(input, filename, len) != len)
2025 			goto out;
2026 		/*
2027 		 * The a1645ce1 changeset:
2028 		 *
2029 		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
2030 		 *
2031 		 * Added a field to struct perf_record_header_build_id that broke the file
2032 		 * format.
2033 		 *
2034 		 * Since the kernel build-id is the first entry, process the
2035 		 * table using the old format if the well known
2036 		 * '[kernel.kallsyms]' string for the kernel build-id has the
2037 		 * first 4 characters chopped off (where the pid_t sits).
2038 		 */
2039 		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2040 			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
2041 				return -1;
2042 			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
2043 		}
2044 
2045 		__event_process_build_id(&bev, filename, session);
2046 
2047 		offset += bev.header.size;
2048 	}
2049 	err = 0;
2050 out:
2051 	return err;
2052 }
2053 
2054 /* Macro for features that simply need to read and store a string. */
2055 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
2056 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
2057 {\
2058 	ff->ph->env.__feat_env = do_read_string(ff); \
2059 	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
2060 }
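/*
 * For example, FEAT_PROCESS_STR_FUN(hostname, hostname) below expands to:
 *
 *	static int process_hostname(struct feat_fd *ff, void *data __maybe_unused)
 *	{
 *		ff->ph->env.hostname = do_read_string(ff);
 *		return ff->ph->env.hostname ? 0 : -ENOMEM;
 *	}
 */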
2061 
2062 FEAT_PROCESS_STR_FUN(hostname, hostname);
2063 FEAT_PROCESS_STR_FUN(osrelease, os_release);
2064 FEAT_PROCESS_STR_FUN(version, version);
2065 FEAT_PROCESS_STR_FUN(arch, arch);
2066 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
2067 FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2068 
2069 static int process_tracing_data(struct feat_fd *ff, void *data)
2070 {
2071 	ssize_t ret = trace_report(ff->fd, data, false);
2072 
2073 	return ret < 0 ? -1 : 0;
2074 }
2075 
2076 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
2077 {
2078 	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
2079 		pr_debug("Failed to read buildids, continuing...\n");
2080 	return 0;
2081 }
2082 
2083 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
2084 {
2085 	int ret;
2086 	u32 nr_cpus_avail, nr_cpus_online;
2087 
2088 	ret = do_read_u32(ff, &nr_cpus_avail);
2089 	if (ret)
2090 		return ret;
2091 
2092 	ret = do_read_u32(ff, &nr_cpus_online);
2093 	if (ret)
2094 		return ret;
2095 	ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
2096 	ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
2097 	return 0;
2098 }
2099 
2100 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
2101 {
2102 	u64 total_mem;
2103 	int ret;
2104 
2105 	ret = do_read_u64(ff, &total_mem);
2106 	if (ret)
2107 		return -1;
2108 	ff->ph->env.total_mem = (unsigned long long)total_mem;
2109 	return 0;
2110 }
2111 
2112 static struct evsel *
2113 perf_evlist__find_by_index(struct evlist *evlist, int idx)
2114 {
2115 	struct evsel *evsel;
2116 
2117 	evlist__for_each_entry(evlist, evsel) {
2118 		if (evsel->idx == idx)
2119 			return evsel;
2120 	}
2121 
2122 	return NULL;
2123 }
2124 
2125 static void
2126 perf_evlist__set_event_name(struct evlist *evlist,
2127 			    struct evsel *event)
2128 {
2129 	struct evsel *evsel;
2130 
2131 	if (!event->name)
2132 		return;
2133 
2134 	evsel = perf_evlist__find_by_index(evlist, event->idx);
2135 	if (!evsel)
2136 		return;
2137 
2138 	if (evsel->name)
2139 		return;
2140 
2141 	evsel->name = strdup(event->name);
2142 }
2143 
2144 static int
2145 process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
2146 {
2147 	struct perf_session *session;
2148 	struct evsel *evsel, *events = read_event_desc(ff);
2149 
2150 	if (!events)
2151 		return 0;
2152 
2153 	session = container_of(ff->ph, struct perf_session, header);
2154 
2155 	if (session->data->is_pipe) {
2156 		/* Save events for reading later by print_event_desc,
2157 		 * since they can't be read again in pipe mode. */
2158 		ff->events = events;
2159 	}
2160 
2161 	for (evsel = events; evsel->core.attr.size; evsel++)
2162 		perf_evlist__set_event_name(session->evlist, evsel);
2163 
2164 	if (!session->data->is_pipe)
2165 		free_event_desc(events);
2166 
2167 	return 0;
2168 }
2169 
2170 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
2171 {
2172 	char *str, *cmdline = NULL, **argv = NULL;
2173 	u32 nr, i, len = 0;
2174 
2175 	if (do_read_u32(ff, &nr))
2176 		return -1;
2177 
2178 	ff->ph->env.nr_cmdline = nr;
2179 
2180 	cmdline = zalloc(ff->size + nr + 1);
2181 	if (!cmdline)
2182 		return -1;
2183 
2184 	argv = zalloc(sizeof(char *) * (nr + 1));
2185 	if (!argv)
2186 		goto error;
2187 
2188 	for (i = 0; i < nr; i++) {
2189 		str = do_read_string(ff);
2190 		if (!str)
2191 			goto error;
2192 
2193 		argv[i] = cmdline + len;
2194 		memcpy(argv[i], str, strlen(str) + 1);
2195 		len += strlen(str) + 1;
2196 		free(str);
2197 	}
2198 	ff->ph->env.cmdline = cmdline;
2199 	ff->ph->env.cmdline_argv = (const char **) argv;
2200 	return 0;
2201 
2202 error:
2203 	free(argv);
2204 	free(cmdline);
2205 	return -1;
2206 }
2207 
2208 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
2209 {
2210 	u32 nr, i;
2211 	char *str;
2212 	struct strbuf sb;
2213 	int cpu_nr = ff->ph->env.nr_cpus_avail;
2214 	u64 size = 0;
2215 	struct perf_header *ph = ff->ph;
2216 	bool do_core_id_test = true;
2217 
2218 	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
2219 	if (!ph->env.cpu)
2220 		return -1;
2221 
2222 	if (do_read_u32(ff, &nr))
2223 		goto free_cpu;
2224 
2225 	ph->env.nr_sibling_cores = nr;
2226 	size += sizeof(u32);
2227 	if (strbuf_init(&sb, 128) < 0)
2228 		goto free_cpu;
2229 
2230 	for (i = 0; i < nr; i++) {
2231 		str = do_read_string(ff);
2232 		if (!str)
2233 			goto error;
2234 
2235 		/* include the NUL terminator at the end */
2236 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2237 			goto error;
2238 		size += string_size(str);
2239 		free(str);
2240 	}
2241 	ph->env.sibling_cores = strbuf_detach(&sb, NULL);
2242 
2243 	if (do_read_u32(ff, &nr))
2244 		goto free_cpu;
2245 
2246 	ph->env.nr_sibling_threads = nr;
2247 	size += sizeof(u32);
2248 
2249 	for (i = 0; i < nr; i++) {
2250 		str = do_read_string(ff);
2251 		if (!str)
2252 			goto error;
2253 
2254 		/* include the NUL terminator at the end */
2255 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2256 			goto error;
2257 		size += string_size(str);
2258 		free(str);
2259 	}
2260 	ph->env.sibling_threads = strbuf_detach(&sb, NULL);
2261 
2262 	/*
2263 	 * The header may be from old perf,
2264 	 * which doesn't include core id and socket id information.
2265 	 */
2266 	if (ff->size <= size) {
2267 		zfree(&ph->env.cpu);
2268 		return 0;
2269 	}
2270 
2271 	/* On s390 the socket_id is not related to the number of CPUs:
2272 	 * depending on the configuration, it may be higher than the
2273 	 * number of CPUs.
2274 	 * AArch64 behaves the same way.
2275 	 */
2276 	if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
2277 			  || !strncmp(ph->env.arch, "aarch64", 7)))
2278 		do_core_id_test = false;
2279 
2280 	for (i = 0; i < (u32)cpu_nr; i++) {
2281 		if (do_read_u32(ff, &nr))
2282 			goto free_cpu;
2283 
2284 		ph->env.cpu[i].core_id = nr;
2285 		size += sizeof(u32);
2286 
2287 		if (do_read_u32(ff, &nr))
2288 			goto free_cpu;
2289 
2290 		if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
2291 			pr_debug("socket_id number is too big."
2292 				 "You may need to upgrade the perf tool.\n");
2293 			goto free_cpu;
2294 		}
2295 
2296 		ph->env.cpu[i].socket_id = nr;
2297 		size += sizeof(u32);
2298 	}
2299 
2300 	/*
2301 	 * The header may be from old perf,
2302 	 * which doesn't include die information.
2303 	 */
2304 	if (ff->size <= size)
2305 		return 0;
2306 
2307 	if (do_read_u32(ff, &nr))
2308 		goto free_cpu;
2309 
2310 	ph->env.nr_sibling_dies = nr;
2311 	size += sizeof(u32);
2312 
2313 	for (i = 0; i < nr; i++) {
2314 		str = do_read_string(ff);
2315 		if (!str)
2316 			goto error;
2317 
2318 		/* include the NUL terminator at the end */
2319 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2320 			goto error;
2321 		size += string_size(str);
2322 		free(str);
2323 	}
2324 	ph->env.sibling_dies = strbuf_detach(&sb, NULL);
2325 
2326 	for (i = 0; i < (u32)cpu_nr; i++) {
2327 		if (do_read_u32(ff, &nr))
2328 			goto free_cpu;
2329 
2330 		ph->env.cpu[i].die_id = nr;
2331 	}
2332 
2333 	return 0;
2334 
2335 error:
2336 	strbuf_release(&sb);
2337 free_cpu:
2338 	zfree(&ph->env.cpu);
2339 	return -1;
2340 }
2341 
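/*
 * HEADER_NUMA_TOPOLOGY layout as consumed below (illustrative sketch):
 *
 *   u32 nr_nodes;
 *   per node: u32 node; u64 mem_total; u64 mem_free;
 *             a cpu list string, parsed with perf_cpu_map__new()
 */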
2342 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
2343 {
2344 	struct numa_node *nodes, *n;
2345 	u32 nr, i;
2346 	char *str;
2347 
2348 	/* nr nodes */
2349 	if (do_read_u32(ff, &nr))
2350 		return -1;
2351 
2352 	nodes = zalloc(sizeof(*nodes) * nr);
2353 	if (!nodes)
2354 		return -ENOMEM;
2355 
2356 	for (i = 0; i < nr; i++) {
2357 		n = &nodes[i];
2358 
2359 		/* node number */
2360 		if (do_read_u32(ff, &n->node))
2361 			goto error;
2362 
2363 		if (do_read_u64(ff, &n->mem_total))
2364 			goto error;
2365 
2366 		if (do_read_u64(ff, &n->mem_free))
2367 			goto error;
2368 
2369 		str = do_read_string(ff);
2370 		if (!str)
2371 			goto error;
2372 
2373 		n->map = perf_cpu_map__new(str);
2374 		free(str);
2375 
2376 		if (!n->map)
2377 			goto error;
2378 	}
2379 	ff->ph->env.nr_numa_nodes = nr;
2380 	ff->ph->env.numa_nodes = nodes;
2381 	return 0;
2382 
2383 error:
2384 	free(nodes);
2385 	return -1;
2386 }
2387 
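/*
 * HEADER_PMU_MAPPINGS layout as consumed below (illustrative sketch):
 *
 *   u32 pmu_num;
 *   per PMU: u32 type; string name;
 *
 * The pairs are flattened into one buffer of NUL-separated "type:name"
 * entries (e.g. "4:cpu"), stored in env.pmu_mappings; the type of the
 * "msr" PMU is additionally cached in env.msr_pmu_type.
 */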
2388 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
2389 {
2390 	char *name;
2391 	u32 pmu_num;
2392 	u32 type;
2393 	struct strbuf sb;
2394 
2395 	if (do_read_u32(ff, &pmu_num))
2396 		return -1;
2397 
2398 	if (!pmu_num) {
2399 		pr_debug("pmu mappings not available\n");
2400 		return 0;
2401 	}
2402 
2403 	ff->ph->env.nr_pmu_mappings = pmu_num;
2404 	if (strbuf_init(&sb, 128) < 0)
2405 		return -1;
2406 
2407 	while (pmu_num) {
2408 		if (do_read_u32(ff, &type))
2409 			goto error;
2410 
2411 		name = do_read_string(ff);
2412 		if (!name)
2413 			goto error;
2414 
2415 		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2416 			goto error;
2417 		/* include the NUL terminator at the end */
2418 		if (strbuf_add(&sb, "", 1) < 0)
2419 			goto error;
2420 
2421 		if (!strcmp(name, "msr"))
2422 			ff->ph->env.msr_pmu_type = type;
2423 
2424 		free(name);
2425 		pmu_num--;
2426 	}
2427 	ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2428 	return 0;
2429 
2430 error:
2431 	strbuf_release(&sb);
2432 	return -1;
2433 }
2434 
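/*
 * HEADER_GROUP_DESC carries, per group, the leader's name, its index in
 * the evlist and the member count (a sketch inferred from this reader).
 * The loop below walks the evlist in order: each time it hits a leader
 * index it opens a group, then attaches the following nr_members - 1
 * evsels to that leader.
 */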
2435 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
2436 {
2437 	int ret = -1;
2438 	u32 i, nr, nr_groups;
2439 	struct perf_session *session;
2440 	struct evsel *evsel, *leader = NULL;
2441 	struct group_desc {
2442 		char *name;
2443 		u32 leader_idx;
2444 		u32 nr_members;
2445 	} *desc;
2446 
2447 	if (do_read_u32(ff, &nr_groups))
2448 		return -1;
2449 
2450 	ff->ph->env.nr_groups = nr_groups;
2451 	if (!nr_groups) {
2452 		pr_debug("group desc not available\n");
2453 		return 0;
2454 	}
2455 
2456 	desc = calloc(nr_groups, sizeof(*desc));
2457 	if (!desc)
2458 		return -1;
2459 
2460 	for (i = 0; i < nr_groups; i++) {
2461 		desc[i].name = do_read_string(ff);
2462 		if (!desc[i].name)
2463 			goto out_free;
2464 
2465 		if (do_read_u32(ff, &desc[i].leader_idx))
2466 			goto out_free;
2467 
2468 		if (do_read_u32(ff, &desc[i].nr_members))
2469 			goto out_free;
2470 	}
2471 
2472 	/*
2473 	 * Rebuild group relationship based on the group_desc
2474 	 */
2475 	session = container_of(ff->ph, struct perf_session, header);
2476 	session->evlist->nr_groups = nr_groups;
2477 
2478 	i = nr = 0;
2479 	evlist__for_each_entry(session->evlist, evsel) {
2480 		if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) {
2481 			evsel->leader = evsel;
2482 			/* {anon_group} is a dummy name */
2483 			if (strcmp(desc[i].name, "{anon_group}")) {
2484 				evsel->group_name = desc[i].name;
2485 				desc[i].name = NULL;
2486 			}
2487 			evsel->core.nr_members = desc[i].nr_members;
2488 
2489 			if (nr > 0) {
2490 				pr_debug("invalid group desc\n");
2491 				goto out_free;
2492 			}
2493 
2494 			leader = evsel;
2495 			nr = evsel->core.nr_members - 1;
2496 			i++;
2497 		} else if (nr) {
2498 			/* This is a group member */
2499 			evsel->leader = leader;
2500 
2501 			nr--;
2502 		}
2503 	}
2504 
2505 	if (i != nr_groups || nr != 0) {
2506 		pr_debug("invalid group desc\n");
2507 		goto out_free;
2508 	}
2509 
2510 	ret = 0;
2511 out_free:
2512 	for (i = 0; i < nr_groups; i++)
2513 		zfree(&desc[i].name);
2514 	free(desc);
2515 
2516 	return ret;
2517 }
2518 
2519 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
2520 {
2521 	struct perf_session *session;
2522 	int err;
2523 
2524 	session = container_of(ff->ph, struct perf_session, header);
2525 
2526 	err = auxtrace_index__process(ff->fd, ff->size, session,
2527 				      ff->ph->needs_swap);
2528 	if (err < 0)
2529 		pr_err("Failed to process auxtrace index\n");
2530 	return err;
2531 }
2532 
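/*
 * HEADER_CACHE layout as consumed below (illustrative sketch):
 *
 *   u32 version;   only version 1 is understood
 *   u32 cnt;
 *   per entry: u32 level, line_size, sets, ways;
 *              string type, size, map;
 */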
2533 static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
2534 {
2535 	struct cpu_cache_level *caches;
2536 	u32 cnt, i, version;
2537 
2538 	if (do_read_u32(ff, &version))
2539 		return -1;
2540 
2541 	if (version != 1)
2542 		return -1;
2543 
2544 	if (do_read_u32(ff, &cnt))
2545 		return -1;
2546 
2547 	caches = zalloc(sizeof(*caches) * cnt);
2548 	if (!caches)
2549 		return -1;
2550 
2551 	for (i = 0; i < cnt; i++) {
2552 		struct cpu_cache_level c;
2553 
2554 		#define _R(v)						\
2555 			if (do_read_u32(ff, &c.v))		\
2556 				goto out_free_caches;
2557 
2558 		_R(level)
2559 		_R(line_size)
2560 		_R(sets)
2561 		_R(ways)
2562 		#undef _R
2563 
2564 		#define _R(v)					\
2565 			c.v = do_read_string(ff);		\
2566 			if (!c.v)				\
2567 				goto out_free_caches;
2568 
2569 		_R(type)
2570 		_R(size)
2571 		_R(map)
2572 		#undef _R
2573 
2574 		caches[i] = c;
2575 	}
2576 
2577 	ff->ph->env.caches = caches;
2578 	ff->ph->env.caches_cnt = cnt;
2579 	return 0;
2580 out_free_caches:
2581 	free(caches);
2582 	return -1;
2583 }
2584 
2585 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
2586 {
2587 	struct perf_session *session;
2588 	u64 first_sample_time, last_sample_time;
2589 	int ret;
2590 
2591 	session = container_of(ff->ph, struct perf_session, header);
2592 
2593 	ret = do_read_u64(ff, &first_sample_time);
2594 	if (ret)
2595 		return -1;
2596 
2597 	ret = do_read_u64(ff, &last_sample_time);
2598 	if (ret)
2599 		return -1;
2600 
2601 	session->evlist->first_sample_time = first_sample_time;
2602 	session->evlist->last_sample_time = last_sample_time;
2603 	return 0;
2604 }
2605 
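/*
 * HEADER_MEM_TOPOLOGY layout as consumed below (illustrative sketch):
 *
 *   u64 version;   only version 1 is understood
 *   u64 block_size_bytes;
 *   u64 nr_nodes;
 *   per node: u64 node; u64 size; bitmap of the node's memory blocks;
 */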
2606 static int process_mem_topology(struct feat_fd *ff,
2607 				void *data __maybe_unused)
2608 {
2609 	struct memory_node *nodes;
2610 	u64 version, i, nr, bsize;
2611 	int ret = -1;
2612 
2613 	if (do_read_u64(ff, &version))
2614 		return -1;
2615 
2616 	if (version != 1)
2617 		return -1;
2618 
2619 	if (do_read_u64(ff, &bsize))
2620 		return -1;
2621 
2622 	if (do_read_u64(ff, &nr))
2623 		return -1;
2624 
2625 	nodes = zalloc(sizeof(*nodes) * nr);
2626 	if (!nodes)
2627 		return -1;
2628 
2629 	for (i = 0; i < nr; i++) {
2630 		struct memory_node n;
2631 
2632 		#define _R(v)				\
2633 			if (do_read_u64(ff, &n.v))	\
2634 				goto out;
2635 
2636 		_R(node)
2637 		_R(size)
2638 
2639 		#undef _R
2640 
2641 		if (do_read_bitmap(ff, &n.set, &n.size))
2642 			goto out;
2643 
2644 		nodes[i] = n;
2645 	}
2646 
2647 	ff->ph->env.memory_bsize    = bsize;
2648 	ff->ph->env.memory_nodes    = nodes;
2649 	ff->ph->env.nr_memory_nodes = nr;
2650 	ret = 0;
2651 
2652 out:
2653 	if (ret)
2654 		free(nodes);
2655 	return ret;
2656 }
2657 
2658 static int process_clockid(struct feat_fd *ff,
2659 			   void *data __maybe_unused)
2660 {
2661 	if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
2662 		return -1;
2663 
2664 	return 0;
2665 }
2666 
2667 static int process_dir_format(struct feat_fd *ff,
2668 			      void *_data __maybe_unused)
2669 {
2670 	struct perf_session *session;
2671 	struct perf_data *data;
2672 
2673 	session = container_of(ff->ph, struct perf_session, header);
2674 	data = session->data;
2675 
2676 	if (WARN_ON(!perf_data__is_dir(data)))
2677 		return -1;
2678 
2679 	return do_read_u64(ff, &data->dir.version);
2680 }
2681 
2682 #ifdef HAVE_LIBBPF_SUPPORT
2683 static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
2684 {
2685 	struct bpf_prog_info_linear *info_linear;
2686 	struct bpf_prog_info_node *info_node;
2687 	struct perf_env *env = &ff->ph->env;
2688 	u32 count, i;
2689 	int err = -1;
2690 
2691 	if (ff->ph->needs_swap) {
2692 		pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
2693 		return 0;
2694 	}
2695 
2696 	if (do_read_u32(ff, &count))
2697 		return -1;
2698 
2699 	down_write(&env->bpf_progs.lock);
2700 
2701 	for (i = 0; i < count; ++i) {
2702 		u32 info_len, data_len;
2703 
2704 		info_linear = NULL;
2705 		info_node = NULL;
2706 		if (do_read_u32(ff, &info_len))
2707 			goto out;
2708 		if (do_read_u32(ff, &data_len))
2709 			goto out;
2710 
2711 		if (info_len > sizeof(struct bpf_prog_info)) {
2712 			pr_warning("detected invalid bpf_prog_info\n");
2713 			goto out;
2714 		}
2715 
2716 		info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
2717 				     data_len);
2718 		if (!info_linear)
2719 			goto out;
2720 		info_linear->info_len = sizeof(struct bpf_prog_info);
2721 		info_linear->data_len = data_len;
2722 		if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
2723 			goto out;
2724 		if (__do_read(ff, &info_linear->info, info_len))
2725 			goto out;
2726 		if (info_len < sizeof(struct bpf_prog_info))
2727 			memset(((void *)(&info_linear->info)) + info_len, 0,
2728 			       sizeof(struct bpf_prog_info) - info_len);
2729 
2730 		if (__do_read(ff, info_linear->data, data_len))
2731 			goto out;
2732 
2733 		info_node = malloc(sizeof(struct bpf_prog_info_node));
2734 		if (!info_node)
2735 			goto out;
2736 
2737 		/* after reading from file, translate offset to address */
2738 		bpf_program__bpil_offs_to_addr(info_linear);
2739 		info_node->info_linear = info_linear;
2740 		perf_env__insert_bpf_prog_info(env, info_node);
2741 	}
2742 
2743 	up_write(&env->bpf_progs.lock);
2744 	return 0;
2745 out:
2746 	free(info_linear);
2747 	free(info_node);
2748 	up_write(&env->bpf_progs.lock);
2749 	return err;
2750 }
2751 #else // HAVE_LIBBPF_SUPPORT
2752 static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
2753 {
2754 	return 0;
2755 }
2756 #endif // HAVE_LIBBPF_SUPPORT
2757 
2758 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
2759 {
2760 	struct perf_env *env = &ff->ph->env;
2761 	struct btf_node *node = NULL;
2762 	u32 count, i;
2763 	int err = -1;
2764 
2765 	if (ff->ph->needs_swap) {
2766 		pr_warning("interpreting btf from systems with endianity is not yet supported\n");
2767 		return 0;
2768 	}
2769 
2770 	if (do_read_u32(ff, &count))
2771 		return -1;
2772 
2773 	down_write(&env->bpf_progs.lock);
2774 
2775 	for (i = 0; i < count; ++i) {
2776 		u32 id, data_size;
2777 
2778 		if (do_read_u32(ff, &id))
2779 			goto out;
2780 		if (do_read_u32(ff, &data_size))
2781 			goto out;
2782 
2783 		node = malloc(sizeof(struct btf_node) + data_size);
2784 		if (!node)
2785 			goto out;
2786 
2787 		node->id = id;
2788 		node->data_size = data_size;
2789 
2790 		if (__do_read(ff, node->data, data_size))
2791 			goto out;
2792 
2793 		perf_env__insert_btf(env, node);
2794 		node = NULL;
2795 	}
2796 
2797 	err = 0;
2798 out:
2799 	up_write(&env->bpf_progs.lock);
2800 	free(node);
2801 	return err;
2802 }
2803 
2804 static int process_compressed(struct feat_fd *ff,
2805 			      void *data __maybe_unused)
2806 {
2807 	if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
2808 		return -1;
2809 
2810 	if (do_read_u32(ff, &(ff->ph->env.comp_type)))
2811 		return -1;
2812 
2813 	if (do_read_u32(ff, &(ff->ph->env.comp_level)))
2814 		return -1;
2815 
2816 	if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
2817 		return -1;
2818 
2819 	if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
2820 		return -1;
2821 
2822 	return 0;
2823 }
2824 
2825 struct feature_ops {
2826 	int (*write)(struct feat_fd *ff, struct evlist *evlist);
2827 	void (*print)(struct feat_fd *ff, FILE *fp);
2828 	int (*process)(struct feat_fd *ff, void *data);
2829 	const char *name;
2830 	bool full_only;
2831 	bool synthesize;
2832 };
2833 
2834 #define FEAT_OPR(n, func, __full_only) \
2835 	[HEADER_##n] = {					\
2836 		.name	    = __stringify(n),			\
2837 		.write	    = write_##func,			\
2838 		.print	    = print_##func,			\
2839 		.full_only  = __full_only,			\
2840 		.process    = process_##func,			\
2841 		.synthesize = true				\
2842 	}
2843 
2844 #define FEAT_OPN(n, func, __full_only) \
2845 	[HEADER_##n] = {					\
2846 		.name	    = __stringify(n),			\
2847 		.write	    = write_##func,			\
2848 		.print	    = print_##func,			\
2849 		.full_only  = __full_only,			\
2850 		.process    = process_##func			\
2851 	}
2852 
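/*
 * For example, FEAT_OPR(HOSTNAME, hostname, false) expands to roughly:
 *
 *	[HEADER_HOSTNAME] = {
 *		.name	    = "HOSTNAME",
 *		.write	    = write_hostname,
 *		.print	    = print_hostname,
 *		.full_only  = false,
 *		.process    = process_hostname,
 *		.synthesize = true,
 *	},
 *
 * FEAT_OPN is identical except that it leaves .synthesize false.
 */
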
2853 /* feature_ops not implemented: */
2854 #define print_tracing_data	NULL
2855 #define print_build_id		NULL
2856 
2857 #define process_branch_stack	NULL
2858 #define process_stat		NULL
2859 
2861 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
2862 	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
2863 	FEAT_OPN(BUILD_ID,	build_id,	false),
2864 	FEAT_OPR(HOSTNAME,	hostname,	false),
2865 	FEAT_OPR(OSRELEASE,	osrelease,	false),
2866 	FEAT_OPR(VERSION,	version,	false),
2867 	FEAT_OPR(ARCH,		arch,		false),
2868 	FEAT_OPR(NRCPUS,	nrcpus,		false),
2869 	FEAT_OPR(CPUDESC,	cpudesc,	false),
2870 	FEAT_OPR(CPUID,		cpuid,		false),
2871 	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
2872 	FEAT_OPR(EVENT_DESC,	event_desc,	false),
2873 	FEAT_OPR(CMDLINE,	cmdline,	false),
2874 	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
2875 	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
2876 	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
2877 	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
2878 	FEAT_OPR(GROUP_DESC,	group_desc,	false),
2879 	FEAT_OPN(AUXTRACE,	auxtrace,	false),
2880 	FEAT_OPN(STAT,		stat,		false),
2881 	FEAT_OPN(CACHE,		cache,		true),
2882 	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
2883 	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
2884 	FEAT_OPR(CLOCKID,	clockid,	false),
2885 	FEAT_OPN(DIR_FORMAT,	dir_format,	false),
2886 	FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,  false),
2887 	FEAT_OPR(BPF_BTF,       bpf_btf,        false),
2888 	FEAT_OPR(COMPRESSED,	compressed,	false),
2889 };
2890 
2891 struct header_print_data {
2892 	FILE *fp;
2893 	bool full; /* extended list of headers */
2894 };
2895 
2896 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2897 					   struct perf_header *ph,
2898 					   int feat, int fd, void *data)
2899 {
2900 	struct header_print_data *hd = data;
2901 	struct feat_fd ff;
2902 
2903 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2904 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2905 				"%d, continuing...\n", section->offset, feat);
2906 		return 0;
2907 	}
2908 	if (feat >= HEADER_LAST_FEATURE) {
2909 		pr_warning("unknown feature %d\n", feat);
2910 		return 0;
2911 	}
2912 	if (!feat_ops[feat].print)
2913 		return 0;
2914 
2915 	ff = (struct feat_fd) {
2916 		.fd = fd,
2917 		.ph = ph,
2918 	};
2919 
2920 	if (!feat_ops[feat].full_only || hd->full)
2921 		feat_ops[feat].print(&ff, hd->fp);
2922 	else
2923 		fprintf(hd->fp, "# %s info available, use -I to display\n",
2924 			feat_ops[feat].name);
2925 
2926 	return 0;
2927 }
2928 
2929 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2930 {
2931 	struct header_print_data hd;
2932 	struct perf_header *header = &session->header;
2933 	int fd = perf_data__fd(session->data);
2934 	struct stat st;
2935 	time_t stctime;
2936 	int ret, bit;
2937 
2938 	hd.fp = fp;
2939 	hd.full = full;
2940 
2941 	ret = fstat(fd, &st);
2942 	if (ret == -1)
2943 		return -1;
2944 
2945 	stctime = st.st_ctime;
2946 	fprintf(fp, "# captured on    : %s", ctime(&stctime));
2947 
2948 	fprintf(fp, "# header version : %u\n", header->version);
2949 	fprintf(fp, "# data offset    : %" PRIu64 "\n", header->data_offset);
2950 	fprintf(fp, "# data size      : %" PRIu64 "\n", header->data_size);
2951 	fprintf(fp, "# feat offset    : %" PRIu64 "\n", header->feat_offset);
2952 
2953 	perf_header__process_sections(header, fd, &hd,
2954 				      perf_file_section__fprintf_info);
2955 
2956 	if (session->data->is_pipe)
2957 		return 0;
2958 
2959 	fprintf(fp, "# missing features: ");
2960 	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2961 		if (bit)
2962 			fprintf(fp, "%s ", feat_ops[bit].name);
2963 	}
2964 
2965 	fprintf(fp, "\n");
2966 	return 0;
2967 }
2968 
2969 static int do_write_feat(struct feat_fd *ff, int type,
2970 			 struct perf_file_section **p,
2971 			 struct evlist *evlist)
2972 {
2973 	int err;
2974 	int ret = 0;
2975 
2976 	if (perf_header__has_feat(ff->ph, type)) {
2977 		if (!feat_ops[type].write)
2978 			return -1;
2979 
2980 		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
2981 			return -1;
2982 
2983 		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
2984 
2985 		err = feat_ops[type].write(ff, evlist);
2986 		if (err < 0) {
2987 			pr_debug("failed to write feature %s\n", feat_ops[type].name);
2988 
2989 			/* undo anything written */
2990 			lseek(ff->fd, (*p)->offset, SEEK_SET);
2991 
2992 			return -1;
2993 		}
2994 		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
2995 		(*p)++;
2996 	}
2997 	return ret;
2998 }
2999 
3000 static int perf_header__adds_write(struct perf_header *header,
3001 				   struct evlist *evlist, int fd)
3002 {
3003 	int nr_sections;
3004 	struct feat_fd ff;
3005 	struct perf_file_section *feat_sec, *p;
3006 	int sec_size;
3007 	u64 sec_start;
3008 	int feat;
3009 	int err;
3010 
3011 	ff = (struct feat_fd){
3012 		.fd  = fd,
3013 		.ph = header,
3014 	};
3015 
3016 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3017 	if (!nr_sections)
3018 		return 0;
3019 
3020 	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
3021 	if (feat_sec == NULL)
3022 		return -ENOMEM;
3023 
3024 	sec_size = sizeof(*feat_sec) * nr_sections;
3025 
3026 	sec_start = header->feat_offset;
3027 	lseek(fd, sec_start + sec_size, SEEK_SET);
3028 
3029 	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3030 		if (do_write_feat(&ff, feat, &p, evlist))
3031 			perf_header__clear_feat(header, feat);
3032 	}
3033 
3034 	lseek(fd, sec_start, SEEK_SET);
3035 	/*
3036 	 * may write more than needed due to a dropped feature, but
3037 	 * this is okay, the reader will skip the missing entries
3038 	 */
3039 	err = do_write(&ff, feat_sec, sec_size);
3040 	if (err < 0)
3041 		pr_debug("failed to write feature section\n");
3042 	free(feat_sec);
3043 	return err;
3044 }
3045 
3046 int perf_header__write_pipe(int fd)
3047 {
3048 	struct perf_pipe_file_header f_header;
3049 	struct feat_fd ff;
3050 	int err;
3051 
3052 	ff = (struct feat_fd){ .fd = fd };
3053 
3054 	f_header = (struct perf_pipe_file_header){
3055 		.magic	   = PERF_MAGIC,
3056 		.size	   = sizeof(f_header),
3057 	};
3058 
3059 	err = do_write(&ff, &f_header, sizeof(f_header));
3060 	if (err < 0) {
3061 		pr_debug("failed to write perf pipe header\n");
3062 		return err;
3063 	}
3064 
3065 	return 0;
3066 }
3067 
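/*
 * On-disk layout produced below (sketch):
 *
 *   struct perf_file_header     written last, at offset 0
 *   sample ids                  one u64 array per evsel
 *   struct perf_file_attr[]     at f_header.attrs.offset
 *   sample data                 at header->data_offset
 *   feature section table and   at header->feat_offset, written only
 *   feature payloads            when at_exit is set
 */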
3068 int perf_session__write_header(struct perf_session *session,
3069 			       struct evlist *evlist,
3070 			       int fd, bool at_exit)
3071 {
3072 	struct perf_file_header f_header;
3073 	struct perf_file_attr   f_attr;
3074 	struct perf_header *header = &session->header;
3075 	struct evsel *evsel;
3076 	struct feat_fd ff;
3077 	u64 attr_offset;
3078 	int err;
3079 
3080 	ff = (struct feat_fd){ .fd = fd };
3081 	lseek(fd, sizeof(f_header), SEEK_SET);
3082 
3083 	evlist__for_each_entry(session->evlist, evsel) {
3084 		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
3085 		err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
3086 		if (err < 0) {
3087 			pr_debug("failed to write perf header\n");
3088 			return err;
3089 		}
3090 	}
3091 
3092 	attr_offset = lseek(ff.fd, 0, SEEK_CUR);
3093 
3094 	evlist__for_each_entry(evlist, evsel) {
3095 		f_attr = (struct perf_file_attr){
3096 			.attr = evsel->core.attr,
3097 			.ids  = {
3098 				.offset = evsel->id_offset,
3099 				.size   = evsel->ids * sizeof(u64),
3100 			}
3101 		};
3102 		err = do_write(&ff, &f_attr, sizeof(f_attr));
3103 		if (err < 0) {
3104 			pr_debug("failed to write perf header attribute\n");
3105 			return err;
3106 		}
3107 	}
3108 
3109 	if (!header->data_offset)
3110 		header->data_offset = lseek(fd, 0, SEEK_CUR);
3111 	header->feat_offset = header->data_offset + header->data_size;
3112 
3113 	if (at_exit) {
3114 		err = perf_header__adds_write(header, evlist, fd);
3115 		if (err < 0)
3116 			return err;
3117 	}
3118 
3119 	f_header = (struct perf_file_header){
3120 		.magic	   = PERF_MAGIC,
3121 		.size	   = sizeof(f_header),
3122 		.attr_size = sizeof(f_attr),
3123 		.attrs = {
3124 			.offset = attr_offset,
3125 			.size   = evlist->core.nr_entries * sizeof(f_attr),
3126 		},
3127 		.data = {
3128 			.offset = header->data_offset,
3129 			.size	= header->data_size,
3130 		},
3131 		/* event_types is ignored, store zeros */
3132 	};
3133 
3134 	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
3135 
3136 	lseek(fd, 0, SEEK_SET);
3137 	err = do_write(&ff, &f_header, sizeof(f_header));
3138 	if (err < 0) {
3139 		pr_debug("failed to write perf header\n");
3140 		return err;
3141 	}
3142 	lseek(fd, header->data_offset + header->data_size, SEEK_SET);
3143 
3144 	return 0;
3145 }
3146 
3147 static int perf_header__getbuffer64(struct perf_header *header,
3148 				    int fd, void *buf, size_t size)
3149 {
3150 	if (readn(fd, buf, size) <= 0)
3151 		return -1;
3152 
3153 	if (header->needs_swap)
3154 		mem_bswap_64(buf, size);
3155 
3156 	return 0;
3157 }
3158 
3159 int perf_header__process_sections(struct perf_header *header, int fd,
3160 				  void *data,
3161 				  int (*process)(struct perf_file_section *section,
3162 						 struct perf_header *ph,
3163 						 int feat, int fd, void *data))
3164 {
3165 	struct perf_file_section *feat_sec, *sec;
3166 	int nr_sections;
3167 	int sec_size;
3168 	int feat;
3169 	int err;
3170 
3171 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3172 	if (!nr_sections)
3173 		return 0;
3174 
3175 	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
3176 	if (!feat_sec)
3177 		return -1;
3178 
3179 	sec_size = sizeof(*feat_sec) * nr_sections;
3180 
3181 	lseek(fd, header->feat_offset, SEEK_SET);
3182 
3183 	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
3184 	if (err < 0)
3185 		goto out_free;
3186 
3187 	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
3188 		err = process(sec++, header, feat, fd, data);
3189 		if (err < 0)
3190 			goto out_free;
3191 	}
3192 	err = 0;
3193 out_free:
3194 	free(feat_sec);
3195 	return err;
3196 }
3197 
3198 static const int attr_file_abi_sizes[] = {
3199 	[0] = PERF_ATTR_SIZE_VER0,
3200 	[1] = PERF_ATTR_SIZE_VER1,
3201 	[2] = PERF_ATTR_SIZE_VER2,
3202 	[3] = PERF_ATTR_SIZE_VER3,
3203 	[4] = PERF_ATTR_SIZE_VER4,
3204 	0,
3205 };
3206 
3207 /*
3208  * In the legacy file format, the magic number does not encode the
3209  * endianness; hdr_sz was used for that instead. But since hdr_sz varies
3210  * with the ABI revision, we have to try all known sizes in both byte
3211  * orders to detect the endianness.
3212  */
3213 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
3214 {
3215 	uint64_t ref_size, attr_size;
3216 	int i;
3217 
3218 	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
3219 		ref_size = attr_file_abi_sizes[i]
3220 			 + sizeof(struct perf_file_section);
3221 		if (hdr_sz != ref_size) {
3222 			attr_size = bswap_64(hdr_sz);
3223 			if (attr_size != ref_size)
3224 				continue;
3225 
3226 			ph->needs_swap = true;
3227 		}
3228 		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
3229 			 i,
3230 			 ph->needs_swap);
3231 		return 0;
3232 	}
3233 	/* could not determine endianness */
3234 	return -1;
3235 }
3236 
3237 #define PERF_PIPE_HDR_VER0	16
3238 
3239 static const size_t attr_pipe_abi_sizes[] = {
3240 	[0] = PERF_PIPE_HDR_VER0,
3241 	0,
3242 };
3243 
3244 /*
3245  * In the legacy pipe format, there is an implicit assumption that the
3246  * endianness of the recording host and of the parsing host is the same.
3247  * This is not always the case: the pipe output can be redirected into a
3248  * file and analyzed on a different machine, with possibly a different
3249  * endianness and perf_event ABI revision in the perf tool itself.
3250  */
3251 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
3252 {
3253 	u64 attr_size;
3254 	int i;
3255 
3256 	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
3257 		if (hdr_sz != attr_pipe_abi_sizes[i]) {
3258 			attr_size = bswap_64(hdr_sz);
3259 			if (attr_size != hdr_sz)
3260 				continue;
3261 
3262 			ph->needs_swap = true;
3263 		}
3264 		pr_debug("Pipe ABI%d perf.data file detected\n", i);
3265 		return 0;
3266 	}
3267 	return -1;
3268 }
3269 
3270 bool is_perf_magic(u64 magic)
3271 {
3272 	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
3273 		|| magic == __perf_magic2
3274 		|| magic == __perf_magic2_sw)
3275 		return true;
3276 
3277 	return false;
3278 }
3279 
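/*
 * A match against __perf_magic2 means the file and the reader share the
 * same byte order; a match against the pre-swapped __perf_magic2_sw
 * means the file was written on a host of the opposite endianness, so
 * needs_swap is set and every subsequent multi-byte read is swapped.
 */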
3280 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
3281 			      bool is_pipe, struct perf_header *ph)
3282 {
3283 	int ret;
3284 
3285 	/* check for legacy format */
3286 	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
3287 	if (ret == 0) {
3288 		ph->version = PERF_HEADER_VERSION_1;
3289 		pr_debug("legacy perf.data format\n");
3290 		if (is_pipe)
3291 			return try_all_pipe_abis(hdr_sz, ph);
3292 
3293 		return try_all_file_abis(hdr_sz, ph);
3294 	}
3295 	/*
3296 	 * the new magic number serves two purposes:
3297 	 * - unique number to identify actual perf.data files
3298 	 * - encode endianness of file
3299 	 */
3300 	ph->version = PERF_HEADER_VERSION_2;
3301 
3302 	/* check magic number with one endianness */
3303 	if (magic == __perf_magic2)
3304 		return 0;
3305 
3306 	/* check magic number with opposite endianness */
3307 	if (magic != __perf_magic2_sw)
3308 		return -1;
3309 
3310 	ph->needs_swap = true;
3311 
3312 	return 0;
3313 }
3314 
3315 int perf_file_header__read(struct perf_file_header *header,
3316 			   struct perf_header *ph, int fd)
3317 {
3318 	ssize_t ret;
3319 
3320 	lseek(fd, 0, SEEK_SET);
3321 
3322 	ret = readn(fd, header, sizeof(*header));
3323 	if (ret <= 0)
3324 		return -1;
3325 
3326 	if (check_magic_endian(header->magic,
3327 			       header->attr_size, false, ph) < 0) {
3328 		pr_debug("magic/endian check failed\n");
3329 		return -1;
3330 	}
3331 
3332 	if (ph->needs_swap) {
3333 		mem_bswap_64(header, offsetof(struct perf_file_header,
3334 			     adds_features));
3335 	}
3336 
3337 	if (header->size != sizeof(*header)) {
3338 		/* Support the previous format */
3339 		if (header->size == offsetof(typeof(*header), adds_features))
3340 			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
3341 		else
3342 			return -1;
3343 	} else if (ph->needs_swap) {
3344 		/*
3345 		 * feature bitmap is declared as an array of unsigned longs --
3346 		 * not good since its size can differ between the host that
3347 		 * generated the data file and the host analyzing the file.
3348 		 *
3349 		 * We need to handle endianness, but we don't know the size of
3350 		 * the unsigned long where the file was generated. Take a best
3351  * guess at determining it: try a 64-bit swap first (i.e., file
3352  * created on a 64-bit host), and check if the hostname feature
3353  * bit is set (this feature bit is forced on as of fbe96f2).
3354  * If the bit is not set, undo the 64-bit swap and try a 32-bit
3355  * swap. If the hostname bit is still not set (e.g., older data
3356  * file), punt and fall back to the original behavior --
3357 		 * clearing all feature bits and setting buildid.
3358 		 */
3359 		mem_bswap_64(&header->adds_features,
3360 			    BITS_TO_U64(HEADER_FEAT_BITS));
3361 
3362 		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
3363 			/* unswap as u64 */
3364 			mem_bswap_64(&header->adds_features,
3365 				    BITS_TO_U64(HEADER_FEAT_BITS));
3366 
3367 			/* unswap as u32 */
3368 			mem_bswap_32(&header->adds_features,
3369 				    BITS_TO_U32(HEADER_FEAT_BITS));
3370 		}
3371 
3372 		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
3373 			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
3374 			set_bit(HEADER_BUILD_ID, header->adds_features);
3375 		}
3376 	}
3377 
3378 	memcpy(&ph->adds_features, &header->adds_features,
3379 	       sizeof(ph->adds_features));
3380 
3381 	ph->data_offset  = header->data.offset;
3382 	ph->data_size	 = header->data.size;
3383 	ph->feat_offset  = header->data.offset + header->data.size;
3384 	return 0;
3385 }
3386 
3387 static int perf_file_section__process(struct perf_file_section *section,
3388 				      struct perf_header *ph,
3389 				      int feat, int fd, void *data)
3390 {
3391 	struct feat_fd fdd = {
3392 		.fd	= fd,
3393 		.ph	= ph,
3394 		.size	= section->size,
3395 		.offset	= section->offset,
3396 	};
3397 
3398 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
3399 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
3400 			  "%d, continuing...\n", section->offset, feat);
3401 		return 0;
3402 	}
3403 
3404 	if (feat >= HEADER_LAST_FEATURE) {
3405 		pr_debug("unknown feature %d, continuing...\n", feat);
3406 		return 0;
3407 	}
3408 
3409 	if (!feat_ops[feat].process)
3410 		return 0;
3411 
3412 	return feat_ops[feat].process(&fdd, data);
3413 }
3414 
3415 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
3416 				       struct perf_header *ph, int fd,
3417 				       bool repipe)
3418 {
3419 	struct feat_fd ff = {
3420 		.fd = STDOUT_FILENO,
3421 		.ph = ph,
3422 	};
3423 	ssize_t ret;
3424 
3425 	ret = readn(fd, header, sizeof(*header));
3426 	if (ret <= 0)
3427 		return -1;
3428 
3429 	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
3430 		pr_debug("endian/magic failed\n");
3431 		return -1;
3432 	}
3433 
3434 	if (ph->needs_swap)
3435 		header->size = bswap_64(header->size);
3436 
3437 	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
3438 		return -1;
3439 
3440 	return 0;
3441 }
3442 
3443 static int perf_header__read_pipe(struct perf_session *session)
3444 {
3445 	struct perf_header *header = &session->header;
3446 	struct perf_pipe_file_header f_header;
3447 
3448 	if (perf_file_header__read_pipe(&f_header, header,
3449 					perf_data__fd(session->data),
3450 					session->repipe) < 0) {
3451 		pr_debug("incompatible file format\n");
3452 		return -EINVAL;
3453 	}
3454 
3455 	return 0;
3456 }
3457 
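/*
 * On-file layout of one attr entry, as consumed below (sketch):
 *
 *   struct perf_event_attr attr;   attr.size bytes, ABI-dependent
 *   struct perf_file_section ids;  offset/size of this evsel's id array
 *
 * Older, smaller attrs leave the tail of our struct zeroed; files
 * written by a newer ABI than ours are rejected rather than partially
 * parsed.
 */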
3458 static int read_attr(int fd, struct perf_header *ph,
3459 		     struct perf_file_attr *f_attr)
3460 {
3461 	struct perf_event_attr *attr = &f_attr->attr;
3462 	size_t sz, left;
3463 	size_t our_sz = sizeof(f_attr->attr);
3464 	ssize_t ret;
3465 
3466 	memset(f_attr, 0, sizeof(*f_attr));
3467 
3468 	/* read minimal guaranteed structure */
3469 	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
3470 	if (ret <= 0) {
3471 		pr_debug("cannot read %d bytes of header attr\n",
3472 			 PERF_ATTR_SIZE_VER0);
3473 		return -1;
3474 	}
3475 
3476 	/* on file perf_event_attr size */
3477 	sz = attr->size;
3478 
3479 	if (ph->needs_swap)
3480 		sz = bswap_32(sz);
3481 
3482 	if (sz == 0) {
3483 		/* assume ABI0 */
3484 		sz = PERF_ATTR_SIZE_VER0;
3485 	} else if (sz > our_sz) {
3486 		pr_debug("file uses a more recent and unsupported ABI"
3487 			 " (%zu bytes extra)\n", sz - our_sz);
3488 		return -1;
3489 	}
3490 	/* what we have not yet read and that we know about */
3491 	left = sz - PERF_ATTR_SIZE_VER0;
3492 	if (left) {
3493 		void *ptr = attr;
3494 		ptr += PERF_ATTR_SIZE_VER0;
3495 
3496 		ret = readn(fd, ptr, left);
		if (ret <= 0)
			return -1;
3497 	}
3498 	/* read perf_file_section, ids are read in caller */
3499 	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
3500 
3501 	return ret <= 0 ? -1 : 0;
3502 }
3503 
3504 static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel,
3505 						struct tep_handle *pevent)
3506 {
3507 	struct tep_event *event;
3508 	char bf[128];
3509 
3510 	/* already prepared */
3511 	if (evsel->tp_format)
3512 		return 0;
3513 
3514 	if (pevent == NULL) {
3515 		pr_debug("broken or missing trace data\n");
3516 		return -1;
3517 	}
3518 
3519 	event = tep_find_event(pevent, evsel->core.attr.config);
3520 	if (event == NULL) {
3521 		pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
3522 		return -1;
3523 	}
3524 
3525 	if (!evsel->name) {
3526 		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
3527 		evsel->name = strdup(bf);
3528 		if (evsel->name == NULL)
3529 			return -1;
3530 	}
3531 
3532 	evsel->tp_format = event;
3533 	return 0;
3534 }
3535 
3536 static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist,
3537 						  struct tep_handle *pevent)
3538 {
3539 	struct evsel *pos;
3540 
3541 	evlist__for_each_entry(evlist, pos) {
3542 		if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
3543 		    perf_evsel__prepare_tracepoint_event(pos, pevent))
3544 			return -1;
3545 	}
3546 
3547 	return 0;
3548 }
3549 
3550 int perf_session__read_header(struct perf_session *session)
3551 {
3552 	struct perf_data *data = session->data;
3553 	struct perf_header *header = &session->header;
3554 	struct perf_file_header	f_header;
3555 	struct perf_file_attr	f_attr;
3556 	u64			f_id;
3557 	int nr_attrs, nr_ids, i, j;
3558 	int fd = perf_data__fd(data);
3559 
3560 	session->evlist = evlist__new();
3561 	if (session->evlist == NULL)
3562 		return -ENOMEM;
3563 
3564 	session->evlist->env = &header->env;
3565 	session->machines.host.env = &header->env;
3566 	if (perf_data__is_pipe(data))
3567 		return perf_header__read_pipe(session);
3568 
3569 	if (perf_file_header__read(&f_header, header, fd) < 0)
3570 		return -EINVAL;
3571 
3572 	/*
3573 	 * Sanity check that perf.data was written cleanly; data size is
3574 	 * initialized to 0 and updated only if the on_exit function is run.
3575 	 * If data size is still 0 then the file contains only partial
3576 	 * information. Just warn the user and process as much as possible.
3577 	 */
3578 	if (f_header.data.size == 0) {
3579 		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
3580 			   "Was the 'perf record' command properly terminated?\n",
3581 			   data->file.path);
3582 	}
3583 
3584 	if (f_header.attr_size == 0) {
3585 		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
3586 		       "Was the 'perf record' command properly terminated?\n",
3587 		       data->file.path);
3588 		return -EINVAL;
3589 	}
3590 
3591 	nr_attrs = f_header.attrs.size / f_header.attr_size;
3592 	lseek(fd, f_header.attrs.offset, SEEK_SET);
3593 
3594 	for (i = 0; i < nr_attrs; i++) {
3595 		struct evsel *evsel;
3596 		off_t tmp;
3597 
3598 		if (read_attr(fd, header, &f_attr) < 0)
3599 			goto out_errno;
3600 
3601 		if (header->needs_swap) {
3602 			f_attr.ids.size   = bswap_64(f_attr.ids.size);
3603 			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
3604 			perf_event__attr_swap(&f_attr.attr);
3605 		}
3606 
3607 		tmp = lseek(fd, 0, SEEK_CUR);
3608 		evsel = evsel__new(&f_attr.attr);
3609 
3610 		if (evsel == NULL)
3611 			goto out_delete_evlist;
3612 
3613 		evsel->needs_swap = header->needs_swap;
3614 		/*
3615 		 * Do it before so that if perf_evsel__alloc_id fails, this
3616 		 * entry gets purged too at evlist__delete().
3617 		 */
3618 		evlist__add(session->evlist, evsel);
3619 
3620 		nr_ids = f_attr.ids.size / sizeof(u64);
3621 		/*
3622 		 * We don't have the cpu and thread maps on the header, so
3623 		 * for allocating the perf_sample_id table we fake 1 cpu and
3624 		 * nr_ids threads.
3625 		 */
3626 		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
3627 			goto out_delete_evlist;
3628 
3629 		lseek(fd, f_attr.ids.offset, SEEK_SET);
3630 
3631 		for (j = 0; j < nr_ids; j++) {
3632 			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
3633 				goto out_errno;
3634 
3635 			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
3636 		}
3637 
3638 		lseek(fd, tmp, SEEK_SET);
3639 	}
3640 
3641 	perf_header__process_sections(header, fd, &session->tevent,
3642 				      perf_file_section__process);
3643 
3644 	if (perf_evlist__prepare_tracepoint_events(session->evlist,
3645 						   session->tevent.pevent))
3646 		goto out_delete_evlist;
3647 
3648 	return 0;
3649 out_errno:
3650 	return -errno;
3651 
3652 out_delete_evlist:
3653 	evlist__delete(session->evlist);
3654 	session->evlist = NULL;
3655 	return -ENOMEM;
3656 }
3657 
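/*
 * PERF_RECORD_HEADER_ATTR layout as synthesized below (sketch):
 *
 *   struct perf_event_header header;
 *   struct perf_event_attr   attr;      padded to a u64 boundary
 *   u64                      id[ids];
 *
 * The total size must fit into header.size, a u16, hence the -E2BIG
 * check after the truncating assignment.
 */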
3658 int perf_event__synthesize_attr(struct perf_tool *tool,
3659 				struct perf_event_attr *attr, u32 ids, u64 *id,
3660 				perf_event__handler_t process)
3661 {
3662 	union perf_event *ev;
3663 	size_t size;
3664 	int err;
3665 
3666 	size = sizeof(struct perf_event_attr);
3667 	size = PERF_ALIGN(size, sizeof(u64));
3668 	size += sizeof(struct perf_event_header);
3669 	size += ids * sizeof(u64);
3670 
3671 	ev = zalloc(size);
3672 
3673 	if (ev == NULL)
3674 		return -ENOMEM;
3675 
3676 	ev->attr.attr = *attr;
3677 	memcpy(ev->attr.id, id, ids * sizeof(u64));
3678 
3679 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
3680 	ev->attr.header.size = (u16)size;
3681 
3682 	if (ev->attr.header.size == size)
3683 		err = process(tool, ev, NULL, NULL);
3684 	else
3685 		err = -E2BIG;
3686 
3687 	free(ev);
3688 
3689 	return err;
3690 }
3691 
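/*
 * Pipe-mode counterpart of the on-disk feature sections: each feature
 * payload is wrapped into a PERF_RECORD_HEADER_FEATURE event (header,
 * feat_id, payload) built by the same feat_ops[].write() callbacks,
 * only writing into an in-memory feat_fd buffer instead of a file.
 */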
3692 int perf_event__synthesize_features(struct perf_tool *tool,
3693 				    struct perf_session *session,
3694 				    struct evlist *evlist,
3695 				    perf_event__handler_t process)
3696 {
3697 	struct perf_header *header = &session->header;
3698 	struct feat_fd ff;
3699 	struct perf_record_header_feature *fe;
3700 	size_t sz, sz_hdr;
3701 	int feat, ret;
3702 
3703 	sz_hdr = sizeof(fe->header);
3704 	sz = sizeof(union perf_event);
3705 	/* get a nice alignment */
3706 	sz = PERF_ALIGN(sz, page_size);
3707 
3708 	memset(&ff, 0, sizeof(ff));
3709 
3710 	ff.buf = malloc(sz);
3711 	if (!ff.buf)
3712 		return -ENOMEM;
3713 
3714 	ff.size = sz - sz_hdr;
3715 	ff.ph = &session->header;
3716 
3717 	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3718 		if (!feat_ops[feat].synthesize) {
3719 			pr_debug("No record header feature for header :%d\n", feat);
3720 			continue;
3721 		}
3722 
3723 		ff.offset = sizeof(*fe);
3724 
3725 		ret = feat_ops[feat].write(&ff, evlist);
3726 		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
3727 			pr_debug("Error writing feature\n");
3728 			continue;
3729 		}
3730 		/* ff.buf may have changed due to realloc in do_write() */
3731 		fe = ff.buf;
3732 		memset(fe, 0, sizeof(*fe));
3733 
3734 		fe->feat_id = feat;
3735 		fe->header.type = PERF_RECORD_HEADER_FEATURE;
3736 		fe->header.size = ff.offset;
3737 
3738 		ret = process(tool, ff.buf, NULL, NULL);
3739 		if (ret) {
3740 			free(ff.buf);
3741 			return ret;
3742 		}
3743 	}
3744 
3745 	/* Send HEADER_LAST_FEATURE mark. */
3746 	fe = ff.buf;
3747 	fe->feat_id     = HEADER_LAST_FEATURE;
3748 	fe->header.type = PERF_RECORD_HEADER_FEATURE;
3749 	fe->header.size = sizeof(*fe);
3750 
3751 	ret = process(tool, ff.buf, NULL, NULL);
3752 
3753 	free(ff.buf);
3754 	return ret;
3755 }
3756 
3757 int perf_event__process_feature(struct perf_session *session,
3758 				union perf_event *event)
3759 {
3760 	struct perf_tool *tool = session->tool;
3761 	struct feat_fd ff = { .fd = 0 };
3762 	struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
3763 	int type = fe->header.type;
3764 	u64 feat = fe->feat_id;
3765 
3766 	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
3767 		pr_warning("invalid record type %d in pipe-mode\n", type);
3768 		return 0;
3769 	}
3770 	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
3771 		pr_warning("invalid record type %d in pipe-mode\n", type);
3772 		return -1;
3773 	}
3774 
3775 	if (!feat_ops[feat].process)
3776 		return 0;
3777 
3778 	ff.buf  = (void *)fe->data;
3779 	ff.size = event->header.size - sizeof(*fe);
3780 	ff.ph = &session->header;
3781 
3782 	if (feat_ops[feat].process(&ff, NULL))
3783 		return -1;
3784 
3785 	if (!feat_ops[feat].print || !tool->show_feat_hdr)
3786 		return 0;
3787 
3788 	if (!feat_ops[feat].full_only ||
3789 	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
3790 		feat_ops[feat].print(&ff, stdout);
3791 	} else {
3792 		fprintf(stdout, "# %s info available, use -I to display\n",
3793 			feat_ops[feat].name);
3794 	}
3795 
3796 	return 0;
3797 }
3798 
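/*
 * PERF_RECORD_EVENT_UPDATE is a small envelope: an event header, the
 * update type, the evsel id, then a type-specific payload (unit string,
 * scale double, name string or cpu map) built by the helpers below.
 */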
3799 static struct perf_record_event_update *
3800 event_update_event__new(size_t size, u64 type, u64 id)
3801 {
3802 	struct perf_record_event_update *ev;
3803 
3804 	size += sizeof(*ev);
3805 	size  = PERF_ALIGN(size, sizeof(u64));
3806 
3807 	ev = zalloc(size);
3808 	if (ev) {
3809 		ev->header.type = PERF_RECORD_EVENT_UPDATE;
3810 		ev->header.size = (u16)size;
3811 		ev->type = type;
3812 		ev->id = id;
3813 	}
3814 	return ev;
3815 }
3816 
3817 int
3818 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3819 					 struct evsel *evsel,
3820 					 perf_event__handler_t process)
3821 {
3822 	struct perf_record_event_update *ev;
3823 	size_t size = strlen(evsel->unit);
3824 	int err;
3825 
3826 	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3827 	if (ev == NULL)
3828 		return -ENOMEM;
3829 
3830 	strlcpy(ev->data, evsel->unit, size + 1);
3831 	err = process(tool, (union perf_event *)ev, NULL, NULL);
3832 	free(ev);
3833 	return err;
3834 }
3835 
3836 int
3837 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3838 					  struct evsel *evsel,
3839 					  perf_event__handler_t process)
3840 {
3841 	struct perf_record_event_update *ev;
3842 	struct perf_record_event_update_scale *ev_data;
3843 	int err;
3844 
3845 	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3846 	if (ev == NULL)
3847 		return -ENOMEM;
3848 
3849 	ev_data = (struct perf_record_event_update_scale *)ev->data;
3850 	ev_data->scale = evsel->scale;
3851 	err = process(tool, (union perf_event *)ev, NULL, NULL);
3852 	free(ev);
3853 	return err;
3854 }
3855 
3856 int
3857 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3858 					 struct evsel *evsel,
3859 					 perf_event__handler_t process)
3860 {
3861 	struct perf_record_event_update *ev;
3862 	size_t len = strlen(evsel->name);
3863 	int err;
3864 
3865 	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3866 	if (ev == NULL)
3867 		return -ENOMEM;
3868 
3869 	strlcpy(ev->data, evsel->name, len + 1);
3870 	err = process(tool, (union perf_event *)ev, NULL, NULL);
3871 	free(ev);
3872 	return err;
3873 }
3874 
3875 int
3876 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3877 					struct evsel *evsel,
3878 					perf_event__handler_t process)
3879 {
3880 	size_t size = sizeof(struct perf_record_event_update);
3881 	struct perf_record_event_update *ev;
3882 	int max, err;
3883 	u16 type;
3884 
3885 	if (!evsel->core.own_cpus)
3886 		return 0;
3887 
3888 	ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
3889 	if (!ev)
3890 		return -ENOMEM;
3891 
3892 	ev->header.type = PERF_RECORD_EVENT_UPDATE;
3893 	ev->header.size = (u16)size;
3894 	ev->type = PERF_EVENT_UPDATE__CPUS;
3895 	ev->id   = evsel->id[0];
3896 
3897 	cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
3898 				 evsel->core.own_cpus,
3899 				 type, max);
3900 
3901 	err = process(tool, (union perf_event *)ev, NULL, NULL);
3902 	free(ev);
3903 	return err;
3904 }
3905 
3906 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3907 {
3908 	struct perf_record_event_update *ev = &event->event_update;
3909 	struct perf_record_event_update_scale *ev_scale;
3910 	struct perf_record_event_update_cpus *ev_cpus;
3911 	struct perf_cpu_map *map;
3912 	size_t ret;
3913 
3914 	ret = fprintf(fp, "\n... id:    %" PRI_lu64 "\n", ev->id);
3915 
3916 	switch (ev->type) {
3917 	case PERF_EVENT_UPDATE__SCALE:
3918 		ev_scale = (struct perf_record_event_update_scale *)ev->data;
3919 		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3920 		break;
3921 	case PERF_EVENT_UPDATE__UNIT:
3922 		ret += fprintf(fp, "... unit:  %s\n", ev->data);
3923 		break;
3924 	case PERF_EVENT_UPDATE__NAME:
3925 		ret += fprintf(fp, "... name:  %s\n", ev->data);
3926 		break;
3927 	case PERF_EVENT_UPDATE__CPUS:
3928 		ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
3929 		ret += fprintf(fp, "... ");
3930 
3931 		map = cpu_map__new_data(&ev_cpus->cpus);
3932 		if (map)
3933 			ret += cpu_map__fprintf(map, fp);
3934 		else
3935 			ret += fprintf(fp, "failed to get cpus\n");
3936 		break;
3937 	default:
3938 		ret += fprintf(fp, "... unknown type\n");
3939 		break;
3940 	}
3941 
3942 	return ret;
3943 }
3944 
3945 int perf_event__synthesize_attrs(struct perf_tool *tool,
3946 				 struct evlist *evlist,
3947 				 perf_event__handler_t process)
3948 {
3949 	struct evsel *evsel;
3950 	int err = 0;
3951 
3952 	evlist__for_each_entry(evlist, evsel) {
3953 		err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->ids,
3954 						  evsel->id, process);
3955 		if (err) {
3956 			pr_debug("failed to create perf header attribute\n");
3957 			return err;
3958 		}
3959 	}
3960 
3961 	return err;
3962 }
3963 
3964 static bool has_unit(struct evsel *counter)
3965 {
3966 	return counter->unit && *counter->unit;
3967 }
3968 
3969 static bool has_scale(struct evsel *counter)
3970 {
3971 	return counter->scale != 1;
3972 }
3973 
3974 int perf_event__synthesize_extra_attr(struct perf_tool *tool,
3975 				      struct evlist *evsel_list,
3976 				      perf_event__handler_t process,
3977 				      bool is_pipe)
3978 {
3979 	struct evsel *counter;
3980 	int err;
3981 
3982 	/*
3983 	 * Synthesize other event details not carried within the
3984 	 * attr event - unit, scale, name
3985 	 */
3986 	evlist__for_each_entry(evsel_list, counter) {
3987 		if (!counter->supported)
3988 			continue;
3989 
3990 		/*
3991 		 * Synthesize unit and scale only if they are defined.
3992 		 */
3993 		if (has_unit(counter)) {
3994 			err = perf_event__synthesize_event_update_unit(tool, counter, process);
3995 			if (err < 0) {
3996 				pr_err("Couldn't synthesize evsel unit.\n");
3997 				return err;
3998 			}
3999 		}
4000 
4001 		if (has_scale(counter)) {
4002 			err = perf_event__synthesize_event_update_scale(tool, counter, process);
4003 			if (err < 0) {
4004 				pr_err("Couldn't synthesize evsel counter.\n");
4005 				return err;
4006 			}
4007 		}
4008 
4009 		if (counter->core.own_cpus) {
4010 			err = perf_event__synthesize_event_update_cpus(tool, counter, process);
4011 			if (err < 0) {
4012 				pr_err("Couldn't synthesize evsel cpus.\n");
4013 				return err;
4014 			}
4015 		}
4016 
4017 		/*
4018 		 * Name is needed only for pipe output,
4019 		 * perf.data carries event names.
4020 		 */
4021 		if (is_pipe) {
4022 			err = perf_event__synthesize_event_update_name(tool, counter, process);
4023 			if (err < 0) {
4024 				pr_err("Couldn't synthesize evsel name.\n");
4025 				return err;
4026 			}
4027 		}
4028 	}
4029 	return 0;
4030 }
4031 
4032 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
4033 			     union perf_event *event,
4034 			     struct evlist **pevlist)
4035 {
4036 	u32 i, ids, n_ids;
4037 	struct evsel *evsel;
4038 	struct evlist *evlist = *pevlist;
4039 
4040 	if (evlist == NULL) {
4041 		*pevlist = evlist = evlist__new();
4042 		if (evlist == NULL)
4043 			return -ENOMEM;
4044 	}
4045 
4046 	evsel = evsel__new(&event->attr.attr);
4047 	if (evsel == NULL)
4048 		return -ENOMEM;
4049 
4050 	evlist__add(evlist, evsel);
4051 
4052 	ids = event->header.size;
4053 	ids -= (void *)&event->attr.id - (void *)event;
4054 	n_ids = ids / sizeof(u64);
4055 	/*
4056 	 * We don't have the cpu and thread maps on the header, so
4057 	 * for allocating the perf_sample_id table we fake 1 cpu and
4058 	 * n_ids threads.
4059 	 */
4060 	if (perf_evsel__alloc_id(evsel, 1, n_ids))
4061 		return -ENOMEM;
4062 
4063 	for (i = 0; i < n_ids; i++) {
4064 		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
4065 	}
4066 
4067 	return 0;
4068 }
4069 
4070 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
4071 				     union perf_event *event,
4072 				     struct evlist **pevlist)
4073 {
4074 	struct perf_record_event_update *ev = &event->event_update;
4075 	struct perf_record_event_update_scale *ev_scale;
4076 	struct perf_record_event_update_cpus *ev_cpus;
4077 	struct evlist *evlist;
4078 	struct evsel *evsel;
4079 	struct perf_cpu_map *map;
4080 
4081 	if (!pevlist || *pevlist == NULL)
4082 		return -EINVAL;
4083 
4084 	evlist = *pevlist;
4085 
4086 	evsel = perf_evlist__id2evsel(evlist, ev->id);
4087 	if (evsel == NULL)
4088 		return -EINVAL;
4089 
4090 	switch (ev->type) {
4091 	case PERF_EVENT_UPDATE__UNIT:
4092 		evsel->unit = strdup(ev->data);
4093 		break;
4094 	case PERF_EVENT_UPDATE__NAME:
4095 		evsel->name = strdup(ev->data);
4096 		break;
4097 	case PERF_EVENT_UPDATE__SCALE:
4098 		ev_scale = (struct perf_record_event_update_scale *)ev->data;
4099 		evsel->scale = ev_scale->scale;
4100 		break;
4101 	case PERF_EVENT_UPDATE__CPUS:
4102 		ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
4103 
4104 		map = cpu_map__new_data(&ev_cpus->cpus);
4105 		if (map)
4106 			evsel->core.own_cpus = map;
4107 		else
4108 			pr_err("failed to get event_update cpus\n");
4109 	default:
4110 		break;
4111 	}
4112 
4113 	return 0;
4114 }
4115 
4116 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
4117 					struct evlist *evlist,
4118 					perf_event__handler_t process)
4119 {
4120 	union perf_event ev;
4121 	struct tracing_data *tdata;
4122 	ssize_t size = 0, aligned_size = 0, padding;
4123 	struct feat_fd ff;
4124 	int err __maybe_unused = 0;
4125 
4126 	/*
4127 	 * We are going to store the size of the data followed
4128 	 * by the data contents. Since the fd descriptor is a pipe,
4129 	 * we cannot seek back to store the size of the data once
4130 	 * we know it. Instead we:
4131 	 *
4132 	 * - write the tracing data to the temp file
4133 	 * - get/write the data size to pipe
4134 	 * - write the tracing data from the temp file
4135 	 *   to the pipe
4136 	 */
4137 	tdata = tracing_data_get(&evlist->core.entries, fd, true);
4138 	if (!tdata)
4139 		return -1;
4140 
4141 	memset(&ev, 0, sizeof(ev));
4142 
4143 	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
4144 	size = tdata->size;
4145 	aligned_size = PERF_ALIGN(size, sizeof(u64));
4146 	padding = aligned_size - size;
4147 	ev.tracing_data.header.size = sizeof(ev.tracing_data);
4148 	ev.tracing_data.size = aligned_size;
4149 
4150 	process(tool, &ev, NULL, NULL);
4151 
4152 	/*
4153 	 * The put function will copy all the tracing data
4154 	 * stored in temp file to the pipe.
4155 	 */
4156 	tracing_data_put(tdata);
4157 
4158 	ff = (struct feat_fd){ .fd = fd };
4159 	if (write_padded(&ff, NULL, 0, padding))
4160 		return -1;
4161 
4162 	return aligned_size;
4163 }
4164 
4165 int perf_event__process_tracing_data(struct perf_session *session,
4166 				     union perf_event *event)
4167 {
4168 	ssize_t size_read, padding, size = event->tracing_data.size;
4169 	int fd = perf_data__fd(session->data);
4170 	off_t offset = lseek(fd, 0, SEEK_CUR);
4171 	char buf[BUFSIZ];
4172 
4173 	/* seek past the tracing_data event header to the data itself */
4174 	lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
4175 	      SEEK_SET);
4176 
4177 	size_read = trace_report(fd, &session->tevent,
4178 				 session->repipe);
4179 	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
4180 
4181 	if (readn(fd, buf, padding) < 0) {
4182 		pr_err("%s: reading input file", __func__);
4183 		return -1;
4184 	}
4185 	if (session->repipe) {
4186 		int retw = write(STDOUT_FILENO, buf, padding);
4187 		if (retw != padding) {
4188 			pr_err("%s: repiping tracing data padding", __func__);
4189 			return -1;
4190 		}
4191 	}
4192 
4193 	if (size_read + padding != size) {
4194 		pr_err("%s: tracing data size mismatch", __func__);
4195 		return -1;
4196 	}
4197 
4198 	perf_evlist__prepare_tracepoint_events(session->evlist,
4199 					       session->tevent.pevent);
4200 
4201 	return size_read + padding;
4202 }
4203 
4204 int perf_event__synthesize_build_id(struct perf_tool *tool,
4205 				    struct dso *pos, u16 misc,
4206 				    perf_event__handler_t process,
4207 				    struct machine *machine)
4208 {
4209 	union perf_event ev;
4210 	size_t len;
4211 	int err = 0;
4212 
4213 	if (!pos->hit)
4214 		return err;
4215 
4216 	memset(&ev, 0, sizeof(ev));
4217 
4218 	len = pos->long_name_len + 1;
4219 	len = PERF_ALIGN(len, NAME_ALIGN);
4220 	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
4221 	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
4222 	ev.build_id.header.misc = misc;
4223 	ev.build_id.pid = machine->pid;
4224 	ev.build_id.header.size = sizeof(ev.build_id) + len;
4225 	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
4226 
4227 	err = process(tool, &ev, NULL, machine);
4228 
4229 	return err;
4230 }
4231 
4232 int perf_event__process_build_id(struct perf_session *session,
4233 				 union perf_event *event)
4234 {
4235 	__event_process_build_id(&event->build_id,
4236 				 event->build_id.filename,
4237 				 session);
4238 	return 0;
4239 }
4240