xref: /linux/tools/perf/util/header.c (revision 877a7a11050ee4d465364c57f8fbf78f6b1a2559)
1 #include "util.h"
2 #include <sys/types.h>
3 #include <byteswap.h>
4 #include <unistd.h>
5 #include <stdio.h>
6 #include <stdlib.h>
7 #include <linux/list.h>
8 #include <linux/kernel.h>
9 #include <linux/bitops.h>
10 #include <sys/utsname.h>
11 
12 #include "evlist.h"
13 #include "evsel.h"
14 #include "header.h"
15 #include "../perf.h"
16 #include "trace-event.h"
17 #include "session.h"
18 #include "symbol.h"
19 #include "debug.h"
20 #include "cpumap.h"
21 #include "pmu.h"
22 #include "vdso.h"
23 #include "strbuf.h"
24 #include "build-id.h"
25 #include "data.h"
26 #include <api/fs/fs.h>
27 #include "asm/bug.h"
28 
29 /*
30  * magic2 = "PERFILE2"
31  * must be a numerical value to let the endianness
32  * determine the memory layout. That way we are able
33  * to detect endianness when reading the perf.data file
34  * back.
35  *
36  * we check for legacy (PERFFILE) format.
37  */
/* legacy perf.data magic ("PERFFILE", original format) */
static const char *__perf_magic1 = "PERFFILE";
/* "PERFILE2" read as a little-endian u64 */
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
/* byte-swapped magic2: seen when writer and reader endianness differ */
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

/* perf version recorded in the header (PERF_VERSION is set at build time) */
const char perf_version_string[] = PERF_VERSION;
45 
/*
 * On-disk record for one event: the event attr followed by the file
 * section (offset/size) holding that event's sample ids.
 */
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};
50 
/* Mark feature @feat as present in the header's feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}
55 
/* Mark feature @feat as absent in the header's feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}
60 
/* Return true if feature @feat is set in the header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
65 
/*
 * Write exactly @size bytes from @buf to @fd, looping over short writes.
 * Returns 0 on success or -errno on write failure.  EINTR is retried
 * instead of aborting the whole header write.
 */
static int do_write(int fd, const void *buf, size_t size)
{
	const char *p = buf;	/* char * keeps the pointer arithmetic standard C */

	while (size) {
		ssize_t ret = write(fd, p, size);

		if (ret < 0) {
			if (errno == EINTR)	/* interrupted, nothing consumed: retry */
				continue;
			return -errno;
		}

		size -= ret;
		p += ret;
	}

	return 0;
}
80 
81 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
82 {
83 	static const char zero_buf[NAME_ALIGN];
84 	int err = do_write(fd, bf, count);
85 
86 	if (!err)
87 		err = do_write(fd, zero_buf, count_aligned - count);
88 
89 	return err;
90 }
91 
/* on-disk footprint of a do_write_string() record: u32 length + padded payload */
#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
94 
95 static int do_write_string(int fd, const char *str)
96 {
97 	u32 len, olen;
98 	int ret;
99 
100 	olen = strlen(str) + 1;
101 	len = PERF_ALIGN(olen, NAME_ALIGN);
102 
103 	/* write len, incl. \0 */
104 	ret = do_write(fd, &len, sizeof(len));
105 	if (ret < 0)
106 		return ret;
107 
108 	return write_padded(fd, str, olen, len);
109 }
110 
111 static char *do_read_string(int fd, struct perf_header *ph)
112 {
113 	ssize_t sz, ret;
114 	u32 len;
115 	char *buf;
116 
117 	sz = readn(fd, &len, sizeof(len));
118 	if (sz < (ssize_t)sizeof(len))
119 		return NULL;
120 
121 	if (ph->needs_swap)
122 		len = bswap_32(len);
123 
124 	buf = malloc(len);
125 	if (!buf)
126 		return NULL;
127 
128 	ret = readn(fd, buf, len);
129 	if (ret == (ssize_t)len) {
130 		/*
131 		 * strings are padded by zeroes
132 		 * thus the actual strlen of buf
133 		 * may be less than len
134 		 */
135 		return buf;
136 	}
137 
138 	free(buf);
139 	return NULL;
140 }
141 
/*
 * Feature writer for HEADER_TRACING_DATA: copy the tracefs metadata
 * (event formats etc.) for the recorded tracepoints into the file.
 */
static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}
147 
148 
149 static int write_build_id(int fd, struct perf_header *h,
150 			  struct perf_evlist *evlist __maybe_unused)
151 {
152 	struct perf_session *session;
153 	int err;
154 
155 	session = container_of(h, struct perf_session, header);
156 
157 	if (!perf_session__read_build_ids(session, true))
158 		return -1;
159 
160 	err = perf_session__write_buildid_table(session, fd);
161 	if (err < 0) {
162 		pr_debug("failed to write buildid table\n");
163 		return err;
164 	}
165 	perf_session__cache_build_ids(session);
166 
167 	return 0;
168 }
169 
170 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
171 			  struct perf_evlist *evlist __maybe_unused)
172 {
173 	struct utsname uts;
174 	int ret;
175 
176 	ret = uname(&uts);
177 	if (ret < 0)
178 		return -1;
179 
180 	return do_write_string(fd, uts.nodename);
181 }
182 
183 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
184 			   struct perf_evlist *evlist __maybe_unused)
185 {
186 	struct utsname uts;
187 	int ret;
188 
189 	ret = uname(&uts);
190 	if (ret < 0)
191 		return -1;
192 
193 	return do_write_string(fd, uts.release);
194 }
195 
196 static int write_arch(int fd, struct perf_header *h __maybe_unused,
197 		      struct perf_evlist *evlist __maybe_unused)
198 {
199 	struct utsname uts;
200 	int ret;
201 
202 	ret = uname(&uts);
203 	if (ret < 0)
204 		return -1;
205 
206 	return do_write_string(fd, uts.machine);
207 }
208 
/* Feature writer for HEADER_VERSION: record the perf version string. */
static int write_version(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(fd, perf_version_string);
}
214 
/*
 * Find the first /proc/cpuinfo line whose key matches @cpuinfo_proc,
 * strip the "key:" prefix and trailing newline, collapse whitespace runs
 * in the value (vendor branding strings), and write the result with
 * do_write_string().  Returns -1 if the key is absent or the file
 * cannot be read.
 */
static int __write_cpudesc(int fd, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	/* skip past ": " to the value; fall back to the whole line */
	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		/* cast: passing a negative char to isspace() is undefined */
		if (isspace((unsigned char)*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			while (*q && isspace((unsigned char)*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(fd, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
271 
272 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
273 		       struct perf_evlist *evlist __maybe_unused)
274 {
275 #ifndef CPUINFO_PROC
276 #define CPUINFO_PROC {"model name", }
277 #endif
278 	const char *cpuinfo_procs[] = CPUINFO_PROC;
279 	unsigned int i;
280 
281 	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
282 		int ret;
283 		ret = __write_cpudesc(fd, cpuinfo_procs[i]);
284 		if (ret >= 0)
285 			return ret;
286 	}
287 	return -1;
288 }
289 
290 
291 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
292 			struct perf_evlist *evlist __maybe_unused)
293 {
294 	long nr;
295 	u32 nrc, nra;
296 	int ret;
297 
298 	nrc = cpu__max_present_cpu();
299 
300 	nr = sysconf(_SC_NPROCESSORS_ONLN);
301 	if (nr < 0)
302 		return -1;
303 
304 	nra = (u32)(nr & UINT_MAX);
305 
306 	ret = do_write(fd, &nrc, sizeof(nrc));
307 	if (ret < 0)
308 		return ret;
309 
310 	return do_write(fd, &nra, sizeof(nra));
311 }
312 
/*
 * Feature writer for HEADER_EVENT_DESC.  On-disk layout:
 *
 *	u32 nr_events
 *	u32 attr_size
 *	struct {
 *		struct perf_event_attr attr;	(attr_size bytes)
 *		u32  nr_ids;
 *		char name[];			(do_write_string format)
 *		u64  ids[nr_ids];
 *	} [nr_events];
 */
static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(fd, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
368 
369 static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
370 			 struct perf_evlist *evlist __maybe_unused)
371 {
372 	char buf[MAXPATHLEN];
373 	u32 n;
374 	int i, ret;
375 
376 	/* actual path to perf binary */
377 	ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
378 	if (ret <= 0)
379 		return -1;
380 
381 	/* readlink() does not add null termination */
382 	buf[ret] = '\0';
383 
384 	/* account for binary path */
385 	n = perf_env.nr_cmdline + 1;
386 
387 	ret = do_write(fd, &n, sizeof(n));
388 	if (ret < 0)
389 		return ret;
390 
391 	ret = do_write_string(fd, buf);
392 	if (ret < 0)
393 		return ret;
394 
395 	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
396 		ret = do_write_string(fd, perf_env.cmdline_argv[i]);
397 		if (ret < 0)
398 			return ret;
399 	}
400 	return 0;
401 }
402 
/* sysfs files listing the core/thread sibling CPUs of a given cpu */
#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

/* de-duplicated sibling-list strings collected from sysfs */
struct cpu_topo {
	u32 cpu_nr;		/* number of CPUs covered (max present) */
	u32 core_sib;		/* entries used in core_siblings[] */
	u32 thread_sib;		/* entries used in thread_siblings[] */
	char **core_siblings;
	char **thread_siblings;
};
415 
/*
 * Read @cpu's core and thread sibling lists from sysfs and record each
 * list string in @tp unless an identical string is already present
 * (siblings of the same core/package produce identical lists).
 *
 * Ownership note: when a string is kept, the getline() buffer itself is
 * stored in @tp and buf/len are reset so the next getline() allocates a
 * fresh buffer.  Returns 0 if at least one list was read, -1 otherwise.
 */
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);	/* fp is reused below; the done: path only sees the second stream */
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	/* keep the list only if no earlier CPU produced the same one */
	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;	/* ownership moved into tp */
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;	/* ownership moved into tp */
	}
	ret = 0;
done:
	if(fp)
		fclose(fp);
	free(buf);
	return ret;
}
481 
482 static void free_cpu_topo(struct cpu_topo *tp)
483 {
484 	u32 i;
485 
486 	if (!tp)
487 		return;
488 
489 	for (i = 0 ; i < tp->core_sib; i++)
490 		zfree(&tp->core_siblings[i]);
491 
492 	for (i = 0 ; i < tp->thread_sib; i++)
493 		zfree(&tp->thread_siblings[i]);
494 
495 	free(tp);
496 }
497 
/*
 * Allocate a cpu_topo sized for all present CPUs and fill it from sysfs
 * for every online CPU.  The struct and both pointer arrays live in one
 * calloc'ed chunk, so free_cpu_topo()'s single free() releases it all.
 * Returns NULL on failure.
 */
static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	/* one allocation: the struct followed by the two sibling arrays */
	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	tp->cpu_nr = nr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		/* offline CPUs have no topology files; skip them */
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}
548 
549 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
550 			  struct perf_evlist *evlist __maybe_unused)
551 {
552 	struct cpu_topo *tp;
553 	u32 i;
554 	int ret, j;
555 
556 	tp = build_cpu_topology();
557 	if (!tp)
558 		return -1;
559 
560 	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
561 	if (ret < 0)
562 		goto done;
563 
564 	for (i = 0; i < tp->core_sib; i++) {
565 		ret = do_write_string(fd, tp->core_siblings[i]);
566 		if (ret < 0)
567 			goto done;
568 	}
569 	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
570 	if (ret < 0)
571 		goto done;
572 
573 	for (i = 0; i < tp->thread_sib; i++) {
574 		ret = do_write_string(fd, tp->thread_siblings[i]);
575 		if (ret < 0)
576 			break;
577 	}
578 
579 	ret = perf_env__read_cpu_topology_map(&perf_env);
580 	if (ret < 0)
581 		goto done;
582 
583 	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
584 		ret = do_write(fd, &perf_env.cpu[j].core_id,
585 			       sizeof(perf_env.cpu[j].core_id));
586 		if (ret < 0)
587 			return ret;
588 		ret = do_write(fd, &perf_env.cpu[j].socket_id,
589 			       sizeof(perf_env.cpu[j].socket_id));
590 		if (ret < 0)
591 			return ret;
592 	}
593 done:
594 	free_cpu_topo(tp);
595 	return ret;
596 }
597 
598 
599 
600 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
601 			  struct perf_evlist *evlist __maybe_unused)
602 {
603 	char *buf = NULL;
604 	FILE *fp;
605 	size_t len = 0;
606 	int ret = -1, n;
607 	uint64_t mem;
608 
609 	fp = fopen("/proc/meminfo", "r");
610 	if (!fp)
611 		return -1;
612 
613 	while (getline(&buf, &len, fp) > 0) {
614 		ret = strncmp(buf, "MemTotal:", 9);
615 		if (!ret)
616 			break;
617 	}
618 	if (!ret) {
619 		n = sscanf(buf, "%*s %"PRIu64, &mem);
620 		if (n == 1)
621 			ret = do_write(fd, &mem, sizeof(mem));
622 	} else
623 		ret = -1;
624 	free(buf);
625 	fclose(fp);
626 	return ret;
627 }
628 
629 static int write_topo_node(int fd, int node)
630 {
631 	char str[MAXPATHLEN];
632 	char field[32];
633 	char *buf = NULL, *p;
634 	size_t len = 0;
635 	FILE *fp;
636 	u64 mem_total, mem_free, mem;
637 	int ret = -1;
638 
639 	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
640 	fp = fopen(str, "r");
641 	if (!fp)
642 		return -1;
643 
644 	while (getline(&buf, &len, fp) > 0) {
645 		/* skip over invalid lines */
646 		if (!strchr(buf, ':'))
647 			continue;
648 		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
649 			goto done;
650 		if (!strcmp(field, "MemTotal:"))
651 			mem_total = mem;
652 		if (!strcmp(field, "MemFree:"))
653 			mem_free = mem;
654 	}
655 
656 	fclose(fp);
657 	fp = NULL;
658 
659 	ret = do_write(fd, &mem_total, sizeof(u64));
660 	if (ret)
661 		goto done;
662 
663 	ret = do_write(fd, &mem_free, sizeof(u64));
664 	if (ret)
665 		goto done;
666 
667 	ret = -1;
668 	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
669 
670 	fp = fopen(str, "r");
671 	if (!fp)
672 		goto done;
673 
674 	if (getline(&buf, &len, fp) <= 0)
675 		goto done;
676 
677 	p = strchr(buf, '\n');
678 	if (p)
679 		*p = '\0';
680 
681 	ret = do_write_string(fd, buf);
682 done:
683 	free(buf);
684 	if (fp)
685 		fclose(fp);
686 	return ret;
687 }
688 
689 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
690 			  struct perf_evlist *evlist __maybe_unused)
691 {
692 	char *buf = NULL;
693 	size_t len = 0;
694 	FILE *fp;
695 	struct cpu_map *node_map = NULL;
696 	char *c;
697 	u32 nr, i, j;
698 	int ret = -1;
699 
700 	fp = fopen("/sys/devices/system/node/online", "r");
701 	if (!fp)
702 		return -1;
703 
704 	if (getline(&buf, &len, fp) <= 0)
705 		goto done;
706 
707 	c = strchr(buf, '\n');
708 	if (c)
709 		*c = '\0';
710 
711 	node_map = cpu_map__new(buf);
712 	if (!node_map)
713 		goto done;
714 
715 	nr = (u32)node_map->nr;
716 
717 	ret = do_write(fd, &nr, sizeof(nr));
718 	if (ret < 0)
719 		goto done;
720 
721 	for (i = 0; i < nr; i++) {
722 		j = (u32)node_map->map[i];
723 		ret = do_write(fd, &j, sizeof(j));
724 		if (ret < 0)
725 			break;
726 
727 		ret = write_topo_node(fd, i);
728 		if (ret < 0)
729 			break;
730 	}
731 done:
732 	free(buf);
733 	fclose(fp);
734 	cpu_map__put(node_map);
735 	return ret;
736 }
737 
738 /*
739  * File format:
740  *
741  * struct pmu_mappings {
742  *	u32	pmu_num;
743  *	struct pmu_map {
744  *		u32	type;
745  *		char	name[];
746  *	}[pmu_num];
747  * };
748  */
749 
750 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
751 			      struct perf_evlist *evlist __maybe_unused)
752 {
753 	struct perf_pmu *pmu = NULL;
754 	off_t offset = lseek(fd, 0, SEEK_CUR);
755 	__u32 pmu_num = 0;
756 	int ret;
757 
758 	/* write real pmu_num later */
759 	ret = do_write(fd, &pmu_num, sizeof(pmu_num));
760 	if (ret < 0)
761 		return ret;
762 
763 	while ((pmu = perf_pmu__scan(pmu))) {
764 		if (!pmu->name)
765 			continue;
766 		pmu_num++;
767 
768 		ret = do_write(fd, &pmu->type, sizeof(pmu->type));
769 		if (ret < 0)
770 			return ret;
771 
772 		ret = do_write_string(fd, pmu->name);
773 		if (ret < 0)
774 			return ret;
775 	}
776 
777 	if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
778 		/* discard all */
779 		lseek(fd, offset, SEEK_SET);
780 		return -1;
781 	}
782 
783 	return 0;
784 }
785 
786 /*
787  * File format:
788  *
789  * struct group_descs {
790  *	u32	nr_groups;
791  *	struct group_desc {
792  *		char	name[];
793  *		u32	leader_idx;
794  *		u32	nr_members;
795  *	}[nr_groups];
796  * };
797  */
798 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
799 			    struct perf_evlist *evlist)
800 {
801 	u32 nr_groups = evlist->nr_groups;
802 	struct perf_evsel *evsel;
803 	int ret;
804 
805 	ret = do_write(fd, &nr_groups, sizeof(nr_groups));
806 	if (ret < 0)
807 		return ret;
808 
809 	evlist__for_each_entry(evlist, evsel) {
810 		if (perf_evsel__is_group_leader(evsel) &&
811 		    evsel->nr_members > 1) {
812 			const char *name = evsel->group_name ?: "{anon_group}";
813 			u32 leader_idx = evsel->idx;
814 			u32 nr_members = evsel->nr_members;
815 
816 			ret = do_write_string(fd, name);
817 			if (ret < 0)
818 				return ret;
819 
820 			ret = do_write(fd, &leader_idx, sizeof(leader_idx));
821 			if (ret < 0)
822 				return ret;
823 
824 			ret = do_write(fd, &nr_members, sizeof(nr_members));
825 			if (ret < 0)
826 				return ret;
827 		}
828 	}
829 	return 0;
830 }
831 
832 /*
833  * default get_cpuid(): nothing gets recorded
834  * actual implementation must be in arch/$(ARCH)/util/header.c
835  */
836 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
837 {
838 	return -1;
839 }
840 
841 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
842 		       struct perf_evlist *evlist __maybe_unused)
843 {
844 	char buffer[64];
845 	int ret;
846 
847 	ret = get_cpuid(buffer, sizeof(buffer));
848 	if (!ret)
849 		goto write_it;
850 
851 	return -1;
852 write_it:
853 	return do_write_string(fd, buffer);
854 }
855 
/*
 * Feature writer for HEADER_BRANCH_STACK: the feature is a pure flag,
 * so there is no payload to write.
 */
static int write_branch_stack(int fd __maybe_unused,
			      struct perf_header *h __maybe_unused,
		       struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
862 
863 static int write_auxtrace(int fd, struct perf_header *h,
864 			  struct perf_evlist *evlist __maybe_unused)
865 {
866 	struct perf_session *session;
867 	int err;
868 
869 	session = container_of(h, struct perf_session, header);
870 
871 	err = auxtrace_index__write(fd, &session->auxtrace_index);
872 	if (err < 0)
873 		pr_err("Failed to write auxtrace index\n");
874 	return err;
875 }
876 
877 static int cpu_cache_level__sort(const void *a, const void *b)
878 {
879 	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
880 	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
881 
882 	return cache_a->level - cache_b->level;
883 }
884 
885 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
886 {
887 	if (a->level != b->level)
888 		return false;
889 
890 	if (a->line_size != b->line_size)
891 		return false;
892 
893 	if (a->sets != b->sets)
894 		return false;
895 
896 	if (a->ways != b->ways)
897 		return false;
898 
899 	if (strcmp(a->type, b->type))
900 		return false;
901 
902 	if (strcmp(a->size, b->size))
903 		return false;
904 
905 	if (strcmp(a->map, b->map))
906 		return false;
907 
908 	return true;
909 }
910 
/*
 * Fill @cache from sysfs for @cpu's cache index @level.  Returns 0 on
 * success, 1 when the index does not exist (end of that CPU's caches),
 * and -1 on read failure.  On success the type/size/map strings are
 * heap-allocated and owned by the caller.
 */
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	/* missing index directory terminates the per-CPU level scan */
	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = rtrim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		free(cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = rtrim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		/*
		 * NOTE(review): this frees cache->map after the very read
		 * that was meant to set it — confirm sysfs__read_str()
		 * leaves/sets *buf on failure before trusting this free.
		 */
		free(cache->map);
		free(cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = rtrim(cache->map);
	return 0;
}
966 
/* Print one cache description, e.g. "L1 Data  32K [0-1]". */
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
971 
/*
 * Scan every configured CPU's cache indexes (up to 10 levels) and
 * collect the distinct cache descriptions into @caches, storing the
 * count in *@cntp.  Duplicate descriptions (caches shared between CPUs)
 * are freed rather than stored.  Returns 0 on success, <0 on read error.
 */
static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			/* err == 1: no more cache indexes for this CPU */
			if (err == 1)
				break;

			/* keep only descriptions we have not seen before */
			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}
1015 
/* upper bound on distinct cache descriptions collected by build_caches() */
#define MAX_CACHES 2000

/*
 * Feature writer for HEADER_CACHE: a u32 version (1) and count, then per
 * cache the four u32 fields (level, line_size, sets, ways) followed by
 * the three strings (type, size, map) in do_write_string() format.
 */
static int write_cache(int fd, struct perf_header *h __maybe_unused,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(fd, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(fd, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		/* write one u32 member, bailing out on error */
		#define _W(v)					\
			ret = do_write(fd, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		/* write one string member, bailing out on error */
		#define _W(v)						\
			ret = do_write_string(fd, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}
1069 
/*
 * Feature writer for HEADER_STAT: the feature is a pure flag marking a
 * perf-stat data file, so there is no payload to write.
 */
static int write_stat(int fd __maybe_unused,
		      struct perf_header *h __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
1076 
/* Print the recorded hostname from the parsed header environment. */
static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
			   FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ph->env.hostname);
}
1082 
/* Print the recorded OS release string. */
static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
			    FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ph->env.os_release);
}
1088 
/* Print the recorded machine architecture string. */
static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ph->env.arch);
}
1093 
/* Print the recorded CPU description (e.g. the cpuinfo model name). */
static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
}
1099 
/* Print the recorded online and available CPU counts. */
static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
			 FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
}
1106 
/* Print the perf version recorded in the file. */
static void print_version(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ph->env.version);
}
1112 
1113 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1114 			  FILE *fp)
1115 {
1116 	int nr, i;
1117 
1118 	nr = ph->env.nr_cmdline;
1119 
1120 	fprintf(fp, "# cmdline : ");
1121 
1122 	for (i = 0; i < nr; i++)
1123 		fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1124 	fputc('\n', fp);
1125 }
1126 
1127 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
1128 			       FILE *fp)
1129 {
1130 	int nr, i;
1131 	char *str;
1132 	int cpu_nr = ph->env.nr_cpus_avail;
1133 
1134 	nr = ph->env.nr_sibling_cores;
1135 	str = ph->env.sibling_cores;
1136 
1137 	for (i = 0; i < nr; i++) {
1138 		fprintf(fp, "# sibling cores   : %s\n", str);
1139 		str += strlen(str) + 1;
1140 	}
1141 
1142 	nr = ph->env.nr_sibling_threads;
1143 	str = ph->env.sibling_threads;
1144 
1145 	for (i = 0; i < nr; i++) {
1146 		fprintf(fp, "# sibling threads : %s\n", str);
1147 		str += strlen(str) + 1;
1148 	}
1149 
1150 	if (ph->env.cpu != NULL) {
1151 		for (i = 0; i < cpu_nr; i++)
1152 			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
1153 				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
1154 	} else
1155 		fprintf(fp, "# Core ID and Socket ID information is not available\n");
1156 }
1157 
1158 static void free_event_desc(struct perf_evsel *events)
1159 {
1160 	struct perf_evsel *evsel;
1161 
1162 	if (!events)
1163 		return;
1164 
1165 	for (evsel = events; evsel->attr.size; evsel++) {
1166 		zfree(&evsel->name);
1167 		zfree(&evsel->id);
1168 	}
1169 
1170 	free(events);
1171 }
1172 
/*
 * Parse a HEADER_EVENT_DESC section (the format written by
 * write_event_desc()) into a calloc'ed array of perf_evsel, terminated
 * by an entry whose attr.size is 0.  Handles on-file attr structs whose
 * size differs from ours and byte-swaps everything when needed.
 * Returns NULL on any read/alloc failure; the caller frees the result
 * with free_event_desc().
 */
static struct perf_evsel *
read_event_desc(struct perf_header *ph, int fd)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	ssize_t ret;
	size_t msz;

	/* number of events */
	ret = readn(fd, &nre, sizeof(nre));
	if (ret != (ssize_t)sizeof(nre))
		goto error;

	if (ph->needs_swap)
		nre = bswap_32(nre);

	ret = readn(fd, &sz, sizeof(sz));
	if (ret != (ssize_t)sizeof(sz))
		goto error;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	/* copy at most our attr size; extra on-file bytes are ignored */
	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		ret = readn(fd, buf, sz);
		if (ret != (ssize_t)sz)
			goto error;

		if (ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		ret = readn(fd, &nr, sizeof(nr));
		if (ret != (ssize_t)sizeof(nr))
			goto error;

		if (ph->needs_swap) {
			nr = bswap_32(nr);
			/* remember: sample data from this file needs swapping too */
			evsel->needs_swap = true;
		}

		evsel->name = do_read_string(fd, ph);

		/* events may legitimately carry no sample ids */
		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			ret = readn(fd, id, sizeof(*id));
			if (ret != (ssize_t)sizeof(*id))
				goto error;
			if (ph->needs_swap)
				*id = bswap_64(*id);
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}
1265 
1266 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
1267 				void *priv __attribute__((unused)))
1268 {
1269 	return fprintf(fp, ", %s = %s", name, val);
1270 }
1271 
/* Print each recorded event: name, sample ids, and the full attr dump. */
static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{
	struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
	u32 j;
	u64 *id;

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->attr.size; evsel++) {
		/* NOTE(review): combined with the ", id = {" below this emits
		 * a doubled comma ("..., , id = {") — cosmetic only */
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
}
1303 
1304 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1305 			    FILE *fp)
1306 {
1307 	fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1308 }
1309 
1310 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
1311 				FILE *fp)
1312 {
1313 	int i;
1314 	struct numa_node *n;
1315 
1316 	for (i = 0; i < ph->env.nr_numa_nodes; i++) {
1317 		n = &ph->env.numa_nodes[i];
1318 
1319 		fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
1320 			    " free = %"PRIu64" kB\n",
1321 			n->node, n->mem_total, n->mem_free);
1322 
1323 		fprintf(fp, "# node%u cpu list : ", n->node);
1324 		cpu_map__fprintf(n->map, fp);
1325 	}
1326 }
1327 
/* Print the recorded arch-specific CPU identifier string. */
static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
}
1332 
/* HEADER_BRANCH_STACK is a flag feature: just announce its presence. */
static void print_branch_stack(struct perf_header *ph __maybe_unused,
			       int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}
1338 
/* Announce that the file contains AUX area trace data. */
static void print_auxtrace(struct perf_header *ph __maybe_unused,
			   int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}
1344 
1345 static void print_stat(struct perf_header *ph __maybe_unused,
1346 		       int fd __maybe_unused, FILE *fp)
1347 {
1348 	fprintf(fp, "# contains stat data\n");
1349 }
1350 
1351 static void print_cache(struct perf_header *ph __maybe_unused,
1352 			int fd __maybe_unused, FILE *fp __maybe_unused)
1353 {
1354 	int i;
1355 
1356 	fprintf(fp, "# CPU cache info:\n");
1357 	for (i = 0; i < ph->env.caches_cnt; i++) {
1358 		fprintf(fp, "#  ");
1359 		cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1360 	}
1361 }
1362 
1363 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1364 			       FILE *fp)
1365 {
1366 	const char *delimiter = "# pmu mappings: ";
1367 	char *str, *tmp;
1368 	u32 pmu_num;
1369 	u32 type;
1370 
1371 	pmu_num = ph->env.nr_pmu_mappings;
1372 	if (!pmu_num) {
1373 		fprintf(fp, "# pmu mappings: not available\n");
1374 		return;
1375 	}
1376 
1377 	str = ph->env.pmu_mappings;
1378 
1379 	while (pmu_num) {
1380 		type = strtoul(str, &tmp, 0);
1381 		if (*tmp != ':')
1382 			goto error;
1383 
1384 		str = tmp + 1;
1385 		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1386 
1387 		delimiter = ", ";
1388 		str += strlen(str) + 1;
1389 		pmu_num--;
1390 	}
1391 
1392 	fprintf(fp, "\n");
1393 
1394 	if (!pmu_num)
1395 		return;
1396 error:
1397 	fprintf(fp, "# pmu mappings: unable to read\n");
1398 }
1399 
/*
 * Pretty-print event groups as "# group: name{leader,member,...}".
 * Relies on evlist ordering: a group leader is immediately followed by
 * its members, so a countdown of remaining members is enough state.
 */
static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
			     FILE *fp)
{
	struct perf_session *session;
	struct perf_evsel *evsel;
	u32 nr = 0;	/* members still to print for the currently open group */

	session = container_of(ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			/* Leader of a multi-member group: open a new line. */
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->nr_members - 1;
		} else if (nr) {
			/* Member of the group opened above. */
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			/* Close the brace after the last member. */
			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}
1424 
/*
 * Attach the build-id carried by 'bev' to the DSO named 'filename' in the
 * machine selected by bev->pid, so symbols can later be resolved against
 * the exact binaries used at record time.  Returns 0 on success, -1 when
 * the machine cannot be created or the cpumode is unknown.
 */
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	/* bev->pid distinguishes the host machine from guest machines. */
	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		/* Unknown cpumode: reject the event. */
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		/*
		 * Kernel modules keep their existing dso->kernel setting;
		 * everything else gets the type derived from cpumode.
		 */
		if (!is_kernel_module(filename, cpumode))
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		/* Drop the reference taken by machine__findnew_dso(). */
		dso__put(dso);
	}

	/* A DSO lookup failure is tolerated: still report success. */
	err = 0;
out:
	return err;
}
1476 
/*
 * Re-read the build-id table assuming the pre-a1645ce1 on-disk layout,
 * which lacked the 'pid' field.  The pid is reconstructed from the
 * header.misc cpumode so __event_process_build_id() can pick the right
 * (host vs. guest) machine.  Returns 0 on success, -1 on read error.
 */
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		/*
		 * NOTE(review): len comes straight from the on-file
		 * header.size and is not validated against PATH_MAX here —
		 * assumes a well-formed perf.data file.
		 */
		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value give us nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}
1522 
/*
 * Parse the HEADER_BUILD_ID section: a sequence of build_id_event records,
 * each followed by the path of the file it describes.  Falls back to the
 * legacy (pid-less) layout when the well-known kernel entry betrays the
 * 4-byte field shift.  Returns 0 on success, -1 on read failure.
 */
static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		/*
		 * NOTE(review): len is derived from the on-file header.size
		 * with no bound check against PATH_MAX — assumes a
		 * well-formed perf.data file.
		 */
		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			/* Rewind and reparse the whole table in old format. */
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}
1571 
1572 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1573 				struct perf_header *ph __maybe_unused,
1574 				int fd, void *data)
1575 {
1576 	ssize_t ret = trace_report(fd, data, false);
1577 	return ret < 0 ? -1 : 0;
1578 }
1579 
1580 static int process_build_id(struct perf_file_section *section,
1581 			    struct perf_header *ph, int fd,
1582 			    void *data __maybe_unused)
1583 {
1584 	if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1585 		pr_debug("Failed to read buildids, continuing...\n");
1586 	return 0;
1587 }
1588 
1589 static int process_hostname(struct perf_file_section *section __maybe_unused,
1590 			    struct perf_header *ph, int fd,
1591 			    void *data __maybe_unused)
1592 {
1593 	ph->env.hostname = do_read_string(fd, ph);
1594 	return ph->env.hostname ? 0 : -ENOMEM;
1595 }
1596 
1597 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1598 			     struct perf_header *ph, int fd,
1599 			     void *data __maybe_unused)
1600 {
1601 	ph->env.os_release = do_read_string(fd, ph);
1602 	return ph->env.os_release ? 0 : -ENOMEM;
1603 }
1604 
1605 static int process_version(struct perf_file_section *section __maybe_unused,
1606 			   struct perf_header *ph, int fd,
1607 			   void *data __maybe_unused)
1608 {
1609 	ph->env.version = do_read_string(fd, ph);
1610 	return ph->env.version ? 0 : -ENOMEM;
1611 }
1612 
1613 static int process_arch(struct perf_file_section *section __maybe_unused,
1614 			struct perf_header *ph,	int fd,
1615 			void *data __maybe_unused)
1616 {
1617 	ph->env.arch = do_read_string(fd, ph);
1618 	return ph->env.arch ? 0 : -ENOMEM;
1619 }
1620 
1621 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1622 			  struct perf_header *ph, int fd,
1623 			  void *data __maybe_unused)
1624 {
1625 	ssize_t ret;
1626 	u32 nr;
1627 
1628 	ret = readn(fd, &nr, sizeof(nr));
1629 	if (ret != sizeof(nr))
1630 		return -1;
1631 
1632 	if (ph->needs_swap)
1633 		nr = bswap_32(nr);
1634 
1635 	ph->env.nr_cpus_avail = nr;
1636 
1637 	ret = readn(fd, &nr, sizeof(nr));
1638 	if (ret != sizeof(nr))
1639 		return -1;
1640 
1641 	if (ph->needs_swap)
1642 		nr = bswap_32(nr);
1643 
1644 	ph->env.nr_cpus_online = nr;
1645 	return 0;
1646 }
1647 
1648 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1649 			   struct perf_header *ph, int fd,
1650 			   void *data __maybe_unused)
1651 {
1652 	ph->env.cpu_desc = do_read_string(fd, ph);
1653 	return ph->env.cpu_desc ? 0 : -ENOMEM;
1654 }
1655 
1656 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1657 			 struct perf_header *ph,  int fd,
1658 			 void *data __maybe_unused)
1659 {
1660 	ph->env.cpuid = do_read_string(fd, ph);
1661 	return ph->env.cpuid ? 0 : -ENOMEM;
1662 }
1663 
1664 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1665 			     struct perf_header *ph, int fd,
1666 			     void *data __maybe_unused)
1667 {
1668 	uint64_t mem;
1669 	ssize_t ret;
1670 
1671 	ret = readn(fd, &mem, sizeof(mem));
1672 	if (ret != sizeof(mem))
1673 		return -1;
1674 
1675 	if (ph->needs_swap)
1676 		mem = bswap_64(mem);
1677 
1678 	ph->env.total_mem = mem;
1679 	return 0;
1680 }
1681 
1682 static struct perf_evsel *
1683 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1684 {
1685 	struct perf_evsel *evsel;
1686 
1687 	evlist__for_each_entry(evlist, evsel) {
1688 		if (evsel->idx == idx)
1689 			return evsel;
1690 	}
1691 
1692 	return NULL;
1693 }
1694 
1695 static void
1696 perf_evlist__set_event_name(struct perf_evlist *evlist,
1697 			    struct perf_evsel *event)
1698 {
1699 	struct perf_evsel *evsel;
1700 
1701 	if (!event->name)
1702 		return;
1703 
1704 	evsel = perf_evlist__find_by_index(evlist, event->idx);
1705 	if (!evsel)
1706 		return;
1707 
1708 	if (evsel->name)
1709 		return;
1710 
1711 	evsel->name = strdup(event->name);
1712 }
1713 
1714 static int
1715 process_event_desc(struct perf_file_section *section __maybe_unused,
1716 		   struct perf_header *header, int fd,
1717 		   void *data __maybe_unused)
1718 {
1719 	struct perf_session *session;
1720 	struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1721 
1722 	if (!events)
1723 		return 0;
1724 
1725 	session = container_of(header, struct perf_session, header);
1726 	for (evsel = events; evsel->attr.size; evsel++)
1727 		perf_evlist__set_event_name(session->evlist, evsel);
1728 
1729 	free_event_desc(events);
1730 
1731 	return 0;
1732 }
1733 
/*
 * Read the recorded command line: a u32 argument count followed by that
 * many NUL-terminated strings.  Builds both a flat backing buffer
 * (env.cmdline) and an argv-style pointer array (env.cmdline_argv)
 * pointing into it.  Returns 0 on success, -1 on failure.
 */
static int process_cmdline(struct perf_file_section *section,
			   struct perf_header *ph, int fd,
			   void *data __maybe_unused)
{
	ssize_t ret;
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;	/* len: bytes consumed in the flat buffer */

	ret = readn(fd, &nr, sizeof(nr));
	if (ret != sizeof(nr))
		return -1;

	if (ph->needs_swap)
		nr = bswap_32(nr);

	ph->env.nr_cmdline = nr;

	/*
	 * NOTE(review): assumes the concatenated strings fit within
	 * section->size + nr + 1 bytes — relies on a well-formed file,
	 * there is no per-copy bound check below.
	 */
	cmdline = zalloc(section->size + nr + 1);
	if (!cmdline)
		return -1;

	argv = zalloc(sizeof(char *) * (nr + 1));
	if (!argv)
		goto error;

	for (i = 0; i < nr; i++) {
		str = do_read_string(fd, ph);
		if (!str)
			goto error;

		/* Copy into the flat buffer, record its start in argv. */
		argv[i] = cmdline + len;
		memcpy(argv[i], str, strlen(str) + 1);
		len += strlen(str) + 1;
		free(str);
	}
	ph->env.cmdline = cmdline;
	ph->env.cmdline_argv = (const char **) argv;
	return 0;

error:
	free(argv);
	free(cmdline);
	return -1;
}
1778 
1779 static int process_cpu_topology(struct perf_file_section *section,
1780 				struct perf_header *ph, int fd,
1781 				void *data __maybe_unused)
1782 {
1783 	ssize_t ret;
1784 	u32 nr, i;
1785 	char *str;
1786 	struct strbuf sb;
1787 	int cpu_nr = ph->env.nr_cpus_avail;
1788 	u64 size = 0;
1789 
1790 	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1791 	if (!ph->env.cpu)
1792 		return -1;
1793 
1794 	ret = readn(fd, &nr, sizeof(nr));
1795 	if (ret != sizeof(nr))
1796 		goto free_cpu;
1797 
1798 	if (ph->needs_swap)
1799 		nr = bswap_32(nr);
1800 
1801 	ph->env.nr_sibling_cores = nr;
1802 	size += sizeof(u32);
1803 	if (strbuf_init(&sb, 128) < 0)
1804 		goto free_cpu;
1805 
1806 	for (i = 0; i < nr; i++) {
1807 		str = do_read_string(fd, ph);
1808 		if (!str)
1809 			goto error;
1810 
1811 		/* include a NULL character at the end */
1812 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1813 			goto error;
1814 		size += string_size(str);
1815 		free(str);
1816 	}
1817 	ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1818 
1819 	ret = readn(fd, &nr, sizeof(nr));
1820 	if (ret != sizeof(nr))
1821 		return -1;
1822 
1823 	if (ph->needs_swap)
1824 		nr = bswap_32(nr);
1825 
1826 	ph->env.nr_sibling_threads = nr;
1827 	size += sizeof(u32);
1828 
1829 	for (i = 0; i < nr; i++) {
1830 		str = do_read_string(fd, ph);
1831 		if (!str)
1832 			goto error;
1833 
1834 		/* include a NULL character at the end */
1835 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1836 			goto error;
1837 		size += string_size(str);
1838 		free(str);
1839 	}
1840 	ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1841 
1842 	/*
1843 	 * The header may be from old perf,
1844 	 * which doesn't include core id and socket id information.
1845 	 */
1846 	if (section->size <= size) {
1847 		zfree(&ph->env.cpu);
1848 		return 0;
1849 	}
1850 
1851 	for (i = 0; i < (u32)cpu_nr; i++) {
1852 		ret = readn(fd, &nr, sizeof(nr));
1853 		if (ret != sizeof(nr))
1854 			goto free_cpu;
1855 
1856 		if (ph->needs_swap)
1857 			nr = bswap_32(nr);
1858 
1859 		ph->env.cpu[i].core_id = nr;
1860 
1861 		ret = readn(fd, &nr, sizeof(nr));
1862 		if (ret != sizeof(nr))
1863 			goto free_cpu;
1864 
1865 		if (ph->needs_swap)
1866 			nr = bswap_32(nr);
1867 
1868 		if (nr != (u32)-1 && nr > (u32)cpu_nr) {
1869 			pr_debug("socket_id number is too big."
1870 				 "You may need to upgrade the perf tool.\n");
1871 			goto free_cpu;
1872 		}
1873 
1874 		ph->env.cpu[i].socket_id = nr;
1875 	}
1876 
1877 	return 0;
1878 
1879 error:
1880 	strbuf_release(&sb);
1881 free_cpu:
1882 	zfree(&ph->env.cpu);
1883 	return -1;
1884 }
1885 
1886 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1887 				 struct perf_header *ph, int fd,
1888 				 void *data __maybe_unused)
1889 {
1890 	struct numa_node *nodes, *n;
1891 	ssize_t ret;
1892 	u32 nr, i;
1893 	char *str;
1894 
1895 	/* nr nodes */
1896 	ret = readn(fd, &nr, sizeof(nr));
1897 	if (ret != sizeof(nr))
1898 		return -1;
1899 
1900 	if (ph->needs_swap)
1901 		nr = bswap_32(nr);
1902 
1903 	nodes = zalloc(sizeof(*nodes) * nr);
1904 	if (!nodes)
1905 		return -ENOMEM;
1906 
1907 	for (i = 0; i < nr; i++) {
1908 		n = &nodes[i];
1909 
1910 		/* node number */
1911 		ret = readn(fd, &n->node, sizeof(u32));
1912 		if (ret != sizeof(n->node))
1913 			goto error;
1914 
1915 		ret = readn(fd, &n->mem_total, sizeof(u64));
1916 		if (ret != sizeof(u64))
1917 			goto error;
1918 
1919 		ret = readn(fd, &n->mem_free, sizeof(u64));
1920 		if (ret != sizeof(u64))
1921 			goto error;
1922 
1923 		if (ph->needs_swap) {
1924 			n->node      = bswap_32(n->node);
1925 			n->mem_total = bswap_64(n->mem_total);
1926 			n->mem_free  = bswap_64(n->mem_free);
1927 		}
1928 
1929 		str = do_read_string(fd, ph);
1930 		if (!str)
1931 			goto error;
1932 
1933 		n->map = cpu_map__new(str);
1934 		if (!n->map)
1935 			goto error;
1936 
1937 		free(str);
1938 	}
1939 	ph->env.nr_numa_nodes = nr;
1940 	ph->env.numa_nodes = nodes;
1941 	return 0;
1942 
1943 error:
1944 	free(nodes);
1945 	return -1;
1946 }
1947 
1948 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1949 				struct perf_header *ph, int fd,
1950 				void *data __maybe_unused)
1951 {
1952 	ssize_t ret;
1953 	char *name;
1954 	u32 pmu_num;
1955 	u32 type;
1956 	struct strbuf sb;
1957 
1958 	ret = readn(fd, &pmu_num, sizeof(pmu_num));
1959 	if (ret != sizeof(pmu_num))
1960 		return -1;
1961 
1962 	if (ph->needs_swap)
1963 		pmu_num = bswap_32(pmu_num);
1964 
1965 	if (!pmu_num) {
1966 		pr_debug("pmu mappings not available\n");
1967 		return 0;
1968 	}
1969 
1970 	ph->env.nr_pmu_mappings = pmu_num;
1971 	if (strbuf_init(&sb, 128) < 0)
1972 		return -1;
1973 
1974 	while (pmu_num) {
1975 		if (readn(fd, &type, sizeof(type)) != sizeof(type))
1976 			goto error;
1977 		if (ph->needs_swap)
1978 			type = bswap_32(type);
1979 
1980 		name = do_read_string(fd, ph);
1981 		if (!name)
1982 			goto error;
1983 
1984 		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
1985 			goto error;
1986 		/* include a NULL character at the end */
1987 		if (strbuf_add(&sb, "", 1) < 0)
1988 			goto error;
1989 
1990 		if (!strcmp(name, "msr"))
1991 			ph->env.msr_pmu_type = type;
1992 
1993 		free(name);
1994 		pmu_num--;
1995 	}
1996 	ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
1997 	return 0;
1998 
1999 error:
2000 	strbuf_release(&sb);
2001 	return -1;
2002 }
2003 
2004 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2005 			      struct perf_header *ph, int fd,
2006 			      void *data __maybe_unused)
2007 {
2008 	size_t ret = -1;
2009 	u32 i, nr, nr_groups;
2010 	struct perf_session *session;
2011 	struct perf_evsel *evsel, *leader = NULL;
2012 	struct group_desc {
2013 		char *name;
2014 		u32 leader_idx;
2015 		u32 nr_members;
2016 	} *desc;
2017 
2018 	if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2019 		return -1;
2020 
2021 	if (ph->needs_swap)
2022 		nr_groups = bswap_32(nr_groups);
2023 
2024 	ph->env.nr_groups = nr_groups;
2025 	if (!nr_groups) {
2026 		pr_debug("group desc not available\n");
2027 		return 0;
2028 	}
2029 
2030 	desc = calloc(nr_groups, sizeof(*desc));
2031 	if (!desc)
2032 		return -1;
2033 
2034 	for (i = 0; i < nr_groups; i++) {
2035 		desc[i].name = do_read_string(fd, ph);
2036 		if (!desc[i].name)
2037 			goto out_free;
2038 
2039 		if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2040 			goto out_free;
2041 
2042 		if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2043 			goto out_free;
2044 
2045 		if (ph->needs_swap) {
2046 			desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2047 			desc[i].nr_members = bswap_32(desc[i].nr_members);
2048 		}
2049 	}
2050 
2051 	/*
2052 	 * Rebuild group relationship based on the group_desc
2053 	 */
2054 	session = container_of(ph, struct perf_session, header);
2055 	session->evlist->nr_groups = nr_groups;
2056 
2057 	i = nr = 0;
2058 	evlist__for_each_entry(session->evlist, evsel) {
2059 		if (evsel->idx == (int) desc[i].leader_idx) {
2060 			evsel->leader = evsel;
2061 			/* {anon_group} is a dummy name */
2062 			if (strcmp(desc[i].name, "{anon_group}")) {
2063 				evsel->group_name = desc[i].name;
2064 				desc[i].name = NULL;
2065 			}
2066 			evsel->nr_members = desc[i].nr_members;
2067 
2068 			if (i >= nr_groups || nr > 0) {
2069 				pr_debug("invalid group desc\n");
2070 				goto out_free;
2071 			}
2072 
2073 			leader = evsel;
2074 			nr = evsel->nr_members - 1;
2075 			i++;
2076 		} else if (nr) {
2077 			/* This is a group member */
2078 			evsel->leader = leader;
2079 
2080 			nr--;
2081 		}
2082 	}
2083 
2084 	if (i != nr_groups || nr != 0) {
2085 		pr_debug("invalid group desc\n");
2086 		goto out_free;
2087 	}
2088 
2089 	ret = 0;
2090 out_free:
2091 	for (i = 0; i < nr_groups; i++)
2092 		zfree(&desc[i].name);
2093 	free(desc);
2094 
2095 	return ret;
2096 }
2097 
2098 static int process_auxtrace(struct perf_file_section *section,
2099 			    struct perf_header *ph, int fd,
2100 			    void *data __maybe_unused)
2101 {
2102 	struct perf_session *session;
2103 	int err;
2104 
2105 	session = container_of(ph, struct perf_session, header);
2106 
2107 	err = auxtrace_index__process(fd, section->size, session,
2108 				      ph->needs_swap);
2109 	if (err < 0)
2110 		pr_err("Failed to process auxtrace index\n");
2111 	return err;
2112 }
2113 
2114 static int process_cache(struct perf_file_section *section __maybe_unused,
2115 			 struct perf_header *ph __maybe_unused, int fd __maybe_unused,
2116 			 void *data __maybe_unused)
2117 {
2118 	struct cpu_cache_level *caches;
2119 	u32 cnt, i, version;
2120 
2121 	if (readn(fd, &version, sizeof(version)) != sizeof(version))
2122 		return -1;
2123 
2124 	if (ph->needs_swap)
2125 		version = bswap_32(version);
2126 
2127 	if (version != 1)
2128 		return -1;
2129 
2130 	if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2131 		return -1;
2132 
2133 	if (ph->needs_swap)
2134 		cnt = bswap_32(cnt);
2135 
2136 	caches = zalloc(sizeof(*caches) * cnt);
2137 	if (!caches)
2138 		return -1;
2139 
2140 	for (i = 0; i < cnt; i++) {
2141 		struct cpu_cache_level c;
2142 
2143 		#define _R(v)						\
2144 			if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2145 				goto out_free_caches;			\
2146 			if (ph->needs_swap)				\
2147 				c.v = bswap_32(c.v);			\
2148 
2149 		_R(level)
2150 		_R(line_size)
2151 		_R(sets)
2152 		_R(ways)
2153 		#undef _R
2154 
2155 		#define _R(v)				\
2156 			c.v = do_read_string(fd, ph);	\
2157 			if (!c.v)			\
2158 				goto out_free_caches;
2159 
2160 		_R(type)
2161 		_R(size)
2162 		_R(map)
2163 		#undef _R
2164 
2165 		caches[i] = c;
2166 	}
2167 
2168 	ph->env.caches = caches;
2169 	ph->env.caches_cnt = cnt;
2170 	return 0;
2171 out_free_caches:
2172 	free(caches);
2173 	return -1;
2174 }
2175 
/*
 * Per-feature vtable: how to write a feature section at record time,
 * pretty-print it for 'perf report --header', and parse it back when
 * reading a perf.data file.
 */
struct feature_ops {
	int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
	void (*print)(struct perf_header *h, int fd, FILE *fp);
	int (*process)(struct perf_file_section *section,
		       struct perf_header *h, int fd, void *data);
	const char *name;
	bool full_only; /* only printed in the extended (-I) listing */
};
2184 
/* FEAT_OPA: write + print only; the feature has no reader. */
#define FEAT_OPA(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func }
/* FEAT_OPP: additionally wires a process_##func reader. */
#define FEAT_OPP(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func }
/* FEAT_OPF: like FEAT_OPP but printed only in the full (-I) listing. */
#define FEAT_OPF(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func, .full_only = true }

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

/* Dispatch table indexed by the HEADER_* feature bit. */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPP(HEADER_TRACING_DATA,	tracing_data),
	FEAT_OPP(HEADER_BUILD_ID,	build_id),
	FEAT_OPP(HEADER_HOSTNAME,	hostname),
	FEAT_OPP(HEADER_OSRELEASE,	osrelease),
	FEAT_OPP(HEADER_VERSION,	version),
	FEAT_OPP(HEADER_ARCH,		arch),
	FEAT_OPP(HEADER_NRCPUS,		nrcpus),
	FEAT_OPP(HEADER_CPUDESC,	cpudesc),
	FEAT_OPP(HEADER_CPUID,		cpuid),
	FEAT_OPP(HEADER_TOTAL_MEM,	total_mem),
	FEAT_OPP(HEADER_EVENT_DESC,	event_desc),
	FEAT_OPP(HEADER_CMDLINE,	cmdline),
	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
	FEAT_OPA(HEADER_BRANCH_STACK,	branch_stack),
	FEAT_OPP(HEADER_PMU_MAPPINGS,	pmu_mappings),
	FEAT_OPP(HEADER_GROUP_DESC,	group_desc),
	FEAT_OPP(HEADER_AUXTRACE,	auxtrace),
	FEAT_OPA(HEADER_STAT,		stat),
	FEAT_OPF(HEADER_CACHE,		cache),
};
2220 
/* Context threaded through perf_file_section__fprintf_info(). */
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};
2225 
/*
 * Section-walk callback: seek to a feature section and pretty-print it
 * via its feat_ops print hook.  All failures are reported and tolerated
 * (always returns 0) so one bad section doesn't abort the whole listing.
 */
static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
				"%d, continuing...\n", section->offset, feat);
		return 0;
	}
	/* Features newer than this perf knows about: skip, don't fail. */
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	/* full_only features are hidden unless -I was given. */
	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(ph, fd, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}
2252 
/*
 * Print all header/feature information for 'perf report --header'.
 * 'full' also shows the features marked full_only.  Returns 0 on
 * success, -1 if the file cannot be fstat'ed.
 */
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data_file__fd(session->file);
	struct stat st;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	/* ctime() supplies the trailing newline. */
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	/* A pipe has no feature sections, so nothing can be "missing". */
	if (session->file->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		/*
		 * Skip bit 0 — presumably a reserved feature with no name
		 * in feat_ops; confirm against the HEADER_* enum.
		 */
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}
2285 
/*
 * Write one feature section via its feat_ops write hook and record its
 * file offset/size into *p, advancing the section cursor on success.
 * On failure the file position is rewound so nothing partial remains.
 * Returns 0 on success (or when the feature isn't set), -1 on error.
 */
static int do_write_feat(int fd, struct perf_header *h, int type,
			 struct perf_file_section **p,
			 struct perf_evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(h, type)) {
		if (!feat_ops[type].write)
			return -1;

		/* Section payload starts at the current file position. */
		(*p)->offset = lseek(fd, 0, SEEK_CUR);

		err = feat_ops[type].write(fd, h, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %s\n", feat_ops[type].name);

			/* undo anything written */
			lseek(fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}
2313 
/*
 * Write every enabled feature section, then write the section descriptor
 * table (offset/size pairs) at header->feat_offset.  The payloads are
 * written first, after the space reserved for the table, so the
 * descriptors are known by the time the table itself is written.
 * A feature whose write fails is simply cleared, not treated as fatal.
 * Returns 0 on success, a negative value on error.
 */
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	/* Skip over the descriptor table; payloads go after it. */
	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(fd, header, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}
2353 
2354 int perf_header__write_pipe(int fd)
2355 {
2356 	struct perf_pipe_file_header f_header;
2357 	int err;
2358 
2359 	f_header = (struct perf_pipe_file_header){
2360 		.magic	   = PERF_MAGIC,
2361 		.size	   = sizeof(f_header),
2362 	};
2363 
2364 	err = do_write(fd, &f_header, sizeof(f_header));
2365 	if (err < 0) {
2366 		pr_debug("failed to write perf pipe header\n");
2367 		return err;
2368 	}
2369 
2370 	return 0;
2371 }
2372 
/*
 * Write the perf.data file header.  Layout, in write order: the fixed
 * perf_file_header (written last, by seeking back to offset 0), the
 * per-evsel sample-id arrays, the perf_file_attr table, the data area,
 * and — when at_exit — the feature sections after the data.  Returns 0
 * on success, a negative value on failure.
 */
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *evsel;
	u64 attr_offset;
	int err;

	/* Leave room for the fixed header; it is filled in at the end. */
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		/* Remember where this evsel's id array lands. */
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		/* Each attr entry points back at its id array above. */
		f_attr = (struct perf_file_attr){
			.attr = evsel->attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	/* Keep a previously established data offset (rewrite case). */
	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	/* Feature sections are only complete when the session ends. */
	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	/* Now that every offset is known, write the fixed header. */
	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	/* Leave the file position at the end of the data area. */
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
2449 
2450 static int perf_header__getbuffer64(struct perf_header *header,
2451 				    int fd, void *buf, size_t size)
2452 {
2453 	if (readn(fd, buf, size) <= 0)
2454 		return -1;
2455 
2456 	if (header->needs_swap)
2457 		mem_bswap_64(buf, size);
2458 
2459 	return 0;
2460 }
2461 
/*
 * Read the feature-section descriptor table at header->feat_offset and
 * invoke 'process' once per enabled feature, in feature-bit order, with
 * the matching descriptor.  Stops at the first callback error.  Returns
 * 0 on success, a negative value on failure.
 */
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	/* The descriptor table itself is u64 data; swap if needed. */
	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	/* Descriptors appear in ascending feature-bit order. */
	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}
2500 
/*
 * perf_event_attr size for each historical ABI revision; the trailing
 * zero terminates the probe loop in try_all_file_abis().
 */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};
2509 
2510 /*
2511  * In the legacy file format, the magic number is not used to encode endianness.
2512  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
2513  * on ABI revisions, we need to try all combinations for all endianness to
2514  * detect the endianness.
2515  */
2516 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2517 {
2518 	uint64_t ref_size, attr_size;
2519 	int i;
2520 
2521 	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2522 		ref_size = attr_file_abi_sizes[i]
2523 			 + sizeof(struct perf_file_section);
2524 		if (hdr_sz != ref_size) {
2525 			attr_size = bswap_64(hdr_sz);
2526 			if (attr_size != ref_size)
2527 				continue;
2528 
2529 			ph->needs_swap = true;
2530 		}
2531 		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2532 			 i,
2533 			 ph->needs_swap);
2534 		return 0;
2535 	}
2536 	/* could not determine endianness */
2537 	return -1;
2538 }
2539 
/* size of the original pipe header (ABI0): two u64s, magic + size */
#define PERF_PIPE_HDR_VER0	16

/* known pipe-header sizes per ABI revision; zero-terminated probe list */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
2546 
2547 /*
2548  * In the legacy pipe format, there is an implicit assumption that endiannesss
2549  * between host recording the samples, and host parsing the samples is the
2550  * same. This is not always the case given that the pipe output may always be
2551  * redirected into a file and analyzed on a different machine with possibly a
2552  * different endianness and perf_event ABI revsions in the perf tool itself.
2553  */
2554 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2555 {
2556 	u64 attr_size;
2557 	int i;
2558 
2559 	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2560 		if (hdr_sz != attr_pipe_abi_sizes[i]) {
2561 			attr_size = bswap_64(hdr_sz);
2562 			if (attr_size != hdr_sz)
2563 				continue;
2564 
2565 			ph->needs_swap = true;
2566 		}
2567 		pr_debug("Pipe ABI%d perf.data file detected\n", i);
2568 		return 0;
2569 	}
2570 	return -1;
2571 }
2572 
2573 bool is_perf_magic(u64 magic)
2574 {
2575 	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2576 		|| magic == __perf_magic2
2577 		|| magic == __perf_magic2_sw)
2578 		return true;
2579 
2580 	return false;
2581 }
2582 
2583 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
2584 			      bool is_pipe, struct perf_header *ph)
2585 {
2586 	int ret;
2587 
2588 	/* check for legacy format */
2589 	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
2590 	if (ret == 0) {
2591 		ph->version = PERF_HEADER_VERSION_1;
2592 		pr_debug("legacy perf.data format\n");
2593 		if (is_pipe)
2594 			return try_all_pipe_abis(hdr_sz, ph);
2595 
2596 		return try_all_file_abis(hdr_sz, ph);
2597 	}
2598 	/*
2599 	 * the new magic number serves two purposes:
2600 	 * - unique number to identify actual perf.data files
2601 	 * - encode endianness of file
2602 	 */
2603 	ph->version = PERF_HEADER_VERSION_2;
2604 
2605 	/* check magic number with one endianness */
2606 	if (magic == __perf_magic2)
2607 		return 0;
2608 
2609 	/* check magic number with opposite endianness */
2610 	if (magic != __perf_magic2_sw)
2611 		return -1;
2612 
2613 	ph->needs_swap = true;
2614 
2615 	return 0;
2616 }
2617 
/*
 * Read and validate the on-disk file header at the start of @fd, filling
 * in the in-memory state (@ph): endianness, version, feature bitmap and
 * data-section geometry.  Returns 0 on success, -1 on read failure or
 * unrecognized format.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	/* sets ph->version and ph->needs_swap from the magic */
	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		/* swap all u64 fields up to (not including) adds_features */
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* neither guess worked: fall back to build-id only */
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	/* feature sections are laid out right after the data section */
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}
2689 
2690 static int perf_file_section__process(struct perf_file_section *section,
2691 				      struct perf_header *ph,
2692 				      int feat, int fd, void *data)
2693 {
2694 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2695 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2696 			  "%d, continuing...\n", section->offset, feat);
2697 		return 0;
2698 	}
2699 
2700 	if (feat >= HEADER_LAST_FEATURE) {
2701 		pr_debug("unknown feature %d, continuing...\n", feat);
2702 		return 0;
2703 	}
2704 
2705 	if (!feat_ops[feat].process)
2706 		return 0;
2707 
2708 	return feat_ops[feat].process(section, ph, fd, data);
2709 }
2710 
2711 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2712 				       struct perf_header *ph, int fd,
2713 				       bool repipe)
2714 {
2715 	ssize_t ret;
2716 
2717 	ret = readn(fd, header, sizeof(*header));
2718 	if (ret <= 0)
2719 		return -1;
2720 
2721 	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2722 		pr_debug("endian/magic failed\n");
2723 		return -1;
2724 	}
2725 
2726 	if (ph->needs_swap)
2727 		header->size = bswap_64(header->size);
2728 
2729 	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2730 		return -1;
2731 
2732 	return 0;
2733 }
2734 
2735 static int perf_header__read_pipe(struct perf_session *session)
2736 {
2737 	struct perf_header *header = &session->header;
2738 	struct perf_pipe_file_header f_header;
2739 
2740 	if (perf_file_header__read_pipe(&f_header, header,
2741 					perf_data_file__fd(session->file),
2742 					session->repipe) < 0) {
2743 		pr_debug("incompatible file format\n");
2744 		return -EINVAL;
2745 	}
2746 
2747 	return 0;
2748 }
2749 
2750 static int read_attr(int fd, struct perf_header *ph,
2751 		     struct perf_file_attr *f_attr)
2752 {
2753 	struct perf_event_attr *attr = &f_attr->attr;
2754 	size_t sz, left;
2755 	size_t our_sz = sizeof(f_attr->attr);
2756 	ssize_t ret;
2757 
2758 	memset(f_attr, 0, sizeof(*f_attr));
2759 
2760 	/* read minimal guaranteed structure */
2761 	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2762 	if (ret <= 0) {
2763 		pr_debug("cannot read %d bytes of header attr\n",
2764 			 PERF_ATTR_SIZE_VER0);
2765 		return -1;
2766 	}
2767 
2768 	/* on file perf_event_attr size */
2769 	sz = attr->size;
2770 
2771 	if (ph->needs_swap)
2772 		sz = bswap_32(sz);
2773 
2774 	if (sz == 0) {
2775 		/* assume ABI0 */
2776 		sz =  PERF_ATTR_SIZE_VER0;
2777 	} else if (sz > our_sz) {
2778 		pr_debug("file uses a more recent and unsupported ABI"
2779 			 " (%zu bytes extra)\n", sz - our_sz);
2780 		return -1;
2781 	}
2782 	/* what we have not yet read and that we know about */
2783 	left = sz - PERF_ATTR_SIZE_VER0;
2784 	if (left) {
2785 		void *ptr = attr;
2786 		ptr += PERF_ATTR_SIZE_VER0;
2787 
2788 		ret = readn(fd, ptr, left);
2789 	}
2790 	/* read perf_file_section, ids are read in caller */
2791 	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2792 
2793 	return ret <= 0 ? -1 : 0;
2794 }
2795 
2796 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2797 						struct pevent *pevent)
2798 {
2799 	struct event_format *event;
2800 	char bf[128];
2801 
2802 	/* already prepared */
2803 	if (evsel->tp_format)
2804 		return 0;
2805 
2806 	if (pevent == NULL) {
2807 		pr_debug("broken or missing trace data\n");
2808 		return -1;
2809 	}
2810 
2811 	event = pevent_find_event(pevent, evsel->attr.config);
2812 	if (event == NULL) {
2813 		pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
2814 		return -1;
2815 	}
2816 
2817 	if (!evsel->name) {
2818 		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2819 		evsel->name = strdup(bf);
2820 		if (evsel->name == NULL)
2821 			return -1;
2822 	}
2823 
2824 	evsel->tp_format = event;
2825 	return 0;
2826 }
2827 
2828 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2829 						  struct pevent *pevent)
2830 {
2831 	struct perf_evsel *pos;
2832 
2833 	evlist__for_each_entry(evlist, pos) {
2834 		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2835 		    perf_evsel__prepare_tracepoint_event(pos, pevent))
2836 			return -1;
2837 	}
2838 
2839 	return 0;
2840 }
2841 
/*
 * Top-level reader for a perf.data header: creates the session evlist
 * from the on-file attrs and their sample ids, then walks the optional
 * feature sections.  Returns 0 or a negative errno value.
 */
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data_file *file = session->file;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data_file__fd(file);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	/* pipe mode uses a small streaming header of its own */
	if (perf_data_file__is_pipe(file))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information.  Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   file->path);
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		/* remember our place: the id table lives elsewhere in the file */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		/* seek back to the next attr in the attr section */
		lseek(fd, tmp, SEEK_SET);
	}

	symbol_conf.nr_events = nr_attrs;

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
2944 
2945 int perf_event__synthesize_attr(struct perf_tool *tool,
2946 				struct perf_event_attr *attr, u32 ids, u64 *id,
2947 				perf_event__handler_t process)
2948 {
2949 	union perf_event *ev;
2950 	size_t size;
2951 	int err;
2952 
2953 	size = sizeof(struct perf_event_attr);
2954 	size = PERF_ALIGN(size, sizeof(u64));
2955 	size += sizeof(struct perf_event_header);
2956 	size += ids * sizeof(u64);
2957 
2958 	ev = malloc(size);
2959 
2960 	if (ev == NULL)
2961 		return -ENOMEM;
2962 
2963 	ev->attr.attr = *attr;
2964 	memcpy(ev->attr.id, id, ids * sizeof(u64));
2965 
2966 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2967 	ev->attr.header.size = (u16)size;
2968 
2969 	if (ev->attr.header.size == size)
2970 		err = process(tool, ev, NULL, NULL);
2971 	else
2972 		err = -E2BIG;
2973 
2974 	free(ev);
2975 
2976 	return err;
2977 }
2978 
2979 static struct event_update_event *
2980 event_update_event__new(size_t size, u64 type, u64 id)
2981 {
2982 	struct event_update_event *ev;
2983 
2984 	size += sizeof(*ev);
2985 	size  = PERF_ALIGN(size, sizeof(u64));
2986 
2987 	ev = zalloc(size);
2988 	if (ev) {
2989 		ev->header.type = PERF_RECORD_EVENT_UPDATE;
2990 		ev->header.size = (u16)size;
2991 		ev->type = type;
2992 		ev->id = id;
2993 	}
2994 	return ev;
2995 }
2996 
2997 int
2998 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
2999 					 struct perf_evsel *evsel,
3000 					 perf_event__handler_t process)
3001 {
3002 	struct event_update_event *ev;
3003 	size_t size = strlen(evsel->unit);
3004 	int err;
3005 
3006 	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3007 	if (ev == NULL)
3008 		return -ENOMEM;
3009 
3010 	strncpy(ev->data, evsel->unit, size);
3011 	err = process(tool, (union perf_event *)ev, NULL, NULL);
3012 	free(ev);
3013 	return err;
3014 }
3015 
3016 int
3017 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3018 					  struct perf_evsel *evsel,
3019 					  perf_event__handler_t process)
3020 {
3021 	struct event_update_event *ev;
3022 	struct event_update_event_scale *ev_data;
3023 	int err;
3024 
3025 	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3026 	if (ev == NULL)
3027 		return -ENOMEM;
3028 
3029 	ev_data = (struct event_update_event_scale *) ev->data;
3030 	ev_data->scale = evsel->scale;
3031 	err = process(tool, (union perf_event*) ev, NULL, NULL);
3032 	free(ev);
3033 	return err;
3034 }
3035 
3036 int
3037 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3038 					 struct perf_evsel *evsel,
3039 					 perf_event__handler_t process)
3040 {
3041 	struct event_update_event *ev;
3042 	size_t len = strlen(evsel->name);
3043 	int err;
3044 
3045 	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3046 	if (ev == NULL)
3047 		return -ENOMEM;
3048 
3049 	strncpy(ev->data, evsel->name, len);
3050 	err = process(tool, (union perf_event*) ev, NULL, NULL);
3051 	free(ev);
3052 	return err;
3053 }
3054 
3055 int
3056 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3057 					struct perf_evsel *evsel,
3058 					perf_event__handler_t process)
3059 {
3060 	size_t size = sizeof(struct event_update_event);
3061 	struct event_update_event *ev;
3062 	int max, err;
3063 	u16 type;
3064 
3065 	if (!evsel->own_cpus)
3066 		return 0;
3067 
3068 	ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3069 	if (!ev)
3070 		return -ENOMEM;
3071 
3072 	ev->header.type = PERF_RECORD_EVENT_UPDATE;
3073 	ev->header.size = (u16)size;
3074 	ev->type = PERF_EVENT_UPDATE__CPUS;
3075 	ev->id   = evsel->id[0];
3076 
3077 	cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3078 				 evsel->own_cpus,
3079 				 type, max);
3080 
3081 	err = process(tool, (union perf_event*) ev, NULL, NULL);
3082 	free(ev);
3083 	return err;
3084 }
3085 
3086 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3087 {
3088 	struct event_update_event *ev = &event->event_update;
3089 	struct event_update_event_scale *ev_scale;
3090 	struct event_update_event_cpus *ev_cpus;
3091 	struct cpu_map *map;
3092 	size_t ret;
3093 
3094 	ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3095 
3096 	switch (ev->type) {
3097 	case PERF_EVENT_UPDATE__SCALE:
3098 		ev_scale = (struct event_update_event_scale *) ev->data;
3099 		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3100 		break;
3101 	case PERF_EVENT_UPDATE__UNIT:
3102 		ret += fprintf(fp, "... unit:  %s\n", ev->data);
3103 		break;
3104 	case PERF_EVENT_UPDATE__NAME:
3105 		ret += fprintf(fp, "... name:  %s\n", ev->data);
3106 		break;
3107 	case PERF_EVENT_UPDATE__CPUS:
3108 		ev_cpus = (struct event_update_event_cpus *) ev->data;
3109 		ret += fprintf(fp, "... ");
3110 
3111 		map = cpu_map__new_data(&ev_cpus->cpus);
3112 		if (map)
3113 			ret += cpu_map__fprintf(map, fp);
3114 		else
3115 			ret += fprintf(fp, "failed to get cpus\n");
3116 		break;
3117 	default:
3118 		ret += fprintf(fp, "... unknown type\n");
3119 		break;
3120 	}
3121 
3122 	return ret;
3123 }
3124 
3125 int perf_event__synthesize_attrs(struct perf_tool *tool,
3126 				   struct perf_session *session,
3127 				   perf_event__handler_t process)
3128 {
3129 	struct perf_evsel *evsel;
3130 	int err = 0;
3131 
3132 	evlist__for_each_entry(session->evlist, evsel) {
3133 		err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3134 						  evsel->id, process);
3135 		if (err) {
3136 			pr_debug("failed to create perf header attribute\n");
3137 			return err;
3138 		}
3139 	}
3140 
3141 	return err;
3142 }
3143 
3144 int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
3145 			     union perf_event *event,
3146 			     struct perf_evlist **pevlist)
3147 {
3148 	u32 i, ids, n_ids;
3149 	struct perf_evsel *evsel;
3150 	struct perf_evlist *evlist = *pevlist;
3151 
3152 	if (evlist == NULL) {
3153 		*pevlist = evlist = perf_evlist__new();
3154 		if (evlist == NULL)
3155 			return -ENOMEM;
3156 	}
3157 
3158 	evsel = perf_evsel__new(&event->attr.attr);
3159 	if (evsel == NULL)
3160 		return -ENOMEM;
3161 
3162 	perf_evlist__add(evlist, evsel);
3163 
3164 	ids = event->header.size;
3165 	ids -= (void *)&event->attr.id - (void *)event;
3166 	n_ids = ids / sizeof(u64);
3167 	/*
3168 	 * We don't have the cpu and thread maps on the header, so
3169 	 * for allocating the perf_sample_id table we fake 1 cpu and
3170 	 * hattr->ids threads.
3171 	 */
3172 	if (perf_evsel__alloc_id(evsel, 1, n_ids))
3173 		return -ENOMEM;
3174 
3175 	for (i = 0; i < n_ids; i++) {
3176 		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
3177 	}
3178 
3179 	symbol_conf.nr_events = evlist->nr_entries;
3180 
3181 	return 0;
3182 }
3183 
3184 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3185 				     union perf_event *event,
3186 				     struct perf_evlist **pevlist)
3187 {
3188 	struct event_update_event *ev = &event->event_update;
3189 	struct event_update_event_scale *ev_scale;
3190 	struct event_update_event_cpus *ev_cpus;
3191 	struct perf_evlist *evlist;
3192 	struct perf_evsel *evsel;
3193 	struct cpu_map *map;
3194 
3195 	if (!pevlist || *pevlist == NULL)
3196 		return -EINVAL;
3197 
3198 	evlist = *pevlist;
3199 
3200 	evsel = perf_evlist__id2evsel(evlist, ev->id);
3201 	if (evsel == NULL)
3202 		return -EINVAL;
3203 
3204 	switch (ev->type) {
3205 	case PERF_EVENT_UPDATE__UNIT:
3206 		evsel->unit = strdup(ev->data);
3207 		break;
3208 	case PERF_EVENT_UPDATE__NAME:
3209 		evsel->name = strdup(ev->data);
3210 		break;
3211 	case PERF_EVENT_UPDATE__SCALE:
3212 		ev_scale = (struct event_update_event_scale *) ev->data;
3213 		evsel->scale = ev_scale->scale;
3214 		break;
3215 	case PERF_EVENT_UPDATE__CPUS:
3216 		ev_cpus = (struct event_update_event_cpus *) ev->data;
3217 
3218 		map = cpu_map__new_data(&ev_cpus->cpus);
3219 		if (map)
3220 			evsel->own_cpus = map;
3221 		else
3222 			pr_err("failed to get event_update cpus\n");
3223 	default:
3224 		break;
3225 	}
3226 
3227 	return 0;
3228 }
3229 
3230 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
3231 					struct perf_evlist *evlist,
3232 					perf_event__handler_t process)
3233 {
3234 	union perf_event ev;
3235 	struct tracing_data *tdata;
3236 	ssize_t size = 0, aligned_size = 0, padding;
3237 	int err __maybe_unused = 0;
3238 
3239 	/*
3240 	 * We are going to store the size of the data followed
3241 	 * by the data contents. Since the fd descriptor is a pipe,
3242 	 * we cannot seek back to store the size of the data once
3243 	 * we know it. Instead we:
3244 	 *
3245 	 * - write the tracing data to the temp file
3246 	 * - get/write the data size to pipe
3247 	 * - write the tracing data from the temp file
3248 	 *   to the pipe
3249 	 */
3250 	tdata = tracing_data_get(&evlist->entries, fd, true);
3251 	if (!tdata)
3252 		return -1;
3253 
3254 	memset(&ev, 0, sizeof(ev));
3255 
3256 	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
3257 	size = tdata->size;
3258 	aligned_size = PERF_ALIGN(size, sizeof(u64));
3259 	padding = aligned_size - size;
3260 	ev.tracing_data.header.size = sizeof(ev.tracing_data);
3261 	ev.tracing_data.size = aligned_size;
3262 
3263 	process(tool, &ev, NULL, NULL);
3264 
3265 	/*
3266 	 * The put function will copy all the tracing data
3267 	 * stored in temp file to the pipe.
3268 	 */
3269 	tracing_data_put(tdata);
3270 
3271 	write_padded(fd, NULL, 0, padding);
3272 
3273 	return aligned_size;
3274 }
3275 
/*
 * Handle an incoming PERF_RECORD_HEADER_TRACING_DATA: parse the tracing
 * data that follows the record, consume (and optionally repipe) the
 * alignment padding, and prepare the tracepoint evsels.  Returns the
 * number of bytes consumed, or -1 on error.
 */
int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data_file__fd(session->file);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	/* the producer padded the data to u64 alignment; skip that here */
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		/* forward the padding so downstream stays aligned too */
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	/* consumed bytes must match the size announced in the record */
	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}
3315 
3316 int perf_event__synthesize_build_id(struct perf_tool *tool,
3317 				    struct dso *pos, u16 misc,
3318 				    perf_event__handler_t process,
3319 				    struct machine *machine)
3320 {
3321 	union perf_event ev;
3322 	size_t len;
3323 	int err = 0;
3324 
3325 	if (!pos->hit)
3326 		return err;
3327 
3328 	memset(&ev, 0, sizeof(ev));
3329 
3330 	len = pos->long_name_len + 1;
3331 	len = PERF_ALIGN(len, NAME_ALIGN);
3332 	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3333 	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3334 	ev.build_id.header.misc = misc;
3335 	ev.build_id.pid = machine->pid;
3336 	ev.build_id.header.size = sizeof(ev.build_id) + len;
3337 	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3338 
3339 	err = process(tool, &ev, NULL, machine);
3340 
3341 	return err;
3342 }
3343 
/*
 * Handle an incoming PERF_RECORD_HEADER_BUILD_ID: the filename trails
 * the fixed-size build_id payload in the record.  Always returns 0.
 */
int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}
3353