xref: /linux/tools/perf/util/header.c (revision 3d689ed6099a1a11c38bb78aff7498e78e287e0b)
1 #include <inttypes.h>
2 #include "util.h"
3 #include <sys/types.h>
4 #include <byteswap.h>
5 #include <unistd.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <linux/list.h>
9 #include <linux/kernel.h>
10 #include <linux/bitops.h>
11 #include <sys/utsname.h>
12 
13 #include "evlist.h"
14 #include "evsel.h"
15 #include "header.h"
16 #include "../perf.h"
17 #include "trace-event.h"
18 #include "session.h"
19 #include "symbol.h"
20 #include "debug.h"
21 #include "cpumap.h"
22 #include "pmu.h"
23 #include "vdso.h"
24 #include "strbuf.h"
25 #include "build-id.h"
26 #include "data.h"
27 #include <api/fs/fs.h>
28 #include "asm/bug.h"
29 
30 #include "sane_ctype.h"
31 
32 /*
33  * magic2 = "PERFILE2"
34  * must be a numerical value to let the endianness
35  * determine the memory layout. That way we are able
36  * to detect endianness when reading the perf.data file
37  * back.
38  *
39  * we check for legacy (PERFFILE) format.
40  */
41 static const char *__perf_magic1 = "PERFFILE";
42 static const u64 __perf_magic2    = 0x32454c4946524550ULL;
43 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
44 
45 #define PERF_MAGIC	__perf_magic2
46 
47 const char perf_version_string[] = PERF_VERSION;
48 
/*
 * On-disk layout of one event in the header: the event attr followed by the
 * file section (offset/size) holding that event's sample ids.
 */
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};
53 
/* Mark feature 'feat' as present in the header's feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}
58 
/* Mark feature 'feat' as absent in the header's feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}
63 
/* Return true if feature 'feat' is set in the header's feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
68 
/*
 * Write exactly 'size' bytes from 'buf' to 'fd', retrying on short writes.
 * Returns 0 on success, -errno on write failure.
 *
 * Use ssize_t for the write(2) result (an int would truncate very large
 * writes) and a char cursor (void pointer arithmetic is a GNU extension).
 */
static int do_write(int fd, const void *buf, size_t size)
{
	const char *p = buf;

	while (size) {
		ssize_t ret = write(fd, p, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		p += ret;
	}

	return 0;
}
83 
84 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
85 {
86 	static const char zero_buf[NAME_ALIGN];
87 	int err = do_write(fd, bf, count);
88 
89 	if (!err)
90 		err = do_write(fd, zero_buf, count_aligned - count);
91 
92 	return err;
93 }
94 
95 #define string_size(str)						\
96 	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
97 
98 static int do_write_string(int fd, const char *str)
99 {
100 	u32 len, olen;
101 	int ret;
102 
103 	olen = strlen(str) + 1;
104 	len = PERF_ALIGN(olen, NAME_ALIGN);
105 
106 	/* write len, incl. \0 */
107 	ret = do_write(fd, &len, sizeof(len));
108 	if (ret < 0)
109 		return ret;
110 
111 	return write_padded(fd, str, olen, len);
112 }
113 
114 static char *do_read_string(int fd, struct perf_header *ph)
115 {
116 	ssize_t sz, ret;
117 	u32 len;
118 	char *buf;
119 
120 	sz = readn(fd, &len, sizeof(len));
121 	if (sz < (ssize_t)sizeof(len))
122 		return NULL;
123 
124 	if (ph->needs_swap)
125 		len = bswap_32(len);
126 
127 	buf = malloc(len);
128 	if (!buf)
129 		return NULL;
130 
131 	ret = readn(fd, buf, len);
132 	if (ret == (ssize_t)len) {
133 		/*
134 		 * strings are padded by zeroes
135 		 * thus the actual strlen of buf
136 		 * may be less than len
137 		 */
138 		return buf;
139 	}
140 
141 	free(buf);
142 	return NULL;
143 }
144 
/* Emit the tracing data (tracepoint format info) for the evlist to 'fd'. */
static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}
150 
151 
/*
 * Write the build-id table of the session owning this header.  Also caches
 * the build-ids on disk afterwards (best-effort; its result is ignored).
 */
static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	/* with_hits=true: only DSOs that actually got samples */
	if (!perf_session__read_build_ids(session, true))
		return -1;

	err = perf_session__write_buildid_table(session, fd);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}
172 
173 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
174 			  struct perf_evlist *evlist __maybe_unused)
175 {
176 	struct utsname uts;
177 	int ret;
178 
179 	ret = uname(&uts);
180 	if (ret < 0)
181 		return -1;
182 
183 	return do_write_string(fd, uts.nodename);
184 }
185 
186 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
187 			   struct perf_evlist *evlist __maybe_unused)
188 {
189 	struct utsname uts;
190 	int ret;
191 
192 	ret = uname(&uts);
193 	if (ret < 0)
194 		return -1;
195 
196 	return do_write_string(fd, uts.release);
197 }
198 
199 static int write_arch(int fd, struct perf_header *h __maybe_unused,
200 		      struct perf_evlist *evlist __maybe_unused)
201 {
202 	struct utsname uts;
203 	int ret;
204 
205 	ret = uname(&uts);
206 	if (ret < 0)
207 		return -1;
208 
209 	return do_write_string(fd, uts.machine);
210 }
211 
/* Write the perf version string this file was recorded with. */
static int write_version(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(fd, perf_version_string);
}
217 
/*
 * Find the first /proc/cpuinfo line whose key matches 'cpuinfo_proc', strip
 * the "key: " prefix and trailing newline, collapse whitespace runs, and
 * write the remainder with do_write_string().  Returns that result, or -1
 * if the key is not found or the file can't be opened.
 */
static int __write_cpudesc(int fd, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	/* s points at the value part of "key: value" if present */
	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			/* skip the run of whitespace, then shift the tail left */
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(fd, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
274 
275 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
276 		       struct perf_evlist *evlist __maybe_unused)
277 {
278 #ifndef CPUINFO_PROC
279 #define CPUINFO_PROC {"model name", }
280 #endif
281 	const char *cpuinfo_procs[] = CPUINFO_PROC;
282 	unsigned int i;
283 
284 	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
285 		int ret;
286 		ret = __write_cpudesc(fd, cpuinfo_procs[i]);
287 		if (ret >= 0)
288 			return ret;
289 	}
290 	return -1;
291 }
292 
293 
294 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
295 			struct perf_evlist *evlist __maybe_unused)
296 {
297 	long nr;
298 	u32 nrc, nra;
299 	int ret;
300 
301 	nrc = cpu__max_present_cpu();
302 
303 	nr = sysconf(_SC_NPROCESSORS_ONLN);
304 	if (nr < 0)
305 		return -1;
306 
307 	nra = (u32)(nr & UINT_MAX);
308 
309 	ret = do_write(fd, &nrc, sizeof(nrc));
310 	if (ret < 0)
311 		return ret;
312 
313 	return do_write(fd, &nra, sizeof(nra));
314 }
315 
/*
 * Write the EVENT_DESC section: number of events, size of perf_event_attr,
 * then for each evsel its attr, id count, name string and id array.  The
 * layout must stay in sync with read_event_desc().
 */
static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(fd, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
371 
/*
 * Write the command line: a count, then the perf binary path (resolved via
 * /proc/self/exe) followed by each recorded argv string.
 */
static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	char buf[MAXPATHLEN];
	u32 n;
	int i, ret;

	/* actual path to perf binary */
	ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(fd, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(fd, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(fd, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}
405 
406 #define CORE_SIB_FMT \
407 	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
408 #define THRD_SIB_FMT \
409 	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"
410 
/*
 * Deduplicated CPU topology: unique core-sibling and thread-sibling list
 * strings as read from sysfs (see build_cpu_topo()).
 */
struct cpu_topo {
	u32 cpu_nr;		/* max present CPUs considered */
	u32 core_sib;		/* entries used in core_siblings[] */
	u32 thread_sib;		/* entries used in thread_siblings[] */
	char **core_siblings;
	char **thread_siblings;
};
418 
/*
 * Read this cpu's core_siblings_list and thread_siblings_list from sysfs
 * and record each string in 'tp' if not already present (dedup).  On
 * insertion the getline() buffer is handed over to tp and 'buf' is reset so
 * a fresh one is allocated for the next read.  Returns 0 if at least one of
 * the two files was read, -1 otherwise.
 */
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	/* keep only strings we have not seen yet */
	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;	/* ownership moved into tp */
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;	/* ownership moved into tp */
	}
	ret = 0;
done:
	/* fp here refers to the thread-siblings file; the core file is closed above */
	if(fp)
		fclose(fp);
	free(buf);
	return ret;
}
484 
485 static void free_cpu_topo(struct cpu_topo *tp)
486 {
487 	u32 i;
488 
489 	if (!tp)
490 		return;
491 
492 	for (i = 0 ; i < tp->core_sib; i++)
493 		zfree(&tp->core_siblings[i]);
494 
495 	for (i = 0 ; i < tp->thread_sib; i++)
496 		zfree(&tp->thread_siblings[i]);
497 
498 	free(tp);
499 }
500 
/*
 * Allocate a cpu_topo and fill it from sysfs for every online CPU.
 * The struct and both pointer arrays live in one calloc'd chunk:
 * [struct cpu_topo][core_siblings ptrs][thread_siblings ptrs].
 * Returns NULL on failure (NOTE(review): also when no online CPU was
 * processed, since 'ret' then stays -1 — confirm that is intended).
 */
static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	/* carve the single allocation into struct + two pointer arrays */
	tp = addr;
	tp->cpu_nr = nr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}
551 
552 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
553 			  struct perf_evlist *evlist __maybe_unused)
554 {
555 	struct cpu_topo *tp;
556 	u32 i;
557 	int ret, j;
558 
559 	tp = build_cpu_topology();
560 	if (!tp)
561 		return -1;
562 
563 	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
564 	if (ret < 0)
565 		goto done;
566 
567 	for (i = 0; i < tp->core_sib; i++) {
568 		ret = do_write_string(fd, tp->core_siblings[i]);
569 		if (ret < 0)
570 			goto done;
571 	}
572 	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
573 	if (ret < 0)
574 		goto done;
575 
576 	for (i = 0; i < tp->thread_sib; i++) {
577 		ret = do_write_string(fd, tp->thread_siblings[i]);
578 		if (ret < 0)
579 			break;
580 	}
581 
582 	ret = perf_env__read_cpu_topology_map(&perf_env);
583 	if (ret < 0)
584 		goto done;
585 
586 	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
587 		ret = do_write(fd, &perf_env.cpu[j].core_id,
588 			       sizeof(perf_env.cpu[j].core_id));
589 		if (ret < 0)
590 			return ret;
591 		ret = do_write(fd, &perf_env.cpu[j].socket_id,
592 			       sizeof(perf_env.cpu[j].socket_id));
593 		if (ret < 0)
594 			return ret;
595 	}
596 done:
597 	free_cpu_topo(tp);
598 	return ret;
599 }
600 
601 
602 
603 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
604 			  struct perf_evlist *evlist __maybe_unused)
605 {
606 	char *buf = NULL;
607 	FILE *fp;
608 	size_t len = 0;
609 	int ret = -1, n;
610 	uint64_t mem;
611 
612 	fp = fopen("/proc/meminfo", "r");
613 	if (!fp)
614 		return -1;
615 
616 	while (getline(&buf, &len, fp) > 0) {
617 		ret = strncmp(buf, "MemTotal:", 9);
618 		if (!ret)
619 			break;
620 	}
621 	if (!ret) {
622 		n = sscanf(buf, "%*s %"PRIu64, &mem);
623 		if (n == 1)
624 			ret = do_write(fd, &mem, sizeof(mem));
625 	} else
626 		ret = -1;
627 	free(buf);
628 	fclose(fp);
629 	return ret;
630 }
631 
632 static int write_topo_node(int fd, int node)
633 {
634 	char str[MAXPATHLEN];
635 	char field[32];
636 	char *buf = NULL, *p;
637 	size_t len = 0;
638 	FILE *fp;
639 	u64 mem_total, mem_free, mem;
640 	int ret = -1;
641 
642 	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
643 	fp = fopen(str, "r");
644 	if (!fp)
645 		return -1;
646 
647 	while (getline(&buf, &len, fp) > 0) {
648 		/* skip over invalid lines */
649 		if (!strchr(buf, ':'))
650 			continue;
651 		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
652 			goto done;
653 		if (!strcmp(field, "MemTotal:"))
654 			mem_total = mem;
655 		if (!strcmp(field, "MemFree:"))
656 			mem_free = mem;
657 	}
658 
659 	fclose(fp);
660 	fp = NULL;
661 
662 	ret = do_write(fd, &mem_total, sizeof(u64));
663 	if (ret)
664 		goto done;
665 
666 	ret = do_write(fd, &mem_free, sizeof(u64));
667 	if (ret)
668 		goto done;
669 
670 	ret = -1;
671 	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
672 
673 	fp = fopen(str, "r");
674 	if (!fp)
675 		goto done;
676 
677 	if (getline(&buf, &len, fp) <= 0)
678 		goto done;
679 
680 	p = strchr(buf, '\n');
681 	if (p)
682 		*p = '\0';
683 
684 	ret = do_write_string(fd, buf);
685 done:
686 	free(buf);
687 	if (fp)
688 		fclose(fp);
689 	return ret;
690 }
691 
/*
 * Write the NUMA topology section: node count, then for each online node
 * its id followed by the record emitted by write_topo_node().
 */
static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
			  struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	/* parse the online-node list ("0-1,4" style) into a map */
	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(fd, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(fd, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(fd, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return ret;
}
740 
741 /*
742  * File format:
743  *
744  * struct pmu_mappings {
745  *	u32	pmu_num;
746  *	struct pmu_map {
747  *		u32	type;
748  *		char	name[];
749  *	}[pmu_num];
750  * };
751  */
752 
753 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
754 			      struct perf_evlist *evlist __maybe_unused)
755 {
756 	struct perf_pmu *pmu = NULL;
757 	off_t offset = lseek(fd, 0, SEEK_CUR);
758 	__u32 pmu_num = 0;
759 	int ret;
760 
761 	/* write real pmu_num later */
762 	ret = do_write(fd, &pmu_num, sizeof(pmu_num));
763 	if (ret < 0)
764 		return ret;
765 
766 	while ((pmu = perf_pmu__scan(pmu))) {
767 		if (!pmu->name)
768 			continue;
769 		pmu_num++;
770 
771 		ret = do_write(fd, &pmu->type, sizeof(pmu->type));
772 		if (ret < 0)
773 			return ret;
774 
775 		ret = do_write_string(fd, pmu->name);
776 		if (ret < 0)
777 			return ret;
778 	}
779 
780 	if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
781 		/* discard all */
782 		lseek(fd, offset, SEEK_SET);
783 		return -1;
784 	}
785 
786 	return 0;
787 }
788 
789 /*
790  * File format:
791  *
792  * struct group_descs {
793  *	u32	nr_groups;
794  *	struct group_desc {
795  *		char	name[];
796  *		u32	leader_idx;
797  *		u32	nr_members;
798  *	}[nr_groups];
799  * };
800  */
801 static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
802 			    struct perf_evlist *evlist)
803 {
804 	u32 nr_groups = evlist->nr_groups;
805 	struct perf_evsel *evsel;
806 	int ret;
807 
808 	ret = do_write(fd, &nr_groups, sizeof(nr_groups));
809 	if (ret < 0)
810 		return ret;
811 
812 	evlist__for_each_entry(evlist, evsel) {
813 		if (perf_evsel__is_group_leader(evsel) &&
814 		    evsel->nr_members > 1) {
815 			const char *name = evsel->group_name ?: "{anon_group}";
816 			u32 leader_idx = evsel->idx;
817 			u32 nr_members = evsel->nr_members;
818 
819 			ret = do_write_string(fd, name);
820 			if (ret < 0)
821 				return ret;
822 
823 			ret = do_write(fd, &leader_idx, sizeof(leader_idx));
824 			if (ret < 0)
825 				return ret;
826 
827 			ret = do_write(fd, &nr_members, sizeof(nr_members));
828 			if (ret < 0)
829 				return ret;
830 		}
831 	}
832 	return 0;
833 }
834 
835 /*
836  * default get_cpuid(): nothing gets recorded
837  * actual implementation must be in arch/$(ARCH)/util/header.c
838  */
/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(ARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}
843 
844 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
845 		       struct perf_evlist *evlist __maybe_unused)
846 {
847 	char buffer[64];
848 	int ret;
849 
850 	ret = get_cpuid(buffer, sizeof(buffer));
851 	if (!ret)
852 		goto write_it;
853 
854 	return -1;
855 write_it:
856 	return do_write_string(fd, buffer);
857 }
858 
/* Branch-stack feature carries no payload; only the feature bit matters. */
static int write_branch_stack(int fd __maybe_unused,
			      struct perf_header *h __maybe_unused,
		       struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
865 
/* Write the AUX-area trace index of the session owning this header. */
static int write_auxtrace(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	err = auxtrace_index__write(fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}
879 
880 static int cpu_cache_level__sort(const void *a, const void *b)
881 {
882 	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
883 	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
884 
885 	return cache_a->level - cache_b->level;
886 }
887 
888 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
889 {
890 	if (a->level != b->level)
891 		return false;
892 
893 	if (a->line_size != b->line_size)
894 		return false;
895 
896 	if (a->sets != b->sets)
897 		return false;
898 
899 	if (a->ways != b->ways)
900 		return false;
901 
902 	if (strcmp(a->type, b->type))
903 		return false;
904 
905 	if (strcmp(a->size, b->size))
906 		return false;
907 
908 	if (strcmp(a->map, b->map))
909 		return false;
910 
911 	return true;
912 }
913 
914 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
915 {
916 	char path[PATH_MAX], file[PATH_MAX];
917 	struct stat st;
918 	size_t len;
919 
920 	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
921 	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
922 
923 	if (stat(file, &st))
924 		return 1;
925 
926 	scnprintf(file, PATH_MAX, "%s/level", path);
927 	if (sysfs__read_int(file, (int *) &cache->level))
928 		return -1;
929 
930 	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
931 	if (sysfs__read_int(file, (int *) &cache->line_size))
932 		return -1;
933 
934 	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
935 	if (sysfs__read_int(file, (int *) &cache->sets))
936 		return -1;
937 
938 	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
939 	if (sysfs__read_int(file, (int *) &cache->ways))
940 		return -1;
941 
942 	scnprintf(file, PATH_MAX, "%s/type", path);
943 	if (sysfs__read_str(file, &cache->type, &len))
944 		return -1;
945 
946 	cache->type[len] = 0;
947 	cache->type = rtrim(cache->type);
948 
949 	scnprintf(file, PATH_MAX, "%s/size", path);
950 	if (sysfs__read_str(file, &cache->size, &len)) {
951 		free(cache->type);
952 		return -1;
953 	}
954 
955 	cache->size[len] = 0;
956 	cache->size = rtrim(cache->size);
957 
958 	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
959 	if (sysfs__read_str(file, &cache->map, &len)) {
960 		free(cache->map);
961 		free(cache->type);
962 		return -1;
963 	}
964 
965 	cache->map[len] = 0;
966 	cache->map = rtrim(cache->map);
967 	return 0;
968 }
969 
/* Print one cache level as e.g. "L1 Data 32K [0-1]". */
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
974 
/*
 * Enumerate all cache levels of all configured CPUs, deduplicating entries
 * shared between CPUs.  Fills caches[] (capacity 'size') and stores the
 * count in *cntp.  Returns 0 on success, negative on read error.
 */
static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		/* level is bounded at 10; err==1 below means "no such index" */
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			/* keep only cache descriptions not seen on earlier CPUs */
			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}
1018 
1019 #define MAX_CACHES 2000
1020 
/*
 * Write the cache section: version, count, then for each unique cache its
 * four numeric fields followed by its three strings.
 */
static int write_cache(int fd, struct perf_header *h __maybe_unused,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	/* stable on-disk order: sorted by cache level */
	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(fd, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(fd, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		/* numeric fields, each written as a u32 */
		#define _W(v)					\
			ret = do_write(fd, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		/* string fields, length-prefixed */
		#define _W(v)						\
			ret = do_write_string(fd, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}
1072 
/* Stat feature carries no payload; only the feature bit matters. */
static int write_stat(int fd __maybe_unused,
		      struct perf_header *h __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
1079 
/* Print the recorded hostname (perf report --header). */
static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
			   FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ph->env.hostname);
}
1085 
/* Print the recorded kernel release. */
static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
			    FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ph->env.os_release);
}
1091 
/* Print the recorded machine architecture. */
static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ph->env.arch);
}
1096 
/* Print the recorded CPU description string. */
static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
}
1102 
/* Print online and available CPU counts recorded at capture time. */
static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
			 FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
}
1109 
/* Print the perf version that recorded the file. */
static void print_version(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ph->env.version);
}
1115 
1116 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1117 			  FILE *fp)
1118 {
1119 	int nr, i;
1120 
1121 	nr = ph->env.nr_cmdline;
1122 
1123 	fprintf(fp, "# cmdline : ");
1124 
1125 	for (i = 0; i < nr; i++)
1126 		fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1127 	fputc('\n', fp);
1128 }
1129 
/*
 * Print sibling-core and sibling-thread lists, plus per-CPU core/socket ids
 * when the topology map was recorded.
 */
static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
			       FILE *fp)
{
	int nr, i;
	char *str;
	int cpu_nr = ph->env.nr_cpus_avail;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	/* sibling strings are stored back to back, NUL separated */
	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores   : %s\n", str);
		str += strlen(str) + 1;
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.cpu != NULL) {
		for (i = 0; i < cpu_nr; i++)
			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
	} else
		fprintf(fp, "# Core ID and Socket ID information is not available\n");
}
1160 
1161 static void free_event_desc(struct perf_evsel *events)
1162 {
1163 	struct perf_evsel *evsel;
1164 
1165 	if (!events)
1166 		return;
1167 
1168 	for (evsel = events; evsel->attr.size; evsel++) {
1169 		zfree(&evsel->name);
1170 		zfree(&evsel->id);
1171 	}
1172 
1173 	free(events);
1174 }
1175 
/*
 * Parse the EVENT_DESC section written by write_event_desc() into a
 * calloc'd, attr.size==0-terminated evsel array (caller frees via
 * free_event_desc()).  Handles cross-endian files and on-file attr sizes
 * differing from ours.  Returns NULL on any read/alloc failure.
 */
static struct perf_evsel *
read_event_desc(struct perf_header *ph, int fd)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	ssize_t ret;
	size_t msz;

	/* number of events */
	ret = readn(fd, &nre, sizeof(nre));
	if (ret != (ssize_t)sizeof(nre))
		goto error;

	if (ph->needs_swap)
		nre = bswap_32(nre);

	ret = readn(fd, &sz, sizeof(sz));
	if (ret != (ssize_t)sizeof(sz))
		goto error;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	/* copy at most our attr size; extra on-file bytes are dropped */
	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		ret = readn(fd, buf, sz);
		if (ret != (ssize_t)sz)
			goto error;

		if (ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		ret = readn(fd, &nr, sizeof(nr));
		if (ret != (ssize_t)sizeof(nr))
			goto error;

		if (ph->needs_swap) {
			nr = bswap_32(nr);
			evsel->needs_swap = true;
		}

		evsel->name = do_read_string(fd, ph);

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			ret = readn(fd, id, sizeof(*id));
			if (ret != (ssize_t)sizeof(*id))
				goto error;
			if (ph->needs_swap)
				*id = bswap_64(*id);
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}
1268 
/* perf_event_attr__fprintf() callback: print one attr field as ", name = val". */
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	return fprintf(fp, ", %s = %s", name, val);
}
1274 
/* Print each recorded event's name, sample ids and attr fields. */
static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{
	struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
	u32 j;
	u64 *id;

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->attr.size; evsel++) {
		/* NOTE(review): together with the ", id = {" below this emits
		 * "name = X, , id = {" — double separator; confirm intended. */
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
}
1306 
1307 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1308 			    FILE *fp)
1309 {
1310 	fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1311 }
1312 
/* Print each recorded NUMA node's memory totals and cpu list. */
static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
				FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ph->env.nr_numa_nodes; i++) {
		n = &ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}
1330 
/* Print the recorded arch-specific CPUID string. */
static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
}
1335 
/* Feature bit only; nothing to decode. */
static void print_branch_stack(struct perf_header *ph __maybe_unused,
			       int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}
1341 
1342 static void print_auxtrace(struct perf_header *ph __maybe_unused,
1343 			   int fd __maybe_unused, FILE *fp)
1344 {
1345 	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
1346 }
1347 
1348 static void print_stat(struct perf_header *ph __maybe_unused,
1349 		       int fd __maybe_unused, FILE *fp)
1350 {
1351 	fprintf(fp, "# contains stat data\n");
1352 }
1353 
1354 static void print_cache(struct perf_header *ph __maybe_unused,
1355 			int fd __maybe_unused, FILE *fp __maybe_unused)
1356 {
1357 	int i;
1358 
1359 	fprintf(fp, "# CPU cache info:\n");
1360 	for (i = 0; i < ph->env.caches_cnt; i++) {
1361 		fprintf(fp, "#  ");
1362 		cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1363 	}
1364 }
1365 
1366 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1367 			       FILE *fp)
1368 {
1369 	const char *delimiter = "# pmu mappings: ";
1370 	char *str, *tmp;
1371 	u32 pmu_num;
1372 	u32 type;
1373 
1374 	pmu_num = ph->env.nr_pmu_mappings;
1375 	if (!pmu_num) {
1376 		fprintf(fp, "# pmu mappings: not available\n");
1377 		return;
1378 	}
1379 
1380 	str = ph->env.pmu_mappings;
1381 
1382 	while (pmu_num) {
1383 		type = strtoul(str, &tmp, 0);
1384 		if (*tmp != ':')
1385 			goto error;
1386 
1387 		str = tmp + 1;
1388 		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1389 
1390 		delimiter = ", ";
1391 		str += strlen(str) + 1;
1392 		pmu_num--;
1393 	}
1394 
1395 	fprintf(fp, "\n");
1396 
1397 	if (!pmu_num)
1398 		return;
1399 error:
1400 	fprintf(fp, "# pmu mappings: unable to read\n");
1401 }
1402 
/*
 * Print event groups as "# group: name{leader,member,...}".  Walks the
 * evlist in order: a leader with more than one member opens a group line,
 * and the following 'nr' entries are appended as members.
 */
static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
			     FILE *fp)
{
	struct perf_session *session;
	struct perf_evsel *evsel;
	u32 nr = 0;	/* members still expected for the currently open group */

	session = container_of(ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			/* group_name may be NULL; "?:" falls back to "" */
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->nr_members - 1;
		} else if (nr) {
			/* member of the open group; close after the last one */
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}
1427 
1428 static int __event_process_build_id(struct build_id_event *bev,
1429 				    char *filename,
1430 				    struct perf_session *session)
1431 {
1432 	int err = -1;
1433 	struct machine *machine;
1434 	u16 cpumode;
1435 	struct dso *dso;
1436 	enum dso_kernel_type dso_type;
1437 
1438 	machine = perf_session__findnew_machine(session, bev->pid);
1439 	if (!machine)
1440 		goto out;
1441 
1442 	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1443 
1444 	switch (cpumode) {
1445 	case PERF_RECORD_MISC_KERNEL:
1446 		dso_type = DSO_TYPE_KERNEL;
1447 		break;
1448 	case PERF_RECORD_MISC_GUEST_KERNEL:
1449 		dso_type = DSO_TYPE_GUEST_KERNEL;
1450 		break;
1451 	case PERF_RECORD_MISC_USER:
1452 	case PERF_RECORD_MISC_GUEST_USER:
1453 		dso_type = DSO_TYPE_USER;
1454 		break;
1455 	default:
1456 		goto out;
1457 	}
1458 
1459 	dso = machine__findnew_dso(machine, filename);
1460 	if (dso != NULL) {
1461 		char sbuild_id[SBUILD_ID_SIZE];
1462 
1463 		dso__set_build_id(dso, &bev->build_id);
1464 
1465 		if (!is_kernel_module(filename, cpumode))
1466 			dso->kernel = dso_type;
1467 
1468 		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
1469 				  sbuild_id);
1470 		pr_debug("build id event received for %s: %s\n",
1471 			 dso->long_name, sbuild_id);
1472 		dso__put(dso);
1473 	}
1474 
1475 	err = 0;
1476 out:
1477 	return err;
1478 }
1479 
1480 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1481 						 int input, u64 offset, u64 size)
1482 {
1483 	struct perf_session *session = container_of(header, struct perf_session, header);
1484 	struct {
1485 		struct perf_event_header   header;
1486 		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1487 		char			   filename[0];
1488 	} old_bev;
1489 	struct build_id_event bev;
1490 	char filename[PATH_MAX];
1491 	u64 limit = offset + size;
1492 
1493 	while (offset < limit) {
1494 		ssize_t len;
1495 
1496 		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1497 			return -1;
1498 
1499 		if (header->needs_swap)
1500 			perf_event_header__bswap(&old_bev.header);
1501 
1502 		len = old_bev.header.size - sizeof(old_bev);
1503 		if (readn(input, filename, len) != len)
1504 			return -1;
1505 
1506 		bev.header = old_bev.header;
1507 
1508 		/*
1509 		 * As the pid is the missing value, we need to fill
1510 		 * it properly. The header.misc value give us nice hint.
1511 		 */
1512 		bev.pid	= HOST_KERNEL_ID;
1513 		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1514 		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1515 			bev.pid	= DEFAULT_GUEST_KERNEL_ID;
1516 
1517 		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1518 		__event_process_build_id(&bev, filename, session);
1519 
1520 		offset += bev.header.size;
1521 	}
1522 
1523 	return 0;
1524 }
1525 
1526 static int perf_header__read_build_ids(struct perf_header *header,
1527 				       int input, u64 offset, u64 size)
1528 {
1529 	struct perf_session *session = container_of(header, struct perf_session, header);
1530 	struct build_id_event bev;
1531 	char filename[PATH_MAX];
1532 	u64 limit = offset + size, orig_offset = offset;
1533 	int err = -1;
1534 
1535 	while (offset < limit) {
1536 		ssize_t len;
1537 
1538 		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1539 			goto out;
1540 
1541 		if (header->needs_swap)
1542 			perf_event_header__bswap(&bev.header);
1543 
1544 		len = bev.header.size - sizeof(bev);
1545 		if (readn(input, filename, len) != len)
1546 			goto out;
1547 		/*
1548 		 * The a1645ce1 changeset:
1549 		 *
1550 		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1551 		 *
1552 		 * Added a field to struct build_id_event that broke the file
1553 		 * format.
1554 		 *
1555 		 * Since the kernel build-id is the first entry, process the
1556 		 * table using the old format if the well known
1557 		 * '[kernel.kallsyms]' string for the kernel build-id has the
1558 		 * first 4 characters chopped off (where the pid_t sits).
1559 		 */
1560 		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1561 			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1562 				return -1;
1563 			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1564 		}
1565 
1566 		__event_process_build_id(&bev, filename, session);
1567 
1568 		offset += bev.header.size;
1569 	}
1570 	err = 0;
1571 out:
1572 	return err;
1573 }
1574 
1575 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1576 				struct perf_header *ph __maybe_unused,
1577 				int fd, void *data)
1578 {
1579 	ssize_t ret = trace_report(fd, data, false);
1580 	return ret < 0 ? -1 : 0;
1581 }
1582 
1583 static int process_build_id(struct perf_file_section *section,
1584 			    struct perf_header *ph, int fd,
1585 			    void *data __maybe_unused)
1586 {
1587 	if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1588 		pr_debug("Failed to read buildids, continuing...\n");
1589 	return 0;
1590 }
1591 
1592 static int process_hostname(struct perf_file_section *section __maybe_unused,
1593 			    struct perf_header *ph, int fd,
1594 			    void *data __maybe_unused)
1595 {
1596 	ph->env.hostname = do_read_string(fd, ph);
1597 	return ph->env.hostname ? 0 : -ENOMEM;
1598 }
1599 
1600 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1601 			     struct perf_header *ph, int fd,
1602 			     void *data __maybe_unused)
1603 {
1604 	ph->env.os_release = do_read_string(fd, ph);
1605 	return ph->env.os_release ? 0 : -ENOMEM;
1606 }
1607 
1608 static int process_version(struct perf_file_section *section __maybe_unused,
1609 			   struct perf_header *ph, int fd,
1610 			   void *data __maybe_unused)
1611 {
1612 	ph->env.version = do_read_string(fd, ph);
1613 	return ph->env.version ? 0 : -ENOMEM;
1614 }
1615 
1616 static int process_arch(struct perf_file_section *section __maybe_unused,
1617 			struct perf_header *ph,	int fd,
1618 			void *data __maybe_unused)
1619 {
1620 	ph->env.arch = do_read_string(fd, ph);
1621 	return ph->env.arch ? 0 : -ENOMEM;
1622 }
1623 
1624 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1625 			  struct perf_header *ph, int fd,
1626 			  void *data __maybe_unused)
1627 {
1628 	ssize_t ret;
1629 	u32 nr;
1630 
1631 	ret = readn(fd, &nr, sizeof(nr));
1632 	if (ret != sizeof(nr))
1633 		return -1;
1634 
1635 	if (ph->needs_swap)
1636 		nr = bswap_32(nr);
1637 
1638 	ph->env.nr_cpus_avail = nr;
1639 
1640 	ret = readn(fd, &nr, sizeof(nr));
1641 	if (ret != sizeof(nr))
1642 		return -1;
1643 
1644 	if (ph->needs_swap)
1645 		nr = bswap_32(nr);
1646 
1647 	ph->env.nr_cpus_online = nr;
1648 	return 0;
1649 }
1650 
1651 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1652 			   struct perf_header *ph, int fd,
1653 			   void *data __maybe_unused)
1654 {
1655 	ph->env.cpu_desc = do_read_string(fd, ph);
1656 	return ph->env.cpu_desc ? 0 : -ENOMEM;
1657 }
1658 
1659 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1660 			 struct perf_header *ph,  int fd,
1661 			 void *data __maybe_unused)
1662 {
1663 	ph->env.cpuid = do_read_string(fd, ph);
1664 	return ph->env.cpuid ? 0 : -ENOMEM;
1665 }
1666 
1667 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1668 			     struct perf_header *ph, int fd,
1669 			     void *data __maybe_unused)
1670 {
1671 	uint64_t mem;
1672 	ssize_t ret;
1673 
1674 	ret = readn(fd, &mem, sizeof(mem));
1675 	if (ret != sizeof(mem))
1676 		return -1;
1677 
1678 	if (ph->needs_swap)
1679 		mem = bswap_64(mem);
1680 
1681 	ph->env.total_mem = mem;
1682 	return 0;
1683 }
1684 
1685 static struct perf_evsel *
1686 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1687 {
1688 	struct perf_evsel *evsel;
1689 
1690 	evlist__for_each_entry(evlist, evsel) {
1691 		if (evsel->idx == idx)
1692 			return evsel;
1693 	}
1694 
1695 	return NULL;
1696 }
1697 
1698 static void
1699 perf_evlist__set_event_name(struct perf_evlist *evlist,
1700 			    struct perf_evsel *event)
1701 {
1702 	struct perf_evsel *evsel;
1703 
1704 	if (!event->name)
1705 		return;
1706 
1707 	evsel = perf_evlist__find_by_index(evlist, event->idx);
1708 	if (!evsel)
1709 		return;
1710 
1711 	if (evsel->name)
1712 		return;
1713 
1714 	evsel->name = strdup(event->name);
1715 }
1716 
1717 static int
1718 process_event_desc(struct perf_file_section *section __maybe_unused,
1719 		   struct perf_header *header, int fd,
1720 		   void *data __maybe_unused)
1721 {
1722 	struct perf_session *session;
1723 	struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1724 
1725 	if (!events)
1726 		return 0;
1727 
1728 	session = container_of(header, struct perf_session, header);
1729 	for (evsel = events; evsel->attr.size; evsel++)
1730 		perf_evlist__set_event_name(session->evlist, evsel);
1731 
1732 	free_event_desc(events);
1733 
1734 	return 0;
1735 }
1736 
1737 static int process_cmdline(struct perf_file_section *section,
1738 			   struct perf_header *ph, int fd,
1739 			   void *data __maybe_unused)
1740 {
1741 	ssize_t ret;
1742 	char *str, *cmdline = NULL, **argv = NULL;
1743 	u32 nr, i, len = 0;
1744 
1745 	ret = readn(fd, &nr, sizeof(nr));
1746 	if (ret != sizeof(nr))
1747 		return -1;
1748 
1749 	if (ph->needs_swap)
1750 		nr = bswap_32(nr);
1751 
1752 	ph->env.nr_cmdline = nr;
1753 
1754 	cmdline = zalloc(section->size + nr + 1);
1755 	if (!cmdline)
1756 		return -1;
1757 
1758 	argv = zalloc(sizeof(char *) * (nr + 1));
1759 	if (!argv)
1760 		goto error;
1761 
1762 	for (i = 0; i < nr; i++) {
1763 		str = do_read_string(fd, ph);
1764 		if (!str)
1765 			goto error;
1766 
1767 		argv[i] = cmdline + len;
1768 		memcpy(argv[i], str, strlen(str) + 1);
1769 		len += strlen(str) + 1;
1770 		free(str);
1771 	}
1772 	ph->env.cmdline = cmdline;
1773 	ph->env.cmdline_argv = (const char **) argv;
1774 	return 0;
1775 
1776 error:
1777 	free(argv);
1778 	free(cmdline);
1779 	return -1;
1780 }
1781 
1782 static int process_cpu_topology(struct perf_file_section *section,
1783 				struct perf_header *ph, int fd,
1784 				void *data __maybe_unused)
1785 {
1786 	ssize_t ret;
1787 	u32 nr, i;
1788 	char *str;
1789 	struct strbuf sb;
1790 	int cpu_nr = ph->env.nr_cpus_avail;
1791 	u64 size = 0;
1792 
1793 	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1794 	if (!ph->env.cpu)
1795 		return -1;
1796 
1797 	ret = readn(fd, &nr, sizeof(nr));
1798 	if (ret != sizeof(nr))
1799 		goto free_cpu;
1800 
1801 	if (ph->needs_swap)
1802 		nr = bswap_32(nr);
1803 
1804 	ph->env.nr_sibling_cores = nr;
1805 	size += sizeof(u32);
1806 	if (strbuf_init(&sb, 128) < 0)
1807 		goto free_cpu;
1808 
1809 	for (i = 0; i < nr; i++) {
1810 		str = do_read_string(fd, ph);
1811 		if (!str)
1812 			goto error;
1813 
1814 		/* include a NULL character at the end */
1815 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1816 			goto error;
1817 		size += string_size(str);
1818 		free(str);
1819 	}
1820 	ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1821 
1822 	ret = readn(fd, &nr, sizeof(nr));
1823 	if (ret != sizeof(nr))
1824 		return -1;
1825 
1826 	if (ph->needs_swap)
1827 		nr = bswap_32(nr);
1828 
1829 	ph->env.nr_sibling_threads = nr;
1830 	size += sizeof(u32);
1831 
1832 	for (i = 0; i < nr; i++) {
1833 		str = do_read_string(fd, ph);
1834 		if (!str)
1835 			goto error;
1836 
1837 		/* include a NULL character at the end */
1838 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1839 			goto error;
1840 		size += string_size(str);
1841 		free(str);
1842 	}
1843 	ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1844 
1845 	/*
1846 	 * The header may be from old perf,
1847 	 * which doesn't include core id and socket id information.
1848 	 */
1849 	if (section->size <= size) {
1850 		zfree(&ph->env.cpu);
1851 		return 0;
1852 	}
1853 
1854 	for (i = 0; i < (u32)cpu_nr; i++) {
1855 		ret = readn(fd, &nr, sizeof(nr));
1856 		if (ret != sizeof(nr))
1857 			goto free_cpu;
1858 
1859 		if (ph->needs_swap)
1860 			nr = bswap_32(nr);
1861 
1862 		ph->env.cpu[i].core_id = nr;
1863 
1864 		ret = readn(fd, &nr, sizeof(nr));
1865 		if (ret != sizeof(nr))
1866 			goto free_cpu;
1867 
1868 		if (ph->needs_swap)
1869 			nr = bswap_32(nr);
1870 
1871 		if (nr != (u32)-1 && nr > (u32)cpu_nr) {
1872 			pr_debug("socket_id number is too big."
1873 				 "You may need to upgrade the perf tool.\n");
1874 			goto free_cpu;
1875 		}
1876 
1877 		ph->env.cpu[i].socket_id = nr;
1878 	}
1879 
1880 	return 0;
1881 
1882 error:
1883 	strbuf_release(&sb);
1884 free_cpu:
1885 	zfree(&ph->env.cpu);
1886 	return -1;
1887 }
1888 
1889 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1890 				 struct perf_header *ph, int fd,
1891 				 void *data __maybe_unused)
1892 {
1893 	struct numa_node *nodes, *n;
1894 	ssize_t ret;
1895 	u32 nr, i;
1896 	char *str;
1897 
1898 	/* nr nodes */
1899 	ret = readn(fd, &nr, sizeof(nr));
1900 	if (ret != sizeof(nr))
1901 		return -1;
1902 
1903 	if (ph->needs_swap)
1904 		nr = bswap_32(nr);
1905 
1906 	nodes = zalloc(sizeof(*nodes) * nr);
1907 	if (!nodes)
1908 		return -ENOMEM;
1909 
1910 	for (i = 0; i < nr; i++) {
1911 		n = &nodes[i];
1912 
1913 		/* node number */
1914 		ret = readn(fd, &n->node, sizeof(u32));
1915 		if (ret != sizeof(n->node))
1916 			goto error;
1917 
1918 		ret = readn(fd, &n->mem_total, sizeof(u64));
1919 		if (ret != sizeof(u64))
1920 			goto error;
1921 
1922 		ret = readn(fd, &n->mem_free, sizeof(u64));
1923 		if (ret != sizeof(u64))
1924 			goto error;
1925 
1926 		if (ph->needs_swap) {
1927 			n->node      = bswap_32(n->node);
1928 			n->mem_total = bswap_64(n->mem_total);
1929 			n->mem_free  = bswap_64(n->mem_free);
1930 		}
1931 
1932 		str = do_read_string(fd, ph);
1933 		if (!str)
1934 			goto error;
1935 
1936 		n->map = cpu_map__new(str);
1937 		if (!n->map)
1938 			goto error;
1939 
1940 		free(str);
1941 	}
1942 	ph->env.nr_numa_nodes = nr;
1943 	ph->env.numa_nodes = nodes;
1944 	return 0;
1945 
1946 error:
1947 	free(nodes);
1948 	return -1;
1949 }
1950 
1951 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1952 				struct perf_header *ph, int fd,
1953 				void *data __maybe_unused)
1954 {
1955 	ssize_t ret;
1956 	char *name;
1957 	u32 pmu_num;
1958 	u32 type;
1959 	struct strbuf sb;
1960 
1961 	ret = readn(fd, &pmu_num, sizeof(pmu_num));
1962 	if (ret != sizeof(pmu_num))
1963 		return -1;
1964 
1965 	if (ph->needs_swap)
1966 		pmu_num = bswap_32(pmu_num);
1967 
1968 	if (!pmu_num) {
1969 		pr_debug("pmu mappings not available\n");
1970 		return 0;
1971 	}
1972 
1973 	ph->env.nr_pmu_mappings = pmu_num;
1974 	if (strbuf_init(&sb, 128) < 0)
1975 		return -1;
1976 
1977 	while (pmu_num) {
1978 		if (readn(fd, &type, sizeof(type)) != sizeof(type))
1979 			goto error;
1980 		if (ph->needs_swap)
1981 			type = bswap_32(type);
1982 
1983 		name = do_read_string(fd, ph);
1984 		if (!name)
1985 			goto error;
1986 
1987 		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
1988 			goto error;
1989 		/* include a NULL character at the end */
1990 		if (strbuf_add(&sb, "", 1) < 0)
1991 			goto error;
1992 
1993 		if (!strcmp(name, "msr"))
1994 			ph->env.msr_pmu_type = type;
1995 
1996 		free(name);
1997 		pmu_num--;
1998 	}
1999 	ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2000 	return 0;
2001 
2002 error:
2003 	strbuf_release(&sb);
2004 	return -1;
2005 }
2006 
2007 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2008 			      struct perf_header *ph, int fd,
2009 			      void *data __maybe_unused)
2010 {
2011 	size_t ret = -1;
2012 	u32 i, nr, nr_groups;
2013 	struct perf_session *session;
2014 	struct perf_evsel *evsel, *leader = NULL;
2015 	struct group_desc {
2016 		char *name;
2017 		u32 leader_idx;
2018 		u32 nr_members;
2019 	} *desc;
2020 
2021 	if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2022 		return -1;
2023 
2024 	if (ph->needs_swap)
2025 		nr_groups = bswap_32(nr_groups);
2026 
2027 	ph->env.nr_groups = nr_groups;
2028 	if (!nr_groups) {
2029 		pr_debug("group desc not available\n");
2030 		return 0;
2031 	}
2032 
2033 	desc = calloc(nr_groups, sizeof(*desc));
2034 	if (!desc)
2035 		return -1;
2036 
2037 	for (i = 0; i < nr_groups; i++) {
2038 		desc[i].name = do_read_string(fd, ph);
2039 		if (!desc[i].name)
2040 			goto out_free;
2041 
2042 		if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2043 			goto out_free;
2044 
2045 		if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2046 			goto out_free;
2047 
2048 		if (ph->needs_swap) {
2049 			desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2050 			desc[i].nr_members = bswap_32(desc[i].nr_members);
2051 		}
2052 	}
2053 
2054 	/*
2055 	 * Rebuild group relationship based on the group_desc
2056 	 */
2057 	session = container_of(ph, struct perf_session, header);
2058 	session->evlist->nr_groups = nr_groups;
2059 
2060 	i = nr = 0;
2061 	evlist__for_each_entry(session->evlist, evsel) {
2062 		if (evsel->idx == (int) desc[i].leader_idx) {
2063 			evsel->leader = evsel;
2064 			/* {anon_group} is a dummy name */
2065 			if (strcmp(desc[i].name, "{anon_group}")) {
2066 				evsel->group_name = desc[i].name;
2067 				desc[i].name = NULL;
2068 			}
2069 			evsel->nr_members = desc[i].nr_members;
2070 
2071 			if (i >= nr_groups || nr > 0) {
2072 				pr_debug("invalid group desc\n");
2073 				goto out_free;
2074 			}
2075 
2076 			leader = evsel;
2077 			nr = evsel->nr_members - 1;
2078 			i++;
2079 		} else if (nr) {
2080 			/* This is a group member */
2081 			evsel->leader = leader;
2082 
2083 			nr--;
2084 		}
2085 	}
2086 
2087 	if (i != nr_groups || nr != 0) {
2088 		pr_debug("invalid group desc\n");
2089 		goto out_free;
2090 	}
2091 
2092 	ret = 0;
2093 out_free:
2094 	for (i = 0; i < nr_groups; i++)
2095 		zfree(&desc[i].name);
2096 	free(desc);
2097 
2098 	return ret;
2099 }
2100 
2101 static int process_auxtrace(struct perf_file_section *section,
2102 			    struct perf_header *ph, int fd,
2103 			    void *data __maybe_unused)
2104 {
2105 	struct perf_session *session;
2106 	int err;
2107 
2108 	session = container_of(ph, struct perf_session, header);
2109 
2110 	err = auxtrace_index__process(fd, section->size, session,
2111 				      ph->needs_swap);
2112 	if (err < 0)
2113 		pr_err("Failed to process auxtrace index\n");
2114 	return err;
2115 }
2116 
2117 static int process_cache(struct perf_file_section *section __maybe_unused,
2118 			 struct perf_header *ph __maybe_unused, int fd __maybe_unused,
2119 			 void *data __maybe_unused)
2120 {
2121 	struct cpu_cache_level *caches;
2122 	u32 cnt, i, version;
2123 
2124 	if (readn(fd, &version, sizeof(version)) != sizeof(version))
2125 		return -1;
2126 
2127 	if (ph->needs_swap)
2128 		version = bswap_32(version);
2129 
2130 	if (version != 1)
2131 		return -1;
2132 
2133 	if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2134 		return -1;
2135 
2136 	if (ph->needs_swap)
2137 		cnt = bswap_32(cnt);
2138 
2139 	caches = zalloc(sizeof(*caches) * cnt);
2140 	if (!caches)
2141 		return -1;
2142 
2143 	for (i = 0; i < cnt; i++) {
2144 		struct cpu_cache_level c;
2145 
2146 		#define _R(v)						\
2147 			if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2148 				goto out_free_caches;			\
2149 			if (ph->needs_swap)				\
2150 				c.v = bswap_32(c.v);			\
2151 
2152 		_R(level)
2153 		_R(line_size)
2154 		_R(sets)
2155 		_R(ways)
2156 		#undef _R
2157 
2158 		#define _R(v)				\
2159 			c.v = do_read_string(fd, ph);	\
2160 			if (!c.v)			\
2161 				goto out_free_caches;
2162 
2163 		_R(type)
2164 		_R(size)
2165 		_R(map)
2166 		#undef _R
2167 
2168 		caches[i] = c;
2169 	}
2170 
2171 	ph->env.caches = caches;
2172 	ph->env.caches_cnt = cnt;
2173 	return 0;
2174 out_free_caches:
2175 	free(caches);
2176 	return -1;
2177 }
2178 
/*
 * Per-feature vtable: how a feature section is written at record time,
 * pretty-printed for header display, and parsed back when reading.
 */
struct feature_ops {
	int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
	void (*print)(struct perf_header *h, int fd, FILE *fp);
	int (*process)(struct perf_file_section *section,
		       struct perf_header *h, int fd, void *data);
	const char *name;
	bool full_only;	/* printed only in the full (-I) listing */
};

/*
 * FEAT_OPA: write/print only; FEAT_OPP: additionally has a process
 * callback; FEAT_OPF: like FEAT_OPP but full_only (see above).
 */
#define FEAT_OPA(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func }
#define FEAT_OPP(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func }
#define FEAT_OPF(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func, .full_only = true }

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

/* Table indexed by the HEADER_* feature id. */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPP(HEADER_TRACING_DATA,	tracing_data),
	FEAT_OPP(HEADER_BUILD_ID,	build_id),
	FEAT_OPP(HEADER_HOSTNAME,	hostname),
	FEAT_OPP(HEADER_OSRELEASE,	osrelease),
	FEAT_OPP(HEADER_VERSION,	version),
	FEAT_OPP(HEADER_ARCH,		arch),
	FEAT_OPP(HEADER_NRCPUS,		nrcpus),
	FEAT_OPP(HEADER_CPUDESC,	cpudesc),
	FEAT_OPP(HEADER_CPUID,		cpuid),
	FEAT_OPP(HEADER_TOTAL_MEM,	total_mem),
	FEAT_OPP(HEADER_EVENT_DESC,	event_desc),
	FEAT_OPP(HEADER_CMDLINE,	cmdline),
	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
	FEAT_OPA(HEADER_BRANCH_STACK,	branch_stack),
	FEAT_OPP(HEADER_PMU_MAPPINGS,	pmu_mappings),
	FEAT_OPP(HEADER_GROUP_DESC,	group_desc),
	FEAT_OPP(HEADER_AUXTRACE,	auxtrace),
	FEAT_OPA(HEADER_STAT,		stat),
	FEAT_OPF(HEADER_CACHE,		cache),
};

/* Carries the output stream and verbosity through the
 * perf_header__process_sections() callback. */
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};
2228 
2229 static int perf_file_section__fprintf_info(struct perf_file_section *section,
2230 					   struct perf_header *ph,
2231 					   int feat, int fd, void *data)
2232 {
2233 	struct header_print_data *hd = data;
2234 
2235 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2236 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2237 				"%d, continuing...\n", section->offset, feat);
2238 		return 0;
2239 	}
2240 	if (feat >= HEADER_LAST_FEATURE) {
2241 		pr_warning("unknown feature %d\n", feat);
2242 		return 0;
2243 	}
2244 	if (!feat_ops[feat].print)
2245 		return 0;
2246 
2247 	if (!feat_ops[feat].full_only || hd->full)
2248 		feat_ops[feat].print(ph, fd, hd->fp);
2249 	else
2250 		fprintf(hd->fp, "# %s info available, use -I to display\n",
2251 			feat_ops[feat].name);
2252 
2253 	return 0;
2254 }
2255 
2256 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
2257 {
2258 	struct header_print_data hd;
2259 	struct perf_header *header = &session->header;
2260 	int fd = perf_data_file__fd(session->file);
2261 	struct stat st;
2262 	int ret, bit;
2263 
2264 	hd.fp = fp;
2265 	hd.full = full;
2266 
2267 	ret = fstat(fd, &st);
2268 	if (ret == -1)
2269 		return -1;
2270 
2271 	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
2272 
2273 	perf_header__process_sections(header, fd, &hd,
2274 				      perf_file_section__fprintf_info);
2275 
2276 	if (session->file->is_pipe)
2277 		return 0;
2278 
2279 	fprintf(fp, "# missing features: ");
2280 	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
2281 		if (bit)
2282 			fprintf(fp, "%s ", feat_ops[bit].name);
2283 	}
2284 
2285 	fprintf(fp, "\n");
2286 	return 0;
2287 }
2288 
2289 static int do_write_feat(int fd, struct perf_header *h, int type,
2290 			 struct perf_file_section **p,
2291 			 struct perf_evlist *evlist)
2292 {
2293 	int err;
2294 	int ret = 0;
2295 
2296 	if (perf_header__has_feat(h, type)) {
2297 		if (!feat_ops[type].write)
2298 			return -1;
2299 
2300 		(*p)->offset = lseek(fd, 0, SEEK_CUR);
2301 
2302 		err = feat_ops[type].write(fd, h, evlist);
2303 		if (err < 0) {
2304 			pr_debug("failed to write feature %s\n", feat_ops[type].name);
2305 
2306 			/* undo anything written */
2307 			lseek(fd, (*p)->offset, SEEK_SET);
2308 
2309 			return -1;
2310 		}
2311 		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
2312 		(*p)++;
2313 	}
2314 	return ret;
2315 }
2316 
/*
 * Write every enabled feature section: reserve the section table at
 * feat_offset, write each feature's payload after it, then go back and
 * fill in the table of (offset, size) entries.
 */
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	/* payloads start right after the reserved section table */
	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		/* a feature that fails to write is dropped from the header */
		if (do_write_feat(fd, header, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}
2356 
2357 int perf_header__write_pipe(int fd)
2358 {
2359 	struct perf_pipe_file_header f_header;
2360 	int err;
2361 
2362 	f_header = (struct perf_pipe_file_header){
2363 		.magic	   = PERF_MAGIC,
2364 		.size	   = sizeof(f_header),
2365 	};
2366 
2367 	err = do_write(fd, &f_header, sizeof(f_header));
2368 	if (err < 0) {
2369 		pr_debug("failed to write perf pipe header\n");
2370 		return err;
2371 	}
2372 
2373 	return 0;
2374 }
2375 
/*
 * Write the on-disk perf.data layout: per-evsel id arrays, the attr
 * table, optionally (at_exit) the feature sections, and finally the
 * fixed file header back at offset 0.
 */
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *evsel;
	u64 attr_offset;
	int err;

	/* leave room for the fixed header; it is written last */
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		/* remember where each evsel's id array lands */
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	/* feature sections are only meaningful once the data is complete */
	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	/* reposition at end of data so the caller can keep appending */
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
2452 
2453 static int perf_header__getbuffer64(struct perf_header *header,
2454 				    int fd, void *buf, size_t size)
2455 {
2456 	if (readn(fd, buf, size) <= 0)
2457 		return -1;
2458 
2459 	if (header->needs_swap)
2460 		mem_bswap_64(buf, size);
2461 
2462 	return 0;
2463 }
2464 
/*
 * Read the feature-section table at feat_offset and invoke 'process' for
 * every feature bit set in the header, passing the matching section.
 * Stops at the first callback error.
 */
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	/* one perf_file_section per set feature bit, in bit order */
	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	/* iterate only known features (bits below HEADER_LAST_FEATURE);
	 * higher bits may have been written by a newer perf */
	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}
2503 
/*
 * sizeof(perf_event_attr) for each published ABI revision; the trailing
 * 0 entry terminates the table.
 */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};
2512 
/*
 * In the legacy file format, the magic number is not used to encode endianness.
 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
 * on ABI revisions, we need to try all combinations for all endianness to
 * detect the endianness.
 *
 * Returns 0 when an ABI revision matched (setting ph->needs_swap as a
 * side effect when the match required byte-swapping), -1 otherwise.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		/* expected attr_size: on-file attr plus the ids section */
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			/* retry assuming the file has opposite endianness */
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}
2542 
/* size of the pipe-mode file header for ABI revision 0 */
#define PERF_PIPE_HDR_VER0	16

/* valid pipe-mode header sizes per ABI revision; zero-terminated */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
2549 
2550 /*
2551  * In the legacy pipe format, there is an implicit assumption that endiannesss
2552  * between host recording the samples, and host parsing the samples is the
2553  * same. This is not always the case given that the pipe output may always be
2554  * redirected into a file and analyzed on a different machine with possibly a
2555  * different endianness and perf_event ABI revsions in the perf tool itself.
2556  */
2557 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2558 {
2559 	u64 attr_size;
2560 	int i;
2561 
2562 	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2563 		if (hdr_sz != attr_pipe_abi_sizes[i]) {
2564 			attr_size = bswap_64(hdr_sz);
2565 			if (attr_size != hdr_sz)
2566 				continue;
2567 
2568 			ph->needs_swap = true;
2569 		}
2570 		pr_debug("Pipe ABI%d perf.data file detected\n", i);
2571 		return 0;
2572 	}
2573 	return -1;
2574 }
2575 
2576 bool is_perf_magic(u64 magic)
2577 {
2578 	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2579 		|| magic == __perf_magic2
2580 		|| magic == __perf_magic2_sw)
2581 		return true;
2582 
2583 	return false;
2584 }
2585 
/*
 * Validate the file magic and determine file endianness, recording the
 * header version and needs_swap flag in @ph.
 *
 * Legacy "PERFFILE" files don't encode endianness in the magic, so the
 * header size is probed against the known ABI sizes instead (pipe and
 * file modes have different tables). Returns 0 on success, -1 when the
 * magic is unknown or no ABI size matches.
 */
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	/* magic matched byte-swapped: file was written on the other endianness */
	ph->needs_swap = true;

	return 0;
}
2620 
/*
 * Read and validate the on-file header of a (non-pipe) perf.data file
 * into @header, filling @ph with version, endianness, feature bits and
 * the data/feature section offsets.
 *
 * Returns 0 on success, -1 on read/validation failure.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	/* sets ph->version and ph->needs_swap */
	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	/* swap everything up to (but excluding) the feature bitmap */
	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	/* feature sections are stored immediately after the data area */
	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}
2692 
2693 static int perf_file_section__process(struct perf_file_section *section,
2694 				      struct perf_header *ph,
2695 				      int feat, int fd, void *data)
2696 {
2697 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2698 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2699 			  "%d, continuing...\n", section->offset, feat);
2700 		return 0;
2701 	}
2702 
2703 	if (feat >= HEADER_LAST_FEATURE) {
2704 		pr_debug("unknown feature %d, continuing...\n", feat);
2705 		return 0;
2706 	}
2707 
2708 	if (!feat_ops[feat].process)
2709 		return 0;
2710 
2711 	return feat_ops[feat].process(section, ph, fd, data);
2712 }
2713 
2714 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
2715 				       struct perf_header *ph, int fd,
2716 				       bool repipe)
2717 {
2718 	ssize_t ret;
2719 
2720 	ret = readn(fd, header, sizeof(*header));
2721 	if (ret <= 0)
2722 		return -1;
2723 
2724 	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
2725 		pr_debug("endian/magic failed\n");
2726 		return -1;
2727 	}
2728 
2729 	if (ph->needs_swap)
2730 		header->size = bswap_64(header->size);
2731 
2732 	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
2733 		return -1;
2734 
2735 	return 0;
2736 }
2737 
2738 static int perf_header__read_pipe(struct perf_session *session)
2739 {
2740 	struct perf_header *header = &session->header;
2741 	struct perf_pipe_file_header f_header;
2742 
2743 	if (perf_file_header__read_pipe(&f_header, header,
2744 					perf_data_file__fd(session->file),
2745 					session->repipe) < 0) {
2746 		pr_debug("incompatible file format\n");
2747 		return -EINVAL;
2748 	}
2749 
2750 	return 0;
2751 }
2752 
2753 static int read_attr(int fd, struct perf_header *ph,
2754 		     struct perf_file_attr *f_attr)
2755 {
2756 	struct perf_event_attr *attr = &f_attr->attr;
2757 	size_t sz, left;
2758 	size_t our_sz = sizeof(f_attr->attr);
2759 	ssize_t ret;
2760 
2761 	memset(f_attr, 0, sizeof(*f_attr));
2762 
2763 	/* read minimal guaranteed structure */
2764 	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2765 	if (ret <= 0) {
2766 		pr_debug("cannot read %d bytes of header attr\n",
2767 			 PERF_ATTR_SIZE_VER0);
2768 		return -1;
2769 	}
2770 
2771 	/* on file perf_event_attr size */
2772 	sz = attr->size;
2773 
2774 	if (ph->needs_swap)
2775 		sz = bswap_32(sz);
2776 
2777 	if (sz == 0) {
2778 		/* assume ABI0 */
2779 		sz =  PERF_ATTR_SIZE_VER0;
2780 	} else if (sz > our_sz) {
2781 		pr_debug("file uses a more recent and unsupported ABI"
2782 			 " (%zu bytes extra)\n", sz - our_sz);
2783 		return -1;
2784 	}
2785 	/* what we have not yet read and that we know about */
2786 	left = sz - PERF_ATTR_SIZE_VER0;
2787 	if (left) {
2788 		void *ptr = attr;
2789 		ptr += PERF_ATTR_SIZE_VER0;
2790 
2791 		ret = readn(fd, ptr, left);
2792 	}
2793 	/* read perf_file_section, ids are read in caller */
2794 	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2795 
2796 	return ret <= 0 ? -1 : 0;
2797 }
2798 
2799 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
2800 						struct pevent *pevent)
2801 {
2802 	struct event_format *event;
2803 	char bf[128];
2804 
2805 	/* already prepared */
2806 	if (evsel->tp_format)
2807 		return 0;
2808 
2809 	if (pevent == NULL) {
2810 		pr_debug("broken or missing trace data\n");
2811 		return -1;
2812 	}
2813 
2814 	event = pevent_find_event(pevent, evsel->attr.config);
2815 	if (event == NULL) {
2816 		pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
2817 		return -1;
2818 	}
2819 
2820 	if (!evsel->name) {
2821 		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2822 		evsel->name = strdup(bf);
2823 		if (evsel->name == NULL)
2824 			return -1;
2825 	}
2826 
2827 	evsel->tp_format = event;
2828 	return 0;
2829 }
2830 
2831 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
2832 						  struct pevent *pevent)
2833 {
2834 	struct perf_evsel *pos;
2835 
2836 	evlist__for_each_entry(evlist, pos) {
2837 		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2838 		    perf_evsel__prepare_tracepoint_event(pos, pevent))
2839 			return -1;
2840 	}
2841 
2842 	return 0;
2843 }
2844 
/*
 * Read the perf.data header from the session's file and populate the
 * session's evlist: one evsel per on-file attr, each with its sample
 * ids, plus the feature sections and (if present) tracepoint formats.
 *
 * Pipe-mode files are delegated to perf_header__read_pipe().
 * Returns 0 on success, -ENOMEM/-EINVAL/-errno on failure.
 */
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data_file *file = session->file;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data_file__fd(file);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	if (perf_data_file__is_pipe(file))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information.  Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   file->path);
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		/* remember where the next attr starts before chasing the ids */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		/* back to the attr list for the next iteration */
		lseek(fd, tmp, SEEK_SET);
	}

	symbol_conf.nr_events = nr_attrs;

	/* parse the optional feature sections stored after the data area */
	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
2947 
2948 int perf_event__synthesize_attr(struct perf_tool *tool,
2949 				struct perf_event_attr *attr, u32 ids, u64 *id,
2950 				perf_event__handler_t process)
2951 {
2952 	union perf_event *ev;
2953 	size_t size;
2954 	int err;
2955 
2956 	size = sizeof(struct perf_event_attr);
2957 	size = PERF_ALIGN(size, sizeof(u64));
2958 	size += sizeof(struct perf_event_header);
2959 	size += ids * sizeof(u64);
2960 
2961 	ev = malloc(size);
2962 
2963 	if (ev == NULL)
2964 		return -ENOMEM;
2965 
2966 	ev->attr.attr = *attr;
2967 	memcpy(ev->attr.id, id, ids * sizeof(u64));
2968 
2969 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2970 	ev->attr.header.size = (u16)size;
2971 
2972 	if (ev->attr.header.size == size)
2973 		err = process(tool, ev, NULL, NULL);
2974 	else
2975 		err = -E2BIG;
2976 
2977 	free(ev);
2978 
2979 	return err;
2980 }
2981 
2982 static struct event_update_event *
2983 event_update_event__new(size_t size, u64 type, u64 id)
2984 {
2985 	struct event_update_event *ev;
2986 
2987 	size += sizeof(*ev);
2988 	size  = PERF_ALIGN(size, sizeof(u64));
2989 
2990 	ev = zalloc(size);
2991 	if (ev) {
2992 		ev->header.type = PERF_RECORD_EVENT_UPDATE;
2993 		ev->header.size = (u16)size;
2994 		ev->type = type;
2995 		ev->id = id;
2996 	}
2997 	return ev;
2998 }
2999 
3000 int
3001 perf_event__synthesize_event_update_unit(struct perf_tool *tool,
3002 					 struct perf_evsel *evsel,
3003 					 perf_event__handler_t process)
3004 {
3005 	struct event_update_event *ev;
3006 	size_t size = strlen(evsel->unit);
3007 	int err;
3008 
3009 	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
3010 	if (ev == NULL)
3011 		return -ENOMEM;
3012 
3013 	strncpy(ev->data, evsel->unit, size);
3014 	err = process(tool, (union perf_event *)ev, NULL, NULL);
3015 	free(ev);
3016 	return err;
3017 }
3018 
3019 int
3020 perf_event__synthesize_event_update_scale(struct perf_tool *tool,
3021 					  struct perf_evsel *evsel,
3022 					  perf_event__handler_t process)
3023 {
3024 	struct event_update_event *ev;
3025 	struct event_update_event_scale *ev_data;
3026 	int err;
3027 
3028 	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
3029 	if (ev == NULL)
3030 		return -ENOMEM;
3031 
3032 	ev_data = (struct event_update_event_scale *) ev->data;
3033 	ev_data->scale = evsel->scale;
3034 	err = process(tool, (union perf_event*) ev, NULL, NULL);
3035 	free(ev);
3036 	return err;
3037 }
3038 
3039 int
3040 perf_event__synthesize_event_update_name(struct perf_tool *tool,
3041 					 struct perf_evsel *evsel,
3042 					 perf_event__handler_t process)
3043 {
3044 	struct event_update_event *ev;
3045 	size_t len = strlen(evsel->name);
3046 	int err;
3047 
3048 	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
3049 	if (ev == NULL)
3050 		return -ENOMEM;
3051 
3052 	strncpy(ev->data, evsel->name, len);
3053 	err = process(tool, (union perf_event*) ev, NULL, NULL);
3054 	free(ev);
3055 	return err;
3056 }
3057 
3058 int
3059 perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
3060 					struct perf_evsel *evsel,
3061 					perf_event__handler_t process)
3062 {
3063 	size_t size = sizeof(struct event_update_event);
3064 	struct event_update_event *ev;
3065 	int max, err;
3066 	u16 type;
3067 
3068 	if (!evsel->own_cpus)
3069 		return 0;
3070 
3071 	ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
3072 	if (!ev)
3073 		return -ENOMEM;
3074 
3075 	ev->header.type = PERF_RECORD_EVENT_UPDATE;
3076 	ev->header.size = (u16)size;
3077 	ev->type = PERF_EVENT_UPDATE__CPUS;
3078 	ev->id   = evsel->id[0];
3079 
3080 	cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
3081 				 evsel->own_cpus,
3082 				 type, max);
3083 
3084 	err = process(tool, (union perf_event*) ev, NULL, NULL);
3085 	free(ev);
3086 	return err;
3087 }
3088 
3089 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
3090 {
3091 	struct event_update_event *ev = &event->event_update;
3092 	struct event_update_event_scale *ev_scale;
3093 	struct event_update_event_cpus *ev_cpus;
3094 	struct cpu_map *map;
3095 	size_t ret;
3096 
3097 	ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);
3098 
3099 	switch (ev->type) {
3100 	case PERF_EVENT_UPDATE__SCALE:
3101 		ev_scale = (struct event_update_event_scale *) ev->data;
3102 		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
3103 		break;
3104 	case PERF_EVENT_UPDATE__UNIT:
3105 		ret += fprintf(fp, "... unit:  %s\n", ev->data);
3106 		break;
3107 	case PERF_EVENT_UPDATE__NAME:
3108 		ret += fprintf(fp, "... name:  %s\n", ev->data);
3109 		break;
3110 	case PERF_EVENT_UPDATE__CPUS:
3111 		ev_cpus = (struct event_update_event_cpus *) ev->data;
3112 		ret += fprintf(fp, "... ");
3113 
3114 		map = cpu_map__new_data(&ev_cpus->cpus);
3115 		if (map)
3116 			ret += cpu_map__fprintf(map, fp);
3117 		else
3118 			ret += fprintf(fp, "failed to get cpus\n");
3119 		break;
3120 	default:
3121 		ret += fprintf(fp, "... unknown type\n");
3122 		break;
3123 	}
3124 
3125 	return ret;
3126 }
3127 
3128 int perf_event__synthesize_attrs(struct perf_tool *tool,
3129 				   struct perf_session *session,
3130 				   perf_event__handler_t process)
3131 {
3132 	struct perf_evsel *evsel;
3133 	int err = 0;
3134 
3135 	evlist__for_each_entry(session->evlist, evsel) {
3136 		err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3137 						  evsel->id, process);
3138 		if (err) {
3139 			pr_debug("failed to create perf header attribute\n");
3140 			return err;
3141 		}
3142 	}
3143 
3144 	return err;
3145 }
3146 
/*
 * Handle an in-stream PERF_RECORD_HEADER_ATTR: create an evsel for the
 * attr and register the trailing sample ids with it, allocating
 * *pevlist on first use.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = perf_evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);

	/* the id array occupies whatever follows the attr in the record */
	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
	}

	symbol_conf.nr_events = evlist->nr_entries;

	return 0;
}
3186 
3187 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3188 				     union perf_event *event,
3189 				     struct perf_evlist **pevlist)
3190 {
3191 	struct event_update_event *ev = &event->event_update;
3192 	struct event_update_event_scale *ev_scale;
3193 	struct event_update_event_cpus *ev_cpus;
3194 	struct perf_evlist *evlist;
3195 	struct perf_evsel *evsel;
3196 	struct cpu_map *map;
3197 
3198 	if (!pevlist || *pevlist == NULL)
3199 		return -EINVAL;
3200 
3201 	evlist = *pevlist;
3202 
3203 	evsel = perf_evlist__id2evsel(evlist, ev->id);
3204 	if (evsel == NULL)
3205 		return -EINVAL;
3206 
3207 	switch (ev->type) {
3208 	case PERF_EVENT_UPDATE__UNIT:
3209 		evsel->unit = strdup(ev->data);
3210 		break;
3211 	case PERF_EVENT_UPDATE__NAME:
3212 		evsel->name = strdup(ev->data);
3213 		break;
3214 	case PERF_EVENT_UPDATE__SCALE:
3215 		ev_scale = (struct event_update_event_scale *) ev->data;
3216 		evsel->scale = ev_scale->scale;
3217 		break;
3218 	case PERF_EVENT_UPDATE__CPUS:
3219 		ev_cpus = (struct event_update_event_cpus *) ev->data;
3220 
3221 		map = cpu_map__new_data(&ev_cpus->cpus);
3222 		if (map)
3223 			evsel->own_cpus = map;
3224 		else
3225 			pr_err("failed to get event_update cpus\n");
3226 	default:
3227 		break;
3228 	}
3229 
3230 	return 0;
3231 }
3232 
/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA record followed by the
 * tracing data itself (padded up to a u64 boundary) on the pipe @fd.
 *
 * Returns the aligned size of the tracing data that was emitted, or -1
 * when the tracing data cannot be gathered.
 */
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct perf_evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	/* NOTE(review): the return of process() is ignored here -- confirm */
	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	/* pad the stream so the consumer stays u64-aligned */
	write_padded(fd, NULL, 0, padding);

	return aligned_size;
}
3278 
/*
 * Handle an in-stream PERF_RECORD_HEADER_TRACING_DATA: parse the
 * tracing data that follows the record (plus its u64-alignment
 * padding), re-emitting the padding on stdout when repiping, then bind
 * the parsed formats to the session's tracepoint evsels.
 *
 * Returns the number of bytes consumed (data + padding), or -1 on
 * read/size-mismatch errors.
 */
int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data_file__fd(session->file);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	/* the producer padded the data up to a u64 boundary */
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		/* forward the padding so the downstream stream stays aligned */
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	/* what we parsed must account for exactly what the record declared */
	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}
3318 
3319 int perf_event__synthesize_build_id(struct perf_tool *tool,
3320 				    struct dso *pos, u16 misc,
3321 				    perf_event__handler_t process,
3322 				    struct machine *machine)
3323 {
3324 	union perf_event ev;
3325 	size_t len;
3326 	int err = 0;
3327 
3328 	if (!pos->hit)
3329 		return err;
3330 
3331 	memset(&ev, 0, sizeof(ev));
3332 
3333 	len = pos->long_name_len + 1;
3334 	len = PERF_ALIGN(len, NAME_ALIGN);
3335 	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
3336 	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
3337 	ev.build_id.header.misc = misc;
3338 	ev.build_id.pid = machine->pid;
3339 	ev.build_id.header.size = sizeof(ev.build_id) + len;
3340 	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
3341 
3342 	err = process(tool, &ev, NULL, machine);
3343 
3344 	return err;
3345 }
3346 
3347 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3348 				 union perf_event *event,
3349 				 struct perf_session *session)
3350 {
3351 	__event_process_build_id(&event->build_id,
3352 				 event->build_id.filename,
3353 				 session);
3354 	return 0;
3355 }
3356