xref: /linux/tools/perf/util/header.c (revision 564f7dfde24a405d877168f150ae5d29d3ad99c7)
1 #include <errno.h>
2 #include <inttypes.h>
3 #include "util.h"
4 #include "string2.h"
5 #include <sys/param.h>
6 #include <sys/types.h>
7 #include <byteswap.h>
8 #include <unistd.h>
9 #include <stdio.h>
10 #include <stdlib.h>
11 #include <linux/list.h>
12 #include <linux/kernel.h>
13 #include <linux/bitops.h>
14 #include <sys/stat.h>
15 #include <sys/types.h>
16 #include <sys/utsname.h>
17 #include <unistd.h>
18 
19 #include "evlist.h"
20 #include "evsel.h"
21 #include "header.h"
22 #include "../perf.h"
23 #include "trace-event.h"
24 #include "session.h"
25 #include "symbol.h"
26 #include "debug.h"
27 #include "cpumap.h"
28 #include "pmu.h"
29 #include "vdso.h"
30 #include "strbuf.h"
31 #include "build-id.h"
32 #include "data.h"
33 #include <api/fs/fs.h>
34 #include "asm/bug.h"
35 
36 #include "sane_ctype.h"
37 
38 /*
39  * magic2 = "PERFILE2"
40  * must be a numerical value to let the endianness
41  * determine the memory layout. That way we are able
42  * to detect endianness when reading the perf.data file
43  * back.
44  *
45  * we check for legacy (PERFFILE) format.
46  */
/* legacy header magic (pre-v2 perf.data files) */
static const char *__perf_magic1 = "PERFFILE";
/* "PERFILE2" as a little-endian u64; reading it byte-swapped reveals a
 * file written on an opposite-endian host */
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
/* "PERFILE2" as seen when the writer had the opposite endianness */
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

/* on-disk layout of one event: the attr followed by the file section
 * (offset/size) holding its sample ids */
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};
59 
/* mark feature 'feat' as present in the header feature bitmap */
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

/* remove feature 'feat' from the header feature bitmap */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

/* test whether feature 'feat' is present in the header feature bitmap */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}
74 
/*
 * Write 'size' bytes from 'buf' to 'fd', retrying on short writes.
 * Returns 0 on success or -errno on write failure.
 *
 * Use a char pointer for the cursor: arithmetic on 'void *' is a GNU
 * extension, not ISO C.  Keep write()'s return in a ssize_t so a large
 * successful write is not truncated through an int.
 */
static int do_write(int fd, const void *buf, size_t size)
{
	const char *p = buf;

	while (size) {
		ssize_t ret = write(fd, p, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		p += ret;
	}

	return 0;
}
89 
90 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned)
91 {
92 	static const char zero_buf[NAME_ALIGN];
93 	int err = do_write(fd, bf, count);
94 
95 	if (!err)
96 		err = do_write(fd, zero_buf, count_aligned - count);
97 
98 	return err;
99 }
100 
/* on-disk footprint of a string: u32 length field + NUL-terminated,
 * NAME_ALIGN-padded payload */
#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
103 
104 static int do_write_string(int fd, const char *str)
105 {
106 	u32 len, olen;
107 	int ret;
108 
109 	olen = strlen(str) + 1;
110 	len = PERF_ALIGN(olen, NAME_ALIGN);
111 
112 	/* write len, incl. \0 */
113 	ret = do_write(fd, &len, sizeof(len));
114 	if (ret < 0)
115 		return ret;
116 
117 	return write_padded(fd, str, olen, len);
118 }
119 
120 static char *do_read_string(int fd, struct perf_header *ph)
121 {
122 	ssize_t sz, ret;
123 	u32 len;
124 	char *buf;
125 
126 	sz = readn(fd, &len, sizeof(len));
127 	if (sz < (ssize_t)sizeof(len))
128 		return NULL;
129 
130 	if (ph->needs_swap)
131 		len = bswap_32(len);
132 
133 	buf = malloc(len);
134 	if (!buf)
135 		return NULL;
136 
137 	ret = readn(fd, buf, len);
138 	if (ret == (ssize_t)len) {
139 		/*
140 		 * strings are padded by zeroes
141 		 * thus the actual strlen of buf
142 		 * may be less than len
143 		 */
144 		return buf;
145 	}
146 
147 	free(buf);
148 	return NULL;
149 }
150 
/* dump tracepoint metadata (format files etc.) for all events to fd */
static int write_tracing_data(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}
156 
157 
/*
 * Write the build-id table for all DSOs hit during the session, then
 * populate the local build-id cache.  Requires that build-ids were
 * actually collected; fails otherwise.
 */
static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	/* build-ids must have been read from the processed events first */
	if (!perf_session__read_build_ids(session, true))
		return -1;

	err = perf_session__write_buildid_table(session, fd);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	/* best-effort: cache the DSOs so later reports can find them */
	perf_session__cache_build_ids(session);

	return 0;
}
178 
179 static int write_hostname(int fd, struct perf_header *h __maybe_unused,
180 			  struct perf_evlist *evlist __maybe_unused)
181 {
182 	struct utsname uts;
183 	int ret;
184 
185 	ret = uname(&uts);
186 	if (ret < 0)
187 		return -1;
188 
189 	return do_write_string(fd, uts.nodename);
190 }
191 
192 static int write_osrelease(int fd, struct perf_header *h __maybe_unused,
193 			   struct perf_evlist *evlist __maybe_unused)
194 {
195 	struct utsname uts;
196 	int ret;
197 
198 	ret = uname(&uts);
199 	if (ret < 0)
200 		return -1;
201 
202 	return do_write_string(fd, uts.release);
203 }
204 
205 static int write_arch(int fd, struct perf_header *h __maybe_unused,
206 		      struct perf_evlist *evlist __maybe_unused)
207 {
208 	struct utsname uts;
209 	int ret;
210 
211 	ret = uname(&uts);
212 	if (ret < 0)
213 		return -1;
214 
215 	return do_write_string(fd, uts.machine);
216 }
217 
/* record the perf tool version that produced this file */
static int write_version(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(fd, perf_version_string);
}
223 
/*
 * Find the first /proc/cpuinfo line starting with 'cpuinfo_proc'
 * (e.g. "model name"), strip the "key: " prefix and the newline,
 * collapse whitespace runs, and write the result as the cpu
 * description string.  Returns 0 on success, -1 if the key is not
 * found or any step fails.
 */
static int __write_cpudesc(int fd, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	/* ret becomes 0 when a line with the wanted prefix is found */
	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	/* skip past "key: " to the value, if the line has that shape */
	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	/* NOTE(review): isspace() on a plain char; assumes sane_ctype.h
	 * provides a char-safe variant — confirm against that header */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			/* shift the tail left over the run of spaces */
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(fd, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
280 
/*
 * Write the CPU description, trying each arch-specific /proc/cpuinfo
 * key in turn until one succeeds.  CPUINFO_PROC may be overridden per
 * architecture at build time.
 */
static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
		       struct perf_evlist *evlist __maybe_unused)
{
#ifndef CPUINFO_PROC
#define CPUINFO_PROC {"model name", }
#endif
	const char *cpuinfo_procs[] = CPUINFO_PROC;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(fd, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;	/* no key matched */
}
298 
299 
300 static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
301 			struct perf_evlist *evlist __maybe_unused)
302 {
303 	long nr;
304 	u32 nrc, nra;
305 	int ret;
306 
307 	nrc = cpu__max_present_cpu();
308 
309 	nr = sysconf(_SC_NPROCESSORS_ONLN);
310 	if (nr < 0)
311 		return -1;
312 
313 	nra = (u32)(nr & UINT_MAX);
314 
315 	ret = do_write(fd, &nrc, sizeof(nrc));
316 	if (ret < 0)
317 		return ret;
318 
319 	return do_write(fd, &nra, sizeof(nra));
320 }
321 
/*
 * Serialize the event descriptions: number of events, the size of
 * perf_event_attr, then per event its attr, id count, display name
 * and the array of sample ids.  Mirrors read_event_desc().
 */
static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(fd, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique id per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
377 
/*
 * Record the command line: count, the resolved path of the running
 * perf binary, then each original argv[] entry from perf_env.
 */
static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	char buf[MAXPATHLEN];
	u32 n;
	int i, ret;

	/* actual path to perf binary */
	ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(fd, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(fd, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(fd, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}
411 
/* sysfs paths listing the CPUs sharing a core / a thread with cpu%d */
#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

/*
 * Deduplicated CPU topology: each entry in core_siblings/thread_siblings
 * is a unique sibling-list string (e.g. "0-3"); core_sib/thread_sib count
 * the used entries, cpu_nr is the array capacity.
 */
struct cpu_topo {
	u32 cpu_nr;
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};
424 
/*
 * Read 'cpu's core and thread sibling lists from sysfs and add each to
 * tp if not already present (deduplication by string compare).  A
 * missing core_siblings file is tolerated and we fall through to the
 * thread list.  Returns 0 if at least one list was processed, -1 if
 * neither could be read.
 */
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	/* only store a core sibling list we haven't seen yet */
	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		/* ownership moved into tp; force getline to reallocate */
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	/* same dedup scheme for the thread sibling list */
	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if(fp)
		fclose(fp);
	free(buf);
	return ret;
}
490 
/* free a cpu_topo and every sibling string it owns; NULL-safe */
static void free_cpu_topo(struct cpu_topo *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0 ; i < tp->core_sib; i++)
		zfree(&tp->core_siblings[i]);

	for (i = 0 ; i < tp->thread_sib; i++)
		zfree(&tp->thread_siblings[i]);

	free(tp);
}
506 
/*
 * Allocate a cpu_topo sized for all present CPUs and fill it with the
 * deduplicated sibling lists of every online CPU.  The struct and its
 * two pointer arrays live in one allocation.  Returns NULL on failure.
 *
 * NOTE(review): 'ret' keeps its initial -1 if the loop body never runs
 * (no online CPU below 'nr'), which frees tp and returns NULL — verify
 * this is the intended behaviour for that corner case.
 */
static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	/* single allocation: struct + core_siblings[] + thread_siblings[] */
	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	tp->cpu_nr = nr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		/* offline CPUs have no sysfs topology to read */
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}
557 
558 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused,
559 			  struct perf_evlist *evlist __maybe_unused)
560 {
561 	struct cpu_topo *tp;
562 	u32 i;
563 	int ret, j;
564 
565 	tp = build_cpu_topology();
566 	if (!tp)
567 		return -1;
568 
569 	ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib));
570 	if (ret < 0)
571 		goto done;
572 
573 	for (i = 0; i < tp->core_sib; i++) {
574 		ret = do_write_string(fd, tp->core_siblings[i]);
575 		if (ret < 0)
576 			goto done;
577 	}
578 	ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib));
579 	if (ret < 0)
580 		goto done;
581 
582 	for (i = 0; i < tp->thread_sib; i++) {
583 		ret = do_write_string(fd, tp->thread_siblings[i]);
584 		if (ret < 0)
585 			break;
586 	}
587 
588 	ret = perf_env__read_cpu_topology_map(&perf_env);
589 	if (ret < 0)
590 		goto done;
591 
592 	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
593 		ret = do_write(fd, &perf_env.cpu[j].core_id,
594 			       sizeof(perf_env.cpu[j].core_id));
595 		if (ret < 0)
596 			return ret;
597 		ret = do_write(fd, &perf_env.cpu[j].socket_id,
598 			       sizeof(perf_env.cpu[j].socket_id));
599 		if (ret < 0)
600 			return ret;
601 	}
602 done:
603 	free_cpu_topo(tp);
604 	return ret;
605 }
606 
607 
608 
609 static int write_total_mem(int fd, struct perf_header *h __maybe_unused,
610 			  struct perf_evlist *evlist __maybe_unused)
611 {
612 	char *buf = NULL;
613 	FILE *fp;
614 	size_t len = 0;
615 	int ret = -1, n;
616 	uint64_t mem;
617 
618 	fp = fopen("/proc/meminfo", "r");
619 	if (!fp)
620 		return -1;
621 
622 	while (getline(&buf, &len, fp) > 0) {
623 		ret = strncmp(buf, "MemTotal:", 9);
624 		if (!ret)
625 			break;
626 	}
627 	if (!ret) {
628 		n = sscanf(buf, "%*s %"PRIu64, &mem);
629 		if (n == 1)
630 			ret = do_write(fd, &mem, sizeof(mem));
631 	} else
632 		ret = -1;
633 	free(buf);
634 	fclose(fp);
635 	return ret;
636 }
637 
638 static int write_topo_node(int fd, int node)
639 {
640 	char str[MAXPATHLEN];
641 	char field[32];
642 	char *buf = NULL, *p;
643 	size_t len = 0;
644 	FILE *fp;
645 	u64 mem_total, mem_free, mem;
646 	int ret = -1;
647 
648 	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
649 	fp = fopen(str, "r");
650 	if (!fp)
651 		return -1;
652 
653 	while (getline(&buf, &len, fp) > 0) {
654 		/* skip over invalid lines */
655 		if (!strchr(buf, ':'))
656 			continue;
657 		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
658 			goto done;
659 		if (!strcmp(field, "MemTotal:"))
660 			mem_total = mem;
661 		if (!strcmp(field, "MemFree:"))
662 			mem_free = mem;
663 	}
664 
665 	fclose(fp);
666 	fp = NULL;
667 
668 	ret = do_write(fd, &mem_total, sizeof(u64));
669 	if (ret)
670 		goto done;
671 
672 	ret = do_write(fd, &mem_free, sizeof(u64));
673 	if (ret)
674 		goto done;
675 
676 	ret = -1;
677 	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);
678 
679 	fp = fopen(str, "r");
680 	if (!fp)
681 		goto done;
682 
683 	if (getline(&buf, &len, fp) <= 0)
684 		goto done;
685 
686 	p = strchr(buf, '\n');
687 	if (p)
688 		*p = '\0';
689 
690 	ret = do_write_string(fd, buf);
691 done:
692 	free(buf);
693 	if (fp)
694 		fclose(fp);
695 	return ret;
696 }
697 
/*
 * Write the NUMA topology feature: the number of online nodes, then
 * for each node its id followed by write_topo_node()'s payload
 * (meminfo and cpulist).
 */
static int write_numa_topology(int fd, struct perf_header *h __maybe_unused,
			  struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	/* the online file holds a cpulist-style range, e.g. "0-1" */
	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(fd, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(fd, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(fd, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return ret;
}
746 
747 /*
748  * File format:
749  *
750  * struct pmu_mappings {
751  *	u32	pmu_num;
752  *	struct pmu_map {
753  *		u32	type;
754  *		char	name[];
755  *	}[pmu_num];
756  * };
757  */
758 
/*
 * Write the PMU type->name mapping table.  The count is not known up
 * front, so a placeholder 0 is written first and back-patched with
 * pwrite() once all PMUs have been scanned.
 *
 * NOTE(review): the lseek/pwrite back-patch presumes fd is seekable;
 * this cannot work if the output is ever a pipe — confirm callers.
 */
static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	off_t offset = lseek(fd, 0, SEEK_CUR);
	__u32 pmu_num = 0;
	int ret;

	/* write real pmu_num later */
	ret = do_write(fd, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;

		ret = do_write(fd, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(fd, pmu->name);
		if (ret < 0)
			return ret;
	}

	/* back-patch the real count over the placeholder */
	if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) {
		/* discard all */
		lseek(fd, offset, SEEK_SET);
		return -1;
	}

	return 0;
}
794 
795 /*
796  * File format:
797  *
798  * struct group_descs {
799  *	u32	nr_groups;
800  *	struct group_desc {
801  *		char	name[];
802  *		u32	leader_idx;
803  *		u32	nr_members;
804  *	}[nr_groups];
805  * };
806  */
/*
 * Write event group descriptions per the file format documented above:
 * group count, then for each multi-member group its name, the leader's
 * evlist index and the member count.  Groups of one are skipped.
 */
static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct perf_evsel *evsel;
	int ret;

	ret = do_write(fd, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		/* only leaders of groups with more than one member */
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

			ret = do_write_string(fd, name);
			if (ret < 0)
				return ret;

			ret = do_write(fd, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(fd, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
840 
841 /*
842  * default get_cpuid(): nothing gets recorded
843  * actual implementation must be in arch/$(ARCH)/util/header.c
844  */
/* weak fallback: architectures without a cpuid provider always fail */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}
849 
850 static int write_cpuid(int fd, struct perf_header *h __maybe_unused,
851 		       struct perf_evlist *evlist __maybe_unused)
852 {
853 	char buffer[64];
854 	int ret;
855 
856 	ret = get_cpuid(buffer, sizeof(buffer));
857 	if (!ret)
858 		goto write_it;
859 
860 	return -1;
861 write_it:
862 	return do_write_string(fd, buffer);
863 }
864 
/* the branch-stack feature carries no payload; only the feature bit matters */
static int write_branch_stack(int fd __maybe_unused,
			      struct perf_header *h __maybe_unused,
		       struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
871 
/* write the AUX area trace index collected during the session */
static int write_auxtrace(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	err = auxtrace_index__write(fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}
885 
886 static int cpu_cache_level__sort(const void *a, const void *b)
887 {
888 	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
889 	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;
890 
891 	return cache_a->level - cache_b->level;
892 }
893 
894 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
895 {
896 	if (a->level != b->level)
897 		return false;
898 
899 	if (a->line_size != b->line_size)
900 		return false;
901 
902 	if (a->sets != b->sets)
903 		return false;
904 
905 	if (a->ways != b->ways)
906 		return false;
907 
908 	if (strcmp(a->type, b->type))
909 		return false;
910 
911 	if (strcmp(a->size, b->size))
912 		return false;
913 
914 	if (strcmp(a->map, b->map))
915 		return false;
916 
917 	return true;
918 }
919 
920 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
921 {
922 	char path[PATH_MAX], file[PATH_MAX];
923 	struct stat st;
924 	size_t len;
925 
926 	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
927 	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);
928 
929 	if (stat(file, &st))
930 		return 1;
931 
932 	scnprintf(file, PATH_MAX, "%s/level", path);
933 	if (sysfs__read_int(file, (int *) &cache->level))
934 		return -1;
935 
936 	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
937 	if (sysfs__read_int(file, (int *) &cache->line_size))
938 		return -1;
939 
940 	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
941 	if (sysfs__read_int(file, (int *) &cache->sets))
942 		return -1;
943 
944 	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
945 	if (sysfs__read_int(file, (int *) &cache->ways))
946 		return -1;
947 
948 	scnprintf(file, PATH_MAX, "%s/type", path);
949 	if (sysfs__read_str(file, &cache->type, &len))
950 		return -1;
951 
952 	cache->type[len] = 0;
953 	cache->type = rtrim(cache->type);
954 
955 	scnprintf(file, PATH_MAX, "%s/size", path);
956 	if (sysfs__read_str(file, &cache->size, &len)) {
957 		free(cache->type);
958 		return -1;
959 	}
960 
961 	cache->size[len] = 0;
962 	cache->size = rtrim(cache->size);
963 
964 	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
965 	if (sysfs__read_str(file, &cache->map, &len)) {
966 		free(cache->map);
967 		free(cache->type);
968 		return -1;
969 	}
970 
971 	cache->map[len] = 0;
972 	cache->map = rtrim(cache->map);
973 	return 0;
974 }
975 
/* print one cache entry as e.g. "L1 Data             32K [0-1]" */
static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}
980 
/*
 * Enumerate all distinct cache levels across all configured CPUs into
 * 'caches' (capacity 'size'), deduplicating identical descriptions.
 * The per-CPU level scan stops at the first missing sysfs index (up to
 * 10 levels probed).  Sets *cntp to the number collected; returns 0 on
 * success or a negative error from cpu_cache_level__read().
 */
static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			/* err == 1: no such level on this cpu, next cpu */
			if (err == 1)
				break;

			/* keep only one copy of identical cache descriptions */
			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
 out:
	*cntp = cnt;
	return 0;
}
1024 
/* upper bound on distinct cache descriptions we can collect */
#define MAX_CACHES 2000

/*
 * Write the cache feature: a version word, the entry count, then per
 * entry the four numeric fields followed by the three strings
 * (type/size/map), sorted by level.
 */
static int write_cache(int fd, struct perf_header *h __maybe_unused,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(fd, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(fd, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		/* write one u32 field, bailing out on error */
		#define _W(v)					\
			ret = do_write(fd, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		/* write one string field, bailing out on error */
		#define _W(v)						\
			ret = do_write_string(fd, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	/* entries own their strings regardless of write success */
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}
1078 
/* the stat feature carries no payload; only the feature bit matters */
static int write_stat(int fd __maybe_unused,
		      struct perf_header *h __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}
1085 
/* print the recorded capture-host name */
static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
			   FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ph->env.hostname);
}

/* print the recorded kernel release */
static void print_osrelease(struct perf_header *ph, int fd __maybe_unused,
			    FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ph->env.os_release);
}

/* print the recorded machine architecture */
static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ph->env.arch);
}

/* print the recorded CPU description (e.g. model name) */
static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc);
}

/* print the recorded online/available CPU counts */
static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused,
			 FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail);
}

/* print the perf version that produced the file */
static void print_version(struct perf_header *ph, int fd __maybe_unused,
			  FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ph->env.version);
}
1121 
1122 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused,
1123 			  FILE *fp)
1124 {
1125 	int nr, i;
1126 
1127 	nr = ph->env.nr_cmdline;
1128 
1129 	fprintf(fp, "# cmdline : ");
1130 
1131 	for (i = 0; i < nr; i++)
1132 		fprintf(fp, "%s ", ph->env.cmdline_argv[i]);
1133 	fputc('\n', fp);
1134 }
1135 
/*
 * Print the recorded CPU topology: the unique core and thread sibling
 * lists (stored as consecutive NUL-separated strings), then per-CPU
 * core/socket ids when the file carried them.
 */
static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused,
			       FILE *fp)
{
	int nr, i;
	char *str;
	int cpu_nr = ph->env.nr_cpus_avail;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores   : %s\n", str);
		/* strings are packed back to back */
		str += strlen(str) + 1;
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.cpu != NULL) {
		for (i = 0; i < cpu_nr; i++)
			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
	} else
		fprintf(fp, "# Core ID and Socket ID information is not available\n");
}
1166 
/*
 * Free an event array from read_event_desc(); the array is terminated
 * by an entry whose attr.size is 0.  NULL-safe.
 */
static void free_event_desc(struct perf_evsel *events)
{
	struct perf_evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->id);
	}

	free(events);
}
1181 
/*
 * Parse the event-description section written by write_event_desc():
 * event count, on-file attr size, then per event the attr, id count,
 * name string and id array.  Returns a calloc'd array of nre+1 evsels
 * terminated by attr.size == 0 (free with free_event_desc()), or NULL
 * on any read/allocation failure.
 *
 * NOTE(review): 'nre' comes straight from the file and is not bounded
 * before the calloc — a corrupt file can request a huge allocation;
 * confirm whether callers sanitize the section size first.
 */
static struct perf_evsel *
read_event_desc(struct perf_header *ph, int fd)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	ssize_t ret;
	size_t msz;

	/* number of events */
	ret = readn(fd, &nre, sizeof(nre));
	if (ret != (ssize_t)sizeof(nre))
		goto error;

	if (ph->needs_swap)
		nre = bswap_32(nre);

	ret = readn(fd, &sz, sizeof(sz));
	if (ret != (ssize_t)sizeof(sz))
		goto error;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	/* copy at most our in-memory attr size; the file's may differ */
	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		ret = readn(fd, buf, sz);
		if (ret != (ssize_t)sz)
			goto error;

		if (ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		ret = readn(fd, &nr, sizeof(nr));
		if (ret != (ssize_t)sizeof(nr))
			goto error;

		if (ph->needs_swap) {
			nr = bswap_32(nr);
			evsel->needs_swap = true;
		}

		evsel->name = do_read_string(fd, ph);

		/* events may carry no sample ids */
		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			ret = readn(fd, id, sizeof(*id));
			if (ret != (ssize_t)sizeof(*id))
				goto error;
			if (ph->needs_swap)
				*id = bswap_64(*id);
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}
1274 
/* perf_event_attr__fprintf() callback: print one attr field as ", name = val" */
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __attribute__((unused)))
{
	return fprintf(fp, ", %s = %s", name, val);
}
1280 
/*
 * Print each recorded event: its name, sample ids and full attr dump.
 * Reads the event-desc section from fd via read_event_desc().
 */
static void print_event_desc(struct perf_header *ph, int fd, FILE *fp)
{
	struct perf_evsel *evsel, *events = read_event_desc(ph, fd);
	u32 j;
	u64 *id;

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->attr.size; evsel++) {
		/* NOTE(review): the trailing ", " plus the ", id = {" below
		 * yields a doubled comma in the output — preserved as is */
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
}
1312 
1313 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
1314 			    FILE *fp)
1315 {
1316 	fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem);
1317 }
1318 
/* print each recorded NUMA node: total/free memory and its cpu list */
static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
				FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ph->env.nr_numa_nodes; i++) {
		n = &ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}
1336 
/* print the recorded arch-specific cpuid string */
static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ph->env.cpuid);
}
1341 
/* payload-less feature: its presence means samples carry branch stacks */
static void print_branch_stack(struct perf_header *ph __maybe_unused,
			       int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

/* payload-less feature: its presence means the file carries AUX data */
static void print_auxtrace(struct perf_header *ph __maybe_unused,
			   int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

/* payload-less feature: its presence means the file carries stat data */
static void print_stat(struct perf_header *ph __maybe_unused,
		       int fd __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}
1359 
1360 static void print_cache(struct perf_header *ph __maybe_unused,
1361 			int fd __maybe_unused, FILE *fp __maybe_unused)
1362 {
1363 	int i;
1364 
1365 	fprintf(fp, "# CPU cache info:\n");
1366 	for (i = 0; i < ph->env.caches_cnt; i++) {
1367 		fprintf(fp, "#  ");
1368 		cpu_cache_level__fprintf(fp, &ph->env.caches[i]);
1369 	}
1370 }
1371 
1372 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
1373 			       FILE *fp)
1374 {
1375 	const char *delimiter = "# pmu mappings: ";
1376 	char *str, *tmp;
1377 	u32 pmu_num;
1378 	u32 type;
1379 
1380 	pmu_num = ph->env.nr_pmu_mappings;
1381 	if (!pmu_num) {
1382 		fprintf(fp, "# pmu mappings: not available\n");
1383 		return;
1384 	}
1385 
1386 	str = ph->env.pmu_mappings;
1387 
1388 	while (pmu_num) {
1389 		type = strtoul(str, &tmp, 0);
1390 		if (*tmp != ':')
1391 			goto error;
1392 
1393 		str = tmp + 1;
1394 		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
1395 
1396 		delimiter = ", ";
1397 		str += strlen(str) + 1;
1398 		pmu_num--;
1399 	}
1400 
1401 	fprintf(fp, "\n");
1402 
1403 	if (!pmu_num)
1404 		return;
1405 error:
1406 	fprintf(fp, "# pmu mappings: unable to read\n");
1407 }
1408 
1409 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
1410 			     FILE *fp)
1411 {
1412 	struct perf_session *session;
1413 	struct perf_evsel *evsel;
1414 	u32 nr = 0;
1415 
1416 	session = container_of(ph, struct perf_session, header);
1417 
1418 	evlist__for_each_entry(session->evlist, evsel) {
1419 		if (perf_evsel__is_group_leader(evsel) &&
1420 		    evsel->nr_members > 1) {
1421 			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
1422 				perf_evsel__name(evsel));
1423 
1424 			nr = evsel->nr_members - 1;
1425 		} else if (nr) {
1426 			fprintf(fp, ",%s", perf_evsel__name(evsel));
1427 
1428 			if (--nr == 0)
1429 				fprintf(fp, "}\n");
1430 		}
1431 	}
1432 }
1433 
/*
 * Apply one build-id table entry: find (or create) the machine for
 * @bev->pid and the dso for @filename, then record the build-id on it.
 *
 * Returns 0 when the entry was understood (even if the dso lookup
 * failed), -1 when the machine cannot be created or the cpumode is one
 * we do not handle.
 */
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	/* bev->pid selects the host machine or a guest machine */
	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		/* modules arrive with a kernel cpumode but are not typed
		 * as kernel dsos, hence the is_kernel_module() filter */
		if (!is_kernel_module(filename, cpumode))
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);	/* release the findnew reference */
	}

	err = 0;
out:
	return err;
}
1485 
1486 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
1487 						 int input, u64 offset, u64 size)
1488 {
1489 	struct perf_session *session = container_of(header, struct perf_session, header);
1490 	struct {
1491 		struct perf_event_header   header;
1492 		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
1493 		char			   filename[0];
1494 	} old_bev;
1495 	struct build_id_event bev;
1496 	char filename[PATH_MAX];
1497 	u64 limit = offset + size;
1498 
1499 	while (offset < limit) {
1500 		ssize_t len;
1501 
1502 		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
1503 			return -1;
1504 
1505 		if (header->needs_swap)
1506 			perf_event_header__bswap(&old_bev.header);
1507 
1508 		len = old_bev.header.size - sizeof(old_bev);
1509 		if (readn(input, filename, len) != len)
1510 			return -1;
1511 
1512 		bev.header = old_bev.header;
1513 
1514 		/*
1515 		 * As the pid is the missing value, we need to fill
1516 		 * it properly. The header.misc value give us nice hint.
1517 		 */
1518 		bev.pid	= HOST_KERNEL_ID;
1519 		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
1520 		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
1521 			bev.pid	= DEFAULT_GUEST_KERNEL_ID;
1522 
1523 		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
1524 		__event_process_build_id(&bev, filename, session);
1525 
1526 		offset += bev.header.size;
1527 	}
1528 
1529 	return 0;
1530 }
1531 
1532 static int perf_header__read_build_ids(struct perf_header *header,
1533 				       int input, u64 offset, u64 size)
1534 {
1535 	struct perf_session *session = container_of(header, struct perf_session, header);
1536 	struct build_id_event bev;
1537 	char filename[PATH_MAX];
1538 	u64 limit = offset + size, orig_offset = offset;
1539 	int err = -1;
1540 
1541 	while (offset < limit) {
1542 		ssize_t len;
1543 
1544 		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
1545 			goto out;
1546 
1547 		if (header->needs_swap)
1548 			perf_event_header__bswap(&bev.header);
1549 
1550 		len = bev.header.size - sizeof(bev);
1551 		if (readn(input, filename, len) != len)
1552 			goto out;
1553 		/*
1554 		 * The a1645ce1 changeset:
1555 		 *
1556 		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
1557 		 *
1558 		 * Added a field to struct build_id_event that broke the file
1559 		 * format.
1560 		 *
1561 		 * Since the kernel build-id is the first entry, process the
1562 		 * table using the old format if the well known
1563 		 * '[kernel.kallsyms]' string for the kernel build-id has the
1564 		 * first 4 characters chopped off (where the pid_t sits).
1565 		 */
1566 		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
1567 			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
1568 				return -1;
1569 			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
1570 		}
1571 
1572 		__event_process_build_id(&bev, filename, session);
1573 
1574 		offset += bev.header.size;
1575 	}
1576 	err = 0;
1577 out:
1578 	return err;
1579 }
1580 
1581 static int process_tracing_data(struct perf_file_section *section __maybe_unused,
1582 				struct perf_header *ph __maybe_unused,
1583 				int fd, void *data)
1584 {
1585 	ssize_t ret = trace_report(fd, data, false);
1586 	return ret < 0 ? -1 : 0;
1587 }
1588 
1589 static int process_build_id(struct perf_file_section *section,
1590 			    struct perf_header *ph, int fd,
1591 			    void *data __maybe_unused)
1592 {
1593 	if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
1594 		pr_debug("Failed to read buildids, continuing...\n");
1595 	return 0;
1596 }
1597 
1598 static int process_hostname(struct perf_file_section *section __maybe_unused,
1599 			    struct perf_header *ph, int fd,
1600 			    void *data __maybe_unused)
1601 {
1602 	ph->env.hostname = do_read_string(fd, ph);
1603 	return ph->env.hostname ? 0 : -ENOMEM;
1604 }
1605 
1606 static int process_osrelease(struct perf_file_section *section __maybe_unused,
1607 			     struct perf_header *ph, int fd,
1608 			     void *data __maybe_unused)
1609 {
1610 	ph->env.os_release = do_read_string(fd, ph);
1611 	return ph->env.os_release ? 0 : -ENOMEM;
1612 }
1613 
1614 static int process_version(struct perf_file_section *section __maybe_unused,
1615 			   struct perf_header *ph, int fd,
1616 			   void *data __maybe_unused)
1617 {
1618 	ph->env.version = do_read_string(fd, ph);
1619 	return ph->env.version ? 0 : -ENOMEM;
1620 }
1621 
1622 static int process_arch(struct perf_file_section *section __maybe_unused,
1623 			struct perf_header *ph,	int fd,
1624 			void *data __maybe_unused)
1625 {
1626 	ph->env.arch = do_read_string(fd, ph);
1627 	return ph->env.arch ? 0 : -ENOMEM;
1628 }
1629 
1630 static int process_nrcpus(struct perf_file_section *section __maybe_unused,
1631 			  struct perf_header *ph, int fd,
1632 			  void *data __maybe_unused)
1633 {
1634 	ssize_t ret;
1635 	u32 nr;
1636 
1637 	ret = readn(fd, &nr, sizeof(nr));
1638 	if (ret != sizeof(nr))
1639 		return -1;
1640 
1641 	if (ph->needs_swap)
1642 		nr = bswap_32(nr);
1643 
1644 	ph->env.nr_cpus_avail = nr;
1645 
1646 	ret = readn(fd, &nr, sizeof(nr));
1647 	if (ret != sizeof(nr))
1648 		return -1;
1649 
1650 	if (ph->needs_swap)
1651 		nr = bswap_32(nr);
1652 
1653 	ph->env.nr_cpus_online = nr;
1654 	return 0;
1655 }
1656 
1657 static int process_cpudesc(struct perf_file_section *section __maybe_unused,
1658 			   struct perf_header *ph, int fd,
1659 			   void *data __maybe_unused)
1660 {
1661 	ph->env.cpu_desc = do_read_string(fd, ph);
1662 	return ph->env.cpu_desc ? 0 : -ENOMEM;
1663 }
1664 
1665 static int process_cpuid(struct perf_file_section *section __maybe_unused,
1666 			 struct perf_header *ph,  int fd,
1667 			 void *data __maybe_unused)
1668 {
1669 	ph->env.cpuid = do_read_string(fd, ph);
1670 	return ph->env.cpuid ? 0 : -ENOMEM;
1671 }
1672 
1673 static int process_total_mem(struct perf_file_section *section __maybe_unused,
1674 			     struct perf_header *ph, int fd,
1675 			     void *data __maybe_unused)
1676 {
1677 	uint64_t mem;
1678 	ssize_t ret;
1679 
1680 	ret = readn(fd, &mem, sizeof(mem));
1681 	if (ret != sizeof(mem))
1682 		return -1;
1683 
1684 	if (ph->needs_swap)
1685 		mem = bswap_64(mem);
1686 
1687 	ph->env.total_mem = mem;
1688 	return 0;
1689 }
1690 
1691 static struct perf_evsel *
1692 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
1693 {
1694 	struct perf_evsel *evsel;
1695 
1696 	evlist__for_each_entry(evlist, evsel) {
1697 		if (evsel->idx == idx)
1698 			return evsel;
1699 	}
1700 
1701 	return NULL;
1702 }
1703 
1704 static void
1705 perf_evlist__set_event_name(struct perf_evlist *evlist,
1706 			    struct perf_evsel *event)
1707 {
1708 	struct perf_evsel *evsel;
1709 
1710 	if (!event->name)
1711 		return;
1712 
1713 	evsel = perf_evlist__find_by_index(evlist, event->idx);
1714 	if (!evsel)
1715 		return;
1716 
1717 	if (evsel->name)
1718 		return;
1719 
1720 	evsel->name = strdup(event->name);
1721 }
1722 
1723 static int
1724 process_event_desc(struct perf_file_section *section __maybe_unused,
1725 		   struct perf_header *header, int fd,
1726 		   void *data __maybe_unused)
1727 {
1728 	struct perf_session *session;
1729 	struct perf_evsel *evsel, *events = read_event_desc(header, fd);
1730 
1731 	if (!events)
1732 		return 0;
1733 
1734 	session = container_of(header, struct perf_session, header);
1735 	for (evsel = events; evsel->attr.size; evsel++)
1736 		perf_evlist__set_event_name(session->evlist, evsel);
1737 
1738 	free_event_desc(events);
1739 
1740 	return 0;
1741 }
1742 
1743 static int process_cmdline(struct perf_file_section *section,
1744 			   struct perf_header *ph, int fd,
1745 			   void *data __maybe_unused)
1746 {
1747 	ssize_t ret;
1748 	char *str, *cmdline = NULL, **argv = NULL;
1749 	u32 nr, i, len = 0;
1750 
1751 	ret = readn(fd, &nr, sizeof(nr));
1752 	if (ret != sizeof(nr))
1753 		return -1;
1754 
1755 	if (ph->needs_swap)
1756 		nr = bswap_32(nr);
1757 
1758 	ph->env.nr_cmdline = nr;
1759 
1760 	cmdline = zalloc(section->size + nr + 1);
1761 	if (!cmdline)
1762 		return -1;
1763 
1764 	argv = zalloc(sizeof(char *) * (nr + 1));
1765 	if (!argv)
1766 		goto error;
1767 
1768 	for (i = 0; i < nr; i++) {
1769 		str = do_read_string(fd, ph);
1770 		if (!str)
1771 			goto error;
1772 
1773 		argv[i] = cmdline + len;
1774 		memcpy(argv[i], str, strlen(str) + 1);
1775 		len += strlen(str) + 1;
1776 		free(str);
1777 	}
1778 	ph->env.cmdline = cmdline;
1779 	ph->env.cmdline_argv = (const char **) argv;
1780 	return 0;
1781 
1782 error:
1783 	free(argv);
1784 	free(cmdline);
1785 	return -1;
1786 }
1787 
1788 static int process_cpu_topology(struct perf_file_section *section,
1789 				struct perf_header *ph, int fd,
1790 				void *data __maybe_unused)
1791 {
1792 	ssize_t ret;
1793 	u32 nr, i;
1794 	char *str;
1795 	struct strbuf sb;
1796 	int cpu_nr = ph->env.nr_cpus_avail;
1797 	u64 size = 0;
1798 
1799 	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
1800 	if (!ph->env.cpu)
1801 		return -1;
1802 
1803 	ret = readn(fd, &nr, sizeof(nr));
1804 	if (ret != sizeof(nr))
1805 		goto free_cpu;
1806 
1807 	if (ph->needs_swap)
1808 		nr = bswap_32(nr);
1809 
1810 	ph->env.nr_sibling_cores = nr;
1811 	size += sizeof(u32);
1812 	if (strbuf_init(&sb, 128) < 0)
1813 		goto free_cpu;
1814 
1815 	for (i = 0; i < nr; i++) {
1816 		str = do_read_string(fd, ph);
1817 		if (!str)
1818 			goto error;
1819 
1820 		/* include a NULL character at the end */
1821 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1822 			goto error;
1823 		size += string_size(str);
1824 		free(str);
1825 	}
1826 	ph->env.sibling_cores = strbuf_detach(&sb, NULL);
1827 
1828 	ret = readn(fd, &nr, sizeof(nr));
1829 	if (ret != sizeof(nr))
1830 		return -1;
1831 
1832 	if (ph->needs_swap)
1833 		nr = bswap_32(nr);
1834 
1835 	ph->env.nr_sibling_threads = nr;
1836 	size += sizeof(u32);
1837 
1838 	for (i = 0; i < nr; i++) {
1839 		str = do_read_string(fd, ph);
1840 		if (!str)
1841 			goto error;
1842 
1843 		/* include a NULL character at the end */
1844 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
1845 			goto error;
1846 		size += string_size(str);
1847 		free(str);
1848 	}
1849 	ph->env.sibling_threads = strbuf_detach(&sb, NULL);
1850 
1851 	/*
1852 	 * The header may be from old perf,
1853 	 * which doesn't include core id and socket id information.
1854 	 */
1855 	if (section->size <= size) {
1856 		zfree(&ph->env.cpu);
1857 		return 0;
1858 	}
1859 
1860 	for (i = 0; i < (u32)cpu_nr; i++) {
1861 		ret = readn(fd, &nr, sizeof(nr));
1862 		if (ret != sizeof(nr))
1863 			goto free_cpu;
1864 
1865 		if (ph->needs_swap)
1866 			nr = bswap_32(nr);
1867 
1868 		ph->env.cpu[i].core_id = nr;
1869 
1870 		ret = readn(fd, &nr, sizeof(nr));
1871 		if (ret != sizeof(nr))
1872 			goto free_cpu;
1873 
1874 		if (ph->needs_swap)
1875 			nr = bswap_32(nr);
1876 
1877 		if (nr != (u32)-1 && nr > (u32)cpu_nr) {
1878 			pr_debug("socket_id number is too big."
1879 				 "You may need to upgrade the perf tool.\n");
1880 			goto free_cpu;
1881 		}
1882 
1883 		ph->env.cpu[i].socket_id = nr;
1884 	}
1885 
1886 	return 0;
1887 
1888 error:
1889 	strbuf_release(&sb);
1890 free_cpu:
1891 	zfree(&ph->env.cpu);
1892 	return -1;
1893 }
1894 
1895 static int process_numa_topology(struct perf_file_section *section __maybe_unused,
1896 				 struct perf_header *ph, int fd,
1897 				 void *data __maybe_unused)
1898 {
1899 	struct numa_node *nodes, *n;
1900 	ssize_t ret;
1901 	u32 nr, i;
1902 	char *str;
1903 
1904 	/* nr nodes */
1905 	ret = readn(fd, &nr, sizeof(nr));
1906 	if (ret != sizeof(nr))
1907 		return -1;
1908 
1909 	if (ph->needs_swap)
1910 		nr = bswap_32(nr);
1911 
1912 	nodes = zalloc(sizeof(*nodes) * nr);
1913 	if (!nodes)
1914 		return -ENOMEM;
1915 
1916 	for (i = 0; i < nr; i++) {
1917 		n = &nodes[i];
1918 
1919 		/* node number */
1920 		ret = readn(fd, &n->node, sizeof(u32));
1921 		if (ret != sizeof(n->node))
1922 			goto error;
1923 
1924 		ret = readn(fd, &n->mem_total, sizeof(u64));
1925 		if (ret != sizeof(u64))
1926 			goto error;
1927 
1928 		ret = readn(fd, &n->mem_free, sizeof(u64));
1929 		if (ret != sizeof(u64))
1930 			goto error;
1931 
1932 		if (ph->needs_swap) {
1933 			n->node      = bswap_32(n->node);
1934 			n->mem_total = bswap_64(n->mem_total);
1935 			n->mem_free  = bswap_64(n->mem_free);
1936 		}
1937 
1938 		str = do_read_string(fd, ph);
1939 		if (!str)
1940 			goto error;
1941 
1942 		n->map = cpu_map__new(str);
1943 		if (!n->map)
1944 			goto error;
1945 
1946 		free(str);
1947 	}
1948 	ph->env.nr_numa_nodes = nr;
1949 	ph->env.numa_nodes = nodes;
1950 	return 0;
1951 
1952 error:
1953 	free(nodes);
1954 	return -1;
1955 }
1956 
1957 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused,
1958 				struct perf_header *ph, int fd,
1959 				void *data __maybe_unused)
1960 {
1961 	ssize_t ret;
1962 	char *name;
1963 	u32 pmu_num;
1964 	u32 type;
1965 	struct strbuf sb;
1966 
1967 	ret = readn(fd, &pmu_num, sizeof(pmu_num));
1968 	if (ret != sizeof(pmu_num))
1969 		return -1;
1970 
1971 	if (ph->needs_swap)
1972 		pmu_num = bswap_32(pmu_num);
1973 
1974 	if (!pmu_num) {
1975 		pr_debug("pmu mappings not available\n");
1976 		return 0;
1977 	}
1978 
1979 	ph->env.nr_pmu_mappings = pmu_num;
1980 	if (strbuf_init(&sb, 128) < 0)
1981 		return -1;
1982 
1983 	while (pmu_num) {
1984 		if (readn(fd, &type, sizeof(type)) != sizeof(type))
1985 			goto error;
1986 		if (ph->needs_swap)
1987 			type = bswap_32(type);
1988 
1989 		name = do_read_string(fd, ph);
1990 		if (!name)
1991 			goto error;
1992 
1993 		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
1994 			goto error;
1995 		/* include a NULL character at the end */
1996 		if (strbuf_add(&sb, "", 1) < 0)
1997 			goto error;
1998 
1999 		if (!strcmp(name, "msr"))
2000 			ph->env.msr_pmu_type = type;
2001 
2002 		free(name);
2003 		pmu_num--;
2004 	}
2005 	ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
2006 	return 0;
2007 
2008 error:
2009 	strbuf_release(&sb);
2010 	return -1;
2011 }
2012 
2013 static int process_group_desc(struct perf_file_section *section __maybe_unused,
2014 			      struct perf_header *ph, int fd,
2015 			      void *data __maybe_unused)
2016 {
2017 	size_t ret = -1;
2018 	u32 i, nr, nr_groups;
2019 	struct perf_session *session;
2020 	struct perf_evsel *evsel, *leader = NULL;
2021 	struct group_desc {
2022 		char *name;
2023 		u32 leader_idx;
2024 		u32 nr_members;
2025 	} *desc;
2026 
2027 	if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups))
2028 		return -1;
2029 
2030 	if (ph->needs_swap)
2031 		nr_groups = bswap_32(nr_groups);
2032 
2033 	ph->env.nr_groups = nr_groups;
2034 	if (!nr_groups) {
2035 		pr_debug("group desc not available\n");
2036 		return 0;
2037 	}
2038 
2039 	desc = calloc(nr_groups, sizeof(*desc));
2040 	if (!desc)
2041 		return -1;
2042 
2043 	for (i = 0; i < nr_groups; i++) {
2044 		desc[i].name = do_read_string(fd, ph);
2045 		if (!desc[i].name)
2046 			goto out_free;
2047 
2048 		if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32))
2049 			goto out_free;
2050 
2051 		if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32))
2052 			goto out_free;
2053 
2054 		if (ph->needs_swap) {
2055 			desc[i].leader_idx = bswap_32(desc[i].leader_idx);
2056 			desc[i].nr_members = bswap_32(desc[i].nr_members);
2057 		}
2058 	}
2059 
2060 	/*
2061 	 * Rebuild group relationship based on the group_desc
2062 	 */
2063 	session = container_of(ph, struct perf_session, header);
2064 	session->evlist->nr_groups = nr_groups;
2065 
2066 	i = nr = 0;
2067 	evlist__for_each_entry(session->evlist, evsel) {
2068 		if (evsel->idx == (int) desc[i].leader_idx) {
2069 			evsel->leader = evsel;
2070 			/* {anon_group} is a dummy name */
2071 			if (strcmp(desc[i].name, "{anon_group}")) {
2072 				evsel->group_name = desc[i].name;
2073 				desc[i].name = NULL;
2074 			}
2075 			evsel->nr_members = desc[i].nr_members;
2076 
2077 			if (i >= nr_groups || nr > 0) {
2078 				pr_debug("invalid group desc\n");
2079 				goto out_free;
2080 			}
2081 
2082 			leader = evsel;
2083 			nr = evsel->nr_members - 1;
2084 			i++;
2085 		} else if (nr) {
2086 			/* This is a group member */
2087 			evsel->leader = leader;
2088 
2089 			nr--;
2090 		}
2091 	}
2092 
2093 	if (i != nr_groups || nr != 0) {
2094 		pr_debug("invalid group desc\n");
2095 		goto out_free;
2096 	}
2097 
2098 	ret = 0;
2099 out_free:
2100 	for (i = 0; i < nr_groups; i++)
2101 		zfree(&desc[i].name);
2102 	free(desc);
2103 
2104 	return ret;
2105 }
2106 
2107 static int process_auxtrace(struct perf_file_section *section,
2108 			    struct perf_header *ph, int fd,
2109 			    void *data __maybe_unused)
2110 {
2111 	struct perf_session *session;
2112 	int err;
2113 
2114 	session = container_of(ph, struct perf_session, header);
2115 
2116 	err = auxtrace_index__process(fd, section->size, session,
2117 				      ph->needs_swap);
2118 	if (err < 0)
2119 		pr_err("Failed to process auxtrace index\n");
2120 	return err;
2121 }
2122 
2123 static int process_cache(struct perf_file_section *section __maybe_unused,
2124 			 struct perf_header *ph __maybe_unused, int fd __maybe_unused,
2125 			 void *data __maybe_unused)
2126 {
2127 	struct cpu_cache_level *caches;
2128 	u32 cnt, i, version;
2129 
2130 	if (readn(fd, &version, sizeof(version)) != sizeof(version))
2131 		return -1;
2132 
2133 	if (ph->needs_swap)
2134 		version = bswap_32(version);
2135 
2136 	if (version != 1)
2137 		return -1;
2138 
2139 	if (readn(fd, &cnt, sizeof(cnt)) != sizeof(cnt))
2140 		return -1;
2141 
2142 	if (ph->needs_swap)
2143 		cnt = bswap_32(cnt);
2144 
2145 	caches = zalloc(sizeof(*caches) * cnt);
2146 	if (!caches)
2147 		return -1;
2148 
2149 	for (i = 0; i < cnt; i++) {
2150 		struct cpu_cache_level c;
2151 
2152 		#define _R(v)						\
2153 			if (readn(fd, &c.v, sizeof(u32)) != sizeof(u32))\
2154 				goto out_free_caches;			\
2155 			if (ph->needs_swap)				\
2156 				c.v = bswap_32(c.v);			\
2157 
2158 		_R(level)
2159 		_R(line_size)
2160 		_R(sets)
2161 		_R(ways)
2162 		#undef _R
2163 
2164 		#define _R(v)				\
2165 			c.v = do_read_string(fd, ph);	\
2166 			if (!c.v)			\
2167 				goto out_free_caches;
2168 
2169 		_R(type)
2170 		_R(size)
2171 		_R(map)
2172 		#undef _R
2173 
2174 		caches[i] = c;
2175 	}
2176 
2177 	ph->env.caches = caches;
2178 	ph->env.caches_cnt = cnt;
2179 	return 0;
2180 out_free_caches:
2181 	free(caches);
2182 	return -1;
2183 }
2184 
/*
 * Per-feature vtable for the optional perf.data header sections.
 *
 * @write:     serialize the feature section while recording.
 * @print:     dump a human readable summary of the section.
 * @process:   parse the section when reading a perf.data file back.
 * @name:      feature name, used in diagnostics.
 * @full_only: only printed with the full listing (see the "-I" hint
 *             emitted by perf_file_section__fprintf_info()).
 */
struct feature_ops {
	int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
	void (*print)(struct perf_header *h, int fd, FILE *fp);
	int (*process)(struct perf_file_section *section,
		       struct perf_header *h, int fd, void *data);
	const char *name;
	bool full_only;
};
2193 
/*
 * FEAT_OPA: feature with .write and .print only (no .process — the
 *           section is not parsed back when reading).
 * FEAT_OPP: like FEAT_OPA plus a .process callback for reading.
 * FEAT_OPF: like FEAT_OPP, but only printed in the full listing
 *           (.full_only = true).
 */
#define FEAT_OPA(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func }
#define FEAT_OPP(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func }
#define FEAT_OPF(n, func) \
	[n] = { .name = #n, .write = write_##func, .print = print_##func, \
		.process = process_##func, .full_only = true }

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL
2206 
/* Dispatch table indexed by the HEADER_* feature bit (designated
 * initializers, so entry order here need not match the enum). */
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPP(HEADER_TRACING_DATA,	tracing_data),
	FEAT_OPP(HEADER_BUILD_ID,	build_id),
	FEAT_OPP(HEADER_HOSTNAME,	hostname),
	FEAT_OPP(HEADER_OSRELEASE,	osrelease),
	FEAT_OPP(HEADER_VERSION,	version),
	FEAT_OPP(HEADER_ARCH,		arch),
	FEAT_OPP(HEADER_NRCPUS,		nrcpus),
	FEAT_OPP(HEADER_CPUDESC,	cpudesc),
	FEAT_OPP(HEADER_CPUID,		cpuid),
	FEAT_OPP(HEADER_TOTAL_MEM,	total_mem),
	FEAT_OPP(HEADER_EVENT_DESC,	event_desc),
	FEAT_OPP(HEADER_CMDLINE,	cmdline),
	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
	FEAT_OPA(HEADER_BRANCH_STACK,	branch_stack),
	FEAT_OPP(HEADER_PMU_MAPPINGS,	pmu_mappings),
	FEAT_OPP(HEADER_GROUP_DESC,	group_desc),
	FEAT_OPP(HEADER_AUXTRACE,	auxtrace),
	FEAT_OPA(HEADER_STAT,		stat),
	FEAT_OPF(HEADER_CACHE,		cache),
};
2229 
/* Print state threaded through perf_header__process_sections() callbacks. */
struct header_print_data {
	FILE *fp;  /* destination stream */
	bool full; /* extended list of headers */
};
2234 
/*
 * perf_header__process_sections() callback: print one feature section.
 * @data is a struct header_print_data carrying the output FILE and the
 * full/abbreviated flag.  Always returns 0 so the section walk carries
 * on past unreadable or unknown sections.
 */
static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
				"%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	/* full_only features are abbreviated unless -I was given */
	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(ph, fd, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}
2261 
/*
 * Print a human readable summary of all header feature sections to @fp.
 * @full selects the extended listing (features marked full_only).
 * Returns 0 on success, -1 if the data file cannot be stat'ed.
 */
int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data_file__fd(session->file);
	struct stat st;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	/* pipes carry no feature sections, so there is nothing to list */
	if (session->file->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		/* bit 0 is skipped — presumably reserved; TODO confirm */
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}
2294 
/*
 * Write one optional feature section if @type is set in @h's feature
 * mask.  On success *p is filled with the section's offset/size and
 * advanced to the next slot.  On write failure the file offset is
 * rewound so nothing partial is left behind and -1 is returned.
 */
static int do_write_feat(int fd, struct perf_header *h, int type,
			 struct perf_file_section **p,
			 struct perf_evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(h, type)) {
		if (!feat_ops[type].write)
			return -1;

		/* the section starts at the current file offset */
		(*p)->offset = lseek(fd, 0, SEEK_CUR);

		err = feat_ops[type].write(fd, h, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %s\n", feat_ops[type].name);

			/* undo anything written */
			lseek(fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}
2322 
/*
 * Write every enabled feature section followed by the section
 * offset/size table at header->feat_offset.  Features whose writer
 * fails are cleared from the mask and skipped.  Returns the do_write()
 * result for the section table (negative on error).
 */
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	/* section payloads go after the reserved section table */
	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(fd, header, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}
2362 
2363 int perf_header__write_pipe(int fd)
2364 {
2365 	struct perf_pipe_file_header f_header;
2366 	int err;
2367 
2368 	f_header = (struct perf_pipe_file_header){
2369 		.magic	   = PERF_MAGIC,
2370 		.size	   = sizeof(f_header),
2371 	};
2372 
2373 	err = do_write(fd, &f_header, sizeof(f_header));
2374 	if (err < 0) {
2375 		pr_debug("failed to write perf pipe header\n");
2376 		return err;
2377 	}
2378 
2379 	return 0;
2380 }
2381 
/*
 * Write the perf.data file header: the per-evsel id arrays, the attr
 * table, optionally (when @at_exit) the feature sections, and finally
 * the fixed struct perf_file_header at offset 0.  Leaves the file
 * offset at the end of the data section.  Returns 0 or a negative
 * do_write()/perf_header__adds_write() error.
 */
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *evsel;
	u64 attr_offset;
	int err;

	/* leave room for the fixed header, written last */
	lseek(fd, sizeof(f_header), SEEK_SET);

	/* first the sample-id arrays, remembering each one's offset */
	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(fd, 0, SEEK_CUR);

	/* then one perf_file_attr per event, pointing back at its ids */
	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	/* feature sections are only final once recording has finished */
	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
2458 
2459 static int perf_header__getbuffer64(struct perf_header *header,
2460 				    int fd, void *buf, size_t size)
2461 {
2462 	if (readn(fd, buf, size) <= 0)
2463 		return -1;
2464 
2465 	if (header->needs_swap)
2466 		mem_bswap_64(buf, size);
2467 
2468 	return 0;
2469 }
2470 
/*
 * Read the feature section table at header->feat_offset and invoke
 * @process once per set feature bit with its section descriptor.
 * Stops at the first callback error.  Returns 0 on success.
 */
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	/*
	 * NOTE(review): the count above uses HEADER_FEAT_BITS while this
	 * walk stops at HEADER_LAST_FEATURE — confirm that set bits beyond
	 * HEADER_LAST_FEATURE cannot desynchronize the sec++ cursor.
	 */
	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}
2509 
/*
 * On-file perf_event_attr sizes for each known ABI revision.  Used by
 * try_all_file_abis() to probe which revision (and endianness) produced a
 * legacy perf.data file.  The trailing 0 is a sentinel terminating iteration.
 */
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};
2518 
2519 /*
2520  * In the legacy file format, the magic number is not used to encode endianness.
2521  * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
2522  * on ABI revisions, we need to try all combinations for all endianness to
2523  * detect the endianness.
2524  */
2525 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
2526 {
2527 	uint64_t ref_size, attr_size;
2528 	int i;
2529 
2530 	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
2531 		ref_size = attr_file_abi_sizes[i]
2532 			 + sizeof(struct perf_file_section);
2533 		if (hdr_sz != ref_size) {
2534 			attr_size = bswap_64(hdr_sz);
2535 			if (attr_size != ref_size)
2536 				continue;
2537 
2538 			ph->needs_swap = true;
2539 		}
2540 		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
2541 			 i,
2542 			 ph->needs_swap);
2543 		return 0;
2544 	}
2545 	/* could not determine endianness */
2546 	return -1;
2547 }
2548 
/* size of the ABI0 pipe-mode file header */
#define PERF_PIPE_HDR_VER0	16

/* known pipe-mode header sizes; trailing 0 is the iteration sentinel */
static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};
2555 
2556 /*
2557  * In the legacy pipe format, there is an implicit assumption that endiannesss
2558  * between host recording the samples, and host parsing the samples is the
2559  * same. This is not always the case given that the pipe output may always be
2560  * redirected into a file and analyzed on a different machine with possibly a
2561  * different endianness and perf_event ABI revsions in the perf tool itself.
2562  */
2563 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
2564 {
2565 	u64 attr_size;
2566 	int i;
2567 
2568 	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
2569 		if (hdr_sz != attr_pipe_abi_sizes[i]) {
2570 			attr_size = bswap_64(hdr_sz);
2571 			if (attr_size != hdr_sz)
2572 				continue;
2573 
2574 			ph->needs_swap = true;
2575 		}
2576 		pr_debug("Pipe ABI%d perf.data file detected\n", i);
2577 		return 0;
2578 	}
2579 	return -1;
2580 }
2581 
2582 bool is_perf_magic(u64 magic)
2583 {
2584 	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
2585 		|| magic == __perf_magic2
2586 		|| magic == __perf_magic2_sw)
2587 		return true;
2588 
2589 	return false;
2590 }
2591 
/*
 * Validate @magic and deduce the file's endianness relative to the host,
 * recording version and needs_swap in @ph.  Legacy "PERFFILE" files do not
 * encode endianness in the magic, so fall back to probing header sizes for
 * every known ABI revision (pipe or file variant, per @is_pipe).
 * Returns 0 on success, negative if the magic/size is unrecognized.
 */
static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}
2626 
/*
 * Read and validate the on-file header of a (non-pipe) perf.data file into
 * @header, populating @ph with version, endianness, data offsets and the
 * feature bitmap.  Returns 0 on success, -1 on read/format errors.
 */
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	/* swap the fixed-size fields; adds_features is handled separately below */
	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fallback to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			    BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				    BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				    BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	/* feature sections are laid out immediately after the data section */
	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}
2698 
2699 static int perf_file_section__process(struct perf_file_section *section,
2700 				      struct perf_header *ph,
2701 				      int feat, int fd, void *data)
2702 {
2703 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
2704 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
2705 			  "%d, continuing...\n", section->offset, feat);
2706 		return 0;
2707 	}
2708 
2709 	if (feat >= HEADER_LAST_FEATURE) {
2710 		pr_debug("unknown feature %d, continuing...\n", feat);
2711 		return 0;
2712 	}
2713 
2714 	if (!feat_ops[feat].process)
2715 		return 0;
2716 
2717 	return feat_ops[feat].process(section, ph, fd, data);
2718 }
2719 
/*
 * Read and validate the small pipe-mode header from @fd, setting ph's
 * version/needs_swap via check_magic_endian().  When @repipe is set the raw
 * header is forwarded to stdout so downstream consumers see a valid stream.
 * Returns 0 on success, -1 on read/format/repipe failure.
 */
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	ssize_t ret;

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	/* forward the (size-unswapped-on-disk) header verbatim when repiping */
	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
		return -1;

	return 0;
}
2743 
2744 static int perf_header__read_pipe(struct perf_session *session)
2745 {
2746 	struct perf_header *header = &session->header;
2747 	struct perf_pipe_file_header f_header;
2748 
2749 	if (perf_file_header__read_pipe(&f_header, header,
2750 					perf_data_file__fd(session->file),
2751 					session->repipe) < 0) {
2752 		pr_debug("incompatible file format\n");
2753 		return -EINVAL;
2754 	}
2755 
2756 	return 0;
2757 }
2758 
2759 static int read_attr(int fd, struct perf_header *ph,
2760 		     struct perf_file_attr *f_attr)
2761 {
2762 	struct perf_event_attr *attr = &f_attr->attr;
2763 	size_t sz, left;
2764 	size_t our_sz = sizeof(f_attr->attr);
2765 	ssize_t ret;
2766 
2767 	memset(f_attr, 0, sizeof(*f_attr));
2768 
2769 	/* read minimal guaranteed structure */
2770 	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
2771 	if (ret <= 0) {
2772 		pr_debug("cannot read %d bytes of header attr\n",
2773 			 PERF_ATTR_SIZE_VER0);
2774 		return -1;
2775 	}
2776 
2777 	/* on file perf_event_attr size */
2778 	sz = attr->size;
2779 
2780 	if (ph->needs_swap)
2781 		sz = bswap_32(sz);
2782 
2783 	if (sz == 0) {
2784 		/* assume ABI0 */
2785 		sz =  PERF_ATTR_SIZE_VER0;
2786 	} else if (sz > our_sz) {
2787 		pr_debug("file uses a more recent and unsupported ABI"
2788 			 " (%zu bytes extra)\n", sz - our_sz);
2789 		return -1;
2790 	}
2791 	/* what we have not yet read and that we know about */
2792 	left = sz - PERF_ATTR_SIZE_VER0;
2793 	if (left) {
2794 		void *ptr = attr;
2795 		ptr += PERF_ATTR_SIZE_VER0;
2796 
2797 		ret = readn(fd, ptr, left);
2798 	}
2799 	/* read perf_file_section, ids are read in caller */
2800 	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
2801 
2802 	return ret <= 0 ? -1 : 0;
2803 }
2804 
/*
 * Resolve a tracepoint evsel's event format from the parsed trace data in
 * @pevent (looked up by attr.config, the tracepoint id) and synthesize a
 * "system:name" evsel name when one is missing.  Idempotent: returns
 * immediately if tp_format was already resolved.  Returns 0 on success,
 * -1 when trace data is missing or the format/name cannot be produced.
 */
static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
						struct pevent *pevent)
{
	struct event_format *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	/* attr.config holds the tracepoint id for PERF_TYPE_TRACEPOINT */
	event = pevent_find_event(pevent, evsel->attr.config);
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
		return -1;
	}

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}
2836 
/*
 * Resolve tracepoint formats for every PERF_TYPE_TRACEPOINT evsel in
 * @evlist.  Stops at the first failure and returns -1; returns 0 when all
 * tracepoint events were prepared (or none were present).
 */
static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
						  struct pevent *pevent)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__prepare_tracepoint_event(pos, pevent))
			return -1;
	}

	return 0;
}
2850 
/*
 * Read a perf.data file's header, build the session's evlist from the
 * recorded attributes and their sample ids, process all feature sections,
 * and prepare tracepoint event formats.  Pipe input is delegated to
 * perf_header__read_pipe().  Returns 0 on success or a negative errno.
 */
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data_file *file = session->file;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data_file__fd(file);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	if (perf_data_file__is_pipe(file))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information.  Just warn user and process it as much as it can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   file->path);
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		/* remember position in the attr array before chasing ids */
		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		/* restore position to continue with the next attr entry */
		lseek(fd, tmp, SEEK_SET);
	}

	symbol_conf.nr_events = nr_attrs;

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}
2953 
/*
 * Synthesize a PERF_RECORD_HEADER_ATTR event carrying @attr plus @ids
 * sample ids from @id, and feed it to @process.  The record size must fit
 * the u16 header.size field; -E2BIG is returned when it would overflow.
 * Returns 0 on success, -ENOMEM on allocation failure, or the handler's
 * error code.
 */
int perf_event__synthesize_attr(struct perf_tool *tool,
				struct perf_event_attr *attr, u32 ids, u64 *id,
				perf_event__handler_t process)
{
	union perf_event *ev;
	size_t size;
	int err;

	/* attr (u64-aligned) + record header + trailing id array */
	size = sizeof(struct perf_event_attr);
	size = PERF_ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = (u16)size;

	/* detect u16 truncation: the cast round-trips only if size fits */
	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;

	free(ev);

	return err;
}
2987 
2988 static struct event_update_event *
2989 event_update_event__new(size_t size, u64 type, u64 id)
2990 {
2991 	struct event_update_event *ev;
2992 
2993 	size += sizeof(*ev);
2994 	size  = PERF_ALIGN(size, sizeof(u64));
2995 
2996 	ev = zalloc(size);
2997 	if (ev) {
2998 		ev->header.type = PERF_RECORD_EVENT_UPDATE;
2999 		ev->header.size = (u16)size;
3000 		ev->type = type;
3001 		ev->id = id;
3002 	}
3003 	return ev;
3004 }
3005 
/*
 * Synthesize a PERF_EVENT_UPDATE__UNIT event carrying the evsel's unit
 * string and pass it to @process.  Returns 0 on success, -ENOMEM on
 * allocation failure, or the handler's error code.
 */
int
perf_event__synthesize_event_update_unit(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	struct event_update_event *ev;
	size_t size = strlen(evsel->unit);
	int err;

	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	/*
	 * strncpy() copies only `size` bytes (no NUL); termination relies on
	 * event_update_event__new() zalloc'ing the buffer with size + 1
	 * payload bytes.
	 */
	strncpy(ev->data, evsel->unit, size);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}
3024 
/*
 * Synthesize a PERF_EVENT_UPDATE__SCALE event carrying the evsel's scale
 * factor and pass it to @process.  Returns 0 on success, -ENOMEM on
 * allocation failure, or the handler's error code.
 */
int
perf_event__synthesize_event_update_scale(struct perf_tool *tool,
					  struct perf_evsel *evsel,
					  perf_event__handler_t process)
{
	struct event_update_event *ev;
	struct event_update_event_scale *ev_data;
	int err;

	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	/* payload is a single event_update_event_scale struct */
	ev_data = (struct event_update_event_scale *) ev->data;
	ev_data->scale = evsel->scale;
	err = process(tool, (union perf_event*) ev, NULL, NULL);
	free(ev);
	return err;
}
3044 
/*
 * Synthesize a PERF_EVENT_UPDATE__NAME event carrying the evsel's name
 * and pass it to @process.  Returns 0 on success, -ENOMEM on allocation
 * failure, or the handler's error code.
 */
int
perf_event__synthesize_event_update_name(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	struct event_update_event *ev;
	size_t len = strlen(evsel->name);
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	/* copies len bytes; the NUL comes from the zalloc'd len + 1 payload */
	strncpy(ev->data, evsel->name, len);
	err = process(tool, (union perf_event*) ev, NULL, NULL);
	free(ev);
	return err;
}
3063 
/*
 * Synthesize a PERF_EVENT_UPDATE__CPUS event describing the evsel's own
 * cpu map and pass it to @process.  A no-op (returns 0) when the evsel has
 * no own_cpus.  Returns -ENOMEM on allocation failure or the handler's
 * error code.
 */
int
perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
					struct perf_evsel *evsel,
					perf_event__handler_t process)
{
	size_t size = sizeof(struct event_update_event);
	struct event_update_event *ev;
	int max, err;
	u16 type;

	if (!evsel->own_cpus)
		return 0;

	/* allocates the event sized for the serialized cpu map payload */
	ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
	if (!ev)
		return -ENOMEM;

	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)size;
	ev->type = PERF_EVENT_UPDATE__CPUS;
	ev->id   = evsel->id[0];

	cpu_map_data__synthesize((struct cpu_map_data *) ev->data,
				 evsel->own_cpus,
				 type, max);

	err = process(tool, (union perf_event*) ev, NULL, NULL);
	free(ev);
	return err;
}
3094 
/*
 * Pretty-print a PERF_RECORD_EVENT_UPDATE event (id plus the type-specific
 * payload: scale, unit, name or cpu map) to @fp.  Returns the number of
 * characters written.
 */
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
	struct event_update_event *ev = &event->event_update;
	struct event_update_event_scale *ev_scale;
	struct event_update_event_cpus *ev_cpus;
	struct cpu_map *map;
	size_t ret;

	ret = fprintf(fp, "\n... id:    %" PRIu64 "\n", ev->id);

	switch (ev->type) {
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct event_update_event_scale *) ev->data;
		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
		break;
	case PERF_EVENT_UPDATE__UNIT:
		ret += fprintf(fp, "... unit:  %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		ret += fprintf(fp, "... name:  %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct event_update_event_cpus *) ev->data;
		ret += fprintf(fp, "... ");

		/*
		 * NOTE(review): the map returned by cpu_map__new_data() does
		 * not appear to be released here — confirm cpu_map lifetime
		 * rules (possible refcount leak on this print-only path).
		 */
		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			ret += cpu_map__fprintf(map, fp);
		else
			ret += fprintf(fp, "failed to get cpus\n");
		break;
	default:
		ret += fprintf(fp, "... unknown type\n");
		break;
	}

	return ret;
}
3133 
3134 int perf_event__synthesize_attrs(struct perf_tool *tool,
3135 				   struct perf_session *session,
3136 				   perf_event__handler_t process)
3137 {
3138 	struct perf_evsel *evsel;
3139 	int err = 0;
3140 
3141 	evlist__for_each_entry(session->evlist, evsel) {
3142 		err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
3143 						  evsel->id, process);
3144 		if (err) {
3145 			pr_debug("failed to create perf header attribute\n");
3146 			return err;
3147 		}
3148 	}
3149 
3150 	return err;
3151 }
3152 
/*
 * Handle a PERF_RECORD_HEADER_ATTR event from a pipe/stream: create an
 * evsel for the embedded attr, register the trailing sample ids, and add
 * it to *pevlist (allocating the evlist on first use).  Returns 0 on
 * success, -ENOMEM on allocation failure.
 */
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = perf_evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);

	/* id array occupies whatever record space follows the attr */
	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
	}

	symbol_conf.nr_events = evlist->nr_entries;

	return 0;
}
3192 
3193 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
3194 				     union perf_event *event,
3195 				     struct perf_evlist **pevlist)
3196 {
3197 	struct event_update_event *ev = &event->event_update;
3198 	struct event_update_event_scale *ev_scale;
3199 	struct event_update_event_cpus *ev_cpus;
3200 	struct perf_evlist *evlist;
3201 	struct perf_evsel *evsel;
3202 	struct cpu_map *map;
3203 
3204 	if (!pevlist || *pevlist == NULL)
3205 		return -EINVAL;
3206 
3207 	evlist = *pevlist;
3208 
3209 	evsel = perf_evlist__id2evsel(evlist, ev->id);
3210 	if (evsel == NULL)
3211 		return -EINVAL;
3212 
3213 	switch (ev->type) {
3214 	case PERF_EVENT_UPDATE__UNIT:
3215 		evsel->unit = strdup(ev->data);
3216 		break;
3217 	case PERF_EVENT_UPDATE__NAME:
3218 		evsel->name = strdup(ev->data);
3219 		break;
3220 	case PERF_EVENT_UPDATE__SCALE:
3221 		ev_scale = (struct event_update_event_scale *) ev->data;
3222 		evsel->scale = ev_scale->scale;
3223 		break;
3224 	case PERF_EVENT_UPDATE__CPUS:
3225 		ev_cpus = (struct event_update_event_cpus *) ev->data;
3226 
3227 		map = cpu_map__new_data(&ev_cpus->cpus);
3228 		if (map)
3229 			evsel->own_cpus = map;
3230 		else
3231 			pr_err("failed to get event_update cpus\n");
3232 	default:
3233 		break;
3234 	}
3235 
3236 	return 0;
3237 }
3238 
/*
 * Synthesize a PERF_RECORD_HEADER_TRACING_DATA event followed by the raw
 * tracing data itself (u64-padded) on @fd.  Returns the aligned data size
 * written after the event, or -1 when the tracing data cannot be gathered.
 */
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct perf_evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	/*
	 * NOTE(review): err is declared but never assigned/returned, and the
	 * return value of process() below is ignored — a failing handler is
	 * not reported to the caller.  Confirm whether this is intentional.
	 */
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	/* pad the stream so the next record starts u64-aligned */
	write_padded(fd, NULL, 0, padding);

	return aligned_size;
}
3284 
/*
 * Handle a PERF_RECORD_HEADER_TRACING_DATA event: parse the tracing data
 * that follows the record via trace_report(), consume (and optionally
 * repipe) the u64-alignment padding, and prepare the evlist's tracepoint
 * formats.  Returns the total bytes consumed (data + padding), or -1 on
 * read/repipe failure or a size mismatch against the record header.
 */
int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data_file__fd(session->file);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	/* consume the alignment padding left by the writer */
	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	/* the record header promised exactly size bytes of data+padding */
	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}
3324 
/*
 * Synthesize a PERF_RECORD_HEADER_BUILD_ID event for @pos (build id plus
 * its long name, NAME_ALIGN-padded) and feed it to @process.  DSOs that
 * were never hit are skipped (returns 0).  Returns the handler's result
 * otherwise.
 */
int perf_event__synthesize_build_id(struct perf_tool *tool,
				    struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	/* only emit build ids for DSOs that actually appeared in samples */
	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	/* filename length including NUL, padded to NAME_ALIGN */
	len = pos->long_name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(tool, &ev, NULL, machine);

	return err;
}
3352 
3353 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
3354 				 union perf_event *event,
3355 				 struct perf_session *session)
3356 {
3357 	__event_process_build_id(&event->build_id,
3358 				 event->build_id.filename,
3359 				 session);
3360 	return 0;
3361 }
3362