// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <regex.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/zalloc.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/libbpf.h>
#endif
#include <perf/cpumap.h>
#include <tools/libc_compat.h> // reallocarray

#include "dso.h"
#include "evlist.h"
#include "evsel.h"
#include "util/evsel_fprintf.h"
#include "header.h"
#include "memswap.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "pmus.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include <api/io_dir.h>
#include "asm/bug.h"
#include "tool.h"
#include "time-utils.h"
#include "units.h"
#include "util/util.h" // perf_exe()
#include "cputopo.h"
#include "bpf-event.h"
#include "bpf-utils.h"
#include "clockid.h"

#include <linux/ctype.h>
#include <internal/lib.h>

#ifdef HAVE_LIBTRACEEVENT
#include <event-parse.h>
#endif

/*
 * magic2 = "PERFILE2"
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;
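
/*
 * Illustrative sketch (not part of the upstream file): how the two
 * constants above let a reader detect byte order.  A file written on a
 * same-endian machine carries __perf_magic2 verbatim, while a file from
 * an opposite-endian machine reads back as the byte-swapped value:
 *
 *	static bool is_perf_magic(u64 magic, bool *needs_swap)
 *	{
 *		if (magic == __perf_magic2)
 *			*needs_swap = false;
 *		else if (magic == __perf_magic2_sw)
 *			*needs_swap = true;
 *		else
 *			return false;
 *		return true;
 *	}
 */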

#define PERF_MAGIC	__perf_magic2
#define DNAME_LEN	16

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	__set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	__clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}
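
/*
 * Worked example (illustrative, assuming NAME_ALIGN is 64 as defined in
 * build-id.h): do_write_string(ff, "perf") first emits a u32 len of 64
 * (olen = strlen("perf") + 1 = 5, aligned up), then write_padded() puts
 * out the 5 bytes including '\0' followed by 59 zero bytes.
 * do_read_string() below depends on that zero padding so the returned
 * buffer can be used as a normal C string.
 */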

static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}

static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}

static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);
	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);
	return 0;
}

static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
		/*
		 * strings are padded by zeroes,
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}

/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_zalloc(size);
	if (!set)
		return -ENOMEM;

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			free(set);
			return ret;
		}
	}

	*pset  = set;
	*psize = size;
	return 0;
}

#ifdef HAVE_LIBTRACEEVENT
static int write_tracing_data(struct feat_fd *ff,
			      struct evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->core.entries);
}
#endif

static int write_build_id(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}

	return 0;
}

static int write_hostname(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}
static int write_e_machine(struct feat_fd *ff, struct evlist *evlist)
{
	/* e_machine expanded from 16 to 32-bits for alignment. */
	uint32_t e_flags;
	uint32_t e_machine = perf_session__e_machine(evlist->session, &e_flags);
	int ret;

	ret = do_write(ff, &e_machine, sizeof(e_machine));
	if (ret)
		return ret;

	return do_write(ff, &e_flags, sizeof(e_flags));
}

static int write_version(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}

static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);
			*p = ' ';
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_cpudesc(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
#if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
#define CPUINFO_PROC	{ "cpu", }
#elif defined(__s390__)
#define CPUINFO_PROC	{ "vendor_id", }
#elif defined(__sh__)
#define CPUINFO_PROC	{ "cpu type", }
#elif defined(__alpha__) || defined(__mips__)
#define CPUINFO_PROC	{ "cpu model", }
#elif defined(__arm__)
#define CPUINFO_PROC	{ "model name", "Processor", }
#elif defined(__arc__)
#define CPUINFO_PROC	{ "Processor", }
#elif defined(__xtensa__)
#define CPUINFO_PROC	{ "core ID", }
#elif defined(__loongarch__)
#define CPUINFO_PROC	{ "Model Name", }
#else
#define CPUINFO_PROC	{ "model name", }
#endif
	const char *cpuinfo_procs[] = CPUINFO_PROC;
#undef CPUINFO_PROC
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}

static int write_nrcpus(struct feat_fd *ff,
			struct evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu().cpu;

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(ff, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(ff, &nra, sizeof(nra));
}

static int write_event_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	struct evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->core.nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->core.attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->core.attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique ids per event:
		 * there is one id per instance of an event.
		 *
		 * copy into nri to be independent of the
		 * type of ids.
		 */
		nri = evsel->core.ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->core.id, evsel->core.ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cmdline(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	char pbuf[MAXPATHLEN], *buf;
	int i, ret, n;

	/* actual path to perf binary */
	buf = perf_exe(pbuf, MAXPATHLEN);

	/* account for binary path */
	n = env->nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < env->nr_cmdline; i++) {
		ret = do_write_string(ff, env->cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cpu_topology(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct cpu_topology *tp;
	u32 i;
	int ret, j;

	tp = cpu_topology__new();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->package_cpus_lists, sizeof(tp->package_cpus_lists));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->package_cpus_lists; i++) {
		ret = do_write_string(ff, tp->package_cpus_list[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->core_cpus_lists, sizeof(tp->core_cpus_lists));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_cpus_lists; i++) {
		ret = do_write_string(ff, tp->core_cpus_list[i]);
		if (ret < 0)
			goto done;
	}

	ret = perf_env__read_cpu_topology_map(env);
	if (ret < 0)
		goto done;

	for (j = 0; j < env->nr_cpus_avail; j++) {
		ret = do_write(ff, &env->cpu[j].core_id,
			       sizeof(env->cpu[j].core_id));
		if (ret < 0)
			goto done;
		ret = do_write(ff, &env->cpu[j].socket_id,
			       sizeof(env->cpu[j].socket_id));
		if (ret < 0)
			goto done;
	}

	if (!tp->die_cpus_lists)
		goto done;

	ret = do_write(ff, &tp->die_cpus_lists, sizeof(tp->die_cpus_lists));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->die_cpus_lists; i++) {
		ret = do_write_string(ff, tp->die_cpus_list[i]);
		if (ret < 0)
			goto done;
	}

	for (j = 0; j < env->nr_cpus_avail; j++) {
		ret = do_write(ff, &env->cpu[j].die_id,
			       sizeof(env->cpu[j].die_id));
		if (ret < 0)
			goto done;
	}

done:
	cpu_topology__delete(tp);
	return ret;
}

static int write_total_mem(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_numa_topology(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct numa_topology *tp;
	int ret = -1;
	u32 i;

	tp = numa_topology__new();
	if (!tp)
		return -ENOMEM;

	ret = do_write(ff, &tp->nr, sizeof(u32));
	if (ret < 0)
		goto err;

	for (i = 0; i < tp->nr; i++) {
		struct numa_topology_node *n = &tp->nodes[i];

		ret = do_write(ff, &n->node, sizeof(u32));
		if (ret < 0)
			goto err;

		ret = do_write(ff, &n->mem_total, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write(ff, &n->mem_free, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write_string(ff, n->cpus);
		if (ret < 0)
			goto err;
	}

	ret = 0;

err:
	numa_topology__delete(tp);
	return ret;
}

/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

static int write_pmu_mappings(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

	/*
	 * Do a first pass to count the number of PMUs, avoiding lseek,
	 * so this works in pipe mode as well.
	 */
	while ((pmu = perf_pmus__scan(pmu)))
		pmu_num++;

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmus__scan(pmu))) {
		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}
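
/*
 * Minimal reader sketch for the pmu_mappings layout above (illustrative
 * only, error handling elided; the real decoder is the corresponding
 * process function for this feature):
 *
 *	u32 pmu_num, type;
 *	char *name;
 *
 *	if (do_read_u32(ff, &pmu_num))
 *		return -1;
 *	while (pmu_num--) {
 *		if (do_read_u32(ff, &type))
 *			return -1;
 *		name = do_read_string(ff);	// len-prefixed, zero padded
 *		if (!name)
 *			return -1;
 *		// ... record the (type, name) mapping ...
 *		free(name);
 *	}
 */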

/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	u32 nr_groups = evlist__nr_groups(evlist);
	struct evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->core.idx;
			u32 nr_members = evsel->core.nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
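
/*
 * Worked example (illustrative): for a session recorded with
 * -e '{cycles,instructions}' this feature writes nr_groups = 1, then a
 * single group_desc with name = "{anon_group}" (no explicit group name
 * was given), leader_idx = 0 and nr_members = 2.
 */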

/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
 */
char * __weak get_cpuid_str(struct perf_cpu cpu __maybe_unused)
{
	return NULL;
}

char *get_cpuid_allow_env_override(struct perf_cpu cpu)
{
	char *cpuid;
	static bool printed;

	cpuid = getenv("PERF_CPUID");
	if (cpuid)
		cpuid = strdup(cpuid);
	if (!cpuid)
		cpuid = get_cpuid_str(cpu);
	if (!cpuid)
		return NULL;

	if (!printed) {
		pr_debug("Using CPUID %s\n", cpuid);
		printed = true;
	}
	return cpuid;
}

/* Return zero when the cpuid from the mapfile.csv matches the
 * cpuid string generated on this platform.
 * Otherwise return non-zero.
 */
int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
		/* Warn that the mapfile pattern is not a valid regex. */
		pr_info("Invalid regular expression %s\n", mapcpuid);
		return 1;
	}

	match = !regexec(&re, cpuid, 1, pmatch, 0);
	regfree(&re);
	if (match) {
		size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);

		/* Verify the entire string matched. */
		if (match_len == strlen(cpuid))
			return 0;
	}
	return 1;
}
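
/*
 * Usage sketch with illustrative values: a mapfile.csv entry may carry a
 * regular expression such as mapcpuid = "GenuineIntel-6-8[DE]".  Against
 * the generated cpuid "GenuineIntel-6-8E" the regex matches the whole
 * string, match_len equals strlen(cpuid), and strcmp_cpuid_str() returns
 * 0.  A pattern matching only a prefix, e.g. "GenuineIntel-6-8" with no
 * anchors, fails the match_len check above and returns 1.
 */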

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused,
		     struct perf_cpu cpu __maybe_unused)
{
	return ENOSYS; /* Not implemented */
}

static int write_cpuid(struct feat_fd *ff, struct evlist *evlist)
{
	struct perf_cpu cpu = perf_cpu_map__min(evlist->core.all_cpus);
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer), cpu);
	if (ret)
		return -1;

	return do_write_string(ff, buffer);
}

static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_auxtrace(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}

static int write_clockid(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write(ff, &ff->ph->env.clock.clockid_res_ns,
			sizeof(ff->ph->env.clock.clockid_res_ns));
}

static int write_clock_data(struct feat_fd *ff,
			    struct evlist *evlist __maybe_unused)
{
	u64 *data64;
	u32 data32;
	int ret;

	/* version */
	data32 = 1;

	ret = do_write(ff, &data32, sizeof(data32));
	if (ret < 0)
		return ret;

	/* clockid */
	data32 = ff->ph->env.clock.clockid;

	ret = do_write(ff, &data32, sizeof(data32));
	if (ret < 0)
		return ret;

	/* TOD ref time */
	data64 = &ff->ph->env.clock.tod_ns;

	ret = do_write(ff, data64, sizeof(*data64));
	if (ret < 0)
		return ret;

	/* clockid ref time */
	data64 = &ff->ph->env.clock.clockid_ns;

	return do_write(ff, data64, sizeof(*data64));
}

static int write_hybrid_topology(struct feat_fd *ff,
				 struct evlist *evlist __maybe_unused)
{
	struct hybrid_topology *tp;
	int ret;
	u32 i;

	tp = hybrid_topology__new();
	if (!tp)
		return -ENOENT;

	ret = do_write(ff, &tp->nr, sizeof(u32));
	if (ret < 0)
		goto err;

	for (i = 0; i < tp->nr; i++) {
		struct hybrid_topology_node *n = &tp->nodes[i];

		ret = do_write_string(ff, n->pmu_name);
		if (ret < 0)
			goto err;

		ret = do_write_string(ff, n->cpus);
		if (ret < 0)
			goto err;
	}

	ret = 0;

err:
	hybrid_topology__delete(tp);
	return ret;
}

static int write_dir_format(struct feat_fd *ff,
			    struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	if (WARN_ON(!perf_data__is_dir(data)))
		return -1;

	return do_write(ff, &data->dir.version, sizeof(data->dir.version));
}

#ifdef HAVE_LIBBPF_SUPPORT
static int write_bpf_prog_info(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret = 0;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.infos_cnt,
		       sizeof(env->bpf_progs.infos_cnt));
	if (ret < 0 || env->bpf_progs.infos_cnt == 0)
		goto out;

	root = &env->bpf_progs.infos;
	next = rb_first(root);
	while (next) {
		struct bpf_prog_info_node *node;
		size_t len;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);
		len = sizeof(struct perf_bpil) +
			node->info_linear->data_len;

		/* before writing to file, translate address to offset */
		bpil_addr_to_offs(node->info_linear);
		ret = do_write(ff, node->info_linear, len);
		/*
		 * translate back to address even when do_write() fails,
		 * so that this function never changes the data.
		 */
		bpil_offs_to_addr(node->info_linear);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}

static int write_bpf_btf(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;
	int ret = 0;

	down_read(&env->bpf_progs.lock);

	ret = do_write(ff, &env->bpf_progs.btfs_cnt,
		       sizeof(env->bpf_progs.btfs_cnt));

	if (ret < 0 || env->bpf_progs.btfs_cnt == 0)
		goto out;

	root = &env->bpf_progs.btfs;
	next = rb_first(root);
	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		ret = do_write(ff, &node->id,
			       sizeof(u32) * 2 + node->data_size);
		if (ret < 0)
			goto out;
	}
out:
	up_read(&env->bpf_progs.lock);
	return ret;
}
#endif // HAVE_LIBBPF_SUPPORT

static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}

static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = strim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		zfree(&cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = strim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		zfree(&cache->size);
		zfree(&cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = strim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

/*
 * Build cache levels for a particular CPU from the data in
 * /sys/devices/system/cpu/cpu<cpu>/cache/
 * The cache level data is stored in caches[] starting at index *cntp.
 */
int build_caches_for_cpu(u32 cpu, struct cpu_cache_level caches[], u32 *cntp)
{
	u16 level;

	for (level = 0; level < MAX_CACHE_LVL; level++) {
		struct cpu_cache_level c;
		int err;
		u32 i;

		err = cpu_cache_level__read(&c, cpu, level);
		if (err < 0)
			return err;

		if (err == 1)
			break;

		for (i = 0; i < *cntp; i++) {
			if (cpu_cache_level__cmp(&c, &caches[i]))
				break;
		}

		if (i == *cntp) {
			caches[*cntp] = c;
			*cntp = *cntp + 1;
		} else
			cpu_cache_level__free(&c);
	}

	return 0;
}

static int build_caches(struct cpu_cache_level caches[], u32 *cntp)
{
	u32 nr, cpu, cnt = 0;

	nr = cpu__max_cpu().cpu;

	for (cpu = 0; cpu < nr; cpu++) {
		int ret = build_caches_for_cpu(cpu, caches, &cnt);

		if (ret)
			return ret;
	}
	*cntp = cnt;
	return 0;
}

static int write_cache(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	u32 max_caches = cpu__max_cpu().cpu * MAX_CACHE_LVL;
	struct cpu_cache_level caches[max_caches];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)					\
			ret = do_write(ff, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_sample_time(struct feat_fd *ff,
			     struct evlist *evlist)
{
	int ret;

	ret = do_write(ff, &evlist->first_sample_time,
		       sizeof(evlist->first_sample_time));
	if (ret < 0)
		return ret;

	return do_write(ff, &evlist->last_sample_time,
			sizeof(evlist->last_sample_time));
}

static int memory_node__read(struct memory_node *n, unsigned long idx)
{
	unsigned int phys, size = 0;
	char path[PATH_MAX];
	struct io_dirent64 *ent;
	struct io_dir dir;

#define for_each_memory(mem, dir)					\
	while ((ent = io_dir__readdir(&dir)) != NULL)			\
		if (strcmp(ent->d_name, ".") &&				\
		    strcmp(ent->d_name, "..") &&			\
		    sscanf(ent->d_name, "memory%u", &mem) == 1)

	scnprintf(path, PATH_MAX,
		  "%s/devices/system/node/node%lu",
		  sysfs__mountpoint(), idx);

	io_dir__init(&dir, open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	if (dir.dirfd < 0) {
		pr_warning("failed: can't open memory sysfs data '%s'\n", path);
		return -1;
	}

	for_each_memory(phys, dir) {
		size = max(phys, size);
	}

	size++;

	n->set = bitmap_zalloc(size);
	if (!n->set) {
		close(dir.dirfd);
		return -ENOMEM;
	}

	n->node = idx;
	n->size = size;

	io_dir__rewinddir(&dir);

	for_each_memory(phys, dir) {
		__set_bit(phys, n->set);
	}

	close(dir.dirfd);
	return 0;
}

static void memory_node__delete_nodes(struct memory_node *nodesp, u64 cnt)
{
	for (u64 i = 0; i < cnt; i++)
		bitmap_free(nodesp[i].set);

	free(nodesp);
}

static int memory_node__sort(const void *a, const void *b)
{
	const struct memory_node *na = a;
	const struct memory_node *nb = b;

	return na->node - nb->node;
}

static int build_mem_topology(struct memory_node **nodesp, u64 *cntp)
{
	char path[PATH_MAX];
	struct io_dirent64 *ent;
	struct io_dir dir;
	int ret = 0;
	size_t cnt = 0, size = 0;
	struct memory_node *nodes = NULL;

	scnprintf(path, PATH_MAX, "%s/devices/system/node/",
		  sysfs__mountpoint());

	io_dir__init(&dir, open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	if (dir.dirfd < 0) {
		pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
			  __func__, path);
		return -1;
	}

	while (!ret && (ent = io_dir__readdir(&dir))) {
		unsigned int idx;
		int r;

		if (!strcmp(ent->d_name, ".") ||
		    !strcmp(ent->d_name, ".."))
			continue;

		r = sscanf(ent->d_name, "node%u", &idx);
		if (r != 1)
			continue;

		if (cnt >= size) {
			struct memory_node *new_nodes =
				reallocarray(nodes, cnt + 4, sizeof(*nodes));

			if (!new_nodes) {
				pr_err("Failed to write MEM_TOPOLOGY, size %zu nodes\n", size);
				ret = -ENOMEM;
				goto out;
			}
			nodes = new_nodes;
			size += 4;
		}
		ret = memory_node__read(&nodes[cnt], idx);
		if (!ret)
			cnt += 1;
	}
out:
	close(dir.dirfd);
	if (!ret) {
		*cntp = cnt;
		*nodesp = nodes;
		qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);
	} else
		memory_node__delete_nodes(nodes, cnt);

	return ret;
}

/*
 * The MEM_TOPOLOGY feature holds the physical memory map for every
 * node in the system. The format of the data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we then store the map of physical indexes:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belong to the node
 */
static int write_mem_topology(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct memory_node *nodes = NULL;
	u64 bsize, version = 1, i, nr = 0;
	int ret;

	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes, &nr);
	if (ret)
		return ret;

	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

		#define _W(v)						\
			ret = do_write(ff, &n->v, sizeof(n->v));	\
			if (ret < 0)					\
				goto out;

		_W(node)
		_W(size)

		#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

out:
	memory_node__delete_nodes(nodes, nr);
	return ret;
}
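
/*
 * Worked example of the layout above (illustrative numbers): a machine
 * with a 128MB memory block size and a single node backed by blocks
 * memory0..memory15 produces version = 1, block_size_bytes = 0x8000000,
 * count = 1, then node id 0, a bitmap size of 16 and, via
 * do_write_bitmap(), one u64 word with the low 16 bits set (0xffff).
 */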

static int write_compressed(struct feat_fd *ff,
			    struct evlist *evlist __maybe_unused)
{
	int ret;

	ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
	if (ret)
		return ret;

	return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}

static int __write_pmu_caps(struct feat_fd *ff, struct perf_pmu *pmu,
			    bool write_pmu)
{
	struct perf_pmu_caps *caps = NULL;
	int ret;

	ret = do_write(ff, &pmu->nr_caps, sizeof(pmu->nr_caps));
	if (ret < 0)
		return ret;

	list_for_each_entry(caps, &pmu->caps, list) {
		ret = do_write_string(ff, caps->name);
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, caps->value);
		if (ret < 0)
			return ret;
	}

	if (write_pmu) {
		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return ret;
}

static int write_cpu_pmu_caps(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *cpu_pmu = perf_pmus__find_core_pmu();
	int ret;

	if (!cpu_pmu)
		return -ENOENT;

	ret = perf_pmu__caps_parse(cpu_pmu);
	if (ret < 0)
		return ret;

	return __write_pmu_caps(ff, cpu_pmu, false);
}

static int write_pmu_caps(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	int nr_pmu = 0;
	int ret;

	while ((pmu = perf_pmus__scan(pmu))) {
		if (!strcmp(pmu->name, "cpu")) {
			/*
			 * The "cpu" PMU is special and covered by
			 * HEADER_CPU_PMU_CAPS. Note, core PMUs are
			 * counted/written here for ARM, s390 and Intel hybrid.
			 */
			continue;
		}
		if (perf_pmu__caps_parse(pmu) <= 0)
			continue;
		nr_pmu++;
	}

	ret = do_write(ff, &nr_pmu, sizeof(nr_pmu));
	if (ret < 0)
		return ret;

	if (!nr_pmu)
		return 0;

	/*
	 * Note older perf tools assume core PMUs come first, this is a property
	 * of perf_pmus__scan.
	 */
	pmu = NULL;
	while ((pmu = perf_pmus__scan(pmu))) {
		if (!strcmp(pmu->name, "cpu")) {
			/* Skip as above. */
			continue;
		}
		if (perf_pmu__caps_parse(pmu) <= 0)
			continue;
		ret = __write_pmu_caps(ff, pmu, true);
		if (ret < 0)
			return ret;
	}
	return 0;
}

struct cpu_domain_map **build_cpu_domain_map(u32 *schedstat_version, u32 *max_sched_domains, u32 nr)
{
	char dname[DNAME_LEN], cpumask[MAX_NR_CPUS];
	struct domain_info *domain_info;
	struct cpu_domain_map **cd_map;
	char cpulist[MAX_NR_CPUS];
	char *line = NULL;
	u32 cpu, domain;
	u32 dcount = 0;
	size_t len = 0;
	FILE *fp;

	fp = fopen("/proc/schedstat", "r");
	if (!fp) {
		pr_err("Failed to open /proc/schedstat\n");
		return NULL;
	}

	cd_map = zalloc(sizeof(*cd_map) * nr);
	if (!cd_map)
		goto out;

	while (getline(&line, &len, fp) > 0) {
		int retval;

		if (strncmp(line, "version", 7) == 0) {
			retval = sscanf(line, "version %u\n", schedstat_version);
			if (retval != 1)
				continue;

		} else if (strncmp(line, "cpu", 3) == 0) {
			retval = sscanf(line, "cpu%u %*s", &cpu);
			if (retval == 1) {
				cd_map[cpu] = zalloc(sizeof(*cd_map[cpu]));
				if (!cd_map[cpu])
					goto out_free_line;
				cd_map[cpu]->cpu = cpu;
			} else
				continue;

			dcount = 0;
		} else if (strncmp(line, "domain", 6) == 0) {
			struct domain_info **temp_domains;

			dcount++;
			temp_domains = realloc(cd_map[cpu]->domains, dcount * sizeof(domain_info));
			if (!temp_domains)
				goto out_free_line;
			else
				cd_map[cpu]->domains = temp_domains;

			domain_info = zalloc(sizeof(*domain_info));
			if (!domain_info)
				goto out_free_line;

			cd_map[cpu]->domains[dcount - 1] = domain_info;

			if (*schedstat_version >= 17) {
				retval = sscanf(line, "domain%u %s %s %*s", &domain, dname,
						cpumask);
				if (retval != 3)
					continue;

				domain_info->dname = strdup(dname);
				if (!domain_info->dname)
					goto out_free_line;
			} else {
				retval = sscanf(line, "domain%u %s %*s", &domain, cpumask);
				if (retval != 2)
					continue;
			}

			domain_info->domain = domain;
			if (domain > *max_sched_domains)
				*max_sched_domains = domain;

			domain_info->cpumask = strdup(cpumask);
			if (!domain_info->cpumask)
				goto out_free_line;

			cpumask_to_cpulist(cpumask, cpulist);
			domain_info->cpulist = strdup(cpulist);
			if (!domain_info->cpulist)
				goto out_free_line;

			cd_map[cpu]->nr_domains = dcount;
		}
	}

out_free_line:
	free(line);
out:
	fclose(fp);
	return cd_map;
}
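
/*
 * Example /proc/schedstat lines the parser above expects (illustrative;
 * the trailing statistics fields are trimmed):
 *
 *	version 17
 *	cpu0 0 0 0 0 0 0 ...
 *	domain0 SMT 00000000,00000003 ...
 *
 * For schedstat versions >= 17 a domain line carries a name ("SMT")
 * between the domain index and the cpumask, which is why the two
 * sscanf() formats above differ; older versions go straight from the
 * domain index to the cpumask.
 */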

static int write_cpu_domain_info(struct feat_fd *ff,
				 struct evlist *evlist __maybe_unused)
{
	u32 max_sched_domains = 0, schedstat_version = 0;
	struct cpu_domain_map **cd_map;
	u32 i, j, nr;
	int ret;

	nr = cpu__max_present_cpu().cpu;

	cd_map = build_cpu_domain_map(&schedstat_version, &max_sched_domains, nr);
	if (!cd_map)
		return -1;

	ret = do_write(ff, &schedstat_version, sizeof(u32));
	if (ret < 0)
		goto out;

	max_sched_domains += 1;
	ret = do_write(ff, &max_sched_domains, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		if (!cd_map[i])
			continue;

		ret = do_write(ff, &cd_map[i]->cpu, sizeof(u32));
		if (ret < 0)
			goto out;

		ret = do_write(ff, &cd_map[i]->nr_domains, sizeof(u32));
		if (ret < 0)
			goto out;

		for (j = 0; j < cd_map[i]->nr_domains; j++) {
			ret = do_write(ff, &cd_map[i]->domains[j]->domain, sizeof(u32));
			if (ret < 0)
				goto out;
			if (schedstat_version >= 17) {
				ret = do_write_string(ff, cd_map[i]->domains[j]->dname);
				if (ret < 0)
					goto out;
			}

			ret = do_write_string(ff, cd_map[i]->domains[j]->cpumask);
			if (ret < 0)
				goto out;

			ret = do_write_string(ff, cd_map[i]->domains[j]->cpulist);
			if (ret < 0)
				goto out;
		}
	}

out:
	free_cpu_domain_info(cd_map, schedstat_version, nr);
	return ret;
}

static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}

static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}

static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}

static void print_e_machine(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# e_machine : %u\n", ff->ph->env.e_machine);
	fprintf(fp, "#   e_flags : %u\n", ff->ph->env.e_flags);
}

static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}

static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}

static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}

static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
		if (!argv_i) {
			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
		} else {
			char *mem = argv_i;
			do {
				char *quote = strchr(argv_i, '\'');
				if (!quote)
					break;
				*quote++ = '\0';
				fprintf(fp, "%s\\\'", argv_i);
				argv_i = quote;
			} while (1);
			fprintf(fp, "%s ", argv_i);
			free(mem);
		}
	}
	fputc('\n', fp);
}

static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling sockets : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		nr = ph->env.nr_sibling_dies;
		str = ph->env.sibling_dies;

		for (i = 0; i < nr; i++) {
			fprintf(fp, "# sibling dies    : %s\n", str);
			str += strlen(str) + 1;
		}
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Die ID %d, Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].die_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID, Die ID and Socket ID "
				    "information is not available\n");
	} else {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID and Socket ID "
				    "information is not available\n");
	}
}

static void print_clockid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
		ff->ph->env.clock.clockid_res_ns * 1000);
}

static void print_clock_data(struct feat_fd *ff, FILE *fp)
{
	struct timespec clockid_ns;
	char tstr[64], date[64];
	struct timeval tod_ns;
	clockid_t clockid;
	struct tm ltime;
	u64 ref;

	if (!ff->ph->env.clock.enabled) {
		fprintf(fp, "# reference time disabled\n");
		return;
	}

	/* Compute TOD time. */
	ref = ff->ph->env.clock.tod_ns;
	tod_ns.tv_sec = ref / NSEC_PER_SEC;
	ref -= tod_ns.tv_sec * NSEC_PER_SEC;
	tod_ns.tv_usec = ref / NSEC_PER_USEC;

	/* Compute clockid time. */
	ref = ff->ph->env.clock.clockid_ns;
	clockid_ns.tv_sec = ref / NSEC_PER_SEC;
	ref -= clockid_ns.tv_sec * NSEC_PER_SEC;
	clockid_ns.tv_nsec = ref;

	clockid = ff->ph->env.clock.clockid;

	if (localtime_r(&tod_ns.tv_sec, &ltime) == NULL)
		snprintf(tstr, sizeof(tstr), "<error>");
	else {
		strftime(date, sizeof(date), "%F %T", &ltime);
		scnprintf(tstr, sizeof(tstr), "%s.%06d",
			  date, (int) tod_ns.tv_usec);
	}

	fprintf(fp, "# clockid: %s (%u)\n", clockid_name(clockid), clockid);
	fprintf(fp, "# reference time: %s = %ld.%06d (TOD) = %ld.%09ld (%s)\n",
		    tstr, (long) tod_ns.tv_sec, (int) tod_ns.tv_usec,
		    (long) clockid_ns.tv_sec, clockid_ns.tv_nsec,
		    clockid_name(clockid));
}

static void print_hybrid_topology(struct feat_fd *ff, FILE *fp)
{
	int i;
	struct hybrid_node *n;

	fprintf(fp, "# hybrid cpu system:\n");
	for (i = 0; i < ff->ph->env.nr_hybrid_nodes; i++) {
		n = &ff->ph->env.hybrid_nodes[i];
		fprintf(fp, "# %s cpu list : %s\n", n->pmu_name, n->cpus);
	}
}

static void print_dir_format(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}

#ifdef HAVE_LIBBPF_SUPPORT
static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	if (!next)
		fprintf(fp, "# bpf_prog_info empty\n");

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);

		__bpf_event__print_bpf_prog_info(&node->info_linear->info,
						 env, fp);
	}

	up_read(&env->bpf_progs.lock);
}

static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	if (!next)
		fprintf(fp, "# btf info empty\n");

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		fprintf(fp, "# btf info of id %u\n", node->id);
	}

	up_read(&env->bpf_progs.lock);
}
#endif // HAVE_LIBBPF_SUPPORT
2036 
2037 static void free_event_desc(struct evsel *events)
2038 {
2039 	struct evsel *evsel;
2040 
2041 	if (!events)
2042 		return;
2043 
2044 	for (evsel = events; evsel->core.attr.size; evsel++) {
2045 		zfree(&evsel->name);
2046 		zfree(&evsel->core.id);
2047 	}
2048 
2049 	free(events);
2050 }
2051 
2052 static bool perf_attr_check(struct perf_event_attr *attr)
2053 {
2054 	if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) {
2055 		pr_warning("Reserved bits are set unexpectedly. "
2056 			   "Please update perf tool.\n");
2057 		return false;
2058 	}
2059 
2060 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) {
2061 		pr_warning("Unknown sample type (0x%llx) is detected. "
2062 			   "Please update perf tool.\n",
2063 			   attr->sample_type);
2064 		return false;
2065 	}
2066 
2067 	if (attr->read_format & ~(PERF_FORMAT_MAX-1)) {
2068 		pr_warning("Unknown read format (0x%llx) is detected. "
2069 			   "Please update perf tool.\n",
2070 			   attr->read_format);
2071 		return false;
2072 	}
2073 
2074 	if ((attr->sample_type & PERF_SAMPLE_BRANCH_STACK) &&
2075 	    (attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX-1))) {
2076 		pr_warning("Unknown branch sample type (0x%llx) is detected. "
2077 			   "Please update perf tool.\n",
2078 			   attr->branch_sample_type);
2079 
2080 		return false;
2081 	}
2082 
2083 	return true;
2084 }
2085 
2086 static struct evsel *read_event_desc(struct feat_fd *ff)
2087 {
2088 	struct evsel *evsel, *events = NULL;
2089 	u64 *id;
2090 	void *buf = NULL;
2091 	u32 nre, sz, nr, i, j;
2092 	size_t msz;
2093 
2094 	/* number of events */
2095 	if (do_read_u32(ff, &nre))
2096 		goto error;
2097 
2098 	if (do_read_u32(ff, &sz))
2099 		goto error;
2100 
2101 	/* buffer to hold the on-file attr struct */
2102 	buf = malloc(sz);
2103 	if (!buf)
2104 		goto error;
2105 
2106 	/* the last event terminates with evsel->core.attr.size == 0: */
2107 	events = calloc(nre + 1, sizeof(*events));
2108 	if (!events)
2109 		goto error;
2110 
2111 	msz = sizeof(evsel->core.attr);
2112 	if (sz < msz)
2113 		msz = sz;
2114 
2115 	for (i = 0, evsel = events; i < nre; evsel++, i++) {
2116 		evsel->core.idx = i;
2117 
2118 		/*
2119 		 * must read entire on-file attr struct to
2120 		 * sync up with layout.
2121 		 */
2122 		if (__do_read(ff, buf, sz))
2123 			goto error;
2124 
2125 		if (ff->ph->needs_swap)
2126 			perf_event__attr_swap(buf);
2127 
2128 		memcpy(&evsel->core.attr, buf, msz);
2129 
2130 		if (!perf_attr_check(&evsel->core.attr))
2131 			goto error;
2132 
2133 		if (do_read_u32(ff, &nr))
2134 			goto error;
2135 
2136 		if (ff->ph->needs_swap)
2137 			evsel->needs_swap = true;
2138 
2139 		evsel->name = do_read_string(ff);
2140 		if (!evsel->name)
2141 			goto error;
2142 
2143 		if (!nr)
2144 			continue;
2145 
2146 		id = calloc(nr, sizeof(*id));
2147 		if (!id)
2148 			goto error;
2149 		evsel->core.ids = nr;
2150 		evsel->core.id = id;
2151 
2152 		for (j = 0 ; j < nr; j++) {
2153 			if (do_read_u64(ff, id))
2154 				goto error;
2155 			id++;
2156 		}
2157 	}
2158 out:
2159 	free(buf);
2160 	return events;
2161 error:
2162 	free_event_desc(events);
2163 	events = NULL;
2164 	goto out;
2165 }
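/*
 * Rough sketch of the EVENT_DESC layout consumed above (strings are in
 * do_read_string()'s length-prefixed form):
 *
 *   u32 nre;                 number of events
 *   u32 sz;                  on-file size of perf_event_attr
 *   struct {
 *           char attr[sz];   perf_event_attr as written by the recorder
 *           u32  nr;         number of sample ids
 *           str  name;       event name
 *           u64  ids[nr];
 *   } desc[nre];
 *
 * Only min(sz, sizeof(evsel->core.attr)) bytes of each attr are kept,
 * so files written by an older or newer perf still parse.
 */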
2166 
2167 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
2168 				void *priv __maybe_unused)
2169 {
2170 	return fprintf(fp, ", %s = %s", name, val);
2171 }
2172 
2173 static void print_event_desc(struct feat_fd *ff, FILE *fp)
2174 {
2175 	struct evsel *evsel, *events;
2176 	u32 j;
2177 	u64 *id;
2178 
2179 	if (ff->events)
2180 		events = ff->events;
2181 	else
2182 		events = read_event_desc(ff);
2183 
2184 	if (!events) {
2185 		fprintf(fp, "# event desc: not available or unable to read\n");
2186 		return;
2187 	}
2188 
2189 	for (evsel = events; evsel->core.attr.size; evsel++) {
2190 		fprintf(fp, "# event : name = %s", evsel->name);
2191 
2192 		if (evsel->core.ids) {
2193 			fprintf(fp, ", id = {");
2194 			for (j = 0, id = evsel->core.id; j < evsel->core.ids; j++, id++) {
2195 				if (j)
2196 					fputc(',', fp);
2197 				fprintf(fp, " %"PRIu64, *id);
2198 			}
2199 			fprintf(fp, " }");
2200 		}
2201 
2202 		perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);
2203 
2204 		fputc('\n', fp);
2205 	}
2206 
2207 	free_event_desc(events);
2208 	ff->events = NULL;
2209 }
2210 
2211 static void print_total_mem(struct feat_fd *ff, FILE *fp)
2212 {
2213 	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
2214 }
2215 
2216 static void print_numa_topology(struct feat_fd *ff, FILE *fp)
2217 {
2218 	int i;
2219 	struct numa_node *n;
2220 
2221 	for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
2222 		n = &ff->ph->env.numa_nodes[i];
2223 
2224 		fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
2225 			    " free = %"PRIu64" kB\n",
2226 			n->node, n->mem_total, n->mem_free);
2227 
2228 		fprintf(fp, "# node%u cpu list : ", n->node);
2229 		cpu_map__fprintf(n->map, fp);
2230 	}
2231 }
2232 
2233 static void print_cpuid(struct feat_fd *ff, FILE *fp)
2234 {
2235 	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
2236 }
2237 
2238 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
2239 {
2240 	fprintf(fp, "# contains samples with branch stack\n");
2241 }
2242 
2243 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
2244 {
2245 	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
2246 }
2247 
2248 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
2249 {
2250 	fprintf(fp, "# contains stat data\n");
2251 }
2252 
2253 static void print_cache(struct feat_fd *ff, FILE *fp)
2254 {
2255 	int i;
2256 
2257 	fprintf(fp, "# CPU cache info:\n");
2258 	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
2259 		fprintf(fp, "#  ");
2260 		cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
2261 	}
2262 }
2263 
2264 static void print_compressed(struct feat_fd *ff, FILE *fp)
2265 {
2266 	fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
2267 		ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
2268 		ff->ph->env.comp_level, ff->ph->env.comp_ratio);
2269 }
2270 
2271 static void __print_pmu_caps(FILE *fp, int nr_caps, char **caps, char *pmu_name)
2272 {
2273 	const char *delimiter = "";
2274 	int i;
2275 
2276 	if (!nr_caps) {
2277 		fprintf(fp, "# %s pmu capabilities: not available\n", pmu_name);
2278 		return;
2279 	}
2280 
2281 	fprintf(fp, "# %s pmu capabilities: ", pmu_name);
2282 	for (i = 0; i < nr_caps; i++) {
2283 		fprintf(fp, "%s%s", delimiter, caps[i]);
2284 		delimiter = ", ";
2285 	}
2286 
2287 	fprintf(fp, "\n");
2288 }
2289 
2290 static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
2291 {
2292 	__print_pmu_caps(fp, ff->ph->env.nr_cpu_pmu_caps,
2293 			 ff->ph->env.cpu_pmu_caps, (char *)"cpu");
2294 }
2295 
2296 static void print_pmu_caps(struct feat_fd *ff, FILE *fp)
2297 {
2298 	struct perf_env *env = &ff->ph->env;
2299 	struct pmu_caps *pmu_caps;
2300 
2301 	for (int i = 0; i < env->nr_pmus_with_caps; i++) {
2302 		pmu_caps = &env->pmu_caps[i];
2303 		__print_pmu_caps(fp, pmu_caps->nr_caps, pmu_caps->caps,
2304 				 pmu_caps->pmu_name);
2305 	}
2306 
2307 	if (strcmp(perf_env__arch(env), "x86") == 0 &&
2308 	    perf_env__has_pmu_mapping(env, "ibs_op")) {
2309 		char *max_precise = perf_env__find_pmu_cap(env, "cpu", "max_precise");
2310 
2311 		if (max_precise != NULL && atoi(max_precise) == 0)
2312 			fprintf(fp, "# AMD systems use the ibs_op// PMU for some precise events, e.g.: cycles:p, see the 'perf list' man page for further details.\n");
2313 	}
2314 }
2315 
2316 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
2317 {
2318 	struct perf_env *env = &ff->ph->env;
2319 	const char *delimiter = "# pmu mappings: ";
2320 	char *str, *tmp;
2321 	u32 pmu_num;
2322 	u32 type;
2323 
2324 	pmu_num = env->nr_pmu_mappings;
2325 	if (!pmu_num) {
2326 		fprintf(fp, "# pmu mappings: not available\n");
2327 		return;
2328 	}
2329 
2330 	str = env->pmu_mappings;
2331 
2332 	while (pmu_num) {
2333 		type = strtoul(str, &tmp, 0);
2334 		if (*tmp != ':')
2335 			goto error;
2336 
2337 		str = tmp + 1;
2338 		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);
2339 
2340 		delimiter = ", ";
2341 		str += strlen(str) + 1;
2342 		pmu_num--;
2343 	}
2344 
2345 	fprintf(fp, "\n");
2346 
2347 	if (!pmu_num)
2348 		return;
2349 error:
2350 	fprintf(fp, "# pmu mappings: unable to read\n");
2351 }
2352 
2353 static void print_group_desc(struct feat_fd *ff, FILE *fp)
2354 {
2355 	struct perf_session *session;
2356 	struct evsel *evsel;
2357 	u32 nr = 0;
2358 
2359 	session = container_of(ff->ph, struct perf_session, header);
2360 
2361 	evlist__for_each_entry(session->evlist, evsel) {
2362 		if (evsel__is_group_leader(evsel) && evsel->core.nr_members > 1) {
2363 			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", evsel__name(evsel));
2364 
2365 			nr = evsel->core.nr_members - 1;
2366 		} else if (nr) {
2367 			fprintf(fp, ",%s", evsel__name(evsel));
2368 
2369 			if (--nr == 0)
2370 				fprintf(fp, "}\n");
2371 		}
2372 	}
2373 }
2374 
2375 static void print_sample_time(struct feat_fd *ff, FILE *fp)
2376 {
2377 	struct perf_session *session;
2378 	char time_buf[32];
2379 	double d;
2380 
2381 	session = container_of(ff->ph, struct perf_session, header);
2382 
2383 	timestamp__scnprintf_usec(session->evlist->first_sample_time,
2384 				  time_buf, sizeof(time_buf));
2385 	fprintf(fp, "# time of first sample : %s\n", time_buf);
2386 
2387 	timestamp__scnprintf_usec(session->evlist->last_sample_time,
2388 				  time_buf, sizeof(time_buf));
2389 	fprintf(fp, "# time of last sample : %s\n", time_buf);
2390 
2391 	d = (double)(session->evlist->last_sample_time -
2392 		session->evlist->first_sample_time) / NSEC_PER_MSEC;
2393 
2394 	fprintf(fp, "# sample duration : %10.3f ms\n", d);
2395 }
2396 
2397 static void memory_node__fprintf(struct memory_node *n,
2398 				 unsigned long long bsize, FILE *fp)
2399 {
2400 	char buf_map[100], buf_size[50];
2401 	unsigned long long size;
2402 
2403 	size = bsize * bitmap_weight(n->set, n->size);
2404 	unit_number__scnprintf(buf_size, 50, size);
2405 
2406 	bitmap_scnprintf(n->set, n->size, buf_map, 100);
2407 	fprintf(fp, "#  %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
2408 }
2409 
2410 static void print_mem_topology(struct feat_fd *ff, FILE *fp)
2411 {
2412 	struct perf_env *env = &ff->ph->env;
2413 	struct memory_node *nodes;
2414 	int i, nr;
2415 
2416 	nodes = env->memory_nodes;
2417 	nr    = env->nr_memory_nodes;
2418 
2419 	fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
2420 		nr, env->memory_bsize);
2421 
2422 	for (i = 0; i < nr; i++) {
2423 		memory_node__fprintf(&nodes[i], env->memory_bsize, fp);
2424 	}
2425 }
2426 
2427 static void print_cpu_domain_info(struct feat_fd *ff, FILE *fp)
2428 {
2429 	struct cpu_domain_map **cd_map = ff->ph->env.cpu_domain;
2430 	u32 nr = ff->ph->env.nr_cpus_avail;
2431 	struct domain_info *d_info;
2432 	u32 i, j;
2433 
2434 	fprintf(fp, "# schedstat version	: %u\n", ff->ph->env.schedstat_version);
2435 	fprintf(fp, "# Maximum sched domains	: %u\n", ff->ph->env.max_sched_domains);
2436 
2437 	for (i = 0; i < nr; i++) {
2438 		if (!cd_map[i])
2439 			continue;
2440 
2441 		fprintf(fp, "# cpu		: %u\n", cd_map[i]->cpu);
2442 		fprintf(fp, "# nr_domains	: %u\n", cd_map[i]->nr_domains);
2443 
2444 		for (j = 0; j < cd_map[i]->nr_domains; j++) {
2445 			d_info = cd_map[i]->domains[j];
2446 			if (!d_info)
2447 				continue;
2448 
2449 			fprintf(fp, "# Domain		: %u\n", d_info->domain);
2450 
2451 			if (ff->ph->env.schedstat_version >= 17)
2452 				fprintf(fp, "# Domain name      : %s\n", d_info->dname);
2453 
2454 			fprintf(fp, "# Domain cpu map   : %s\n", d_info->cpumask);
2455 			fprintf(fp, "# Domain cpu list  : %s\n", d_info->cpulist);
2456 		}
2457 	}
2458 }
2459 
2460 static int __event_process_build_id(struct perf_record_header_build_id *bev,
2461 				    char *filename,
2462 				    struct perf_session *session)
2463 {
2464 	int err = -1;
2465 	struct machine *machine;
2466 	u16 cpumode;
2467 	struct dso *dso;
2468 	enum dso_space_type dso_space;
2469 
2470 	machine = perf_session__findnew_machine(session, bev->pid);
2471 	if (!machine)
2472 		goto out;
2473 
2474 	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
2475 
2476 	switch (cpumode) {
2477 	case PERF_RECORD_MISC_KERNEL:
2478 		dso_space = DSO_SPACE__KERNEL;
2479 		break;
2480 	case PERF_RECORD_MISC_GUEST_KERNEL:
2481 		dso_space = DSO_SPACE__KERNEL_GUEST;
2482 		break;
2483 	case PERF_RECORD_MISC_USER:
2484 	case PERF_RECORD_MISC_GUEST_USER:
2485 		dso_space = DSO_SPACE__USER;
2486 		break;
2487 	default:
2488 		goto out;
2489 	}
2490 
2491 	dso = machine__findnew_dso(machine, filename);
2492 	if (dso != NULL) {
2493 		char sbuild_id[SBUILD_ID_SIZE];
2494 		struct build_id bid;
2495 		size_t size = BUILD_ID_SIZE;
2496 
2497 		if (bev->header.misc & PERF_RECORD_MISC_BUILD_ID_SIZE)
2498 			size = bev->size;
2499 
2500 		build_id__init(&bid, bev->data, size);
2501 		dso__set_build_id(dso, &bid);
2502 		dso__set_header_build_id(dso, true);
2503 
2504 		if (dso_space != DSO_SPACE__USER) {
2505 			struct kmod_path m = { .name = NULL, };
2506 
2507 			if (!kmod_path__parse_name(&m, filename) && m.kmod)
2508 				dso__set_module_info(dso, &m, machine);
2509 
2510 			dso__set_kernel(dso, dso_space);
2511 			free(m.name);
2512 		}
2513 
2514 		build_id__snprintf(dso__bid(dso), sbuild_id, sizeof(sbuild_id));
2515 		pr_debug("build id event received for %s: %s [%zu]\n",
2516 			 dso__long_name(dso), sbuild_id, size);
2517 		dso__put(dso);
2518 	}
2519 
2520 	err = 0;
2521 out:
2522 	return err;
2523 }
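/*
 * The cpumode -> dso_space mapping above, in table form:
 *
 *   PERF_RECORD_MISC_KERNEL         ->  DSO_SPACE__KERNEL
 *   PERF_RECORD_MISC_GUEST_KERNEL   ->  DSO_SPACE__KERNEL_GUEST
 *   PERF_RECORD_MISC_USER           ->  DSO_SPACE__USER
 *   PERF_RECORD_MISC_GUEST_USER     ->  DSO_SPACE__USER
 *
 * Any other cpumode makes the whole build-id event be rejected.
 */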
2524 
2525 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
2526 						 int input, u64 offset, u64 size)
2527 {
2528 	struct perf_session *session = container_of(header, struct perf_session, header);
2529 	struct {
2530 		struct perf_event_header   header;
2531 		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
2532 		char			   filename[0];
2533 	} old_bev;
2534 	struct perf_record_header_build_id bev;
2535 	char filename[PATH_MAX];
2536 	u64 limit = offset + size;
2537 
2538 	while (offset < limit) {
2539 		ssize_t len;
2540 
2541 		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
2542 			return -1;
2543 
2544 		if (header->needs_swap)
2545 			perf_event_header__bswap(&old_bev.header);
2546 
2547 		len = old_bev.header.size - sizeof(old_bev);
2548 		if (readn(input, filename, len) != len)
2549 			return -1;
2550 
2551 		bev.header = old_bev.header;
2552 
2553 		/*
2554 		 * As the pid is the missing value, we need to fill
2555 		 * it properly. The header.misc value gives us a nice hint.
2556 		 */
2557 		bev.pid	= HOST_KERNEL_ID;
2558 		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
2559 		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
2560 			bev.pid	= DEFAULT_GUEST_KERNEL_ID;
2561 
2562 		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
2563 		__event_process_build_id(&bev, filename, session);
2564 
2565 		offset += bev.header.size;
2566 	}
2567 
2568 	return 0;
2569 }
2570 
2571 static int perf_header__read_build_ids(struct perf_header *header,
2572 				       int input, u64 offset, u64 size)
2573 {
2574 	struct perf_session *session = container_of(header, struct perf_session, header);
2575 	struct perf_record_header_build_id bev;
2576 	char filename[PATH_MAX];
2577 	u64 limit = offset + size, orig_offset = offset;
2578 	int err = -1;
2579 
2580 	while (offset < limit) {
2581 		ssize_t len;
2582 
2583 		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
2584 			goto out;
2585 
2586 		if (header->needs_swap)
2587 			perf_event_header__bswap(&bev.header);
2588 
2589 		len = bev.header.size - sizeof(bev);
2590 		if (readn(input, filename, len) != len)
2591 			goto out;
2592 		/*
2593 		 * The a1645ce1 changeset:
2594 		 *
2595 		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
2596 		 *
2597 		 * Added a field to struct perf_record_header_build_id that broke the file
2598 		 * format.
2599 		 *
2600 		 * Since the kernel build-id is the first entry, process the
2601 		 * table using the old format if the well known
2602 		 * '[kernel.kallsyms]' string for the kernel build-id has the
2603 		 * first 4 characters chopped off (where the pid_t sits).
2604 		 */
2605 		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
2606 			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
2607 				return -1;
2608 			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
2609 		}
2610 
2611 		__event_process_build_id(&bev, filename, session);
2612 
2613 		offset += bev.header.size;
2614 	}
2615 	err = 0;
2616 out:
2617 	return err;
2618 }
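/*
 * Sketch of the quirk detection above: reading an old-format record
 * through the new struct consumes sizeof(pid_t) == 4 extra bytes, so
 * the filename that follows loses its first four characters and the
 * kernel's "[kernel.kallsyms]" arrives as "nel.kallsyms]", which is
 * exactly what the memcmp() keys on before rewinding and reparsing.
 */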
2619 
2620 /* Macro for features that simply need to read and store a string. */
2621 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
2622 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
2623 {\
2624 	free(ff->ph->env.__feat_env);		     \
2625 	ff->ph->env.__feat_env = do_read_string(ff); \
2626 	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
2627 }
2628 
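/*
 * As an example, FEAT_PROCESS_STR_FUN(hostname, hostname) below
 * expands (roughly) to:
 *
 *   static int process_hostname(struct feat_fd *ff, void *data __maybe_unused)
 *   {
 *           free(ff->ph->env.hostname);
 *           ff->ph->env.hostname = do_read_string(ff);
 *           return ff->ph->env.hostname ? 0 : -ENOMEM;
 *   }
 */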
2629 FEAT_PROCESS_STR_FUN(hostname, hostname);
2630 FEAT_PROCESS_STR_FUN(osrelease, os_release);
2631 FEAT_PROCESS_STR_FUN(version, version);
2632 FEAT_PROCESS_STR_FUN(arch, arch);
2633 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
2634 FEAT_PROCESS_STR_FUN(cpuid, cpuid);
2635 
2636 static int process_e_machine(struct feat_fd *ff, void *data __maybe_unused)
2637 {
2638 	int ret;
2639 
2640 	ret = do_read_u32(ff, &ff->ph->env.e_machine);
2641 	if (ret)
2642 		return ret;
2643 
2644 	return do_read_u32(ff, &ff->ph->env.e_flags);
2645 }
2646 
2647 #ifdef HAVE_LIBTRACEEVENT
2648 static int process_tracing_data(struct feat_fd *ff, void *data)
2649 {
2650 	ssize_t ret = trace_report(ff->fd, data, false);
2651 
2652 	return ret < 0 ? -1 : 0;
2653 }
2654 #endif
2655 
2656 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
2657 {
2658 	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
2659 		pr_debug("Failed to read buildids, continuing...\n");
2660 	return 0;
2661 }
2662 
2663 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
2664 {
2665 	struct perf_env *env = &ff->ph->env;
2666 	int ret;
2667 	u32 nr_cpus_avail, nr_cpus_online;
2668 
2669 	ret = do_read_u32(ff, &nr_cpus_avail);
2670 	if (ret)
2671 		return ret;
2672 
2673 	ret = do_read_u32(ff, &nr_cpus_online);
2674 	if (ret)
2675 		return ret;
2676 	env->nr_cpus_avail = (int)nr_cpus_avail;
2677 	env->nr_cpus_online = (int)nr_cpus_online;
2678 	return 0;
2679 }
2680 
2681 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
2682 {
2683 	struct perf_env *env = &ff->ph->env;
2684 	u64 total_mem;
2685 	int ret;
2686 
2687 	ret = do_read_u64(ff, &total_mem);
2688 	if (ret)
2689 		return -1;
2690 	env->total_mem = (unsigned long long)total_mem;
2691 	return 0;
2692 }
2693 
2694 static struct evsel *evlist__find_by_index(struct evlist *evlist, int idx)
2695 {
2696 	struct evsel *evsel;
2697 
2698 	evlist__for_each_entry(evlist, evsel) {
2699 		if (evsel->core.idx == idx)
2700 			return evsel;
2701 	}
2702 
2703 	return NULL;
2704 }
2705 
2706 static void evlist__set_event_name(struct evlist *evlist, struct evsel *event)
2707 {
2708 	struct evsel *evsel;
2709 
2710 	if (!event->name)
2711 		return;
2712 
2713 	evsel = evlist__find_by_index(evlist, event->core.idx);
2714 	if (!evsel)
2715 		return;
2716 
2717 	if (evsel->name)
2718 		return;
2719 
2720 	evsel->name = strdup(event->name);
2721 }
2722 
2723 static int
2724 process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
2725 {
2726 	struct perf_session *session;
2727 	struct evsel *evsel, *events = read_event_desc(ff);
2728 
2729 	if (!events)
2730 		return 0;
2731 
2732 	session = container_of(ff->ph, struct perf_session, header);
2733 
2734 	if (session->data->is_pipe) {
2735 		/* Save events for reading later by print_event_desc,
2736 		 * since they can't be read again in pipe mode. */
2737 		ff->events = events;
2738 	}
2739 
2740 	for (evsel = events; evsel->core.attr.size; evsel++)
2741 		evlist__set_event_name(session->evlist, evsel);
2742 
2743 	if (!session->data->is_pipe)
2744 		free_event_desc(events);
2745 
2746 	return 0;
2747 }
2748 
2749 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
2750 {
2751 	struct perf_env *env = &ff->ph->env;
2752 	char *str, *cmdline = NULL, **argv = NULL;
2753 	u32 nr, i, len = 0;
2754 
2755 	if (do_read_u32(ff, &nr))
2756 		return -1;
2757 
2758 	env->nr_cmdline = nr;
2759 
2760 	cmdline = zalloc(ff->size + nr + 1);
2761 	if (!cmdline)
2762 		return -1;
2763 
2764 	argv = zalloc(sizeof(char *) * (nr + 1));
2765 	if (!argv)
2766 		goto error;
2767 
2768 	for (i = 0; i < nr; i++) {
2769 		str = do_read_string(ff);
2770 		if (!str)
2771 			goto error;
2772 
2773 		argv[i] = cmdline + len;
2774 		memcpy(argv[i], str, strlen(str) + 1);
2775 		len += strlen(str) + 1;
2776 		free(str);
2777 	}
2778 	env->cmdline = cmdline;
2779 	env->cmdline_argv = (const char **) argv;
2780 	return 0;
2781 
2782 error:
2783 	free(argv);
2784 	free(cmdline);
2785 	return -1;
2786 }
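/*
 * Illustrative layout of the result, assuming the recorded command
 * line was "perf record -a": the strings sit back to back in one
 * allocation,
 *
 *   cmdline:  "perf\0record\0-a\0"
 *   argv[0] = cmdline + 0     "perf"
 *   argv[1] = cmdline + 5     "record"
 *   argv[2] = cmdline + 12    "-a"
 *
 * so cmdline_argv[] never owns the strings it points at.
 */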
2787 
2788 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
2789 {
2790 	u32 nr, i;
2791 	char *str = NULL;
2792 	struct strbuf sb;
2793 	struct perf_env *env = &ff->ph->env;
2794 	int cpu_nr = env->nr_cpus_avail;
2795 	u64 size = 0;
2796 
2797 	env->cpu = calloc(cpu_nr, sizeof(*env->cpu));
2798 	if (!env->cpu)
2799 		return -1;
2800 
2801 	if (do_read_u32(ff, &nr))
2802 		goto free_cpu;
2803 
2804 	env->nr_sibling_cores = nr;
2805 	size += sizeof(u32);
2806 	if (strbuf_init(&sb, 128) < 0)
2807 		goto free_cpu;
2808 
2809 	for (i = 0; i < nr; i++) {
2810 		str = do_read_string(ff);
2811 		if (!str)
2812 			goto error;
2813 
2814 		/* include a NULL character at the end */
2815 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2816 			goto error;
2817 		size += string_size(str);
2818 		zfree(&str);
2819 	}
2820 	env->sibling_cores = strbuf_detach(&sb, NULL);
2821 
2822 	if (do_read_u32(ff, &nr))
2823 		return -1;
2824 
2825 	env->nr_sibling_threads = nr;
2826 	size += sizeof(u32);
2827 
2828 	for (i = 0; i < nr; i++) {
2829 		str = do_read_string(ff);
2830 		if (!str)
2831 			goto error;
2832 
2833 		/* include a NULL character at the end */
2834 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2835 			goto error;
2836 		size += string_size(str);
2837 		zfree(&str);
2838 	}
2839 	env->sibling_threads = strbuf_detach(&sb, NULL);
2840 
2841 	/*
2842 	 * The header may be from old perf,
2843 	 * which doesn't include core id and socket id information.
2844 	 */
2845 	if (ff->size <= size) {
2846 		zfree(&env->cpu);
2847 		return 0;
2848 	}
2849 
2850 	for (i = 0; i < (u32)cpu_nr; i++) {
2851 		if (do_read_u32(ff, &nr))
2852 			goto free_cpu;
2853 
2854 		env->cpu[i].core_id = nr;
2855 		size += sizeof(u32);
2856 
2857 		if (do_read_u32(ff, &nr))
2858 			goto free_cpu;
2859 
2860 		env->cpu[i].socket_id = nr;
2861 		size += sizeof(u32);
2862 	}
2863 
2864 	/*
2865 	 * The header may be from old perf,
2866 	 * which doesn't include die information.
2867 	 */
2868 	if (ff->size <= size)
2869 		return 0;
2870 
2871 	if (do_read_u32(ff, &nr))
2872 		return -1;
2873 
2874 	env->nr_sibling_dies = nr;
2875 	size += sizeof(u32);
2876 
2877 	for (i = 0; i < nr; i++) {
2878 		str = do_read_string(ff);
2879 		if (!str)
2880 			goto error;
2881 
2882 		/* include a NULL character at the end */
2883 		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
2884 			goto error;
2885 		size += string_size(str);
2886 		zfree(&str);
2887 	}
2888 	env->sibling_dies = strbuf_detach(&sb, NULL);
2889 
2890 	for (i = 0; i < (u32)cpu_nr; i++) {
2891 		if (do_read_u32(ff, &nr))
2892 			goto free_cpu;
2893 
2894 		env->cpu[i].die_id = nr;
2895 	}
2896 
2897 	return 0;
2898 
2899 error:
2900 	strbuf_release(&sb);
2901 	zfree(&str);
2902 free_cpu:
2903 	zfree(&env->cpu);
2904 	return -1;
2905 }
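/*
 * The CPU_TOPOLOGY section grew over time, which is why the reader
 * above tracks a running 'size'. A sketch of the three on-file
 * generations:
 *
 *   v1: core sibling lists + thread sibling lists
 *   v2: v1 + per-CPU (core_id, socket_id) pairs
 *   v3: v2 + die sibling lists + per-CPU die_id
 *
 * 'ff->size <= size' after a stage means the file predates the next
 * addition and the remaining fields simply are not there.
 */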
2906 
2907 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
2908 {
2909 	struct perf_env *env = &ff->ph->env;
2910 	struct numa_node *nodes, *n;
2911 	u32 nr, i;
2912 	char *str;
2913 
2914 	/* nr nodes */
2915 	if (do_read_u32(ff, &nr))
2916 		return -1;
2917 
2918 	nodes = zalloc(sizeof(*nodes) * nr);
2919 	if (!nodes)
2920 		return -ENOMEM;
2921 
2922 	for (i = 0; i < nr; i++) {
2923 		n = &nodes[i];
2924 
2925 		/* node number */
2926 		if (do_read_u32(ff, &n->node))
2927 			goto error;
2928 
2929 		if (do_read_u64(ff, &n->mem_total))
2930 			goto error;
2931 
2932 		if (do_read_u64(ff, &n->mem_free))
2933 			goto error;
2934 
2935 		str = do_read_string(ff);
2936 		if (!str)
2937 			goto error;
2938 
2939 		n->map = perf_cpu_map__new(str);
2940 		free(str);
2941 		if (!n->map)
2942 			goto error;
2943 	}
2944 	env->nr_numa_nodes = nr;
2945 	env->numa_nodes = nodes;
2946 	return 0;
2947 
2948 error:
2949 	free(nodes);
2950 	return -1;
2951 }
2952 
2953 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
2954 {
2955 	struct perf_env *env = &ff->ph->env;
2956 	char *name;
2957 	u32 pmu_num;
2958 	u32 type;
2959 	struct strbuf sb;
2960 
2961 	if (do_read_u32(ff, &pmu_num))
2962 		return -1;
2963 
2964 	if (!pmu_num) {
2965 		pr_debug("pmu mappings not available\n");
2966 		return 0;
2967 	}
2968 
2969 	env->nr_pmu_mappings = pmu_num;
2970 	if (strbuf_init(&sb, 128) < 0)
2971 		return -1;
2972 
2973 	while (pmu_num) {
2974 		if (do_read_u32(ff, &type))
2975 			goto error;
2976 
2977 		name = do_read_string(ff);
2978 		if (!name)
2979 			goto error;
2980 
2981 		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
2982 			goto error;
2983 		/* include a NULL character at the end */
2984 		if (strbuf_add(&sb, "", 1) < 0)
2985 			goto error;
2986 
2987 		if (!strcmp(name, "msr"))
2988 			env->msr_pmu_type = type;
2989 
2990 		free(name);
2991 		pmu_num--;
2992 	}
2993 	/* AMD may set it by evlist__has_amd_ibs() from perf_session__new() */
2994 	free(env->pmu_mappings);
2995 	env->pmu_mappings = strbuf_detach(&sb, NULL);
2996 	return 0;
2997 
2998 error:
2999 	strbuf_release(&sb);
3000 	return -1;
3001 }
3002 
3003 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
3004 {
3005 	struct perf_env *env = &ff->ph->env;
3006 	int ret = -1;
3007 	u32 i, nr, nr_groups;
3008 	struct perf_session *session;
3009 	struct evsel *evsel, *leader = NULL;
3010 	struct group_desc {
3011 		char *name;
3012 		u32 leader_idx;
3013 		u32 nr_members;
3014 	} *desc;
3015 
3016 	if (do_read_u32(ff, &nr_groups))
3017 		return -1;
3018 
3019 	env->nr_groups = nr_groups;
3020 	if (!nr_groups) {
3021 		pr_debug("group desc not available\n");
3022 		return 0;
3023 	}
3024 
3025 	desc = calloc(nr_groups, sizeof(*desc));
3026 	if (!desc)
3027 		return -1;
3028 
3029 	for (i = 0; i < nr_groups; i++) {
3030 		desc[i].name = do_read_string(ff);
3031 		if (!desc[i].name)
3032 			goto out_free;
3033 
3034 		if (do_read_u32(ff, &desc[i].leader_idx))
3035 			goto out_free;
3036 
3037 		if (do_read_u32(ff, &desc[i].nr_members))
3038 			goto out_free;
3039 	}
3040 
3041 	/*
3042 	 * Rebuild group relationship based on the group_desc
3043 	 */
3044 	session = container_of(ff->ph, struct perf_session, header);
3045 
3046 	i = nr = 0;
3047 	evlist__for_each_entry(session->evlist, evsel) {
3048 		if (i < nr_groups && evsel->core.idx == (int) desc[i].leader_idx) {
3049 			evsel__set_leader(evsel, evsel);
3050 			/* {anon_group} is a dummy name */
3051 			if (strcmp(desc[i].name, "{anon_group}")) {
3052 				evsel->group_name = desc[i].name;
3053 				desc[i].name = NULL;
3054 			}
3055 			evsel->core.nr_members = desc[i].nr_members;
3056 
3057 			if (i >= nr_groups || nr > 0) {
3058 				pr_debug("invalid group desc\n");
3059 				goto out_free;
3060 			}
3061 
3062 			leader = evsel;
3063 			nr = evsel->core.nr_members - 1;
3064 			i++;
3065 		} else if (nr) {
3066 			/* This is a group member */
3067 			evsel__set_leader(evsel, leader);
3068 
3069 			nr--;
3070 		}
3071 	}
3072 
3073 	if (i != nr_groups || nr != 0) {
3074 		pr_debug("invalid group desc\n");
3075 		goto out_free;
3076 	}
3077 
3078 	ret = 0;
3079 out_free:
3080 	for (i = 0; i < nr_groups; i++)
3081 		zfree(&desc[i].name);
3082 	free(desc);
3083 
3084 	return ret;
3085 }
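/*
 * Worked example of the rebuild above, assuming a session recorded
 * with one group of two events, '{cycles,instructions}':
 *
 *   desc[0] = { .name = "{anon_group}", .leader_idx = 0, .nr_members = 2 }
 *
 * The evsel at idx 0 becomes its own leader (the dummy "{anon_group}"
 * name is discarded), 'nr' starts at 1 and the following evsel is
 * attached to that leader; both counters must end at zero.
 */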
3086 
3087 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
3088 {
3089 	struct perf_session *session;
3090 	int err;
3091 
3092 	session = container_of(ff->ph, struct perf_session, header);
3093 
3094 	err = auxtrace_index__process(ff->fd, ff->size, session,
3095 				      ff->ph->needs_swap);
3096 	if (err < 0)
3097 		pr_err("Failed to process auxtrace index\n");
3098 	return err;
3099 }
3100 
3101 static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
3102 {
3103 	struct perf_env *env = &ff->ph->env;
3104 	struct cpu_cache_level *caches;
3105 	u32 cnt, i, version;
3106 
3107 	if (do_read_u32(ff, &version))
3108 		return -1;
3109 
3110 	if (version != 1)
3111 		return -1;
3112 
3113 	if (do_read_u32(ff, &cnt))
3114 		return -1;
3115 
3116 	caches = zalloc(sizeof(*caches) * cnt);
3117 	if (!caches)
3118 		return -1;
3119 
3120 	for (i = 0; i < cnt; i++) {
3121 		struct cpu_cache_level *c = &caches[i];
3122 
3123 		#define _R(v)						\
3124 			if (do_read_u32(ff, &c->v))			\
3125 				goto out_free_caches;			\
3126 
3127 		_R(level)
3128 		_R(line_size)
3129 		_R(sets)
3130 		_R(ways)
3131 		#undef _R
3132 
3133 		#define _R(v)					\
3134 			c->v = do_read_string(ff);		\
3135 			if (!c->v)				\
3136 				goto out_free_caches;		\
3137 
3138 		_R(type)
3139 		_R(size)
3140 		_R(map)
3141 		#undef _R
3142 	}
3143 
3144 	env->caches = caches;
3145 	env->caches_cnt = cnt;
3146 	return 0;
3147 out_free_caches:
3148 	for (i = 0; i < cnt; i++) {
3149 		free(caches[i].type);
3150 		free(caches[i].size);
3151 		free(caches[i].map);
3152 	}
3153 	free(caches);
3154 	return -1;
3155 }
3156 
3157 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
3158 {
3159 	struct perf_session *session;
3160 	u64 first_sample_time, last_sample_time;
3161 	int ret;
3162 
3163 	session = container_of(ff->ph, struct perf_session, header);
3164 
3165 	ret = do_read_u64(ff, &first_sample_time);
3166 	if (ret)
3167 		return -1;
3168 
3169 	ret = do_read_u64(ff, &last_sample_time);
3170 	if (ret)
3171 		return -1;
3172 
3173 	session->evlist->first_sample_time = first_sample_time;
3174 	session->evlist->last_sample_time = last_sample_time;
3175 	return 0;
3176 }
3177 
3178 static int process_mem_topology(struct feat_fd *ff,
3179 				void *data __maybe_unused)
3180 {
3181 	struct perf_env *env = &ff->ph->env;
3182 	struct memory_node *nodes;
3183 	u64 version, i, nr, bsize;
3184 	int ret = -1;
3185 
3186 	if (do_read_u64(ff, &version))
3187 		return -1;
3188 
3189 	if (version != 1)
3190 		return -1;
3191 
3192 	if (do_read_u64(ff, &bsize))
3193 		return -1;
3194 
3195 	if (do_read_u64(ff, &nr))
3196 		return -1;
3197 
3198 	nodes = zalloc(sizeof(*nodes) * nr);
3199 	if (!nodes)
3200 		return -1;
3201 
3202 	for (i = 0; i < nr; i++) {
3203 		struct memory_node n;
3204 
3205 		#define _R(v)				\
3206 			if (do_read_u64(ff, &n.v))	\
3207 				goto out;		\
3208 
3209 		_R(node)
3210 		_R(size)
3211 
3212 		#undef _R
3213 
3214 		if (do_read_bitmap(ff, &n.set, &n.size))
3215 			goto out;
3216 
3217 		nodes[i] = n;
3218 	}
3219 
3220 	env->memory_bsize    = bsize;
3221 	env->memory_nodes    = nodes;
3222 	env->nr_memory_nodes = nr;
3223 	ret = 0;
3224 
3225 out:
3226 	if (ret)
3227 		free(nodes);
3228 	return ret;
3229 }
3230 
3231 static int process_clockid(struct feat_fd *ff,
3232 			   void *data __maybe_unused)
3233 {
3234 	struct perf_env *env = &ff->ph->env;
3235 
3236 	if (do_read_u64(ff, &env->clock.clockid_res_ns))
3237 		return -1;
3238 
3239 	return 0;
3240 }
3241 
3242 static int process_clock_data(struct feat_fd *ff,
3243 			      void *_data __maybe_unused)
3244 {
3245 	struct perf_env *env = &ff->ph->env;
3246 	u32 data32;
3247 	u64 data64;
3248 
3249 	/* version */
3250 	if (do_read_u32(ff, &data32))
3251 		return -1;
3252 
3253 	if (data32 != 1)
3254 		return -1;
3255 
3256 	/* clockid */
3257 	if (do_read_u32(ff, &data32))
3258 		return -1;
3259 
3260 	env->clock.clockid = data32;
3261 
3262 	/* TOD ref time */
3263 	if (do_read_u64(ff, &data64))
3264 		return -1;
3265 
3266 	env->clock.tod_ns = data64;
3267 
3268 	/* clockid ref time */
3269 	if (do_read_u64(ff, &data64))
3270 		return -1;
3271 
3272 	env->clock.clockid_ns = data64;
3273 	env->clock.enabled = true;
3274 	return 0;
3275 }
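/*
 * On-file CLOCK_DATA layout read above:
 *
 *   u32 version;      currently 1
 *   u32 clockid;      e.g. CLOCK_MONOTONIC
 *   u64 tod_ns;       wall clock at the reference instant, in ns
 *   u64 clockid_ns;   the recording clock at the same instant, in ns
 *
 * print_clock_data() earlier in this file turns the last two into the
 * "# reference time" line.
 */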
3276 
3277 static int process_hybrid_topology(struct feat_fd *ff,
3278 				   void *data __maybe_unused)
3279 {
3280 	struct perf_env *env = &ff->ph->env;
3281 	struct hybrid_node *nodes, *n;
3282 	u32 nr, i;
3283 
3284 	/* nr nodes */
3285 	if (do_read_u32(ff, &nr))
3286 		return -1;
3287 
3288 	nodes = zalloc(sizeof(*nodes) * nr);
3289 	if (!nodes)
3290 		return -ENOMEM;
3291 
3292 	for (i = 0; i < nr; i++) {
3293 		n = &nodes[i];
3294 
3295 		n->pmu_name = do_read_string(ff);
3296 		if (!n->pmu_name)
3297 			goto error;
3298 
3299 		n->cpus = do_read_string(ff);
3300 		if (!n->cpus)
3301 			goto error;
3302 	}
3303 
3304 	env->nr_hybrid_nodes = nr;
3305 	env->hybrid_nodes = nodes;
3306 	return 0;
3307 
3308 error:
3309 	for (i = 0; i < nr; i++) {
3310 		free(nodes[i].pmu_name);
3311 		free(nodes[i].cpus);
3312 	}
3313 
3314 	free(nodes);
3315 	return -1;
3316 }
3317 
3318 static int process_dir_format(struct feat_fd *ff,
3319 			      void *_data __maybe_unused)
3320 {
3321 	struct perf_session *session;
3322 	struct perf_data *data;
3323 
3324 	session = container_of(ff->ph, struct perf_session, header);
3325 	data = session->data;
3326 
3327 	if (WARN_ON(!perf_data__is_dir(data)))
3328 		return -1;
3329 
3330 	return do_read_u64(ff, &data->dir.version);
3331 }
3332 
3333 #ifdef HAVE_LIBBPF_SUPPORT
3334 static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
3335 {
3336 	struct bpf_prog_info_node *info_node;
3337 	struct perf_env *env = &ff->ph->env;
3338 	struct perf_bpil *info_linear;
3339 	u32 count, i;
3340 	int err = -1;
3341 
3342 	if (ff->ph->needs_swap) {
3343 		pr_warning("interpreting bpf_prog_info from systems with a different endianness is not yet supported\n");
3344 		return 0;
3345 	}
3346 
3347 	if (do_read_u32(ff, &count))
3348 		return -1;
3349 
3350 	down_write(&env->bpf_progs.lock);
3351 
3352 	for (i = 0; i < count; ++i) {
3353 		u32 info_len, data_len;
3354 
3355 		info_linear = NULL;
3356 		info_node = NULL;
3357 		if (do_read_u32(ff, &info_len))
3358 			goto out;
3359 		if (do_read_u32(ff, &data_len))
3360 			goto out;
3361 
3362 		if (info_len > sizeof(struct bpf_prog_info)) {
3363 			pr_warning("detected invalid bpf_prog_info\n");
3364 			goto out;
3365 		}
3366 
3367 		info_linear = malloc(sizeof(struct perf_bpil) +
3368 				     data_len);
3369 		if (!info_linear)
3370 			goto out;
3371 		info_linear->info_len = sizeof(struct bpf_prog_info);
3372 		info_linear->data_len = data_len;
3373 		if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
3374 			goto out;
3375 		if (__do_read(ff, &info_linear->info, info_len))
3376 			goto out;
3377 		if (info_len < sizeof(struct bpf_prog_info))
3378 			memset(((void *)(&info_linear->info)) + info_len, 0,
3379 			       sizeof(struct bpf_prog_info) - info_len);
3380 
3381 		if (__do_read(ff, info_linear->data, data_len))
3382 			goto out;
3383 
3384 		info_node = malloc(sizeof(struct bpf_prog_info_node));
3385 		if (!info_node)
3386 			goto out;
3387 
3388 		/* after reading from file, translate offset to address */
3389 		bpil_offs_to_addr(info_linear);
3390 		info_node->info_linear = info_linear;
3391 		info_node->metadata = NULL;
3392 		if (!__perf_env__insert_bpf_prog_info(env, info_node)) {
3393 			free(info_linear);
3394 			free(info_node);
3395 		}
3396 	}
3397 
3398 	up_write(&env->bpf_progs.lock);
3399 	return 0;
3400 out:
3401 	free(info_linear);
3402 	free(info_node);
3403 	up_write(&env->bpf_progs.lock);
3404 	return err;
3405 }
3406 
3407 static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
3408 {
3409 	struct perf_env *env = &ff->ph->env;
3410 	struct btf_node *node = NULL;
3411 	u32 count, i;
3412 	int err = -1;
3413 
3414 	if (ff->ph->needs_swap) {
3415 		pr_warning("interpreting btf from systems with a different endianness is not yet supported\n");
3416 		return 0;
3417 	}
3418 
3419 	if (do_read_u32(ff, &count))
3420 		return -1;
3421 
3422 	down_write(&env->bpf_progs.lock);
3423 
3424 	for (i = 0; i < count; ++i) {
3425 		u32 id, data_size;
3426 
3427 		if (do_read_u32(ff, &id))
3428 			goto out;
3429 		if (do_read_u32(ff, &data_size))
3430 			goto out;
3431 
3432 		node = malloc(sizeof(struct btf_node) + data_size);
3433 		if (!node)
3434 			goto out;
3435 
3436 		node->id = id;
3437 		node->data_size = data_size;
3438 
3439 		if (__do_read(ff, node->data, data_size))
3440 			goto out;
3441 
3442 		if (!__perf_env__insert_btf(env, node))
3443 			free(node);
3444 		node = NULL;
3445 	}
3446 
3447 	err = 0;
3448 out:
3449 	up_write(&env->bpf_progs.lock);
3450 	free(node);
3451 	return err;
3452 }
3453 #endif // HAVE_LIBBPF_SUPPORT
3454 
3455 static int process_compressed(struct feat_fd *ff,
3456 			      void *data __maybe_unused)
3457 {
3458 	struct perf_env *env = &ff->ph->env;
3459 
3460 	if (do_read_u32(ff, &(env->comp_ver)))
3461 		return -1;
3462 
3463 	if (do_read_u32(ff, &(env->comp_type)))
3464 		return -1;
3465 
3466 	if (do_read_u32(ff, &(env->comp_level)))
3467 		return -1;
3468 
3469 	if (do_read_u32(ff, &(env->comp_ratio)))
3470 		return -1;
3471 
3472 	if (do_read_u32(ff, &(env->comp_mmap_len)))
3473 		return -1;
3474 
3475 	return 0;
3476 }
3477 
3478 static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps,
3479 			      char ***caps, unsigned int *max_branches,
3480 			      unsigned int *br_cntr_nr,
3481 			      unsigned int *br_cntr_width)
3482 {
3483 	char *name, *value, *ptr;
3484 	u32 nr_pmu_caps, i;
3485 
3486 	*nr_caps = 0;
3487 	*caps = NULL;
3488 
3489 	if (do_read_u32(ff, &nr_pmu_caps))
3490 		return -1;
3491 
3492 	if (!nr_pmu_caps)
3493 		return 0;
3494 
3495 	*caps = zalloc(sizeof(char *) * nr_pmu_caps);
3496 	if (!*caps)
3497 		return -1;
3498 
3499 	for (i = 0; i < nr_pmu_caps; i++) {
3500 		name = do_read_string(ff);
3501 		if (!name)
3502 			goto error;
3503 
3504 		value = do_read_string(ff);
3505 		if (!value)
3506 			goto free_name;
3507 
3508 		if (asprintf(&ptr, "%s=%s", name, value) < 0)
3509 			goto free_value;
3510 
3511 		(*caps)[i] = ptr;
3512 
3513 		if (!strcmp(name, "branches"))
3514 			*max_branches = atoi(value);
3515 
3516 		if (!strcmp(name, "branch_counter_nr"))
3517 			*br_cntr_nr = atoi(value);
3518 
3519 		if (!strcmp(name, "branch_counter_width"))
3520 			*br_cntr_width = atoi(value);
3521 
3522 		free(value);
3523 		free(name);
3524 	}
3525 	*nr_caps = nr_pmu_caps;
3526 	return 0;
3527 
3528 free_value:
3529 	free(value);
3530 free_name:
3531 	free(name);
3532 error:
3533 	for (; i > 0; i--)
3534 		free((*caps)[i - 1]);
3535 	free(*caps);
3536 	*caps = NULL;
3537 	*nr_caps = 0;
3538 	return -1;
3539 }
3540 
3541 static int process_cpu_pmu_caps(struct feat_fd *ff,
3542 				void *data __maybe_unused)
3543 {
3544 	struct perf_env *env = &ff->ph->env;
3545 	int ret = __process_pmu_caps(ff, &env->nr_cpu_pmu_caps,
3546 				     &env->cpu_pmu_caps,
3547 				     &env->max_branches,
3548 				     &env->br_cntr_nr,
3549 				     &env->br_cntr_width);
3550 
3551 	if (!ret && !env->cpu_pmu_caps)
3552 		pr_debug("cpu pmu capabilities not available\n");
3553 	return ret;
3554 }
3555 
3556 static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused)
3557 {
3558 	struct perf_env *env = &ff->ph->env;
3559 	struct pmu_caps *pmu_caps;
3560 	u32 nr_pmu, i;
3561 	int ret;
3562 	int j;
3563 
3564 	if (do_read_u32(ff, &nr_pmu))
3565 		return -1;
3566 
3567 	if (!nr_pmu) {
3568 		pr_debug("pmu capabilities not available\n");
3569 		return 0;
3570 	}
3571 
3572 	pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu);
3573 	if (!pmu_caps)
3574 		return -ENOMEM;
3575 
3576 	for (i = 0; i < nr_pmu; i++) {
3577 		ret = __process_pmu_caps(ff, &pmu_caps[i].nr_caps,
3578 					 &pmu_caps[i].caps,
3579 					 &pmu_caps[i].max_branches,
3580 					 &pmu_caps[i].br_cntr_nr,
3581 					 &pmu_caps[i].br_cntr_width);
3582 		if (ret)
3583 			goto err;
3584 
3585 		pmu_caps[i].pmu_name = do_read_string(ff);
3586 		if (!pmu_caps[i].pmu_name) {
3587 			ret = -1;
3588 			goto err;
3589 		}
3590 		if (!pmu_caps[i].nr_caps) {
3591 			pr_debug("%s pmu capabilities not available\n",
3592 				 pmu_caps[i].pmu_name);
3593 		}
3594 	}
3595 
3596 	env->nr_pmus_with_caps = nr_pmu;
3597 	env->pmu_caps = pmu_caps;
3598 	return 0;
3599 
3600 err:
3601 	for (i = 0; i < nr_pmu; i++) {
3602 		for (j = 0; j < pmu_caps[i].nr_caps; j++)
3603 			free(pmu_caps[i].caps[j]);
3604 		free(pmu_caps[i].caps);
3605 		free(pmu_caps[i].pmu_name);
3606 	}
3607 
3608 	free(pmu_caps);
3609 	return ret;
3610 }
3611 
3612 static int process_cpu_domain_info(struct feat_fd *ff, void *data __maybe_unused)
3613 {
3614 	u32 schedstat_version, max_sched_domains, cpu, domain, nr_domains;
3615 	struct perf_env *env = &ff->ph->env;
3616 	char *dname, *cpumask, *cpulist;
3617 	struct cpu_domain_map **cd_map;
3618 	struct domain_info *d_info;
3619 	u32 nra, nr, i, j;
3620 	int ret;
3621 
3622 	nra = env->nr_cpus_avail;
3623 	nr = env->nr_cpus_online;
3624 
3625 	cd_map = zalloc(sizeof(*cd_map) * nra);
3626 	if (!cd_map)
3627 		return -1;
3628 
3629 	env->cpu_domain = cd_map;
3630 
3631 	ret = do_read_u32(ff, &schedstat_version);
3632 	if (ret)
3633 		return ret;
3634 
3635 	env->schedstat_version = schedstat_version;
3636 
3637 	ret = do_read_u32(ff, &max_sched_domains);
3638 	if (ret)
3639 		return ret;
3640 
3641 	env->max_sched_domains = max_sched_domains;
3642 
3643 	for (i = 0; i < nr; i++) {
3644 		if (do_read_u32(ff, &cpu))
3645 			return -1;
3646 
3647 		cd_map[cpu] = zalloc(sizeof(*cd_map[cpu]));
3648 		if (!cd_map[cpu])
3649 			return -1;
3650 
3651 		cd_map[cpu]->cpu = cpu;
3652 
3653 		if (do_read_u32(ff, &nr_domains))
3654 			return -1;
3655 
3656 		cd_map[cpu]->nr_domains = nr_domains;
3657 
3658 		cd_map[cpu]->domains = zalloc(sizeof(*d_info) * max_sched_domains);
3659 		if (!cd_map[cpu]->domains)
3660 			return -1;
3661 
3662 		for (j = 0; j < nr_domains; j++) {
3663 			if (do_read_u32(ff, &domain))
3664 				return -1;
3665 
3666 			d_info = zalloc(sizeof(*d_info));
3667 			if (!d_info)
3668 				return -1;
3669 
3670 			assert(cd_map[cpu]->domains[domain] == NULL);
3671 			cd_map[cpu]->domains[domain] = d_info;
3672 			d_info->domain = domain;
3673 
3674 			if (schedstat_version >= 17) {
3675 				dname = do_read_string(ff);
3676 				if (!dname)
3677 					return -1;
3678 
3679 				d_info->dname = dname;
3680 			}
3681 
3682 			cpumask = do_read_string(ff);
3683 			if (!cpumask)
3684 				return -1;
3685 
3686 			d_info->cpumask = cpumask;
3687 
3688 			cpulist = do_read_string(ff);
3689 			if (!cpulist)
3690 				return -1;
3691 
3692 			d_info->cpulist = cpulist;
3693 		}
3694 	}
3695 
3696 	return ret;
3697 }
3698 
3699 #define FEAT_OPR(n, func, __full_only) \
3700 	[HEADER_##n] = {					\
3701 		.name	    = __stringify(n),			\
3702 		.write	    = write_##func,			\
3703 		.print	    = print_##func,			\
3704 		.full_only  = __full_only,			\
3705 		.process    = process_##func,			\
3706 		.synthesize = true				\
3707 	}
3708 
3709 #define FEAT_OPN(n, func, __full_only) \
3710 	[HEADER_##n] = {					\
3711 		.name	    = __stringify(n),			\
3712 		.write	    = write_##func,			\
3713 		.print	    = print_##func,			\
3714 		.full_only  = __full_only,			\
3715 		.process    = process_##func			\
3716 	}
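/*
 * As an example, FEAT_OPR(HOSTNAME, hostname, false) in the table
 * below expands (roughly) to:
 *
 *   [HEADER_HOSTNAME] = {
 *           .name       = "HOSTNAME",
 *           .write      = write_hostname,
 *           .print      = print_hostname,
 *           .full_only  = false,
 *           .process    = process_hostname,
 *           .synthesize = true
 *   },
 *
 * FEAT_OPN() is identical except that it leaves .synthesize unset,
 * which the synthesizing code in util/synthetic-events.c checks.
 */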
3717 
3718 /* feature_ops not implemented: */
3719 #define print_tracing_data	NULL
3720 #define print_build_id		NULL
3721 
3722 #define process_branch_stack	NULL
3723 #define process_stat		NULL
3724 
3725 // Only used in util/synthetic-events.c
3726 const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
3727 
3728 const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
3729 #ifdef HAVE_LIBTRACEEVENT
3730 	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
3731 #endif
3732 	FEAT_OPN(BUILD_ID,	build_id,	false),
3733 	FEAT_OPR(HOSTNAME,	hostname,	false),
3734 	FEAT_OPR(OSRELEASE,	osrelease,	false),
3735 	FEAT_OPR(VERSION,	version,	false),
3736 	FEAT_OPR(ARCH,		arch,		false),
3737 	FEAT_OPR(NRCPUS,	nrcpus,		false),
3738 	FEAT_OPR(CPUDESC,	cpudesc,	false),
3739 	FEAT_OPR(CPUID,		cpuid,		false),
3740 	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
3741 	FEAT_OPR(EVENT_DESC,	event_desc,	false),
3742 	FEAT_OPR(CMDLINE,	cmdline,	false),
3743 	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
3744 	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
3745 	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
3746 	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
3747 	FEAT_OPR(GROUP_DESC,	group_desc,	false),
3748 	FEAT_OPN(AUXTRACE,	auxtrace,	false),
3749 	FEAT_OPN(STAT,		stat,		false),
3750 	FEAT_OPN(CACHE,		cache,		true),
3751 	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
3752 	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
3753 	FEAT_OPR(CLOCKID,	clockid,	false),
3754 	FEAT_OPN(DIR_FORMAT,	dir_format,	false),
3755 #ifdef HAVE_LIBBPF_SUPPORT
3756 	FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,  false),
3757 	FEAT_OPR(BPF_BTF,       bpf_btf,        false),
3758 #endif
3759 	FEAT_OPR(COMPRESSED,	compressed,	false),
3760 	FEAT_OPR(CPU_PMU_CAPS,	cpu_pmu_caps,	false),
3761 	FEAT_OPR(CLOCK_DATA,	clock_data,	false),
3762 	FEAT_OPN(HYBRID_TOPOLOGY,	hybrid_topology,	true),
3763 	FEAT_OPR(PMU_CAPS,	pmu_caps,	false),
3764 	FEAT_OPR(CPU_DOMAIN_INFO,	cpu_domain_info,	true),
3765 	FEAT_OPR(E_MACHINE,	e_machine,	false),
3766 };
3767 
3768 struct header_print_data {
3769 	FILE *fp;
3770 	bool full; /* extended list of headers */
3771 };
3772 
3773 static int perf_file_section__fprintf_info(struct perf_file_section *section,
3774 					   struct perf_header *ph,
3775 					   int feat, int fd, void *data)
3776 {
3777 	struct header_print_data *hd = data;
3778 	struct feat_fd ff;
3779 
3780 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
3781 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
3782 				"%d, continuing...\n", section->offset, feat);
3783 		return 0;
3784 	}
3785 	if (feat >= HEADER_LAST_FEATURE) {
3786 		pr_warning("unknown feature %d\n", feat);
3787 		return 0;
3788 	}
3789 	if (!feat_ops[feat].print)
3790 		return 0;
3791 
3792 	ff = (struct  feat_fd) {
3793 		.fd = fd,
3794 		.ph = ph,
3795 	};
3796 
3797 	if (!feat_ops[feat].full_only || hd->full)
3798 		feat_ops[feat].print(&ff, hd->fp);
3799 	else
3800 		fprintf(hd->fp, "# %s info available, use -I to display\n",
3801 			feat_ops[feat].name);
3802 
3803 	return 0;
3804 }
3805 
3806 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
3807 {
3808 	struct header_print_data hd;
3809 	struct perf_header *header = &session->header;
3810 	int fd = perf_data__fd(session->data);
3811 	struct stat st;
3812 	time_t stctime;
3813 	int ret, bit;
3814 
3815 	hd.fp = fp;
3816 	hd.full = full;
3817 
3818 	ret = fstat(fd, &st);
3819 	if (ret == -1)
3820 		return -1;
3821 
3822 	stctime = st.st_mtime;
3823 	fprintf(fp, "# captured on    : %s", ctime(&stctime));
3824 
3825 	fprintf(fp, "# header version : %u\n", header->version);
3826 	fprintf(fp, "# data offset    : %" PRIu64 "\n", header->data_offset);
3827 	fprintf(fp, "# data size      : %" PRIu64 "\n", header->data_size);
3828 	fprintf(fp, "# feat offset    : %" PRIu64 "\n", header->feat_offset);
3829 
3830 	perf_header__process_sections(header, fd, &hd,
3831 				      perf_file_section__fprintf_info);
3832 
3833 	if (session->data->is_pipe)
3834 		return 0;
3835 
3836 	fprintf(fp, "# missing features: ");
3837 	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
3838 		if (bit)
3839 			fprintf(fp, "%s ", feat_ops[bit].name);
3840 	}
3841 
3842 	fprintf(fp, "\n");
3843 	return 0;
3844 }
3845 
3846 struct header_fw {
3847 	struct feat_writer	fw;
3848 	struct feat_fd		*ff;
3849 };
3850 
3851 static int feat_writer_cb(struct feat_writer *fw, void *buf, size_t sz)
3852 {
3853 	struct header_fw *h = container_of(fw, struct header_fw, fw);
3854 
3855 	return do_write(h->ff, buf, sz);
3856 }
3857 
3858 static int do_write_feat(struct feat_fd *ff, int type,
3859 			 struct perf_file_section **p,
3860 			 struct evlist *evlist,
3861 			 struct feat_copier *fc)
3862 {
3863 	int err;
3864 	int ret = 0;
3865 
3866 	if (perf_header__has_feat(ff->ph, type)) {
3867 		if (!feat_ops[type].write)
3868 			return -1;
3869 
3870 		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
3871 			return -1;
3872 
3873 		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);
3874 
3875 		/*
3876 		 * Hook to let perf inject copy features sections from the input
3877 		 * file.
3878 		 */
3879 		if (fc && fc->copy) {
3880 			struct header_fw h = {
3881 				.fw.write = feat_writer_cb,
3882 				.ff = ff,
3883 			};
3884 
3885 			/* ->copy() returns 0 if the feature was not copied */
3886 			err = fc->copy(fc, type, &h.fw);
3887 		} else {
3888 			err = 0;
3889 		}
3890 		if (!err)
3891 			err = feat_ops[type].write(ff, evlist);
3892 		if (err < 0) {
3893 			pr_debug("failed to write feature %s\n", feat_ops[type].name);
3894 
3895 			/* undo anything written */
3896 			lseek(ff->fd, (*p)->offset, SEEK_SET);
3897 
3898 			return -1;
3899 		}
3900 		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
3901 		(*p)++;
3902 	}
3903 	return ret;
3904 }
3905 
3906 static int perf_header__adds_write(struct perf_header *header,
3907 				   struct evlist *evlist, int fd,
3908 				   struct feat_copier *fc)
3909 {
3910 	int nr_sections;
3911 	struct feat_fd ff = {
3912 		.fd  = fd,
3913 		.ph = header,
3914 	};
3915 	struct perf_file_section *feat_sec, *p;
3916 	int sec_size;
3917 	u64 sec_start;
3918 	int feat;
3919 	int err;
3920 
3921 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
3922 	if (!nr_sections)
3923 		return 0;
3924 
3925 	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
3926 	if (feat_sec == NULL)
3927 		return -ENOMEM;
3928 
3929 	sec_size = sizeof(*feat_sec) * nr_sections;
3930 
3931 	sec_start = header->feat_offset;
3932 	lseek(fd, sec_start + sec_size, SEEK_SET);
3933 
3934 	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
3935 		if (do_write_feat(&ff, feat, &p, evlist, fc))
3936 			perf_header__clear_feat(header, feat);
3937 	}
3938 
3939 	lseek(fd, sec_start, SEEK_SET);
3940 	/*
3941 	 * may write more than needed due to a dropped feature, but
3942 	 * this is okay, the reader will skip the missing entries
3943 	 */
3944 	err = do_write(&ff, feat_sec, sec_size);
3945 	if (err < 0)
3946 		pr_debug("failed to write feature section\n");
3947 	free(ff.buf); /* TODO: added to silence clang-tidy. */
3948 	free(feat_sec);
3949 	return err;
3950 }
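/*
 * Resulting feature area layout, as a sketch: starting at
 * header->feat_offset the file carries one perf_file_section
 * { offset, size } per set feature bit, followed by the payloads the
 * sections point at:
 *
 *   feat_offset:  [sec 0][sec 1] ... [sec n-1][payload 0][payload 1] ...
 *
 * do_write_feat() fills in the payloads first (hence the initial
 * lseek() past the table), and the table itself is written in one go
 * afterwards.
 */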
3951 
3952 int perf_header__write_pipe(int fd)
3953 {
3954 	struct perf_pipe_file_header f_header;
3955 	struct feat_fd ff = {
3956 		.fd = fd,
3957 	};
3958 	int err;
3959 
3960 	f_header = (struct perf_pipe_file_header){
3961 		.magic	   = PERF_MAGIC,
3962 		.size	   = sizeof(f_header),
3963 	};
3964 
3965 	err = do_write(&ff, &f_header, sizeof(f_header));
3966 	if (err < 0) {
3967 		pr_debug("failed to write perf pipe header\n");
3968 		return err;
3969 	}
3970 	free(ff.buf);
3971 	return 0;
3972 }
3973 
3974 static int perf_session__do_write_header(struct perf_session *session,
3975 					 struct evlist *evlist,
3976 					 int fd, bool at_exit,
3977 					 struct feat_copier *fc,
3978 					 bool write_attrs_after_data)
3979 {
3980 	struct perf_file_header f_header;
3981 	struct perf_header *header = &session->header;
3982 	struct evsel *evsel;
3983 	struct feat_fd ff = {
3984 		.ph = header,
3985 		.fd = fd,
3986 	};
3987 	u64 attr_offset = sizeof(f_header), attr_size = 0;
3988 	int err;
3989 
3990 	if (write_attrs_after_data && at_exit) {
3991 		/*
3992 		 * Write features at the end of the file first so that
3993 		 * attributes may come after them.
3994 		 */
3995 		if (!header->data_offset && header->data_size) {
3996 			pr_err("File contains data but offset unknown\n");
3997 			err = -1;
3998 			goto err_out;
3999 		}
4000 		header->feat_offset = header->data_offset + header->data_size;
4001 		err = perf_header__adds_write(header, evlist, fd, fc);
4002 		if (err < 0)
4003 			goto err_out;
4004 		attr_offset = lseek(fd, 0, SEEK_CUR);
4005 	} else {
4006 		lseek(fd, attr_offset, SEEK_SET);
4007 	}
4008 
4009 	evlist__for_each_entry(session->evlist, evsel) {
4010 		evsel->id_offset = attr_offset;
4011 		/* Avoid writing at the end of the file until the session is exiting. */
4012 		if (!write_attrs_after_data || at_exit) {
4013 			err = do_write(&ff, evsel->core.id, evsel->core.ids * sizeof(u64));
4014 			if (err < 0) {
4015 				pr_debug("failed to write perf header\n");
4016 				goto err_out;
4017 			}
4018 		}
4019 		attr_offset += evsel->core.ids * sizeof(u64);
4020 	}
4021 
4022 	evlist__for_each_entry(evlist, evsel) {
4023 		if (evsel->core.attr.size < sizeof(evsel->core.attr)) {
4024 			/*
4025 			 * We are likely in "perf inject" and have read
4026 			 * from an older file. Update attr size so that
4027 			 * reader gets the right offset to the ids.
4028 			 */
4029 			evsel->core.attr.size = sizeof(evsel->core.attr);
4030 		}
4031 		/* Avoid writing at the end of the file until the session is exiting. */
4032 		if (!write_attrs_after_data || at_exit) {
4033 			struct perf_file_attr f_attr = {
4034 				.attr = evsel->core.attr,
4035 				.ids  = {
4036 					.offset = evsel->id_offset,
4037 					.size   = evsel->core.ids * sizeof(u64),
4038 				}
4039 			};
4040 			err = do_write(&ff, &f_attr, sizeof(f_attr));
4041 			if (err < 0) {
4042 				pr_debug("failed to write perf header attribute\n");
4043 				goto err_out;
4044 			}
4045 		}
4046 		attr_size += sizeof(struct perf_file_attr);
4047 	}
4048 
4049 	if (!header->data_offset) {
4050 		if (write_attrs_after_data)
4051 			header->data_offset = sizeof(f_header);
4052 		else
4053 			header->data_offset = attr_offset + attr_size;
4054 	}
4055 	header->feat_offset = header->data_offset + header->data_size;
4056 
4057 	if (!write_attrs_after_data && at_exit) {
4058 		/* Write features now that feat_offset is known. */
4059 		err = perf_header__adds_write(header, evlist, fd, fc);
4060 		if (err < 0)
4061 			goto err_out;
4062 	}
4063 
4064 	f_header = (struct perf_file_header){
4065 		.magic	   = PERF_MAGIC,
4066 		.size	   = sizeof(f_header),
4067 		.attr_size = sizeof(struct perf_file_attr),
4068 		.attrs = {
4069 			.offset = attr_offset,
4070 			.size   = attr_size,
4071 		},
4072 		.data = {
4073 			.offset = header->data_offset,
4074 			.size	= header->data_size,
4075 		},
4076 		/* event_types is ignored, store zeros */
4077 	};
4078 
4079 	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
4080 
4081 	lseek(fd, 0, SEEK_SET);
4082 	err = do_write(&ff, &f_header, sizeof(f_header));
4083 	if (err < 0) {
4084 		pr_debug("failed to write perf header\n");
4085 		goto err_out;
4086 	} else {
4087 		lseek(fd, 0, SEEK_END);
4088 		err = 0;
4089 	}
4090 err_out:
4091 	free(ff.buf);
4092 	return err;
4093 }
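
/*
 * Illustration (not in the original source): the regular perf.data layout
 * produced above when write_attrs_after_data == false:
 *
 *	0		struct perf_file_header
 *			sample ids, u64 each (referenced by f_attr.ids)
 *	attrs.offset	struct perf_file_attr[], one per evsel
 *	data.offset	event data
 *	feat_offset	feature section table + feature payloads
 *
 * With write_attrs_after_data == true, data immediately follows the file
 * header, and the features, ids and attributes are appended at exit so
 * the data can be streamed out before the attrs are final.
 */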
4094 
4095 int perf_session__write_header(struct perf_session *session,
4096 			       struct evlist *evlist,
4097 			       int fd, bool at_exit)
4098 {
4099 	return perf_session__do_write_header(session, evlist, fd, at_exit, /*fc=*/NULL,
4100 					     /*write_attrs_after_data=*/false);
4101 }
4102 
4103 size_t perf_session__data_offset(const struct evlist *evlist)
4104 {
4105 	struct evsel *evsel;
4106 	size_t data_offset;
4107 
4108 	data_offset = sizeof(struct perf_file_header);
4109 	evlist__for_each_entry(evlist, evsel) {
4110 		data_offset += evsel->core.ids * sizeof(u64);
4111 	}
4112 	data_offset += evlist->core.nr_entries * sizeof(struct perf_file_attr);
4113 
4114 	return data_offset;
4115 }
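
/*
 * Worked example (not in the original source) of the computation above,
 * for an evlist with two evsels carrying 4 and 2 sample ids:
 *
 *	data_offset  = sizeof(struct perf_file_header);
 *	data_offset += (4 + 2) * sizeof(u64);			/* the ids   */
 *	data_offset += 2 * sizeof(struct perf_file_attr);	/* the attrs */
 *
 * i.e. exactly where perf_session__do_write_header() places data.offset
 * in the non-write_attrs_after_data case.
 */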
4116 
4117 int perf_session__inject_header(struct perf_session *session,
4118 				struct evlist *evlist,
4119 				int fd,
4120 				struct feat_copier *fc,
4121 				bool write_attrs_after_data)
4122 {
4123 	return perf_session__do_write_header(session, evlist, fd, true, fc,
4124 					     write_attrs_after_data);
4125 }
4126 
4127 static int perf_header__getbuffer64(struct perf_header *header,
4128 				    int fd, void *buf, size_t size)
4129 {
4130 	if (readn(fd, buf, size) <= 0)
4131 		return -1;
4132 
4133 	if (header->needs_swap)
4134 		mem_bswap_64(buf, size);
4135 
4136 	return 0;
4137 }
4138 
4139 int perf_header__process_sections(struct perf_header *header, int fd,
4140 				  void *data,
4141 				  int (*process)(struct perf_file_section *section,
4142 						 struct perf_header *ph,
4143 						 int feat, int fd, void *data))
4144 {
4145 	struct perf_file_section *feat_sec, *sec;
4146 	int nr_sections;
4147 	int sec_size;
4148 	int feat;
4149 	int err;
4150 
4151 	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
4152 	if (!nr_sections)
4153 		return 0;
4154 
4155 	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
4156 	if (!feat_sec)
4157 		return -1;
4158 
4159 	sec_size = sizeof(*feat_sec) * nr_sections;
4160 
4161 	lseek(fd, header->feat_offset, SEEK_SET);
4162 
4163 	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
4164 	if (err < 0)
4165 		goto out_free;
4166 
4167 	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
4168 		err = process(sec++, header, feat, fd, data);
4169 		if (err < 0)
4170 			goto out_free;
4171 	}
4172 	err = 0;
4173 out_free:
4174 	free(feat_sec);
4175 	return err;
4176 }
4177 
4178 static const int attr_file_abi_sizes[] = {
4179 	[0] = PERF_ATTR_SIZE_VER0,
4180 	[1] = PERF_ATTR_SIZE_VER1,
4181 	[2] = PERF_ATTR_SIZE_VER2,
4182 	[3] = PERF_ATTR_SIZE_VER3,
4183 	[4] = PERF_ATTR_SIZE_VER4,
4184 	0,
4185 };
4186 
4187 /*
4188  * In the legacy file format the magic number does not encode endianness;
4189  * hdr_sz served that purpose instead. But since hdr_sz varies with the ABI
4190  * revision, we have to try every known size in both byte orders to detect
4191  * the endianness.
4192  */
4193 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
4194 {
4195 	uint64_t ref_size, attr_size;
4196 	int i;
4197 
4198 	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
4199 		ref_size = attr_file_abi_sizes[i]
4200 			 + sizeof(struct perf_file_section);
4201 		if (hdr_sz != ref_size) {
4202 			attr_size = bswap_64(hdr_sz);
4203 			if (attr_size != ref_size)
4204 				continue;
4205 
4206 			ph->needs_swap = true;
4207 		}
4208 		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
4209 			 i,
4210 			 ph->needs_swap);
4211 		return 0;
4212 	}
4213 	/* could not determine endianness */
4214 	return -1;
4215 }
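
/*
 * Illustration (not in the original source): for each revision the probe
 * above compares hdr_sz against
 *
 *	attr_file_abi_sizes[i] + sizeof(struct perf_file_section)
 *
 * first as-is, then byte-swapped. E.g. for ABI0, with PERF_ATTR_SIZE_VER0
 * being 64 bytes and a 16-byte perf_file_section:
 *
 *	native file:	hdr_sz == 80		-> match, needs_swap stays false
 *	foreign file:	bswap_64(hdr_sz) == 80	-> match, needs_swap = true
 */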
4216 
4217 #define PERF_PIPE_HDR_VER0	16
4218 
4219 static const size_t attr_pipe_abi_sizes[] = {
4220 	[0] = PERF_PIPE_HDR_VER0,
4221 	0,
4222 };
4223 
4224 /*
4225  * The legacy pipe format implicitly assumes that the host recording the
4226  * samples and the host parsing them share the same endianness. That does
4227  * not always hold: the pipe output can be redirected into a file and
4228  * analyzed on a different machine, with a different endianness and a
4229  * different perf_event ABI revision in the perf tool itself.
4230  */
4231 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
4232 {
4233 	u64 attr_size;
4234 	int i;
4235 
4236 	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
4237 		if (hdr_sz != attr_pipe_abi_sizes[i]) {
4238 			attr_size = bswap_64(hdr_sz);
4239 			if (attr_size != hdr_sz)
4240 				continue;
4241 
4242 			ph->needs_swap = true;
4243 		}
4244 		pr_debug("Pipe ABI%d perf.data file detected\n", i);
4245 		return 0;
4246 	}
4247 	return -1;
4248 }
4249 
4250 bool is_perf_magic(u64 magic)
4251 {
4252 	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
4253 		|| magic == __perf_magic2
4254 		|| magic == __perf_magic2_sw)
4255 		return true;
4256 
4257 	return false;
4258 }
4259 
4260 static int check_magic_endian(u64 magic, uint64_t hdr_sz,
4261 			      bool is_pipe, struct perf_header *ph)
4262 {
4263 	int ret;
4264 
4265 	/* check for legacy format */
4266 	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
4267 	if (ret == 0) {
4268 		ph->version = PERF_HEADER_VERSION_1;
4269 		pr_debug("legacy perf.data format\n");
4270 		if (is_pipe)
4271 			return try_all_pipe_abis(hdr_sz, ph);
4272 
4273 		return try_all_file_abis(hdr_sz, ph);
4274 	}
4275 	/*
4276 	 * the new magic number serves two purposes:
4277 	 * - unique number to identify actual perf.data files
4278 	 * - encode endianness of file
4279 	 */
4280 	ph->version = PERF_HEADER_VERSION_2;
4281 
4282 	/* check magic number with one endianness */
4283 	if (magic == __perf_magic2)
4284 		return 0;
4285 
4286 	/* check magic number with opposite endianness */
4287 	if (magic != __perf_magic2_sw)
4288 		return -1;
4289 
4290 	ph->needs_swap = true;
4291 
4292 	return 0;
4293 }
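
/*
 * Not part of the original source: why the two magic constants are enough.
 * "PERFILE2" stored on a little-endian host reads back as __perf_magic2;
 * the same eight bytes read on an opposite-endian host come back reversed:
 *
 *	bswap_64(0x32454c4946524550ULL) == 0x50455246494c4532ULL
 *		 (__perf_magic2)	   (__perf_magic2_sw)
 *
 * so a single compare against __perf_magic2_sw both validates the file
 * and tells us ph->needs_swap must be set.
 */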
4294 
4295 int perf_file_header__read(struct perf_file_header *header,
4296 			   struct perf_header *ph, int fd)
4297 {
4298 	ssize_t ret;
4299 
4300 	lseek(fd, 0, SEEK_SET);
4301 
4302 	ret = readn(fd, header, sizeof(*header));
4303 	if (ret <= 0)
4304 		return -1;
4305 
4306 	if (check_magic_endian(header->magic,
4307 			       header->attr_size, false, ph) < 0) {
4308 		pr_debug("magic/endian check failed\n");
4309 		return -1;
4310 	}
4311 
4312 	if (ph->needs_swap) {
4313 		mem_bswap_64(header, offsetof(struct perf_file_header,
4314 			     adds_features));
4315 	}
4316 
4317 	if (header->size > header->attrs.offset) {
4318 		pr_err("Perf file header corrupt: header overlaps attrs\n");
4319 		return -1;
4320 	}
4321 
4322 	if (header->size > header->data.offset) {
4323 		pr_err("Perf file header corrupt: header overlaps data\n");
4324 		return -1;
4325 	}
4326 
4327 	if ((header->attrs.offset <= header->data.offset &&
4328 	     header->attrs.offset + header->attrs.size > header->data.offset) ||
4329 	    (header->attrs.offset > header->data.offset &&
4330 	     header->data.offset + header->data.size > header->attrs.offset)) {
4331 		pr_err("Perf file header corrupt: Attributes and data overlap\n");
4332 		return -1;
4333 	}
4334 
4335 	if (header->size != sizeof(*header)) {
4336 		/* Support the previous format */
4337 		if (header->size == offsetof(typeof(*header), adds_features))
4338 			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
4339 		else
4340 			return -1;
4341 	} else if (ph->needs_swap) {
4342 		/*
4343 		 * feature bitmap is declared as an array of unsigned longs --
4344 		 * not good since its size can differ between the host that
4345 		 * generated the data file and the host analyzing the file.
4346 		 *
4347 		 * We need to handle endianness, but we don't know the size of
4348 		 * the unsigned long where the file was generated. Take a best
4349 		 * guess at determining it: try 64-bit swap first (ie., file
4350 		 * created on a 64-bit host), and check if the hostname feature
4351 		 * bit is set (this feature bit is forced on as of fbe96f2).
4352 		 * If the bit is not, undo the 64-bit swap and try a 32-bit
4353 		 * swap. If the hostname bit is still not set (e.g., older data
4354 		 * file), punt and fallback to the original behavior --
4355 		 * clearing all feature bits and setting buildid.
4356 		 */
4357 		mem_bswap_64(&header->adds_features,
4358 			    BITS_TO_U64(HEADER_FEAT_BITS));
4359 
4360 		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
4361 			/* unswap as u64 */
4362 			mem_bswap_64(&header->adds_features,
4363 				    BITS_TO_U64(HEADER_FEAT_BITS));
4364 
4365 			/* unswap as u32 */
4366 			mem_bswap_32(&header->adds_features,
4367 				    BITS_TO_U32(HEADER_FEAT_BITS));
4368 		}
4369 
4370 		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
4371 			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
4372 			__set_bit(HEADER_BUILD_ID, header->adds_features);
4373 		}
4374 	}
4375 
4376 	memcpy(&ph->adds_features, &header->adds_features,
4377 	       sizeof(ph->adds_features));
4378 
4379 	ph->data_offset  = header->data.offset;
4380 	ph->data_size	 = header->data.size;
4381 	ph->feat_offset  = header->data.offset + header->data.size;
4382 	return 0;
4383 }
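
/*
 * Illustration (not in the original source) of why both swap widths are
 * tried above. The bitmap is stored as unsigned long[], so the byte a
 * given bit lands in depends on the writer's word size:
 *
 *	64-bit writer:	b0 b1 b2 b3 b4 b5 b6 b7		one bswap_64 per word
 *	32-bit writer:	b0 b1 b2 b3 | b4 b5 b6 b7	one bswap_32 per half
 *
 * A u64 swap applied to data written with 32-bit longs pulls each bit
 * from the wrong half of the word, which is why HEADER_HOSTNAME serves
 * as the canary for picking the right interpretation.
 */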
4384 
4385 static int perf_file_section__process(struct perf_file_section *section,
4386 				      struct perf_header *ph,
4387 				      int feat, int fd, void *data)
4388 {
4389 	struct feat_fd fdd = {
4390 		.fd	= fd,
4391 		.ph	= ph,
4392 		.size	= section->size,
4393 		.offset	= section->offset,
4394 	};
4395 
4396 	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
4397 		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
4398 			  "%d, continuing...\n", section->offset, feat);
4399 		return 0;
4400 	}
4401 
4402 	if (feat >= HEADER_LAST_FEATURE) {
4403 		pr_debug("unknown feature %d, continuing...\n", feat);
4404 		return 0;
4405 	}
4406 
4407 	if (!feat_ops[feat].process)
4408 		return 0;
4409 
4410 	return feat_ops[feat].process(&fdd, data);
4411 }
4412 
4413 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
4414 				       struct perf_header *ph,
4415 				       struct perf_data *data)
4416 {
4417 	ssize_t ret;
4418 
4419 	ret = perf_data__read(data, header, sizeof(*header));
4420 	if (ret <= 0)
4421 		return -1;
4422 
4423 	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
4424 		pr_debug("endian/magic failed\n");
4425 		return -1;
4426 	}
4427 
4428 	if (ph->needs_swap)
4429 		header->size = bswap_64(header->size);
4430 
4431 	return 0;
4432 }
4433 
4434 static int perf_header__read_pipe(struct perf_session *session)
4435 {
4436 	struct perf_header *header = &session->header;
4437 	struct perf_pipe_file_header f_header;
4438 
4439 	if (perf_file_header__read_pipe(&f_header, header, session->data) < 0) {
4440 		pr_debug("incompatible file format\n");
4441 		return -EINVAL;
4442 	}
4443 
4444 	return f_header.size == sizeof(f_header) ? 0 : -1;
4445 }
4446 
4447 static int read_attr(int fd, struct perf_header *ph,
4448 		     struct perf_file_attr *f_attr)
4449 {
4450 	struct perf_event_attr *attr = &f_attr->attr;
4451 	size_t sz, left;
4452 	size_t our_sz = sizeof(f_attr->attr);
4453 	ssize_t ret;
4454 
4455 	memset(f_attr, 0, sizeof(*f_attr));
4456 
4457 	/* read minimal guaranteed structure */
4458 	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
4459 	if (ret <= 0) {
4460 		pr_debug("cannot read %d bytes of header attr\n",
4461 			 PERF_ATTR_SIZE_VER0);
4462 		return -1;
4463 	}
4464 
4465 	/* on file perf_event_attr size */
4466 	sz = attr->size;
4467 
4468 	if (ph->needs_swap)
4469 		sz = bswap_32(sz);
4470 
4471 	if (sz == 0) {
4472 		/* assume ABI0 */
4473 		sz = PERF_ATTR_SIZE_VER0;
4474 	} else if (sz > our_sz) {
4475 		pr_debug("file uses a more recent and unsupported ABI"
4476 			 " (%zu bytes extra)\n", sz - our_sz);
4477 		return -1;
4478 	}
4479 	/* what we have not yet read and that we know about */
4480 	left = sz - PERF_ATTR_SIZE_VER0;
4481 	if (left) {
4482 		void *ptr = attr;
4483 		ptr += PERF_ATTR_SIZE_VER0;
4484 
4485 		ret = readn(fd, ptr, left);
		if (ret <= 0)
			return -1;
4486 	}
4487 	/* read perf_file_section, ids are read in caller */
4488 	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));
4489 
4490 	return ret <= 0 ? -1 : 0;
4491 }
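
/*
 * Not part of the original source: the three on-file attr sizes that
 * read_attr() accepts, relative to our own sizeof(perf_event_attr):
 *
 *	sz == 0		-> treat as PERF_ATTR_SIZE_VER0 (ancient writer)
 *	sz <= our_sz	-> read sz bytes; the memset() above leaves the
 *			   fields only our newer ABI knows about zeroed
 *	sz >  our_sz	-> refuse: the file comes from a newer perf
 *
 * This mirrors the forward/backward compatibility scheme the kernel uses
 * for perf_event_attr in the perf_event_open() syscall.
 */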
4492 
4493 #ifdef HAVE_LIBTRACEEVENT
4494 static int evsel__prepare_tracepoint_event(struct evsel *evsel, struct tep_handle *pevent)
4495 {
4496 	struct tep_event *event;
4497 	char bf[128];
4498 
4499 	/* already prepared */
4500 	if (evsel->tp_format)
4501 		return 0;
4502 
4503 	if (pevent == NULL) {
4504 		pr_debug("broken or missing trace data\n");
4505 		return -1;
4506 	}
4507 
4508 	event = tep_find_event(pevent, evsel->core.attr.config);
4509 	if (event == NULL) {
4510 		pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
4511 		return -1;
4512 	}
4513 
4514 	if (!evsel->name) {
4515 		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
4516 		evsel->name = strdup(bf);
4517 		if (evsel->name == NULL)
4518 			return -1;
4519 	}
4520 
4521 	evsel->tp_format = event;
4522 	return 0;
4523 }
4524 
4525 static int evlist__prepare_tracepoint_events(struct evlist *evlist, struct tep_handle *pevent)
4526 {
4527 	struct evsel *pos;
4528 
4529 	evlist__for_each_entry(evlist, pos) {
4530 		if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
4531 		    evsel__prepare_tracepoint_event(pos, pevent))
4532 			return -1;
4533 	}
4534 
4535 	return 0;
4536 }
4537 #endif
4538 
4539 int perf_session__read_header(struct perf_session *session)
4540 {
4541 	struct perf_data *data = session->data;
4542 	struct perf_header *header = &session->header;
4543 	struct perf_file_header	f_header;
4544 	struct perf_file_attr	f_attr;
4545 	u64			f_id;
4546 	int nr_attrs, nr_ids, i, j, err;
4547 	int fd = perf_data__fd(data);
4548 
4549 	session->evlist = evlist__new();
4550 	if (session->evlist == NULL)
4551 		return -ENOMEM;
4552 
4553 	session->evlist->session = session;
4554 	session->machines.host.env = &header->env;
4555 
4556 	/*
4557 	 * 'pipe' data events can also be read from a regular file, so
4558 	 * check for the pipe header regardless of the source.
4559 	 */
4560 	err = perf_header__read_pipe(session);
4561 	if (!err || perf_data__is_pipe(data)) {
4562 		data->is_pipe = true;
4563 		return err;
4564 	}
4565 
4566 	if (perf_file_header__read(&f_header, header, fd) < 0)
4567 		return -EINVAL;
4568 
4569 	if (header->needs_swap && data->in_place_update) {
4570 		pr_err("In-place update not supported when byte-swapping is required\n");
4571 		return -EINVAL;
4572 	}
4573 
4574 	/*
4575 	 * Sanity check that perf.data was written cleanly; data size is
4576 	 * initialized to 0 and updated only if the on_exit function is run.
4577 	 * If data size is still 0 then the file contains only partial
4578 	 * information.  Just warn the user and process as much as possible.
4579 	 */
4580 	if (f_header.data.size == 0) {
4581 		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
4582 			   "Was the 'perf record' command properly terminated?\n",
4583 			   data->file.path);
4584 	}
4585 
4586 	if (f_header.attr_size == 0) {
4587 		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
4588 		       "Was the 'perf record' command properly terminated?\n",
4589 		       data->file.path);
4590 		return -EINVAL;
4591 	}
4592 
4593 	nr_attrs = f_header.attrs.size / f_header.attr_size;
4594 	lseek(fd, f_header.attrs.offset, SEEK_SET);
4595 
4596 	for (i = 0; i < nr_attrs; i++) {
4597 		struct evsel *evsel;
4598 		off_t tmp;
4599 
4600 		if (read_attr(fd, header, &f_attr) < 0)
4601 			goto out_errno;
4602 
4603 		if (header->needs_swap) {
4604 			f_attr.ids.size   = bswap_64(f_attr.ids.size);
4605 			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
4606 			perf_event__attr_swap(&f_attr.attr);
4607 		}
4608 
4609 		tmp = lseek(fd, 0, SEEK_CUR);
4610 		evsel = evsel__new(&f_attr.attr);
4611 
4612 		if (evsel == NULL)
4613 			goto out_delete_evlist;
4614 
4615 		evsel->needs_swap = header->needs_swap;
4616 		/*
4617 		 * Add the evsel first so that if perf_evsel__alloc_id() fails,
4618 		 * this entry is purged too at evlist__delete().
4619 		 */
4620 		evlist__add(session->evlist, evsel);
4621 
4622 		nr_ids = f_attr.ids.size / sizeof(u64);
4623 		/*
4624 		 * We don't have the cpu and thread maps on the header, so
4625 		 * for allocating the perf_sample_id table we fake 1 cpu and
4626 		 * hattr->ids threads.
4627 		 */
4628 		if (perf_evsel__alloc_id(&evsel->core, 1, nr_ids))
4629 			goto out_delete_evlist;
4630 
4631 		lseek(fd, f_attr.ids.offset, SEEK_SET);
4632 
4633 		for (j = 0; j < nr_ids; j++) {
4634 			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
4635 				goto out_errno;
4636 
4637 			perf_evlist__id_add(&session->evlist->core, &evsel->core, 0, j, f_id);
4638 		}
4639 
4640 		lseek(fd, tmp, SEEK_SET);
4641 	}
4642 
4643 #ifdef HAVE_LIBTRACEEVENT
4644 	perf_header__process_sections(header, fd, &session->tevent,
4645 				      perf_file_section__process);
4646 
4647 	if (evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent))
4648 		goto out_delete_evlist;
4649 #else
4650 	perf_header__process_sections(header, fd, NULL, perf_file_section__process);
4651 #endif
4652 
4653 	return 0;
4654 out_errno:
4655 	return -errno;
4656 
4657 out_delete_evlist:
4658 	evlist__delete(session->evlist);
4659 	session->evlist = NULL;
4660 	return -ENOMEM;
4661 }
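
/*
 * Illustration (not in the original source): the read sequence performed
 * by perf_session__read_header() above, in order:
 *
 *	1. probe for a pipe header (works on regular files too)
 *	2. perf_file_header__read(): magic, endianness and overlap checks
 *	3. per attr: read_attr() plus the ids it points at -> one evsel each
 *	4. perf_header__process_sections(): the optional feature payloads
 */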
4662 
4663 int perf_event__process_feature(struct perf_session *session,
4664 				union perf_event *event)
4665 {
4666 	struct feat_fd ff = { .fd = 0 };
4667 	struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
4668 	int type = fe->header.type;
4669 	u64 feat = fe->feat_id;
4670 	int ret = 0;
4671 	bool print = dump_trace;
4672 
4673 	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
4674 		pr_warning("invalid record type %d in pipe-mode\n", type);
4675 		return 0;
4676 	}
4677 	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
4678 		pr_warning("invalid record type %d in pipe-mode\n", type);
4679 		return -1;
4680 	}
4681 
4682 	ff.buf  = (void *)fe->data;
4683 	ff.size = event->header.size - sizeof(*fe);
4684 	ff.ph = &session->header;
4685 
4686 	if (feat_ops[feat].process && feat_ops[feat].process(&ff, NULL)) {
4687 		ret = -1;
4688 		goto out;
4689 	}
4690 
4691 	if (session->tool->show_feat_hdr) {
4692 		if (!feat_ops[feat].full_only ||
4693 		    session->tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
4694 			print = true;
4695 		} else {
4696 			fprintf(stdout, "# %s info available, use -I to display\n",
4697 				feat_ops[feat].name);
4698 		}
4699 	}
4700 
4701 	if (dump_trace)
4702 		printf(", ");
4703 
4704 	if (print) {
4705 		if (feat_ops[feat].print)
4706 			feat_ops[feat].print(&ff, stdout);
4707 		else
4708 			printf("# %s", feat_ops[feat].name);
4709 	}
4710 
4711 out:
4712 	free_event_desc(ff.events);
4713 	return ret;
4714 }
4715 
4716 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
4717 {
4718 	struct perf_record_event_update *ev = &event->event_update;
4719 	struct perf_cpu_map *map;
4720 	size_t ret;
4721 
4722 	ret = fprintf(fp, "\n... id:    %" PRI_lu64 "\n", ev->id);
4723 
4724 	switch (ev->type) {
4725 	case PERF_EVENT_UPDATE__SCALE:
4726 		ret += fprintf(fp, "... scale: %f\n", ev->scale.scale);
4727 		break;
4728 	case PERF_EVENT_UPDATE__UNIT:
4729 		ret += fprintf(fp, "... unit:  %s\n", ev->unit);
4730 		break;
4731 	case PERF_EVENT_UPDATE__NAME:
4732 		ret += fprintf(fp, "... name:  %s\n", ev->name);
4733 		break;
4734 	case PERF_EVENT_UPDATE__CPUS:
4735 		ret += fprintf(fp, "... ");
4736 
4737 		map = cpu_map__new_data(&ev->cpus.cpus);
4738 		if (map) {
4739 			ret += cpu_map__fprintf(map, fp);
4740 			perf_cpu_map__put(map);
4741 		} else
4742 			ret += fprintf(fp, "failed to get cpus\n");
4743 		break;
4744 	default:
4745 		ret += fprintf(fp, "... unknown type\n");
4746 		break;
4747 	}
4748 
4749 	return ret;
4750 }
4751 
4752 size_t perf_event__fprintf_attr(union perf_event *event, FILE *fp)
4753 {
4754 	return perf_event_attr__fprintf(fp, &event->attr.attr, __desc_attr__fprintf, NULL);
4755 }
4756 
4757 int perf_event__process_attr(const struct perf_tool *tool __maybe_unused,
4758 			     union perf_event *event,
4759 			     struct evlist **pevlist)
4760 {
4761 	u32 i, n_ids;
4762 	u64 *ids;
4763 	struct evsel *evsel;
4764 	struct evlist *evlist = *pevlist;
4765 
4766 	if (dump_trace)
4767 		perf_event__fprintf_attr(event, stdout);
4768 
4769 	if (evlist == NULL) {
4770 		*pevlist = evlist = evlist__new();
4771 		if (evlist == NULL)
4772 			return -ENOMEM;
4773 	}
4774 
4775 	evsel = evsel__new(&event->attr.attr);
4776 	if (evsel == NULL)
4777 		return -ENOMEM;
4778 
4779 	evlist__add(evlist, evsel);
4780 
4781 	n_ids = event->header.size - sizeof(event->header) - event->attr.attr.size;
4782 	n_ids = n_ids / sizeof(u64);
4783 	/*
4784 	 * We don't have the cpu and thread maps on the header, so
4785 	 * for allocating the perf_sample_id table we fake 1 cpu and
4786 	 * hattr->ids threads.
4787 	 */
4788 	if (perf_evsel__alloc_id(&evsel->core, 1, n_ids))
4789 		return -ENOMEM;
4790 
4791 	ids = perf_record_header_attr_id(event);
4792 	for (i = 0; i < n_ids; i++) {
4793 		perf_evlist__id_add(&evlist->core, &evsel->core, 0, i, ids[i]);
4794 	}
4795 
4796 	return 0;
4797 }
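
/*
 * Worked example (not in the original source) of the n_ids computation
 * above: for a 128-byte PERF_RECORD_HEADER_ATTR event that carries a
 * 112-byte perf_event_attr,
 *
 *	n_ids = (128 - sizeof(struct perf_event_header) - 112) / sizeof(u64)
 *	      = (128 - 8 - 112) / 8 = 1
 *
 * i.e. a single sample id trails the attr in the record payload.
 */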
4798 
4799 int perf_event__process_event_update(const struct perf_tool *tool __maybe_unused,
4800 				     union perf_event *event,
4801 				     struct evlist **pevlist)
4802 {
4803 	struct perf_record_event_update *ev = &event->event_update;
4804 	struct evlist *evlist;
4805 	struct evsel *evsel;
4806 	struct perf_cpu_map *map;
4807 
4808 	if (dump_trace)
4809 		perf_event__fprintf_event_update(event, stdout);
4810 
4811 	if (!pevlist || *pevlist == NULL)
4812 		return -EINVAL;
4813 
4814 	evlist = *pevlist;
4815 
4816 	evsel = evlist__id2evsel(evlist, ev->id);
4817 	if (evsel == NULL)
4818 		return -EINVAL;
4819 
4820 	switch (ev->type) {
4821 	case PERF_EVENT_UPDATE__UNIT:
4822 		free((char *)evsel->unit);
4823 		evsel->unit = strdup(ev->unit);
4824 		break;
4825 	case PERF_EVENT_UPDATE__NAME:
4826 		free(evsel->name);
4827 		evsel->name = strdup(ev->name);
4828 		break;
4829 	case PERF_EVENT_UPDATE__SCALE:
4830 		evsel->scale = ev->scale.scale;
4831 		break;
4832 	case PERF_EVENT_UPDATE__CPUS:
4833 		map = cpu_map__new_data(&ev->cpus.cpus);
4834 		if (map) {
4835 			perf_cpu_map__put(evsel->core.pmu_cpus);
4836 			evsel->core.pmu_cpus = map;
4837 		} else
4838 			pr_err("failed to get event_update cpus\n");
4839 	default:
4840 		break;
4841 	}
4842 
4843 	return 0;
4844 }
4845 
4846 #ifdef HAVE_LIBTRACEEVENT
4847 int perf_event__process_tracing_data(const struct perf_tool *tool __maybe_unused,
4848 				     struct perf_session *session,
4849 				     union perf_event *event)
4850 {
4851 	ssize_t size_read, padding, size = event->tracing_data.size;
4852 	int fd = perf_data__fd(session->data);
4853 	char buf[BUFSIZ];
4854 
4855 	/*
4856 	 * The pipe fd is already in the proper place; we cannot move it,
4857 	 * and doing so would break the case where 'pipe' data is read
4858 	 * from a regular file. trace_report() reads data from 'fd', so
4859 	 * position it directly behind the event, where the tracing data
4860 	 * starts.
4861 	 */
4862 	if (!perf_data__is_pipe(session->data)) {
4863 		off_t offset = lseek(fd, 0, SEEK_CUR);
4864 
4865 		/* setup for reading amidst mmap */
4866 		lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
4867 		      SEEK_SET);
4868 	}
4869 
4870 	size_read = trace_report(fd, &session->tevent, session->trace_event_repipe);
4871 	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;
4872 
4873 	if (readn(fd, buf, padding) < 0) {
4874 		pr_err("%s: reading input file", __func__);
4875 		return -1;
4876 	}
4877 	if (session->trace_event_repipe) {
4878 		int retw = write(STDOUT_FILENO, buf, padding);
4879 		if (retw <= 0 || retw != padding) {
4880 			pr_err("%s: repiping tracing data padding", __func__);
4881 			return -1;
4882 		}
4883 	}
4884 
4885 	if (size_read + padding != size) {
4886 		pr_err("%s: tracing data size mismatch", __func__);
4887 		return -1;
4888 	}
4889 
4890 	evlist__prepare_tracepoint_events(session->evlist, session->tevent.pevent);
4891 
4892 	return size_read + padding;
4893 }
4894 #endif
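
/*
 * Not part of the original source: the padding arithmetic above rounds
 * the tracing data up to the next u64 boundary. E.g. if trace_report()
 * consumed 4131 bytes:
 *
 *	padding = PERF_ALIGN(4131, sizeof(u64)) - 4131
 *		= 4136 - 4131 = 5
 *
 * so five filler bytes are consumed (and repiped when requested) to keep
 * the records that follow 8-byte aligned.
 */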
4895 
4896 int perf_event__process_build_id(const struct perf_tool *tool __maybe_unused,
4897 				 struct perf_session *session,
4898 				 union perf_event *event)
4899 {
4900 	__event_process_build_id(&event->build_id,
4901 				 event->build_id.filename,
4902 				 session);
4903 	return 0;
4904 }
4905