// xref: /linux/tools/perf/util/session.c (revision 8520a98dbab61e9e340cdfb72dd17ccc8a98961e)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <linux/err.h>
5 #include <linux/kernel.h>
6 #include <linux/zalloc.h>
7 #include <api/fs/fs.h>
8 
9 #include <byteswap.h>
10 #include <unistd.h>
11 #include <sys/types.h>
12 #include <sys/mman.h>
13 #include <perf/cpumap.h>
14 
15 #include "debug.h"
16 #include "evlist.h"
17 #include "evsel.h"
18 #include "memswap.h"
19 #include "map.h"
20 #include "symbol.h"
21 #include "session.h"
22 #include "tool.h"
23 #include "sort.h"
24 #include "cpumap.h"
25 #include "perf_regs.h"
26 #include "asm/bug.h"
27 #include "auxtrace.h"
28 #include "thread.h"
29 #include "thread-stack.h"
30 #include "sample-raw.h"
31 #include "stat.h"
32 #include "util.h"
33 #include "../perf.h"
34 #include "arch/common.h"
35 
36 #ifdef HAVE_ZSTD_SUPPORT
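/*
 * Decompress a PERF_RECORD_COMPRESSED event into a fresh anonymous
 * mapping and chain it onto the session's decomp list. Bytes left
 * unconsumed at the tail of the previous buffer are copied to the head
 * of the new one, so that events straddling compressed records can be
 * reassembled.
 */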
37 static int perf_session__process_compressed_event(struct perf_session *session,
38 						  union perf_event *event, u64 file_offset)
39 {
40 	void *src;
41 	size_t decomp_size, src_size;
42 	u64 decomp_last_rem = 0;
43 	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
44 	struct decomp *decomp, *decomp_last = session->decomp_last;
45 
46 	if (decomp_last) {
47 		decomp_last_rem = decomp_last->size - decomp_last->head;
48 		decomp_len += decomp_last_rem;
49 	}
50 
51 	mmap_len = sizeof(struct decomp) + decomp_len;
52 	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
53 		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
54 	if (decomp == MAP_FAILED) {
55 		pr_err("Couldn't allocate memory for decompression\n");
56 		return -1;
57 	}
58 
59 	decomp->file_pos = file_offset;
60 	decomp->mmap_len = mmap_len;
61 	decomp->head = 0;
62 
63 	if (decomp_last_rem) {
64 		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
65 		decomp->size = decomp_last_rem;
66 	}
67 
68 	src = (void *)event + sizeof(struct perf_record_compressed);
69 	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
70 
71 	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
72 				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
73 	if (!decomp_size) {
74 		munmap(decomp, mmap_len);
75 		pr_err("Couldn't decompress data\n");
76 		return -1;
77 	}
78 
79 	decomp->size += decomp_size;
80 
81 	if (session->decomp == NULL) {
82 		session->decomp = decomp;
83 		session->decomp_last = decomp;
84 	} else {
85 		session->decomp_last->next = decomp;
86 		session->decomp_last = decomp;
87 	}
88 
89 	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
90 
91 	return 0;
92 }
93 #else /* !HAVE_ZSTD_SUPPORT */
94 #define perf_session__process_compressed_event perf_session__process_compressed_event_stub
95 #endif
96 
97 static int perf_session__deliver_event(struct perf_session *session,
98 				       union perf_event *event,
99 				       struct perf_tool *tool,
100 				       u64 file_offset);
101 
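/*
 * Read and validate the perf.data header. Pipe and stat data carry no
 * evsel configuration to check here; for regular files, make sure all
 * evsels agree on sample_type, sample_id_all and read_format, since
 * sample parsing relies on them being consistent.
 */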
102 static int perf_session__open(struct perf_session *session)
103 {
104 	struct perf_data *data = session->data;
105 
106 	if (perf_session__read_header(session) < 0) {
107 		pr_err("incompatible file format (rerun with -v to learn more)\n");
108 		return -1;
109 	}
110 
111 	if (perf_data__is_pipe(data))
112 		return 0;
113 
114 	if (perf_header__has_feat(&session->header, HEADER_STAT))
115 		return 0;
116 
117 	if (!perf_evlist__valid_sample_type(session->evlist)) {
118 		pr_err("non matching sample_type\n");
119 		return -1;
120 	}
121 
122 	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
123 		pr_err("non matching sample_id_all\n");
124 		return -1;
125 	}
126 
127 	if (!perf_evlist__valid_read_format(session->evlist)) {
128 		pr_err("non matching read_format\n");
129 		return -1;
130 	}
131 
132 	return 0;
133 }
134 
135 void perf_session__set_id_hdr_size(struct perf_session *session)
136 {
137 	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);
138 
139 	machines__set_id_hdr_size(&session->machines, id_hdr_size);
140 }
141 
142 int perf_session__create_kernel_maps(struct perf_session *session)
143 {
144 	int ret = machine__create_kernel_maps(&session->machines.host);
145 
146 	if (ret >= 0)
147 		ret = machines__create_guest_kernel_maps(&session->machines);
148 	return ret;
149 }
150 
151 static void perf_session__destroy_kernel_maps(struct perf_session *session)
152 {
153 	machines__destroy_kernel_maps(&session->machines);
154 }
155 
156 static bool perf_session__has_comm_exec(struct perf_session *session)
157 {
158 	struct evsel *evsel;
159 
160 	evlist__for_each_entry(session->evlist, evsel) {
161 		if (evsel->core.attr.comm_exec)
162 			return true;
163 	}
164 
165 	return false;
166 }
167 
168 static void perf_session__set_comm_exec(struct perf_session *session)
169 {
170 	bool comm_exec = perf_session__has_comm_exec(session);
171 
172 	machines__set_comm_exec(&session->machines, comm_exec);
173 }
174 
175 static int ordered_events__deliver_event(struct ordered_events *oe,
176 					 struct ordered_event *event)
177 {
178 	struct perf_session *session = container_of(oe, struct perf_session,
179 						    ordered_events);
180 
181 	return perf_session__deliver_event(session, event->event,
182 					   session->tool, event->file_offset);
183 }
184 
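/*
 * Allocate and initialize a session: open and validate the data file
 * when one is given, create kernel maps in write mode, and fall back
 * to unordered processing when the events lack the timestamps needed
 * for sorting.
 */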
185 struct perf_session *perf_session__new(struct perf_data *data,
186 				       bool repipe, struct perf_tool *tool)
187 {
188 	struct perf_session *session = zalloc(sizeof(*session));
189 
190 	if (!session)
191 		goto out;
192 
193 	session->repipe = repipe;
194 	session->tool   = tool;
195 	INIT_LIST_HEAD(&session->auxtrace_index);
196 	machines__init(&session->machines);
197 	ordered_events__init(&session->ordered_events,
198 			     ordered_events__deliver_event, NULL);
199 
200 	perf_env__init(&session->header.env);
201 	if (data) {
202 		if (perf_data__open(data))
203 			goto out_delete;
204 
205 		session->data = data;
206 
207 		if (perf_data__is_read(data)) {
208 			if (perf_session__open(session) < 0)
209 				goto out_delete;
210 
211 			/*
212 			 * set session attributes that are present in perf.data
213 			 * but not in pipe-mode.
214 			 */
215 			if (!data->is_pipe) {
216 				perf_session__set_id_hdr_size(session);
217 				perf_session__set_comm_exec(session);
218 			}
219 
220 			perf_evlist__init_trace_event_sample_raw(session->evlist);
221 
222 			/* Open the directory data. */
223 			if (data->is_dir && perf_data__open_dir(data))
224 				goto out_delete;
225 		}
226 	} else {
227 		session->machines.host.env = &perf_env;
228 	}
229 
230 	session->machines.host.single_address_space =
231 		perf_env__single_address_space(session->machines.host.env);
232 
233 	if (!data || perf_data__is_write(data)) {
234 		/*
235 		 * In O_RDONLY mode this will be performed when reading the
236 		 * kernel MMAP event, in perf_event__process_mmap().
237 		 */
238 		if (perf_session__create_kernel_maps(session) < 0)
239 			pr_warning("Cannot read kernel map\n");
240 	}
241 
242 	/*
243 	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
244 	 * processed, so perf_evlist__sample_id_all is not meaningful here.
245 	 */
246 	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
247 	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
248 		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
249 		tool->ordered_events = false;
250 	}
251 
252 	return session;
253 
254  out_delete:
255 	perf_session__delete(session);
256  out:
257 	return NULL;
258 }
259 
260 static void perf_session__delete_threads(struct perf_session *session)
261 {
262 	machine__delete_threads(&session->machines.host);
263 }
264 
265 static void perf_session__release_decomp_events(struct perf_session *session)
266 {
267 	struct decomp *next, *decomp;
268 
269 	next = session->decomp;
270 	while (next) {
271 		decomp = next;
272 		next = decomp->next;
273 		munmap(decomp, decomp->mmap_len);
274 	}
278 }
279 
280 void perf_session__delete(struct perf_session *session)
281 {
282 	if (session == NULL)
283 		return;
284 	auxtrace__free(session);
285 	auxtrace_index__free(&session->auxtrace_index);
286 	perf_session__destroy_kernel_maps(session);
287 	perf_session__delete_threads(session);
288 	perf_session__release_decomp_events(session);
289 	perf_env__exit(&session->header.env);
290 	machines__exit(&session->machines);
291 	if (session->data)
292 		perf_data__close(session->data);
293 	free(session);
294 }
295 
296 static int process_event_synth_tracing_data_stub(struct perf_session *session
297 						 __maybe_unused,
298 						 union perf_event *event
299 						 __maybe_unused)
300 {
301 	dump_printf(": unhandled!\n");
302 	return 0;
303 }
304 
305 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
306 					 union perf_event *event __maybe_unused,
307 					 struct evlist **pevlist
308 					 __maybe_unused)
309 {
310 	dump_printf(": unhandled!\n");
311 	return 0;
312 }
313 
314 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
315 						 union perf_event *event __maybe_unused,
316 						 struct evlist **pevlist
317 						 __maybe_unused)
318 {
319 	if (dump_trace)
320 		perf_event__fprintf_event_update(event, stdout);
321 
322 	dump_printf(": unhandled!\n");
323 	return 0;
324 }
325 
326 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
327 				     union perf_event *event __maybe_unused,
328 				     struct perf_sample *sample __maybe_unused,
329 				     struct evsel *evsel __maybe_unused,
330 				     struct machine *machine __maybe_unused)
331 {
332 	dump_printf(": unhandled!\n");
333 	return 0;
334 }
335 
336 static int process_event_stub(struct perf_tool *tool __maybe_unused,
337 			      union perf_event *event __maybe_unused,
338 			      struct perf_sample *sample __maybe_unused,
339 			      struct machine *machine __maybe_unused)
340 {
341 	dump_printf(": unhandled!\n");
342 	return 0;
343 }
344 
345 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
346 				       union perf_event *event __maybe_unused,
347 				       struct ordered_events *oe __maybe_unused)
348 {
349 	dump_printf(": unhandled!\n");
350 	return 0;
351 }
352 
353 static int process_finished_round(struct perf_tool *tool,
354 				  union perf_event *event,
355 				  struct ordered_events *oe);
356 
357 static int skipn(int fd, off_t n)
358 {
359 	char buf[4096];
360 	ssize_t ret;
361 
362 	while (n > 0) {
363 		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
364 		if (ret <= 0)
365 			return ret;
366 		n -= ret;
367 	}
368 
369 	return 0;
370 }
371 
372 static s64 process_event_auxtrace_stub(struct perf_session *session,
373 				       union perf_event *event)
374 {
375 	dump_printf(": unhandled!\n");
376 	if (perf_data__is_pipe(session->data))
377 		skipn(perf_data__fd(session->data), event->auxtrace.size);
378 	return event->auxtrace.size;
379 }
380 
381 static int process_event_op2_stub(struct perf_session *session __maybe_unused,
382 				  union perf_event *event __maybe_unused)
383 {
384 	dump_printf(": unhandled!\n");
385 	return 0;
386 }
387 
389 static
390 int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
391 				  union perf_event *event __maybe_unused)
392 {
393 	if (dump_trace)
394 		perf_event__fprintf_thread_map(event, stdout);
395 
396 	dump_printf(": unhandled!\n");
397 	return 0;
398 }
399 
400 static
401 int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
402 			       union perf_event *event __maybe_unused)
403 {
404 	if (dump_trace)
405 		perf_event__fprintf_cpu_map(event, stdout);
406 
407 	dump_printf(": unhandled!\n");
408 	return 0;
409 }
410 
411 static
412 int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
413 				   union perf_event *event __maybe_unused)
414 {
415 	if (dump_trace)
416 		perf_event__fprintf_stat_config(event, stdout);
417 
418 	dump_printf(": unhandled!\n");
419 	return 0;
420 }
421 
422 static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
423 			     union perf_event *event)
424 {
425 	if (dump_trace)
426 		perf_event__fprintf_stat(event, stdout);
427 
428 	dump_printf(": unhandled!\n");
429 	return 0;
430 }
431 
432 static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
433 				   union perf_event *event)
434 {
435 	if (dump_trace)
436 		perf_event__fprintf_stat_round(event, stdout);
437 
438 	dump_printf(": unhandled!\n");
439 	return 0;
440 }
441 
442 static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
443 						       union perf_event *event __maybe_unused,
444 						       u64 file_offset __maybe_unused)
445 {
446 	dump_printf(": unhandled!\n");
447 	return 0;
448 }
449 
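/*
 * Replace every callback the tool left NULL with a default, so that
 * event dispatch can invoke them unconditionally. Most defaults are
 * "unhandled" stubs, while lost/lost_samples/aux and friends get the
 * real perf_event__process_*() handlers so accounting keeps working.
 */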
450 void perf_tool__fill_defaults(struct perf_tool *tool)
451 {
452 	if (tool->sample == NULL)
453 		tool->sample = process_event_sample_stub;
454 	if (tool->mmap == NULL)
455 		tool->mmap = process_event_stub;
456 	if (tool->mmap2 == NULL)
457 		tool->mmap2 = process_event_stub;
458 	if (tool->comm == NULL)
459 		tool->comm = process_event_stub;
460 	if (tool->namespaces == NULL)
461 		tool->namespaces = process_event_stub;
462 	if (tool->fork == NULL)
463 		tool->fork = process_event_stub;
464 	if (tool->exit == NULL)
465 		tool->exit = process_event_stub;
466 	if (tool->lost == NULL)
467 		tool->lost = perf_event__process_lost;
468 	if (tool->lost_samples == NULL)
469 		tool->lost_samples = perf_event__process_lost_samples;
470 	if (tool->aux == NULL)
471 		tool->aux = perf_event__process_aux;
472 	if (tool->itrace_start == NULL)
473 		tool->itrace_start = perf_event__process_itrace_start;
474 	if (tool->context_switch == NULL)
475 		tool->context_switch = perf_event__process_switch;
476 	if (tool->ksymbol == NULL)
477 		tool->ksymbol = perf_event__process_ksymbol;
478 	if (tool->bpf == NULL)
479 		tool->bpf = perf_event__process_bpf;
480 	if (tool->read == NULL)
481 		tool->read = process_event_sample_stub;
482 	if (tool->throttle == NULL)
483 		tool->throttle = process_event_stub;
484 	if (tool->unthrottle == NULL)
485 		tool->unthrottle = process_event_stub;
486 	if (tool->attr == NULL)
487 		tool->attr = process_event_synth_attr_stub;
488 	if (tool->event_update == NULL)
489 		tool->event_update = process_event_synth_event_update_stub;
490 	if (tool->tracing_data == NULL)
491 		tool->tracing_data = process_event_synth_tracing_data_stub;
492 	if (tool->build_id == NULL)
493 		tool->build_id = process_event_op2_stub;
494 	if (tool->finished_round == NULL) {
495 		if (tool->ordered_events)
496 			tool->finished_round = process_finished_round;
497 		else
498 			tool->finished_round = process_finished_round_stub;
499 	}
500 	if (tool->id_index == NULL)
501 		tool->id_index = process_event_op2_stub;
502 	if (tool->auxtrace_info == NULL)
503 		tool->auxtrace_info = process_event_op2_stub;
504 	if (tool->auxtrace == NULL)
505 		tool->auxtrace = process_event_auxtrace_stub;
506 	if (tool->auxtrace_error == NULL)
507 		tool->auxtrace_error = process_event_op2_stub;
508 	if (tool->thread_map == NULL)
509 		tool->thread_map = process_event_thread_map_stub;
510 	if (tool->cpu_map == NULL)
511 		tool->cpu_map = process_event_cpu_map_stub;
512 	if (tool->stat_config == NULL)
513 		tool->stat_config = process_event_stat_config_stub;
514 	if (tool->stat == NULL)
515 		tool->stat = process_stat_stub;
516 	if (tool->stat_round == NULL)
517 		tool->stat_round = process_stat_round_stub;
518 	if (tool->time_conv == NULL)
519 		tool->time_conv = process_event_op2_stub;
520 	if (tool->feature == NULL)
521 		tool->feature = process_event_op2_stub;
522 	if (tool->compressed == NULL)
523 		tool->compressed = perf_session__process_compressed_event;
524 }
525 
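/*
 * Byte-swap helpers used when a perf.data file was recorded on a
 * machine of the opposite endianness. The trailing sample_id_all
 * block, when present, is a run of u64s and can be swapped wholesale.
 */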
526 static void swap_sample_id_all(union perf_event *event, void *data)
527 {
528 	void *end = (void *) event + event->header.size;
529 	int size = end - data;
530 
531 	BUG_ON(size % sizeof(u64));
532 	mem_bswap_64(data, size);
533 }
534 
535 static void perf_event__all64_swap(union perf_event *event,
536 				   bool sample_id_all __maybe_unused)
537 {
538 	struct perf_event_header *hdr = &event->header;
539 	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
540 }
541 
542 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
543 {
544 	event->comm.pid = bswap_32(event->comm.pid);
545 	event->comm.tid = bswap_32(event->comm.tid);
546 
547 	if (sample_id_all) {
548 		void *data = &event->comm.comm;
549 
550 		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
551 		swap_sample_id_all(event, data);
552 	}
553 }
554 
555 static void perf_event__mmap_swap(union perf_event *event,
556 				  bool sample_id_all)
557 {
558 	event->mmap.pid	  = bswap_32(event->mmap.pid);
559 	event->mmap.tid	  = bswap_32(event->mmap.tid);
560 	event->mmap.start = bswap_64(event->mmap.start);
561 	event->mmap.len	  = bswap_64(event->mmap.len);
562 	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
563 
564 	if (sample_id_all) {
565 		void *data = &event->mmap.filename;
566 
567 		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
568 		swap_sample_id_all(event, data);
569 	}
570 }
571 
572 static void perf_event__mmap2_swap(union perf_event *event,
573 				  bool sample_id_all)
574 {
575 	event->mmap2.pid   = bswap_32(event->mmap2.pid);
576 	event->mmap2.tid   = bswap_32(event->mmap2.tid);
577 	event->mmap2.start = bswap_64(event->mmap2.start);
578 	event->mmap2.len   = bswap_64(event->mmap2.len);
579 	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
580 	event->mmap2.maj   = bswap_32(event->mmap2.maj);
581 	event->mmap2.min   = bswap_32(event->mmap2.min);
582 	event->mmap2.ino   = bswap_64(event->mmap2.ino);
583 
584 	if (sample_id_all) {
585 		void *data = &event->mmap2.filename;
586 
587 		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
588 		swap_sample_id_all(event, data);
589 	}
590 }

591 static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
592 {
593 	event->fork.pid	 = bswap_32(event->fork.pid);
594 	event->fork.tid	 = bswap_32(event->fork.tid);
595 	event->fork.ppid = bswap_32(event->fork.ppid);
596 	event->fork.ptid = bswap_32(event->fork.ptid);
597 	event->fork.time = bswap_64(event->fork.time);
598 
599 	if (sample_id_all)
600 		swap_sample_id_all(event, &event->fork + 1);
601 }
602 
603 static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
604 {
605 	event->read.pid		 = bswap_32(event->read.pid);
606 	event->read.tid		 = bswap_32(event->read.tid);
607 	event->read.value	 = bswap_64(event->read.value);
608 	event->read.time_enabled = bswap_64(event->read.time_enabled);
609 	event->read.time_running = bswap_64(event->read.time_running);
610 	event->read.id		 = bswap_64(event->read.id);
611 
612 	if (sample_id_all)
613 		swap_sample_id_all(event, &event->read + 1);
614 }
615 
616 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
617 {
618 	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
619 	event->aux.aux_size   = bswap_64(event->aux.aux_size);
620 	event->aux.flags      = bswap_64(event->aux.flags);
621 
622 	if (sample_id_all)
623 		swap_sample_id_all(event, &event->aux + 1);
624 }
625 
626 static void perf_event__itrace_start_swap(union perf_event *event,
627 					  bool sample_id_all)
628 {
629 	event->itrace_start.pid	 = bswap_32(event->itrace_start.pid);
630 	event->itrace_start.tid	 = bswap_32(event->itrace_start.tid);
631 
632 	if (sample_id_all)
633 		swap_sample_id_all(event, &event->itrace_start + 1);
634 }
635 
636 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
637 {
638 	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
639 		event->context_switch.next_prev_pid =
640 				bswap_32(event->context_switch.next_prev_pid);
641 		event->context_switch.next_prev_tid =
642 				bswap_32(event->context_switch.next_prev_tid);
643 	}
644 
645 	if (sample_id_all)
646 		swap_sample_id_all(event, &event->context_switch + 1);
647 }
648 
649 static void perf_event__throttle_swap(union perf_event *event,
650 				      bool sample_id_all)
651 {
652 	event->throttle.time	  = bswap_64(event->throttle.time);
653 	event->throttle.id	  = bswap_64(event->throttle.id);
654 	event->throttle.stream_id = bswap_64(event->throttle.stream_id);
655 
656 	if (sample_id_all)
657 		swap_sample_id_all(event, &event->throttle + 1);
658 }
659 
660 static void perf_event__namespaces_swap(union perf_event *event,
661 					bool sample_id_all)
662 {
663 	u64 i;
664 
665 	event->namespaces.pid		= bswap_32(event->namespaces.pid);
666 	event->namespaces.tid		= bswap_32(event->namespaces.tid);
667 	event->namespaces.nr_namespaces	= bswap_64(event->namespaces.nr_namespaces);
668 
669 	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
670 		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
671 
672 		ns->dev = bswap_64(ns->dev);
673 		ns->ino = bswap_64(ns->ino);
674 	}
675 
676 	if (sample_id_all)
677 		swap_sample_id_all(event, &event->namespaces.link_info[i]);
678 }
679 
680 static u8 revbyte(u8 b)
681 {
682 	int rev = (b >> 4) | ((b & 0xf) << 4);
683 	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
684 	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
685 	return (u8) rev;
686 }
687 
688 /*
689  * XXX this is a hack in an attempt to carry the flags bitfield
690  * through endian village. ABI says:
691  *
692  * Bit-fields are allocated from right to left (least to most significant)
693  * on little-endian implementations and from left to right (most to least
694  * significant) on big-endian implementations.
695  *
696  * The above seems to be byte specific, so we need to reverse each
697  * byte of the bitfield. 'Internet' also says this might be implementation
698  * specific and we probably need a proper fix and carry perf_event_attr
699  * bitfield flags in a separate data file FEAT_ section. Though this seems
700  * to work for now.
701  */
702 static void swap_bitfield(u8 *p, unsigned len)
703 {
704 	unsigned i;
705 
706 	for (i = 0; i < len; i++) {
707 		*p = revbyte(*p);
708 		p++;
709 	}
710 }
711 
712 /* exported for swapping attributes in file header */
713 void perf_event__attr_swap(struct perf_event_attr *attr)
714 {
715 	attr->type		= bswap_32(attr->type);
716 	attr->size		= bswap_32(attr->size);
717 
718 #define bswap_safe(f, n) 					\
719 	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
720 		       sizeof(attr->f) * (n)))
721 #define bswap_field(f, sz) 			\
722 do { 						\
723 	if (bswap_safe(f, 0))			\
724 		attr->f = bswap_##sz(attr->f);	\
725 } while(0)
726 #define bswap_field_16(f) bswap_field(f, 16)
727 #define bswap_field_32(f) bswap_field(f, 32)
728 #define bswap_field_64(f) bswap_field(f, 64)
729 
730 	bswap_field_64(config);
731 	bswap_field_64(sample_period);
732 	bswap_field_64(sample_type);
733 	bswap_field_64(read_format);
734 	bswap_field_32(wakeup_events);
735 	bswap_field_32(bp_type);
736 	bswap_field_64(bp_addr);
737 	bswap_field_64(bp_len);
738 	bswap_field_64(branch_sample_type);
739 	bswap_field_64(sample_regs_user);
740 	bswap_field_32(sample_stack_user);
741 	bswap_field_32(aux_watermark);
742 	bswap_field_16(sample_max_stack);
743 
744 	/*
745 	 * After read_format are bitfields. Check read_format because
746 	 * we are unable to use offsetof on bitfield.
747 	 */
748 	if (bswap_safe(read_format, 1))
749 		swap_bitfield((u8 *) (&attr->read_format + 1),
750 			      sizeof(u64));
751 #undef bswap_field_64
752 #undef bswap_field_32
753 #undef bswap_field
754 #undef bswap_safe
755 }
756 
757 static void perf_event__hdr_attr_swap(union perf_event *event,
758 				      bool sample_id_all __maybe_unused)
759 {
760 	size_t size;
761 
762 	perf_event__attr_swap(&event->attr.attr);
763 
764 	size = event->header.size;
765 	size -= (void *)&event->attr.id - (void *)event;
766 	mem_bswap_64(event->attr.id, size);
767 }
768 
769 static void perf_event__event_update_swap(union perf_event *event,
770 					  bool sample_id_all __maybe_unused)
771 {
772 	event->event_update.type = bswap_64(event->event_update.type);
773 	event->event_update.id   = bswap_64(event->event_update.id);
774 }
775 
776 static void perf_event__event_type_swap(union perf_event *event,
777 					bool sample_id_all __maybe_unused)
778 {
779 	event->event_type.event_type.event_id =
780 		bswap_64(event->event_type.event_type.event_id);
781 }
782 
783 static void perf_event__tracing_data_swap(union perf_event *event,
784 					  bool sample_id_all __maybe_unused)
785 {
786 	event->tracing_data.size = bswap_32(event->tracing_data.size);
787 }
788 
789 static void perf_event__auxtrace_info_swap(union perf_event *event,
790 					   bool sample_id_all __maybe_unused)
791 {
792 	size_t size;
793 
794 	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
795 
796 	size = event->header.size;
797 	size -= (void *)&event->auxtrace_info.priv - (void *)event;
798 	mem_bswap_64(event->auxtrace_info.priv, size);
799 }
800 
801 static void perf_event__auxtrace_swap(union perf_event *event,
802 				      bool sample_id_all __maybe_unused)
803 {
804 	event->auxtrace.size      = bswap_64(event->auxtrace.size);
805 	event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
806 	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
807 	event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
808 	event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
809 	event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
810 }
811 
812 static void perf_event__auxtrace_error_swap(union perf_event *event,
813 					    bool sample_id_all __maybe_unused)
814 {
815 	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
816 	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
817 	event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
818 	event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
819 	event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
820 	event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
821 	event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
822 	if (event->auxtrace_error.fmt)
823 		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
824 }
825 
826 static void perf_event__thread_map_swap(union perf_event *event,
827 					bool sample_id_all __maybe_unused)
828 {
829 	unsigned i;
830 
831 	event->thread_map.nr = bswap_64(event->thread_map.nr);
832 
833 	for (i = 0; i < event->thread_map.nr; i++)
834 		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
835 }
836 
837 static void perf_event__cpu_map_swap(union perf_event *event,
838 				     bool sample_id_all __maybe_unused)
839 {
840 	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
841 	struct cpu_map_entries *cpus;
842 	struct perf_record_record_cpu_map *mask;
843 	unsigned i;
844 
845 	data->type = bswap_64(data->type);
846 
847 	switch (data->type) {
848 	case PERF_CPU_MAP__CPUS:
849 		cpus = (struct cpu_map_entries *)data->data;
850 
851 		cpus->nr = bswap_16(cpus->nr);
852 
853 		for (i = 0; i < cpus->nr; i++)
854 			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
855 		break;
856 	case PERF_CPU_MAP__MASK:
857 		mask = (struct perf_record_record_cpu_map *)data->data;
858 
859 		mask->nr = bswap_16(mask->nr);
860 		mask->long_size = bswap_16(mask->long_size);
861 
862 		switch (mask->long_size) {
863 		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
864 		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
865 		default:
866 			pr_err("cpu_map swap: unsupported long size\n");
867 		}
		break;
868 	default:
869 		break;
870 	}
871 }
872 
873 static void perf_event__stat_config_swap(union perf_event *event,
874 					 bool sample_id_all __maybe_unused)
875 {
876 	u64 size;
877 
878 	size  = event->stat_config.nr * sizeof(event->stat_config.data[0]);
879 	size += 1; /* nr item itself */
880 	mem_bswap_64(&event->stat_config.nr, size);
881 }
882 
883 static void perf_event__stat_swap(union perf_event *event,
884 				  bool sample_id_all __maybe_unused)
885 {
886 	event->stat.id     = bswap_64(event->stat.id);
887 	event->stat.thread = bswap_32(event->stat.thread);
888 	event->stat.cpu    = bswap_32(event->stat.cpu);
889 	event->stat.val    = bswap_64(event->stat.val);
890 	event->stat.ena    = bswap_64(event->stat.ena);
891 	event->stat.run    = bswap_64(event->stat.run);
892 }
893 
894 static void perf_event__stat_round_swap(union perf_event *event,
895 					bool sample_id_all __maybe_unused)
896 {
897 	event->stat_round.type = bswap_64(event->stat_round.type);
898 	event->stat_round.time = bswap_64(event->stat_round.time);
899 }
900 
901 typedef void (*perf_event__swap_op)(union perf_event *event,
902 				    bool sample_id_all);
903 
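/*
 * Per-record-type swap handlers, indexed by PERF_RECORD_* type. A NULL
 * entry means no dedicated swap handler is hooked up here for that
 * type.
 */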
904 static perf_event__swap_op perf_event__swap_ops[] = {
905 	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
906 	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
907 	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
908 	[PERF_RECORD_FORK]		  = perf_event__task_swap,
909 	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
910 	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
911 	[PERF_RECORD_READ]		  = perf_event__read_swap,
912 	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
913 	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
914 	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
915 	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
916 	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
917 	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
918 	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
919 	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
920 	[PERF_RECORD_NAMESPACES]	  = perf_event__namespaces_swap,
921 	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
922 	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
923 	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
924 	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
925 	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
926 	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
927 	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
928 	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
929 	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
930 	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
931 	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
932 	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
933 	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
934 	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
935 	[PERF_RECORD_TIME_CONV]		  = perf_event__all64_swap,
936 	[PERF_RECORD_HEADER_MAX]	  = NULL,
937 };
938 
939 /*
940  * When perf record finishes a pass on every buffer, it records this pseudo
941  * event.
942  * We record the max timestamp t found in the pass n.
943  * Assuming these timestamps are monotonic across cpus, we know that if
944  * a buffer still has events with timestamps below t, they will all be
945  * available and then read in the pass n + 1.
946  * Hence when we start to read the pass n + 2, we can safely flush all
947  * events with timestamps below t.
948  *
949  *    ============ PASS n =================
950  *       CPU 0         |   CPU 1
951  *                     |
952  *    cnt1 timestamps  |   cnt2 timestamps
953  *          1          |         2
954  *          2          |         3
955  *          -          |         4  <--- max recorded
956  *
957  *    ============ PASS n + 1 ==============
958  *       CPU 0         |   CPU 1
959  *                     |
960  *    cnt1 timestamps  |   cnt2 timestamps
961  *          3          |         5
962  *          4          |         6
963  *          5          |         7 <---- max recorded
964  *
965  *      Flush all events below timestamp 4
966  *
967  *    ============ PASS n + 2 ==============
968  *       CPU 0         |   CPU 1
969  *                     |
970  *    cnt1 timestamps  |   cnt2 timestamps
971  *          6          |         8
972  *          7          |         9
973  *          -          |         10
974  *
975  *      Flush all events below timestamp 7
976  *      etc...
977  */
978 static int process_finished_round(struct perf_tool *tool __maybe_unused,
979 				  union perf_event *event __maybe_unused,
980 				  struct ordered_events *oe)
981 {
982 	if (dump_trace)
983 		fprintf(stdout, "\n");
984 	return ordered_events__flush(oe, OE_FLUSH__ROUND);
985 }
986 
987 int perf_session__queue_event(struct perf_session *s, union perf_event *event,
988 			      u64 timestamp, u64 file_offset)
989 {
990 	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
991 }
992 
993 static void callchain__lbr_callstack_printf(struct perf_sample *sample)
994 {
995 	struct ip_callchain *callchain = sample->callchain;
996 	struct branch_stack *lbr_stack = sample->branch_stack;
997 	u64 kernel_callchain_nr = callchain->nr;
998 	unsigned int i;
999 
1000 	for (i = 0; i < kernel_callchain_nr; i++) {
1001 		if (callchain->ips[i] == PERF_CONTEXT_USER)
1002 			break;
1003 	}
1004 
1005 	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
1006 		u64 total_nr;
1007 		/*
1008 		 * The LBR callstack carries only the user call chain;
1009 		 * i is the number of kernel call chain entries and the
1010 		 * extra 1 accounts for PERF_CONTEXT_USER.
1011 		 *
1012 		 * The user call chain is stored in LBR registers.
1013 		 * LBRs are register pairs: the caller is stored
1014 		 * in the "from" register, while the callee is stored
1015 		 * in the "to" register.
1016 		 * For example, given a call stack
1017 		 * "A"->"B"->"C"->"D",
1018 		 * the LBR registers will record
1019 		 * "C"->"D", "B"->"C", "A"->"B".
1020 		 * So only the first "to" register and all "from"
1021 		 * registers are needed to reconstruct the whole stack.
1022 		 */
1023 		total_nr = i + 1 + lbr_stack->nr + 1;
1024 		kernel_callchain_nr = i + 1;
1025 
1026 		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
1027 
1028 		for (i = 0; i < kernel_callchain_nr; i++)
1029 			printf("..... %2d: %016" PRIx64 "\n",
1030 			       i, callchain->ips[i]);
1031 
1032 		printf("..... %2d: %016" PRIx64 "\n",
1033 		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
1034 		for (i = 0; i < lbr_stack->nr; i++)
1035 			printf("..... %2d: %016" PRIx64 "\n",
1036 			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
1037 	}
1038 }
1039 
1040 static void callchain__printf(struct evsel *evsel,
1041 			      struct perf_sample *sample)
1042 {
1043 	unsigned int i;
1044 	struct ip_callchain *callchain = sample->callchain;
1045 
1046 	if (perf_evsel__has_branch_callstack(evsel))
1047 		callchain__lbr_callstack_printf(sample);
1048 
1049 	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
1050 
1051 	for (i = 0; i < callchain->nr; i++)
1052 		printf("..... %2d: %016" PRIx64 "\n",
1053 		       i, callchain->ips[i]);
1054 }
1055 
1056 static void branch_stack__printf(struct perf_sample *sample, bool callstack)
1057 {
1058 	uint64_t i;
1059 
1060 	printf("%s: nr:%" PRIu64 "\n",
1061 		!callstack ? "... branch stack" : "... branch callstack",
1062 		sample->branch_stack->nr);
1063 
1064 	for (i = 0; i < sample->branch_stack->nr; i++) {
1065 		struct branch_entry *e = &sample->branch_stack->entries[i];
1066 
1067 		if (!callstack) {
1068 			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
1069 				i, e->from, e->to,
1070 				(unsigned short)e->flags.cycles,
1071 				e->flags.mispred ? "M" : " ",
1072 				e->flags.predicted ? "P" : " ",
1073 				e->flags.abort ? "A" : " ",
1074 				e->flags.in_tx ? "T" : " ",
1075 				(unsigned)e->flags.reserved);
1076 		} else {
1077 			printf("..... %2"PRIu64": %016" PRIx64 "\n",
1078 				i, i > 0 ? e->from : e->to);
1079 		}
1080 	}
1081 }
1082 
1083 static void regs_dump__printf(u64 mask, u64 *regs)
1084 {
1085 	unsigned rid, i = 0;
1086 
1087 	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
1088 		u64 val = regs[i++];
1089 
1090 		printf(".... %-5s 0x%" PRIx64 "\n",
1091 		       perf_reg_name(rid), val);
1092 	}
1093 }
1094 
1095 static const char *regs_abi[] = {
1096 	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
1097 	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
1098 	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
1099 };
1100 
1101 static inline const char *regs_dump_abi(struct regs_dump *d)
1102 {
1103 	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
1104 		return "unknown";
1105 
1106 	return regs_abi[d->abi];
1107 }
1108 
1109 static void regs__printf(const char *type, struct regs_dump *regs)
1110 {
1111 	u64 mask = regs->mask;
1112 
1113 	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
1114 	       type,
1115 	       mask,
1116 	       regs_dump_abi(regs));
1117 
1118 	regs_dump__printf(mask, regs->regs);
1119 }
1120 
1121 static void regs_user__printf(struct perf_sample *sample)
1122 {
1123 	struct regs_dump *user_regs = &sample->user_regs;
1124 
1125 	if (user_regs->regs)
1126 		regs__printf("user", user_regs);
1127 }
1128 
1129 static void regs_intr__printf(struct perf_sample *sample)
1130 {
1131 	struct regs_dump *intr_regs = &sample->intr_regs;
1132 
1133 	if (intr_regs->regs)
1134 		regs__printf("intr", intr_regs);
1135 }
1136 
1137 static void stack_user__printf(struct stack_dump *dump)
1138 {
1139 	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
1140 	       dump->size, dump->offset);
1141 }
1142 
1143 static void perf_evlist__print_tstamp(struct evlist *evlist,
1144 				       union perf_event *event,
1145 				       struct perf_sample *sample)
1146 {
1147 	u64 sample_type = __perf_evlist__combined_sample_type(evlist);
1148 
1149 	if (event->header.type != PERF_RECORD_SAMPLE &&
1150 	    !perf_evlist__sample_id_all(evlist)) {
1151 		fputs("-1 -1 ", stdout);
1152 		return;
1153 	}
1154 
1155 	if ((sample_type & PERF_SAMPLE_CPU))
1156 		printf("%u ", sample->cpu);
1157 
1158 	if (sample_type & PERF_SAMPLE_TIME)
1159 		printf("%" PRIu64 " ", sample->time);
1160 }
1161 
1162 static void sample_read__printf(struct perf_sample *sample, u64 read_format)
1163 {
1164 	printf("... sample_read:\n");
1165 
1166 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1167 		printf("...... time enabled %016" PRIx64 "\n",
1168 		       sample->read.time_enabled);
1169 
1170 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1171 		printf("...... time running %016" PRIx64 "\n",
1172 		       sample->read.time_running);
1173 
1174 	if (read_format & PERF_FORMAT_GROUP) {
1175 		u64 i;
1176 
1177 		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
1178 
1179 		for (i = 0; i < sample->read.group.nr; i++) {
1180 			struct sample_read_value *value;
1181 
1182 			value = &sample->read.group.values[i];
1183 			printf("..... id %016" PRIx64
1184 			       ", value %016" PRIx64 "\n",
1185 			       value->id, value->value);
1186 		}
1187 	} else
1188 		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
1189 			sample->read.one.id, sample->read.one.value);
1190 }
1191 
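/*
 * With -D/--dump-raw-trace, print each event's file offset, size and
 * PERF_RECORD_* name, preceded by a cpu/timestamp prefix when the
 * sample carries one.
 */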
1192 static void dump_event(struct evlist *evlist, union perf_event *event,
1193 		       u64 file_offset, struct perf_sample *sample)
1194 {
1195 	if (!dump_trace)
1196 		return;
1197 
1198 	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
1199 	       file_offset, event->header.size, event->header.type);
1200 
1201 	trace_event(event);
1202 	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
1203 		evlist->trace_event_sample_raw(evlist, event, sample);
1204 
1205 	if (sample)
1206 		perf_evlist__print_tstamp(evlist, event, sample);
1207 
1208 	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
1209 	       event->header.size, perf_event__name(event->header.type));
1210 }
1211 
1212 static void dump_sample(struct evsel *evsel, union perf_event *event,
1213 			struct perf_sample *sample)
1214 {
1215 	u64 sample_type;
1216 
1217 	if (!dump_trace)
1218 		return;
1219 
1220 	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
1221 	       event->header.misc, sample->pid, sample->tid, sample->ip,
1222 	       sample->period, sample->addr);
1223 
1224 	sample_type = evsel->core.attr.sample_type;
1225 
1226 	if (evsel__has_callchain(evsel))
1227 		callchain__printf(evsel, sample);
1228 
1229 	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
1230 		branch_stack__printf(sample, perf_evsel__has_branch_callstack(evsel));
1231 
1232 	if (sample_type & PERF_SAMPLE_REGS_USER)
1233 		regs_user__printf(sample);
1234 
1235 	if (sample_type & PERF_SAMPLE_REGS_INTR)
1236 		regs_intr__printf(sample);
1237 
1238 	if (sample_type & PERF_SAMPLE_STACK_USER)
1239 		stack_user__printf(&sample->user_stack);
1240 
1241 	if (sample_type & PERF_SAMPLE_WEIGHT)
1242 		printf("... weight: %" PRIu64 "\n", sample->weight);
1243 
1244 	if (sample_type & PERF_SAMPLE_DATA_SRC)
1245 		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
1246 
1247 	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1248 		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
1249 
1250 	if (sample_type & PERF_SAMPLE_TRANSACTION)
1251 		printf("... transaction: %" PRIx64 "\n", sample->transaction);
1252 
1253 	if (sample_type & PERF_SAMPLE_READ)
1254 		sample_read__printf(sample, evsel->core.attr.read_format);
1255 }
1256 
1257 static void dump_read(struct evsel *evsel, union perf_event *event)
1258 {
1259 	struct perf_record_read *read_event = &event->read;
1260 	u64 read_format;
1261 
1262 	if (!dump_trace)
1263 		return;
1264 
1265 	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
1266 	       perf_evsel__name(evsel),
1267 	       event->read.value);
1268 
1269 	if (!evsel)
1270 		return;
1271 
1272 	read_format = evsel->core.attr.read_format;
1273 
1274 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1275 		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);
1276 
1277 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1278 		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);
1279 
1280 	if (read_format & PERF_FORMAT_ID)
1281 		printf("... id           : %" PRI_lu64 "\n", read_event->id);
1282 }
1283 
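/*
 * Guest samples are routed to the guest machine looked up by pid (the
 * default guest if none matches); everything else belongs to the host
 * machine.
 */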
1284 static struct machine *machines__find_for_cpumode(struct machines *machines,
1285 					       union perf_event *event,
1286 					       struct perf_sample *sample)
1287 {
1288 	struct machine *machine;
1289 
1290 	if (perf_guest &&
1291 	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1292 	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1293 		u32 pid;
1294 
1295 		if (event->header.type == PERF_RECORD_MMAP
1296 		    || event->header.type == PERF_RECORD_MMAP2)
1297 			pid = event->mmap.pid;
1298 		else
1299 			pid = sample->pid;
1300 
1301 		machine = machines__find(machines, pid);
1302 		if (!machine)
1303 			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
1304 		return machine;
1305 	}
1306 
1307 	return &machines->host;
1308 }
1309 
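/*
 * PERF_SAMPLE_READ samples carry raw counter values instead of a
 * period, so derive the period as the delta against the last value
 * seen for this stream id, and skip delivery entirely when that
 * period is zero.
 */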
1310 static int deliver_sample_value(struct evlist *evlist,
1311 				struct perf_tool *tool,
1312 				union perf_event *event,
1313 				struct perf_sample *sample,
1314 				struct sample_read_value *v,
1315 				struct machine *machine)
1316 {
1317 	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
1318 
1319 	if (sid) {
1320 		sample->id     = v->id;
1321 		sample->period = v->value - sid->period;
1322 		sid->period    = v->value;
1323 	}
1324 
1325 	if (!sid || sid->evsel == NULL) {
1326 		++evlist->stats.nr_unknown_id;
1327 		return 0;
1328 	}
1329 
1330 	/*
1331 	 * There's no reason to deliver sample
1332 	 * for zero period, bail out.
1333 	 */
1334 	if (!sample->period)
1335 		return 0;
1336 
1337 	return tool->sample(tool, event, sample, sid->evsel, machine);
1338 }
1339 
1340 static int deliver_sample_group(struct evlist *evlist,
1341 				struct perf_tool *tool,
1342 				union  perf_event *event,
1343 				struct perf_sample *sample,
1344 				struct machine *machine)
1345 {
1346 	int ret = -EINVAL;
1347 	u64 i;
1348 
1349 	for (i = 0; i < sample->read.group.nr; i++) {
1350 		ret = deliver_sample_value(evlist, tool, event, sample,
1351 					   &sample->read.group.values[i],
1352 					   machine);
1353 		if (ret)
1354 			break;
1355 	}
1356 
1357 	return ret;
1358 }
1359 
1360 static int
1361 perf_evlist__deliver_sample(struct evlist *evlist,
1362 			     struct perf_tool *tool,
1363 			     union  perf_event *event,
1364 			     struct perf_sample *sample,
1365 			     struct evsel *evsel,
1366 			     struct machine *machine)
1367 {
1368 	/* We know evsel != NULL. */
1369 	u64 sample_type = evsel->core.attr.sample_type;
1370 	u64 read_format = evsel->core.attr.read_format;
1371 
1372 	/* Standard sample delivery. */
1373 	if (!(sample_type & PERF_SAMPLE_READ))
1374 		return tool->sample(tool, event, sample, evsel, machine);
1375 
1376 	/* For PERF_SAMPLE_READ we have either single or group mode. */
1377 	if (read_format & PERF_FORMAT_GROUP)
1378 		return deliver_sample_group(evlist, tool, event, sample,
1379 					    machine);
1380 	else
1381 		return deliver_sample_value(evlist, tool, event, sample,
1382 					    &sample->read.one, machine);
1383 }
1384 
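/*
 * Central dispatch for kernel events: route each PERF_RECORD_* type to
 * the matching tool callback, updating the lost/aux statistics on the
 * way through.
 */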
1385 static int machines__deliver_event(struct machines *machines,
1386 				   struct evlist *evlist,
1387 				   union perf_event *event,
1388 				   struct perf_sample *sample,
1389 				   struct perf_tool *tool, u64 file_offset)
1390 {
1391 	struct evsel *evsel;
1392 	struct machine *machine;
1393 
1394 	dump_event(evlist, event, file_offset, sample);
1395 
1396 	evsel = perf_evlist__id2evsel(evlist, sample->id);
1397 
1398 	machine = machines__find_for_cpumode(machines, event, sample);
1399 
1400 	switch (event->header.type) {
1401 	case PERF_RECORD_SAMPLE:
1402 		if (evsel == NULL) {
1403 			++evlist->stats.nr_unknown_id;
1404 			return 0;
1405 		}
1406 		dump_sample(evsel, event, sample);
1407 		if (machine == NULL) {
1408 			++evlist->stats.nr_unprocessable_samples;
1409 			return 0;
1410 		}
1411 		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1412 	case PERF_RECORD_MMAP:
1413 		return tool->mmap(tool, event, sample, machine);
1414 	case PERF_RECORD_MMAP2:
1415 		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1416 			++evlist->stats.nr_proc_map_timeout;
1417 		return tool->mmap2(tool, event, sample, machine);
1418 	case PERF_RECORD_COMM:
1419 		return tool->comm(tool, event, sample, machine);
1420 	case PERF_RECORD_NAMESPACES:
1421 		return tool->namespaces(tool, event, sample, machine);
1422 	case PERF_RECORD_FORK:
1423 		return tool->fork(tool, event, sample, machine);
1424 	case PERF_RECORD_EXIT:
1425 		return tool->exit(tool, event, sample, machine);
1426 	case PERF_RECORD_LOST:
1427 		if (tool->lost == perf_event__process_lost)
1428 			evlist->stats.total_lost += event->lost.lost;
1429 		return tool->lost(tool, event, sample, machine);
1430 	case PERF_RECORD_LOST_SAMPLES:
1431 		if (tool->lost_samples == perf_event__process_lost_samples)
1432 			evlist->stats.total_lost_samples += event->lost_samples.lost;
1433 		return tool->lost_samples(tool, event, sample, machine);
1434 	case PERF_RECORD_READ:
1435 		dump_read(evsel, event);
1436 		return tool->read(tool, event, sample, evsel, machine);
1437 	case PERF_RECORD_THROTTLE:
1438 		return tool->throttle(tool, event, sample, machine);
1439 	case PERF_RECORD_UNTHROTTLE:
1440 		return tool->unthrottle(tool, event, sample, machine);
1441 	case PERF_RECORD_AUX:
1442 		if (tool->aux == perf_event__process_aux) {
1443 			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1444 				evlist->stats.total_aux_lost += 1;
1445 			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1446 				evlist->stats.total_aux_partial += 1;
1447 		}
1448 		return tool->aux(tool, event, sample, machine);
1449 	case PERF_RECORD_ITRACE_START:
1450 		return tool->itrace_start(tool, event, sample, machine);
1451 	case PERF_RECORD_SWITCH:
1452 	case PERF_RECORD_SWITCH_CPU_WIDE:
1453 		return tool->context_switch(tool, event, sample, machine);
1454 	case PERF_RECORD_KSYMBOL:
1455 		return tool->ksymbol(tool, event, sample, machine);
1456 	case PERF_RECORD_BPF_EVENT:
1457 		return tool->bpf(tool, event, sample, machine);
1458 	default:
1459 		++evlist->stats.nr_unknown_events;
1460 		return -1;
1461 	}
1462 }
1463 
1464 static int perf_session__deliver_event(struct perf_session *session,
1465 				       union perf_event *event,
1466 				       struct perf_tool *tool,
1467 				       u64 file_offset)
1468 {
1469 	struct perf_sample sample;
1470 	int ret;
1471 
1472 	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
1473 	if (ret) {
1474 		pr_err("Can't parse sample, err = %d\n", ret);
1475 		return ret;
1476 	}
1477 
1478 	ret = auxtrace__process_event(session, event, &sample, tool);
1479 	if (ret < 0)
1480 		return ret;
1481 	if (ret > 0)
1482 		return 0;
1483 
1484 	return machines__deliver_event(&session->machines, session->evlist,
1485 				       event, &sample, tool, file_offset);
1486 }
1487 
1488 static s64 perf_session__process_user_event(struct perf_session *session,
1489 					    union perf_event *event,
1490 					    u64 file_offset)
1491 {
1492 	struct ordered_events *oe = &session->ordered_events;
1493 	struct perf_tool *tool = session->tool;
1494 	struct perf_sample sample = { .time = 0, };
1495 	int fd = perf_data__fd(session->data);
1496 	int err;
1497 
1498 	if (event->header.type != PERF_RECORD_COMPRESSED ||
1499 	    tool->compressed == perf_session__process_compressed_event_stub)
1500 		dump_event(session->evlist, event, file_offset, &sample);
1501 
1502 	/* These events are processed right away */
1503 	switch (event->header.type) {
1504 	case PERF_RECORD_HEADER_ATTR:
1505 		err = tool->attr(tool, event, &session->evlist);
1506 		if (err == 0) {
1507 			perf_session__set_id_hdr_size(session);
1508 			perf_session__set_comm_exec(session);
1509 		}
1510 		return err;
1511 	case PERF_RECORD_EVENT_UPDATE:
1512 		return tool->event_update(tool, event, &session->evlist);
1513 	case PERF_RECORD_HEADER_EVENT_TYPE:
1514 		/*
1515 		 * Deprecated, but we need to handle it for the sake
1516 		 * of old data files created in pipe mode.
1517 		 */
1518 		return 0;
1519 	case PERF_RECORD_HEADER_TRACING_DATA:
1520 		/* setup for reading amidst mmap */
1521 		lseek(fd, file_offset, SEEK_SET);
1522 		return tool->tracing_data(session, event);
1523 	case PERF_RECORD_HEADER_BUILD_ID:
1524 		return tool->build_id(session, event);
1525 	case PERF_RECORD_FINISHED_ROUND:
1526 		return tool->finished_round(tool, event, oe);
1527 	case PERF_RECORD_ID_INDEX:
1528 		return tool->id_index(session, event);
1529 	case PERF_RECORD_AUXTRACE_INFO:
1530 		return tool->auxtrace_info(session, event);
1531 	case PERF_RECORD_AUXTRACE:
1532 		/* setup for reading amidst mmap */
1533 		lseek(fd, file_offset + event->header.size, SEEK_SET);
1534 		return tool->auxtrace(session, event);
1535 	case PERF_RECORD_AUXTRACE_ERROR:
1536 		perf_session__auxtrace_error_inc(session, event);
1537 		return tool->auxtrace_error(session, event);
1538 	case PERF_RECORD_THREAD_MAP:
1539 		return tool->thread_map(session, event);
1540 	case PERF_RECORD_CPU_MAP:
1541 		return tool->cpu_map(session, event);
1542 	case PERF_RECORD_STAT_CONFIG:
1543 		return tool->stat_config(session, event);
1544 	case PERF_RECORD_STAT:
1545 		return tool->stat(session, event);
1546 	case PERF_RECORD_STAT_ROUND:
1547 		return tool->stat_round(session, event);
1548 	case PERF_RECORD_TIME_CONV:
1549 		session->time_conv = event->time_conv;
1550 		return tool->time_conv(session, event);
1551 	case PERF_RECORD_HEADER_FEATURE:
1552 		return tool->feature(session, event);
1553 	case PERF_RECORD_COMPRESSED:
1554 		err = tool->compressed(session, event, file_offset);
1555 		if (err)
1556 			dump_event(session->evlist, event, file_offset, &sample);
1557 		return err;
1558 	default:
1559 		return -EINVAL;
1560 	}
1561 }
1562 
1563 int perf_session__deliver_synth_event(struct perf_session *session,
1564 				      union perf_event *event,
1565 				      struct perf_sample *sample)
1566 {
1567 	struct evlist *evlist = session->evlist;
1568 	struct perf_tool *tool = session->tool;
1569 
1570 	events_stats__inc(&evlist->stats, event->header.type);
1571 
1572 	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1573 		return perf_session__process_user_event(session, event, 0);
1574 
1575 	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
1576 }
1577 
1578 static void event_swap(union perf_event *event, bool sample_id_all)
1579 {
1580 	perf_event__swap_op swap;
1581 
1582 	swap = perf_event__swap_ops[event->header.type];
1583 	if (swap)
1584 		swap(event, sample_id_all);
1585 }
1586 
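/*
 * Fetch a single event from an arbitrary file offset: use the single
 * mmap directly when possible, otherwise seek and read into the
 * caller's buffer, byte-swapping as needed.
 */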
1587 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1588 			     void *buf, size_t buf_sz,
1589 			     union perf_event **event_ptr,
1590 			     struct perf_sample *sample)
1591 {
1592 	union perf_event *event;
1593 	size_t hdr_sz, rest;
1594 	int fd;
1595 
1596 	if (session->one_mmap && !session->header.needs_swap) {
1597 		event = file_offset - session->one_mmap_offset +
1598 			session->one_mmap_addr;
1599 		goto out_parse_sample;
1600 	}
1601 
1602 	if (perf_data__is_pipe(session->data))
1603 		return -1;
1604 
1605 	fd = perf_data__fd(session->data);
1606 	hdr_sz = sizeof(struct perf_event_header);
1607 
1608 	if (buf_sz < hdr_sz)
1609 		return -1;
1610 
1611 	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1612 	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1613 		return -1;
1614 
1615 	event = (union perf_event *)buf;
1616 
1617 	if (session->header.needs_swap)
1618 		perf_event_header__bswap(&event->header);
1619 
1620 	if (event->header.size < hdr_sz || event->header.size > buf_sz)
1621 		return -1;
1622 
1623 	buf += hdr_sz;
	rest = event->header.size - hdr_sz;
1624 
1625 	if (readn(fd, buf, rest) != (ssize_t)rest)
1626 		return -1;
1627 
1628 	if (session->header.needs_swap)
1629 		event_swap(event, perf_evlist__sample_id_all(session->evlist));
1630 
1631 out_parse_sample:
1632 
1633 	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1634 	    perf_evlist__parse_sample(session->evlist, event, sample))
1635 		return -1;
1636 
1637 	*event_ptr = event;
1638 
1639 	return 0;
1640 }
1641 
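/*
 * Per-event entry point: byte-swap if the file needs it, account the
 * event, hand user events straight to their handlers, and queue the
 * rest for ordered delivery, falling back to immediate delivery when
 * ordering is off or the queue cannot take the event.
 */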
1642 static s64 perf_session__process_event(struct perf_session *session,
1643 				       union perf_event *event, u64 file_offset)
1644 {
1645 	struct evlist *evlist = session->evlist;
1646 	struct perf_tool *tool = session->tool;
1647 	int ret;
1648 
1649 	if (session->header.needs_swap)
1650 		event_swap(event, perf_evlist__sample_id_all(evlist));
1651 
1652 	if (event->header.type >= PERF_RECORD_HEADER_MAX)
1653 		return -EINVAL;
1654 
1655 	events_stats__inc(&evlist->stats, event->header.type);
1656 
1657 	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1658 		return perf_session__process_user_event(session, event, file_offset);
1659 
1660 	if (tool->ordered_events) {
1661 		u64 timestamp = -1ULL;
1662 
1663 		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
1664 		if (ret && ret != -1)
1665 			return ret;
1666 
1667 		ret = perf_session__queue_event(session, event, timestamp, file_offset);
1668 		if (ret != -ETIME)
1669 			return ret;
1670 	}
1671 
1672 	return perf_session__deliver_event(session, event, tool, file_offset);
1673 }
1674 
1675 void perf_event_header__bswap(struct perf_event_header *hdr)
1676 {
1677 	hdr->type = bswap_32(hdr->type);
1678 	hdr->misc = bswap_16(hdr->misc);
1679 	hdr->size = bswap_16(hdr->size);
1680 }
1681 
1682 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1683 {
1684 	return machine__findnew_thread(&session->machines.host, -1, pid);
1685 }
1686 
1687 /*
1688  * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1689  * So here a single thread is created for that, but actually there is a separate
1690  * idle task per cpu, so there should be one 'struct thread' per cpu, but there
1691  * is only 1. That causes problems for some tools, requiring workarounds. For
1692  * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
1693  */
1694 int perf_session__register_idle_thread(struct perf_session *session)
1695 {
1696 	struct thread *thread;
1697 	int err = 0;
1698 
1699 	thread = machine__findnew_thread(&session->machines.host, 0, 0);
1700 	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1701 		pr_err("problem inserting idle task.\n");
1702 		err = -1;
1703 	}
1704 
1705 	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1706 		pr_err("problem inserting idle task.\n");
1707 		err = -1;
1708 	}
1709 
1710 	/* machine__findnew_thread() got the thread, so put it */
1711 	thread__put(thread);
1712 	return err;
1713 }
1714 
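/*
 * Backward ring buffers (attr.write_backward) are read newest-first, so
 * out-of-order events are expected there; only warn when no evsel in
 * the session records backward.
 */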
1715 static void
1716 perf_session__warn_order(const struct perf_session *session)
1717 {
1718 	const struct ordered_events *oe = &session->ordered_events;
1719 	struct evsel *evsel;
1720 	bool should_warn = true;
1721 
1722 	evlist__for_each_entry(session->evlist, evsel) {
1723 		if (evsel->core.attr.write_backward)
1724 			should_warn = false;
1725 	}
1726 
1727 	if (!should_warn)
1728 		return;
1729 	if (oe->nr_unordered_events != 0)
1730 		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1731 }
1732 
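/*
 * Post-processing sanity warnings.  Each check first verifies that the
 * tool kept the stock handler for the record type: a tool overriding,
 * say, ->lost is assumed to account for the loss itself.  Note that
 * nr_events[0] holds the total count across all types (see
 * events_stats__inc()), which is what "Processed %d events" prints.
 */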
1733 static void perf_session__warn_about_errors(const struct perf_session *session)
1734 {
1735 	const struct events_stats *stats = &session->evlist->stats;
1736 
1737 	if (session->tool->lost == perf_event__process_lost &&
1738 	    stats->nr_events[PERF_RECORD_LOST] != 0) {
1739 		ui__warning("Processed %d events and lost %d chunks!\n\n"
1740 			    "Check IO/CPU overload!\n\n",
1741 			    stats->nr_events[0],
1742 			    stats->nr_events[PERF_RECORD_LOST]);
1743 	}
1744 
1745 	if (session->tool->lost_samples == perf_event__process_lost_samples) {
1746 		double drop_rate;
1747 
1748 		drop_rate = (double)stats->total_lost_samples /
1749 			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1750 		if (drop_rate > 0.05) {
1751 			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1752 				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1753 				    drop_rate * 100.0);
1754 		}
1755 	}
1756 
1757 	if (session->tool->aux == perf_event__process_aux &&
1758 	    stats->total_aux_lost != 0) {
1759 		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1760 			    stats->total_aux_lost,
1761 			    stats->nr_events[PERF_RECORD_AUX]);
1762 	}
1763 
1764 	if (session->tool->aux == perf_event__process_aux &&
1765 	    stats->total_aux_partial != 0) {
1766 		bool vmm_exclusive = false;
1767 
1768 		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1769 		                       &vmm_exclusive);
1770 
1771 		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1772 		            "Are you running a KVM guest in the background?%s\n\n",
1773 			    stats->total_aux_partial,
1774 			    stats->nr_events[PERF_RECORD_AUX],
1775 			    vmm_exclusive ?
1776 			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
1777 			    "will reduce the gaps to only guest's timeslices." :
1778 			    "");
1779 	}
1780 
1781 	if (stats->nr_unknown_events != 0) {
1782 		ui__warning("Found %u unknown events!\n\n"
1783 			    "Is this an older tool processing a perf.data "
1784 			    "file generated by a more recent tool?\n\n"
1785 			    "If that is not the case, consider "
1786 			    "reporting to linux-kernel@vger.kernel.org.\n\n",
1787 			    stats->nr_unknown_events);
1788 	}
1789 
1790 	if (stats->nr_unknown_id != 0) {
1791 		ui__warning("%u samples with id not present in the header\n",
1792 			    stats->nr_unknown_id);
1793 	}
1794 
1795 	if (stats->nr_invalid_chains != 0) {
1796 		ui__warning("Found invalid callchains!\n\n"
1797 			    "%u out of %u events were discarded for this reason.\n\n"
1798 			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1799 			    stats->nr_invalid_chains,
1800 			    stats->nr_events[PERF_RECORD_SAMPLE]);
1801 	}
1802 
1803 	if (stats->nr_unprocessable_samples != 0) {
1804 		ui__warning("%u unprocessable samples recorded.\n"
1805 			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
1806 			    stats->nr_unprocessable_samples);
1807 	}
1808 
1809 	perf_session__warn_order(session);
1810 
1811 	events_stats__auxtrace_error_warn(stats);
1812 
1813 	if (stats->nr_proc_map_timeout != 0) {
1814 		ui__warning("%d map information files for pre-existing threads were\n"
1815 			    "not processed; if there are samples for those addresses,\n"
1816 			    "they will not be resolved. You can find out which threads\n"
1817 			    "these are by running with -v and redirecting the output\n"
1818 			    "to a file.\n"
1819 			    "Is the time limit to process the proc maps too short?\n"
1820 			    "Increase it with --proc-map-timeout.\n",
1821 			    stats->nr_proc_map_timeout);
1822 	}
1823 }
1824 
1825 static int perf_session__flush_thread_stack(struct thread *thread,
1826 					    void *p __maybe_unused)
1827 {
1828 	return thread_stack__flush(thread);
1829 }
1830 
1831 static int perf_session__flush_thread_stacks(struct perf_session *session)
1832 {
1833 	return machines__for_each_thread(&session->machines,
1834 					 perf_session__flush_thread_stack,
1835 					 NULL);
1836 }
1837 
1838 volatile int session_done;
1839 
1840 static int __perf_session__process_decomp_events(struct perf_session *session);
1841 
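/*
 * Pipe mode: records arrive on a stream that can be neither mmapped nor
 * seeked, so the loop below reads a perf_event_header first, grows the
 * buffer to header.size when needed, reads the payload and processes
 * the record.  Copy-on-queue is enabled because the single read buffer
 * is reused for every record.
 */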
1842 static int __perf_session__process_pipe_events(struct perf_session *session)
1843 {
1844 	struct ordered_events *oe = &session->ordered_events;
1845 	struct perf_tool *tool = session->tool;
1846 	int fd = perf_data__fd(session->data);
1847 	union perf_event *event;
1848 	uint32_t size, cur_size = 0;
1849 	void *buf = NULL;
1850 	s64 skip = 0;
1851 	u64 head;
1852 	ssize_t err;
1853 	void *p;
1854 
1855 	perf_tool__fill_defaults(tool);
1856 
1857 	head = 0;
1858 	cur_size = sizeof(union perf_event);
1859 
1860 	buf = malloc(cur_size);
1861 	if (!buf)
1862 		return -errno;
1863 	ordered_events__set_copy_on_queue(oe, true);
1864 more:
1865 	event = buf;
1866 	err = readn(fd, event, sizeof(struct perf_event_header));
1867 	if (err <= 0) {
1868 		if (err == 0)
1869 			goto done;
1870 
1871 		pr_err("failed to read event header\n");
1872 		goto out_err;
1873 	}
1874 
1875 	if (session->header.needs_swap)
1876 		perf_event_header__bswap(&event->header);
1877 
1878 	size = event->header.size;
1879 	if (size < sizeof(struct perf_event_header)) {
1880 		pr_err("bad event header size\n");
1881 		goto out_err;
1882 	}
1883 
1884 	if (size > cur_size) {
1885 		void *new = realloc(buf, size);
1886 		if (!new) {
1887 			pr_err("failed to allocate memory to read event\n");
1888 			goto out_err;
1889 		}
1890 		buf = new;
1891 		cur_size = size;
1892 		event = buf;
1893 	}
1894 	p = event;
1895 	p += sizeof(struct perf_event_header);
1896 
1897 	if (size > sizeof(struct perf_event_header)) {
1898 		err = readn(fd, p, size - sizeof(struct perf_event_header));
1899 		if (err <= 0) {
1900 			if (err == 0) {
1901 				pr_err("unexpected end of event stream\n");
1902 				goto done;
1903 			}
1904 
1905 			pr_err("failed to read event data\n");
1906 			goto out_err;
1907 		}
1908 	}
1909 
1910 	if ((skip = perf_session__process_event(session, event, head)) < 0) {
1911 		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1912 		       head, event->header.size, event->header.type);
1913 		err = -EINVAL;
1914 		goto out_err;
1915 	}
1916 
1917 	head += size;
1918 
1919 	if (skip > 0)
1920 		head += skip;
1921 
1922 	err = __perf_session__process_decomp_events(session);
1923 	if (err)
1924 		goto out_err;
1925 
1926 	if (!session_done())
1927 		goto more;
1928 done:
1929 	/* do the final flush for ordered samples */
1930 	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1931 	if (err)
1932 		goto out_err;
1933 	err = auxtrace__flush_events(session, tool);
1934 	if (err)
1935 		goto out_err;
1936 	err = perf_session__flush_thread_stacks(session);
1937 out_err:
1938 	free(buf);
1939 	if (!tool->no_warn)
1940 		perf_session__warn_about_errors(session);
1941 	ordered_events__free(&session->ordered_events);
1942 	auxtrace__free_events(session);
1943 	return err;
1944 }
1945 
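/*
 * Bounds-checked access to one record inside a mapped buffer:
 *   - NULL: not even a header fits before mmap_size, the caller should
 *     remap a later window;
 *   - ERR_PTR(-EINVAL): the header's size field points past the buffer,
 *     i.e. a truncated or fuzzed file;
 *   - otherwise, a pointer to the record at 'head'.
 */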
1946 static union perf_event *
1947 fetch_mmaped_event(struct perf_session *session,
1948 		   u64 head, size_t mmap_size, char *buf)
1949 {
1950 	union perf_event *event;
1951 
1952 	/*
1953 	 * Ensure we have enough space remaining to read
1954 	 * the size of the event in the headers.
1955 	 */
1956 	if (head + sizeof(event->header) > mmap_size)
1957 		return NULL;
1958 
1959 	event = (union perf_event *)(buf + head);
1960 
1961 	if (session->header.needs_swap)
1962 		perf_event_header__bswap(&event->header);
1963 
1964 	if (head + event->header.size > mmap_size) {
1965 		/* We're not fetching the event so swap back again */
1966 		if (session->header.needs_swap)
1967 			perf_event_header__bswap(&event->header);
1968 		pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx: fuzzed perf.data?\n",
1969 			 __func__, head, event->header.size, mmap_size);
1970 		return ERR_PTR(-EINVAL);
1971 	}
1972 
1973 	return event;
1974 }
1975 
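/*
 * Drain the decompressed buffers queued by
 * perf_session__process_compressed_event().  decomp->head advances past
 * each consumed record, so a buffer may be drained across several
 * calls.  file_pos is passed as 0 because a decompressed record has no
 * direct offset in the on-disk file.
 */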
1976 static int __perf_session__process_decomp_events(struct perf_session *session)
1977 {
1978 	s64 skip;
1979 	u64 size, file_pos = 0;
1980 	struct decomp *decomp = session->decomp_last;
1981 
1982 	if (!decomp)
1983 		return 0;
1984 
1985 	while (decomp->head < decomp->size && !session_done()) {
1986 		union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data);
1987 
1988 		if (IS_ERR(event))
1989 			return PTR_ERR(event);
1990 
1991 		if (!event)
1992 			break;
1993 
1994 		size = event->header.size;
1995 
1996 		if (size < sizeof(struct perf_event_header) ||
1997 		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
1998 			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1999 				decomp->file_pos + decomp->head, event->header.size, event->header.type);
2000 			return -EINVAL;
2001 		}
2002 
2003 		if (skip)
2004 			size += skip;
2005 
2006 		decomp->head += size;
2007 	}
2008 
2009 	return 0;
2010 }
2011 
2012 /*
2013  * On 64bit we can mmap the data file in one go. No need for tiny mmap
2014  * slices. On 32bit we use 32MB.
2015  */
2016 #if BITS_PER_LONG == 64
2017 #define MMAP_SIZE ULLONG_MAX
2018 #define NUM_MMAPS 1
2019 #else
2020 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2021 #define NUM_MMAPS 128
2022 #endif
2023 
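/*
 * The reader walks the data section through a sliding mmap window of at
 * most MMAP_SIZE bytes; 'process' is the per-record callback, so the
 * same loop could serve consumers other than process_simple() below.
 */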
2024 struct reader;
2025 
2026 typedef s64 (*reader_cb_t)(struct perf_session *session,
2027 			   union perf_event *event,
2028 			   u64 file_offset);
2029 
2030 struct reader {
2031 	int		 fd;
2032 	u64		 data_size;
2033 	u64		 data_offset;
2034 	reader_cb_t	 process;
2035 };
2036 
2037 static int
2038 reader__process_events(struct reader *rd, struct perf_session *session,
2039 		       struct ui_progress *prog)
2040 {
2041 	u64 data_size = rd->data_size;
2042 	u64 head, page_offset, file_offset, file_pos, size;
2043 	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2044 	size_t	mmap_size;
2045 	char *buf, *mmaps[NUM_MMAPS];
2046 	union perf_event *event;
2047 	s64 skip;
2048 
2049 	page_offset = page_size * (rd->data_offset / page_size);
2050 	file_offset = page_offset;
2051 	head = rd->data_offset - page_offset;
2052 
2053 	ui_progress__init_size(prog, data_size, "Processing events...");
2054 
2055 	data_size += rd->data_offset;
2056 
2057 	mmap_size = MMAP_SIZE;
2058 	if (mmap_size > data_size) {
2059 		mmap_size = data_size;
2060 		session->one_mmap = true;
2061 	}
2062 
2063 	memset(mmaps, 0, sizeof(mmaps));
2064 
2065 	mmap_prot  = PROT_READ;
2066 	mmap_flags = MAP_SHARED;
2067 
2068 	if (session->header.needs_swap) {
2069 		mmap_prot  |= PROT_WRITE;
2070 		mmap_flags = MAP_PRIVATE;
2071 	}
2072 remap:
2073 	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2074 		   file_offset);
2075 	if (buf == MAP_FAILED) {
2076 		pr_err("failed to mmap file\n");
2077 		err = -errno;
2078 		goto out;
2079 	}
2080 	mmaps[map_idx] = buf;
2081 	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2082 	file_pos = file_offset + head;
2083 	if (session->one_mmap) {
2084 		session->one_mmap_addr = buf;
2085 		session->one_mmap_offset = file_offset;
2086 	}
2087 
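	/*
	 * When fetch_mmaped_event() returns NULL the next record does not
	 * fit in the current window: unmap it, round 'head' down to a
	 * page boundary, move file_offset forward by that amount and
	 * remap.  E.g. with head = 0x2000123 and 4k pages the new window
	 * starts at file_offset + 0x2000000 and head becomes 0x123, so
	 * the partially covered record sits at the window start.
	 */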
2088 more:
2089 	event = fetch_mmaped_event(session, head, mmap_size, buf);
2090 	if (IS_ERR(event))
2091 		return PTR_ERR(event);
2092 
2093 	if (!event) {
2094 		if (mmaps[map_idx]) {
2095 			munmap(mmaps[map_idx], mmap_size);
2096 			mmaps[map_idx] = NULL;
2097 		}
2098 
2099 		page_offset = page_size * (head / page_size);
2100 		file_offset += page_offset;
2101 		head -= page_offset;
2102 		goto remap;
2103 	}
2104 
2105 	size = event->header.size;
2106 
2107 	skip = -EINVAL;
2108 
2109 	if (size < sizeof(struct perf_event_header) ||
2110 	    (skip = rd->process(session, event, file_pos)) < 0) {
2111 		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2112 		       file_offset + head, event->header.size,
2113 		       event->header.type, strerror(-skip));
2114 		err = skip;
2115 		goto out;
2116 	}
2117 
2118 	if (skip)
2119 		size += skip;
2120 
2121 	head += size;
2122 	file_pos += size;
2123 
2124 	err = __perf_session__process_decomp_events(session);
2125 	if (err)
2126 		goto out;
2127 
2128 	ui_progress__update(prog, size);
2129 
2130 	if (session_done())
2131 		goto out;
2132 
2133 	if (file_pos < data_size)
2134 		goto more;
2135 
2136 out:
2137 	return err;
2138 }
2139 
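/*
 * Default reader callback: feed every record through the session's
 * normal dispatch path.
 */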
2140 static s64 process_simple(struct perf_session *session,
2141 			  union perf_event *event,
2142 			  u64 file_offset)
2143 {
2144 	return perf_session__process_event(session, event, file_offset);
2145 }
2146 
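/*
 * Non-pipe path: walk the whole data section via the reader, then do
 * the same final flushes as the pipe path (ordered events, auxtrace,
 * per-thread call stacks).
 */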
2147 static int __perf_session__process_events(struct perf_session *session)
2148 {
2149 	struct reader rd = {
2150 		.fd		= perf_data__fd(session->data),
2151 		.data_size	= session->header.data_size,
2152 		.data_offset	= session->header.data_offset,
2153 		.process	= process_simple,
2154 	};
2155 	struct ordered_events *oe = &session->ordered_events;
2156 	struct perf_tool *tool = session->tool;
2157 	struct ui_progress prog;
2158 	int err;
2159 
2160 	perf_tool__fill_defaults(tool);
2161 
2162 	if (rd.data_size == 0)
2163 		return -1;
2164 
2165 	ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2166 
2167 	err = reader__process_events(&rd, session, &prog);
2168 	if (err)
2169 		goto out_err;
2170 	/* do the final flush for ordered samples */
2171 	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2172 	if (err)
2173 		goto out_err;
2174 	err = auxtrace__flush_events(session, tool);
2175 	if (err)
2176 		goto out_err;
2177 	err = perf_session__flush_thread_stacks(session);
2178 out_err:
2179 	ui_progress__finish();
2180 	if (!tool->no_warn)
2181 		perf_session__warn_about_errors(session);
2182 	/*
2183 	 * We may be switching perf.data output; make ordered_events
2184 	 * reusable.
2185 	 */
2186 	ordered_events__reinit(&session->ordered_events);
2187 	auxtrace__free_events(session);
2188 	session->one_mmap = false;
2189 	return err;
2190 }
2191 
2192 int perf_session__process_events(struct perf_session *session)
2193 {
2194 	if (perf_session__register_idle_thread(session) < 0)
2195 		return -ENOMEM;
2196 
2197 	if (perf_data__is_pipe(session->data))
2198 		return __perf_session__process_pipe_events(session);
2199 
2200 	return __perf_session__process_events(session);
2201 }
2202 
2203 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2204 {
2205 	struct evsel *evsel;
2206 
2207 	evlist__for_each_entry(session->evlist, evsel) {
2208 		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2209 			return true;
2210 	}
2211 
2212 	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2213 	return false;
2214 }
2215 
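/*
 * Record a kallsyms reference symbol and the address it was seen at, so
 * that symbol resolution can later detect (and compensate for) a
 * relocated kernel by comparing against the run-time address.  A ']' in
 * the copied name, if present, truncates it there.
 */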
2216 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2217 {
2218 	char *bracket;
2219 	struct ref_reloc_sym *ref;
2220 	struct kmap *kmap;
2221 
2222 	ref = zalloc(sizeof(struct ref_reloc_sym));
2223 	if (ref == NULL)
2224 		return -ENOMEM;
2225 
2226 	ref->name = strdup(symbol_name);
2227 	if (ref->name == NULL) {
2228 		free(ref);
2229 		return -ENOMEM;
2230 	}
2231 
2232 	bracket = strchr(ref->name, ']');
2233 	if (bracket)
2234 		*bracket = '\0';
2235 
2236 	ref->addr = addr;
2237 
2238 	kmap = map__kmap(map);
2239 	if (kmap) {
2240 		kmap->ref_reloc_sym = ref;
	} else {
		/* not a kernel map: free the ref we just built instead of leaking it */
		zfree(&ref->name);
		free(ref);
	}
2241 
2242 	return 0;
2243 }
2244 
2245 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2246 {
2247 	return machines__fprintf_dsos(&session->machines, fp);
2248 }
2249 
2250 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2251 					  bool (skip)(struct dso *dso, int parm), int parm)
2252 {
2253 	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2254 }
2255 
2256 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2257 {
2258 	size_t ret;
2259 	const char *msg = "";
2260 
2261 	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2262 		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2263 
2264 	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2265 
2266 	ret += events_stats__fprintf(&session->evlist->stats, fp);
2267 	return ret;
2268 }
2269 
2270 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2271 {
2272 	/*
2273 	 * FIXME: Here we have to actually print all the machines in this
2274 	 * session, not just the host...
2275 	 */
2276 	return machine__fprintf(&session->machines.host, fp);
2277 }
2278 
2279 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2280 					      unsigned int type)
2281 {
2282 	struct evsel *pos;
2283 
2284 	evlist__for_each_entry(session->evlist, pos) {
2285 		if (pos->core.attr.type == type)
2286 			return pos;
2287 	}
2288 	return NULL;
2289 }
2290 
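/*
 * Build the CPU filter bitmap for a -C/--cpu list.  This only makes
 * sense when samples carry PERF_SAMPLE_CPU, hence the up-front scan of
 * every event type present in the file.  Typical use (a sketch; the
 * caller sizes the bitmap):
 *
 *	unsigned long bitmap[BITS_TO_LONGS(MAX_NR_CPUS)] = { 0 };
 *
 *	if (perf_session__cpu_bitmap(session, "0-3,8", bitmap) < 0)
 *		return -1;
 */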
2291 int perf_session__cpu_bitmap(struct perf_session *session,
2292 			     const char *cpu_list, unsigned long *cpu_bitmap)
2293 {
2294 	int i, err = -1;
2295 	struct perf_cpu_map *map;
2296 	int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);
2297 
2298 	for (i = 0; i < PERF_TYPE_MAX; ++i) {
2299 		struct evsel *evsel;
2300 
2301 		evsel = perf_session__find_first_evtype(session, i);
2302 		if (!evsel)
2303 			continue;
2304 
2305 		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2306 			pr_err("File does not contain CPU events. "
2307 			       "Remove -C option to proceed.\n");
2308 			return -1;
2309 		}
2310 	}
2311 
2312 	map = perf_cpu_map__new(cpu_list);
2313 	if (map == NULL) {
2314 		pr_err("Invalid cpu_list\n");
2315 		return -1;
2316 	}
2317 
2318 	for (i = 0; i < map->nr; i++) {
2319 		int cpu = map->map[i];
2320 
2321 		if (cpu >= nr_cpus) {
2322 			pr_err("Requested CPU %d too large. "
2323 			       "Consider raising MAX_NR_CPUS\n", cpu);
2324 			goto out_delete_map;
2325 		}
2326 
2327 		set_bit(cpu, cpu_bitmap);
2328 	}
2329 
2330 	err = 0;
2331 
2332 out_delete_map:
2333 	perf_cpu_map__put(map);
2334 	return err;
2335 }
2336 
2337 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2338 				bool full)
2339 {
2340 	if (session == NULL || fp == NULL)
2341 		return;
2342 
2343 	fprintf(fp, "# ========\n");
2344 	perf_header__fprintf_info(session, fp, full);
2345 	fprintf(fp, "# ========\n#\n");
2346 }
2348 
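/*
 * Attach handlers to tracepoint events by name.  A sketch of typical
 * use, with hypothetical handler functions:
 *
 *	static const struct evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch },
 *		{ "sched:sched_wakeup", process_sched_wakeup },
 *	};
 *
 *	err = __perf_session__set_tracepoints_handlers(session, handlers,
 *						       ARRAY_SIZE(handlers));
 *
 * Names absent from the session are skipped; an evsel that already has
 * a handler fails the whole call with -EEXIST.
 */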
2349 int __perf_session__set_tracepoints_handlers(struct perf_session *session,
2350 					     const struct evsel_str_handler *assocs,
2351 					     size_t nr_assocs)
2352 {
2353 	struct evsel *evsel;
2354 	size_t i;
2355 	int err;
2356 
2357 	for (i = 0; i < nr_assocs; i++) {
2358 		/*
2359 		 * Adding a handler for an event that is not in the
2360 		 * session: just ignore it.
2361 		 */
2362 		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
2363 		if (evsel == NULL)
2364 			continue;
2365 
2366 		err = -EEXIST;
2367 		if (evsel->handler != NULL)
2368 			goto out;
2369 		evsel->handler = assocs[i].handler;
2370 	}
2371 
2372 	err = 0;
2373 out:
2374 	return err;
2375 }
2376 
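/*
 * PERF_RECORD_ID_INDEX carries (id, idx, cpu, tid) tuples so the reader
 * can map a sample id back to its mmap index and the cpu/tid it was
 * recorded on before parsing any samples.  'nr' is validated against
 * what actually fits in header.size.
 */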
2377 int perf_event__process_id_index(struct perf_session *session,
2378 				 union perf_event *event)
2379 {
2380 	struct evlist *evlist = session->evlist;
2381 	struct perf_record_id_index *ie = &event->id_index;
2382 	size_t i, nr, max_nr;
2383 
2384 	max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
2385 		 sizeof(struct id_index_entry);
2386 	nr = ie->nr;
2387 	if (nr > max_nr)
2388 		return -EINVAL;
2389 
2390 	if (dump_trace)
2391 		fprintf(stdout, " nr: %zu\n", nr);
2392 
2393 	for (i = 0; i < nr; i++) {
2394 		struct id_index_entry *e = &ie->entries[i];
2395 		struct perf_sample_id *sid;
2396 
2397 		if (dump_trace) {
2398 			fprintf(stdout,	" ... id: %"PRI_lu64, e->id);
2399 			fprintf(stdout,	"  idx: %"PRI_lu64, e->idx);
2400 			fprintf(stdout,	"  cpu: %"PRI_ld64, e->cpu);
2401 			fprintf(stdout,	"  tid: %"PRI_ld64"\n", e->tid);
2402 		}
2403 
2404 		sid = perf_evlist__id2sid(evlist, e->id);
2405 		if (!sid)
2406 			return -ENOENT;
2407 		sid->idx = e->idx;
2408 		sid->cpu = e->cpu;
2409 		sid->tid = e->tid;
2410 	}
2411 	return 0;
2412 }
2413 
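/*
 * header.size is a u16, so a single PERF_RECORD_ID_INDEX holds at most
 *
 *	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
 *		 sizeof(struct id_index_entry)
 *
 * entries.  With more ids than that, the loop emits full events of 'n'
 * entries as it goes and finishes with one event holding the remaining
 * 'nr' entries.
 */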
2414 int perf_event__synthesize_id_index(struct perf_tool *tool,
2415 				    perf_event__handler_t process,
2416 				    struct evlist *evlist,
2417 				    struct machine *machine)
2418 {
2419 	union perf_event *ev;
2420 	struct evsel *evsel;
2421 	size_t nr = 0, i = 0, sz, max_nr, n;
2422 	int err;
2423 
2424 	pr_debug2("Synthesizing id index\n");
2425 
2426 	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
2427 		 sizeof(struct id_index_entry);
2428 
2429 	evlist__for_each_entry(evlist, evsel)
2430 		nr += evsel->ids;
2431 
2432 	n = nr > max_nr ? max_nr : nr;
2433 	sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
2434 	ev = zalloc(sz);
2435 	if (!ev)
2436 		return -ENOMEM;
2437 
2438 	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
2439 	ev->id_index.header.size = sz;
2440 	ev->id_index.nr = n;
2441 
2442 	evlist__for_each_entry(evlist, evsel) {
2443 		u32 j;
2444 
2445 		for (j = 0; j < evsel->ids; j++) {
2446 			struct id_index_entry *e;
2447 			struct perf_sample_id *sid;
2448 
2449 			if (i >= n) {
2450 				err = process(tool, ev, NULL, machine);
2451 				if (err)
2452 					goto out_err;
2453 				nr -= n;
2454 				i = 0;
2455 			}
2456 
2457 			e = &ev->id_index.entries[i++];
2458 
2459 			e->id = evsel->id[j];
2460 
2461 			sid = perf_evlist__id2sid(evlist, e->id);
2462 			if (!sid) {
2463 				err = -ENOENT;
2464 				goto out_err;
2465 			}
2466 
2467 			e->idx = sid->idx;
2468 			e->cpu = sid->cpu;
2469 			e->tid = sid->tid;
2470 		}
2471 	}
2472 
2473 	sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
2474 	ev->id_index.header.size = sz;
2475 	ev->id_index.nr = nr;
2476 
2477 	err = process(tool, ev, NULL, machine);
2478 out_err:
2479 	free(ev);
2480 
2481 	return err;
2482 }
2483