// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <signal.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/event.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "tsc.h"
#include "ui/progress.h"
#include "util.h"
#include "arch/common.h"
#include "units.h"
#include "annotate.h"
#include "perf.h"
#include <internal/lib.h>

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       const struct perf_tool *tool,
				       u64 file_offset,
				       const char *file_path);

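/*
 * Read and sanity-check the perf.data header. For non-pipe, non-stat
 * data, also verify that all events in the evlist agree on sample_type,
 * sample_id_all and read_format, since event parsing relies on these
 * being consistent across the file.
 */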
static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) {
		/* Auxiliary events may reference exited threads, hold onto dead ones. */
		symbol_conf.keep_exited_threads = true;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset,
					   event->file_path);
}

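/*
 * Allocate and set up a session. When @data is given and opened for
 * reading, the header is parsed and per-session state (id header size,
 * comm_exec, directory data, kallsyms path) is derived from it; when
 * writing, or when there is no data, kernel maps are created up front
 * instead.
 */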
struct perf_session *__perf_session__new(struct perf_data *data,
					 struct perf_tool *tool,
					 bool trace_event_repipe,
					 struct perf_env *host_env)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->trace_event_repipe = trace_event_repipe;
	session->tool = tool;
	session->decomp_data.zstd_decomp = &session->zstd_data;
	session->active_decomp = &session->decomp_data;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session);
			if (ret < 0)
				goto out_delete;

			/*
			 * Set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			evlist__init_trace_event_sample_raw(session->evlist, &session->header.env);

			/* Open the directory data. */
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		assert(host_env != NULL);
		session->machines.host.env = host_env;
	}
	if (session->evlist)
		session->evlist->session = session;

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

out_delete:
	perf_session__delete(session);
out:
	return ERR_PTR(ret);
}

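/*
 * Decompressed event buffers are chained mmap allocations; walk the
 * list and munmap each one.
 */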
static void perf_decomp__release_events(struct decomp *next)
{
	struct decomp *decomp;
	size_t mmap_len;

	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	debuginfo_cache__delete();
	perf_session__destroy_kernel_maps(session);
	perf_decomp__release_events(session->decomp_data.decomp);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data) {
		if (perf_data__is_read(session->data))
			evlist__delete(session->evlist);
		perf_data__close(session->data);
	}
#ifdef HAVE_LIBTRACEEVENT
	trace_event__cleanup(&session->tevent);
#endif
	free(session);
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
		event->mmap2.maj = bswap_32(event->mmap2.maj);
		event->mmap2.min = bswap_32(event->mmap2.min);
		event->mmap2.ino = bswap_64(event->mmap2.ino);
		event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
	}

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
	event->text_poke.addr = bswap_64(event->text_poke.addr);
	event->text_poke.old_len = bswap_16(event->text_poke.old_len);
	event->text_poke.new_len = bswap_16(event->text_poke.new_len);

	if (sample_id_all) {
		size_t len = sizeof(event->text_poke.old_len) +
			     sizeof(event->text_poke.new_len) +
			     event->text_poke.old_len +
			     event->text_poke.new_len;
		void *data = &event->text_poke.old_len;

		data += PERF_ALIGN(len, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
	event->cgroup.id = bswap_64(event->cgroup.id);

	if (sample_id_all) {
		void *data = &event->cgroup.path;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

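/*
 * Reverse the bit order within a single byte, e.g. revbyte(0x01) == 0x80
 * and revbyte(0xa0) == 0x05: swap the nibbles, then the bit pairs, then
 * neighbouring bits.
 */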
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above appears to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix that carries the
 * perf_event_attr bitfield flags in a separate FEAT_ section of the data
 * file. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

	/*
	 * The fields after read_format are bitfields. Check against
	 * read_format because offsetof() cannot be used on a bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= perf_record_header_attr_id(event) - (void *)event;
	mem_bswap_64(perf_record_header_attr_id(event), size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
	if (event->auxtrace_error.fmt >= 2) {
		event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
		event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
	}
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

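/*
 * CPU maps come in three on-disk encodings: an explicit CPU list, a
 * bitmask (with 32-bit or 64-bit words), or an inclusive [start, end]
 * range. Each encoding needs its own byte swapping.
 */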
static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;

	data->type = bswap_16(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		data->cpus_data.nr = bswap_16(data->cpus_data.nr);

		for (unsigned i = 0; i < data->cpus_data.nr; i++)
			data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);

		switch (data->mask32_data.long_size) {
		case 4:
			data->mask32_data.nr = bswap_16(data->mask32_data.nr);
			for (unsigned i = 0; i < data->mask32_data.nr; i++)
				data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
			break;
		case 8:
			data->mask64_data.nr = bswap_16(data->mask64_data.nr);
			for (unsigned i = 0; i < data->mask64_data.nr; i++)
				data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
			break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		break;
	case PERF_CPU_MAP__RANGE_CPUS:
		data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu);
		data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu);
		break;
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

static void perf_event__time_conv_swap(union perf_event *event,
				       bool sample_id_all __maybe_unused)
{
	event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
	event->time_conv.time_mult = bswap_64(event->time_conv.time_mult);
	event->time_conv.time_zero = bswap_64(event->time_conv.time_zero);

	if (event_contains(event->time_conv, time_cycles)) {
		event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
		event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
	}
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

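/*
 * Per-record-type byteswap handlers, indexed by PERF_RECORD_* type.
 * A NULL entry means the record type needs no swapping here.
 */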
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES]	  = perf_event__namespaces_swap,
	[PERF_RECORD_CGROUP]		  = perf_event__cgroup_swap,
	[PERF_RECORD_TEXT_POKE]		  = perf_event__text_poke_swap,
	[PERF_RECORD_AUX_OUTPUT_HW_ID]	  = perf_event__all64_swap,
	[PERF_RECORD_CALLCHAIN_DEFERRED]  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV]		  = perf_event__time_conv_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
int perf_event__process_finished_round(const struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset, const char *file_path)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * The LBR callstack only captures the user call chain;
		 * i is the number of kernel call chain entries and
		 * 1 accounts for the PERF_CONTEXT_USER marker.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBRs are register pairs: the caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, for a call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will be recorded as
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);

	if (sample->deferred_callchain)
		printf("...... (deferred)\n");
}

static void branch_stack__printf(struct perf_sample *sample,
				 struct evsel *evsel)
{
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	bool callstack = evsel__has_branch_callstack(evsel);
	u64 *branch_stack_cntr = sample->branch_stack_cntr;
	uint64_t i;

	if (!callstack) {
		printf("%s: nr:%" PRIu64 "\n", "... branch stack", sample->branch_stack->nr);
	} else {
		/*
		 * The reason for adding 1 to nr is that expanding the
		 * branch stack generates nr + 1 callstack records, e.g.
		 * B()->C()
		 * A()->B()
		 * yields the final callstack:
		 * C()
		 * B()
		 * A()
		 */
		printf("%s: nr:%" PRIu64 "\n", "... branch callstack", sample->branch_stack->nr+1);
	}

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x %s %s\n",
			       i, e->from, e->to,
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved,
			       get_branch_type(e),
			       e->flags.spec ? branch_spec_desc(e->flags.spec) : "");
		} else {
			if (i == 0) {
				printf("..... %2"PRIu64": %016" PRIx64 "\n"
				       "..... %2"PRIu64": %016" PRIx64 "\n",
				       i, e->to, i+1, e->from);
			} else {
				printf("..... %2"PRIu64": %016" PRIx64 "\n", i+1, e->from);
			}
		}
	}

	if (branch_stack_cntr) {
		unsigned int br_cntr_width, br_cntr_nr;

		perf_env__find_br_cntr_info(evsel__env(evsel), &br_cntr_nr, &br_cntr_width);
		printf("... branch stack counters: nr:%" PRIu64 " (counter width: %u max counter nr:%u)\n",
		       sample->branch_stack->nr, br_cntr_width, br_cntr_nr);
		for (i = 0; i < sample->branch_stack->nr; i++)
			printf("..... %2"PRIu64": %016" PRIx64 "\n", i, branch_stack_cntr[i]);
	}
}

static void regs_dump__printf(u64 mask, u64 *regs, const char *arch)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%016" PRIx64 "\n",
		       perf_reg_name(rid, arch), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs, const char *arch)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs, arch);
}

static void regs_user__printf(struct perf_sample *sample, const char *arch)
{
	struct regs_dump *user_regs;

	if (!sample->user_regs)
		return;

	user_regs = perf_sample__user_regs(sample);

	if (user_regs->regs)
		regs__printf("user", user_regs, arch);
}

static void regs_intr__printf(struct perf_sample *sample, const char *arch)
{
	struct regs_dump *intr_regs;

	if (!sample->intr_regs)
		return;

	intr_regs = perf_sample__intr_regs(sample);

	if (intr_regs->regs)
		regs__printf("intr", intr_regs, arch);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	u64 sample_type = __evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		struct sample_read_value *value = sample->read.group.values;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		sample_read_group__for_each(value, sample->read.group.nr, read_format) {
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64,
			       value->id, value->value);
			if (read_format & PERF_FORMAT_LOST)
				printf(", lost %" PRIu64, value->lost);
			printf("\n");
		}
	} else {
		printf("..... id %016" PRIx64 ", value %016" PRIx64,
		       sample->read.one.id, sample->read.one.value);
		if (read_format & PERF_FORMAT_LOST)
			printf(", lost %" PRIu64, sample->read.one.lost);
		printf("\n");
	}
}

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample,
		       const char *file_path)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 "@%s [%#x]: event: %d\n",
	       file_offset, file_path, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

char *get_page_size_name(u64 size, char *str)
{
	if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
		snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");

	return str;
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample, const char *arch)
{
	u64 sample_type;
	char str[PAGE_SIZE_NAME_LEN];

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (evsel__has_br_stack(evsel))
		branch_stack__printf(sample, evsel);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample, arch);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample, arch);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
		printf("... weight: %" PRIu64 "", sample->weight);
		if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
			printf(",0x%"PRIx16"", sample->ins_lat);
			printf(",0x%"PRIx16"", sample->weight3);
		}
		printf("\n");
	}

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
		printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));

	if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
		printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_deferred_callchain(struct evsel *evsel, union perf_event *event,
				    struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->deferred_cookie);

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       evsel__name(evsel), event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id           : %" PRI_lu64 "\n", read_event->id);

	if (read_format & PERF_FORMAT_LOST)
		printf("... lost         : %" PRI_lu64 "\n", read_event->lost);
}

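/*
 * Pick the machine an event belongs to: the host by default, or a guest
 * machine keyed by machine_pid (or the event's pid) when the sample's
 * cpumode says it came from guest kernel or guest user space.
 */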
static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (sample->machine_pid)
			pid = sample->machine_pid;
		else if (event->header.type == PERF_RECORD_MMAP
			 || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		/*
		 * Guest code machine is created as needed and does not use
		 * DEFAULT_GUEST_KERNEL_ID.
		 */
		if (symbol_conf.guest_code)
			return machines__findnew(machines, pid);

		return machines__find_guest(machines, pid);
	}

	return &machines->host;
}

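/*
 * For PERF_SAMPLE_READ samples the period is not carried in the event
 * itself; it is reconstructed as the delta between the current counter
 * value and the last value seen for this sample id (optionally tracked
 * per thread).
 */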
static int deliver_sample_value(struct evlist *evlist,
				const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine,
				bool per_thread)
{
	struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
	struct evsel *evsel;
	u64 *storage = NULL;

	if (sid)
		storage = perf_sample_id__get_period_storage(sid, sample->tid, per_thread);

	if (storage) {
		sample->id = v->id;
		sample->period = v->value - *storage;
		*storage = v->value;
	}

	if (!storage || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver a sample with a zero period,
	 * so bail out.
	 */
	if (!sample->period)
		return 0;

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine,
				u64 read_format,
				bool per_thread)
{
	int ret = -EINVAL;
	struct sample_read_value *v = sample->read.group.values;

	if (tool->dont_split_sample_group)
		return deliver_sample_value(evlist, tool, event, sample, v, machine,
					    per_thread);

	sample_read_group__for_each(v, sample->read.group.nr, read_format) {
		ret = deliver_sample_value(evlist, tool, event, sample, v,
					   machine, per_thread);
		if (ret)
			break;
	}

	return ret;
}

static int evlist__deliver_sample(struct evlist *evlist, const struct perf_tool *tool,
				  union perf_event *event, struct perf_sample *sample,
				  struct evsel *evsel, struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;
	bool per_thread = perf_evsel__attr_has_per_thread_sample_period(&evsel->core);

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine, read_format, per_thread);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine,
					    per_thread);
}

/*
 * Samples with deferred callchains should wait for the next matching
 * PERF_RECORD_CALLCHAIN_DEFERRED record. Keep the events in a list and
 * deliver them once the callchains are found.
 */
struct deferred_event {
	struct list_head list;
	union perf_event *event;
};

/*
 * This is called when a deferred callchain record comes up. Find all matching
 * samples, merge the callchains and process them.
 */
static int evlist__deliver_deferred_callchain(struct evlist *evlist,
					      const struct perf_tool *tool,
					      union perf_event *event,
					      struct perf_sample *sample,
					      struct machine *machine)
{
	struct deferred_event *de, *tmp;
	struct evsel *evsel;
	int ret = 0;

	if (!tool->merge_deferred_callchains) {
		evsel = evlist__id2evsel(evlist, sample->id);
		return tool->callchain_deferred(tool, event, sample,
						evsel, machine);
	}

	list_for_each_entry_safe(de, tmp, &evlist->deferred_samples, list) {
		struct perf_sample orig_sample;

		ret = evlist__parse_sample(evlist, de->event, &orig_sample);
		if (ret < 0) {
			pr_err("failed to parse original sample\n");
			break;
		}

		if (sample->tid != orig_sample.tid)
			continue;

		if (event->callchain_deferred.cookie == orig_sample.deferred_cookie)
			sample__merge_deferred_callchain(&orig_sample, sample);
		else
			orig_sample.deferred_callchain = false;

		evsel = evlist__id2evsel(evlist, orig_sample.id);
		ret = evlist__deliver_sample(evlist, tool, de->event,
					     &orig_sample, evsel, machine);

		if (orig_sample.deferred_callchain)
			free(orig_sample.callchain);

		list_del(&de->list);
		free(de->event);
		free(de);

		if (ret)
			break;
	}
	return ret;
}

/*
 * This is called at the end of the data processing for the session. Flush the
 * remaining samples as there's no hope of matching deferred callchains.
 */
static int session__flush_deferred_samples(struct perf_session *session,
					   const struct perf_tool *tool)
{
	struct evlist *evlist = session->evlist;
	struct machine *machine = &session->machines.host;
	struct deferred_event *de, *tmp;
	struct evsel *evsel;
	int ret = 0;

	list_for_each_entry_safe(de, tmp, &evlist->deferred_samples, list) {
		struct perf_sample sample;

		ret = evlist__parse_sample(evlist, de->event, &sample);
		if (ret < 0) {
			pr_err("failed to parse original sample\n");
			break;
		}

		evsel = evlist__id2evsel(evlist, sample.id);
		ret = evlist__deliver_sample(evlist, tool, de->event,
					     &sample, evsel, machine);

		list_del(&de->list);
		free(de->event);
		free(de);

		if (ret)
			break;
	}
	return ret;
}

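/*
 * Central dispatch for kernel-generated records: route each event to the
 * matching tool callback, updating evlist statistics (lost events, lost
 * samples, truncated AUX data, ...) along the way. Samples that carry a
 * deferred callchain are stashed on evlist->deferred_samples instead of
 * being delivered immediately.
 */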
static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   const struct perf_tool *tool, u64 file_offset,
				   const char *file_path)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample, file_path);

	evsel = evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			dump_sample(evsel, event, sample, perf_env__arch(NULL));
			return 0;
		}
		dump_sample(evsel, event, sample, perf_env__arch(machine->env));
		if (sample->deferred_callchain && tool->merge_deferred_callchains) {
			struct deferred_event *de = malloc(sizeof(*de));
			size_t sz = event->header.size;

			if (de == NULL)
				return -ENOMEM;

			de->event = malloc(sz);
			if (de->event == NULL) {
				free(de);
				return -ENOMEM;
			}
			memcpy(de->event, event, sz);
			list_add_tail(&de->list, &evlist->deferred_samples);
			return 0;
		}
		return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_CGROUP:
		return tool->cgroup(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF)
			evlist->stats.total_dropped_samples += event->lost_samples.lost;
		else if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
			if (event->aux.flags & PERF_AUX_FLAG_COLLISION)
				evlist->stats.total_aux_collision += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	case PERF_RECORD_TEXT_POKE:
		return tool->text_poke(tool, event, sample, machine);
	case PERF_RECORD_AUX_OUTPUT_HW_ID:
		return tool->aux_output_hw_id(tool, event, sample, machine);
	case PERF_RECORD_CALLCHAIN_DEFERRED:
		dump_deferred_callchain(evsel, event, sample);
		return evlist__deliver_deferred_callchain(evlist, tool, event,
							  sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       const struct perf_tool *tool,
				       u64 file_offset,
				       const char *file_path)
{
	struct perf_sample sample;
	int ret;

	perf_sample__init(&sample, /*all=*/false);
	ret = evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		goto out;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset, file_path);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);
out:
	perf_sample__exit(&sample);
	return ret;
}

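/*
 * Handle user (synthesized) records, i.e. everything at or above
 * PERF_RECORD_USER_TYPE_START. These are processed immediately rather
 * than being queued for timestamp ordering.
 */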
perf_session__process_user_event(struct perf_session * session,union perf_event * event,u64 file_offset,const char * file_path)1533 static s64 perf_session__process_user_event(struct perf_session *session,
1534 union perf_event *event,
1535 u64 file_offset,
1536 const char *file_path)
1537 {
1538 struct ordered_events *oe = &session->ordered_events;
1539 const struct perf_tool *tool = session->tool;
1540 struct perf_sample sample;
1541 int fd = perf_data__fd(session->data);
1542 s64 err;
1543
1544 perf_sample__init(&sample, /*all=*/true);
1545 if ((event->header.type != PERF_RECORD_COMPRESSED &&
1546 event->header.type != PERF_RECORD_COMPRESSED2) ||
1547 perf_tool__compressed_is_stub(tool))
1548 dump_event(session->evlist, event, file_offset, &sample, file_path);
1549
1550 /* These events are processed right away */
1551 switch (event->header.type) {
1552 case PERF_RECORD_HEADER_ATTR:
1553 err = tool->attr(tool, event, &session->evlist);
1554 if (err == 0) {
1555 perf_session__set_id_hdr_size(session);
1556 perf_session__set_comm_exec(session);
1557 }
1558 break;
1559 case PERF_RECORD_EVENT_UPDATE:
1560 err = tool->event_update(tool, event, &session->evlist);
1561 break;
1562 case PERF_RECORD_HEADER_EVENT_TYPE:
1563 /*
1564 * Deprecated, but we need to handle it for sake
1565 * of old data files create in pipe mode.
1566 */
1567 err = 0;
1568 break;
1569 case PERF_RECORD_HEADER_TRACING_DATA:
1570 /*
1571 * Setup for reading amidst mmap, but only when we
1572 * are in 'file' mode. The 'pipe' fd is in proper
1573 * place already.
1574 */
1575 if (!perf_data__is_pipe(session->data))
1576 lseek(fd, file_offset, SEEK_SET);
1577 err = tool->tracing_data(tool, session, event);
1578 break;
1579 case PERF_RECORD_HEADER_BUILD_ID:
1580 err = tool->build_id(tool, session, event);
1581 break;
1582 case PERF_RECORD_FINISHED_ROUND:
1583 err = tool->finished_round(tool, event, oe);
1584 break;
1585 case PERF_RECORD_ID_INDEX:
1586 err = tool->id_index(tool, session, event);
1587 break;
1588 case PERF_RECORD_AUXTRACE_INFO:
1589 err = tool->auxtrace_info(tool, session, event);
1590 break;
1591 case PERF_RECORD_AUXTRACE:
1592 /*
1593 * Setup for reading amidst mmap, but only when we
1594 * are in 'file' mode. The 'pipe' fd is in proper
1595 * place already.
1596 */
1597 if (!perf_data__is_pipe(session->data))
1598 lseek(fd, file_offset + event->header.size, SEEK_SET);
1599 err = tool->auxtrace(tool, session, event);
1600 break;
1601 case PERF_RECORD_AUXTRACE_ERROR:
1602 perf_session__auxtrace_error_inc(session, event);
1603 err = tool->auxtrace_error(tool, session, event);
1604 break;
1605 case PERF_RECORD_THREAD_MAP:
1606 err = tool->thread_map(tool, session, event);
1607 break;
1608 case PERF_RECORD_CPU_MAP:
1609 err = tool->cpu_map(tool, session, event);
1610 break;
1611 case PERF_RECORD_STAT_CONFIG:
1612 err = tool->stat_config(tool, session, event);
1613 break;
1614 case PERF_RECORD_STAT:
1615 err = tool->stat(tool, session, event);
1616 break;
1617 case PERF_RECORD_STAT_ROUND:
1618 err = tool->stat_round(tool, session, event);
1619 break;
1620 case PERF_RECORD_TIME_CONV:
1621 session->time_conv = event->time_conv;
1622 err = tool->time_conv(tool, session, event);
1623 break;
1624 case PERF_RECORD_HEADER_FEATURE:
1625 err = tool->feature(tool, session, event);
1626 break;
1627 case PERF_RECORD_COMPRESSED:
1628 case PERF_RECORD_COMPRESSED2:
1629 err = tool->compressed(tool, session, event, file_offset, file_path);
1630 if (err)
1631 dump_event(session->evlist, event, file_offset, &sample, file_path);
1632 break;
1633 case PERF_RECORD_FINISHED_INIT:
1634 err = tool->finished_init(tool, session, event);
1635 break;
1636 case PERF_RECORD_BPF_METADATA:
1637 err = tool->bpf_metadata(tool, session, event);
1638 break;
1639 default:
1640 err = -EINVAL;
1641 break;
1642 }
1643 perf_sample__exit(&sample);
1644 return err;
1645 }

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	const struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0, NULL);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL);
}

int perf_session__deliver_synth_attr_event(struct perf_session *session,
					   const struct perf_event_attr *attr,
					   u64 id)
{
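	/*
	 * Alias a PERF_RECORD_HEADER_ATTR record carrying exactly one id
	 * with the generic event union, so the on-stack buffer can be
	 * handed to perf_session__deliver_synth_event() below.
	 */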
	union {
		struct {
			struct perf_record_header_attr attr;
			u64 ids[1];
		} attr_id;
		union perf_event ev;
	} ev = {
		.attr_id.attr.header.type = PERF_RECORD_HEADER_ATTR,
		.attr_id.attr.header.size = sizeof(ev.attr_id),
		.attr_id.ids[0] = id,
	};

	if (attr->size != sizeof(ev.attr_id.attr.attr)) {
		pr_debug("Unexpected perf_event_attr size\n");
		return -EINVAL;
	}
	ev.attr_id.attr.attr = *attr;
	return perf_session__deliver_synth_event(session, &ev.ev, NULL);
}
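
/*
 * Minimal usage sketch (hypothetical helper, not part of this file):
 * synthesize an attr event for a software dummy counter with a
 * caller-chosen sample id. The attr layout and the .size check mirror
 * what perf_session__deliver_synth_attr_event() expects above.
 */
#if 0
static int example__synth_dummy_attr(struct perf_session *session, u64 id)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr),
	};

	return perf_session__deliver_synth_attr_event(session, &attr, id);
}
#endif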

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

int perf_session__peek_events(struct perf_session *session, u64 offset,
			      u64 size, peek_events_cb_t cb, void *data)
{
	u64 max_offset = offset + size;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;
	int err;

	do {
		err = perf_session__peek_event(session, offset, buf,
					       PERF_SAMPLE_MAX_SIZE, &event,
					       NULL);
		if (err)
			return err;

		err = cb(session, event, offset, data);
		if (err)
			return err;

		offset += event->header.size;
		if (event->header.type == PERF_RECORD_AUXTRACE)
			offset += event->auxtrace.size;

	} while (offset < max_offset);

	return err;
}
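
/*
 * Usage sketch for the peek API above (hypothetical callback, not part
 * of this file): count PERF_RECORD_AUXTRACE records in a byte range of
 * the data file without consuming them. Assumes an open, non-pipe
 * session.
 */
#if 0
static int count_auxtrace_cb(struct perf_session *session __maybe_unused,
			     union perf_event *event,
			     u64 offset __maybe_unused, void *data)
{
	u64 *count = data;

	if (event->header.type == PERF_RECORD_AUXTRACE)
		(*count)++;
	return 0;
}

static int example__count_auxtrace(struct perf_session *session,
				   u64 offset, u64 size, u64 *count)
{
	*count = 0;
	return perf_session__peek_events(session, offset, size,
					 count_auxtrace_cb, count);
}
#endif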

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset,
				       const char *file_path)
{
	struct evlist *evlist = session->evlist;
	const struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX) {
		/* perf should not support unaligned events, stop here. */
		if (event->header.size % sizeof(u64))
			return -EINVAL;

		/* This perf is outdated and does not support the latest event type. */
		ui__warning("Unsupported header type %u, please consider updating perf.\n",
			    event->header.type);
		/* Skip the unsupported event by returning its size. */
		return event->header.size;
	}

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset, file_path);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset, file_path);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset, file_path);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

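/*
 * Note: machine__findnew_thread() returns the thread with a reference
 * taken; callers are expected to drop it with thread__put().
 */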
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread = machine__idle_thread(&session->machines.host);

	/* machine__idle_thread() got the thread, so put it */
	thread__put(thread);
	return thread ? 0 : -1;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}

static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_collision != 0) {
		ui__warning("AUX data detected collision %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_collision,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which threads these\n"
			    "are by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "Is the time limit to process the proc map too short?\n"
			    "Increase it with --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile sig_atomic_t session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);

static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	const struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;
	bool update_prog = false;

	/*
	 * If the input is a file that saved pipe data (by redirection), it
	 * has a file name other than "-", so we can get the total size and
	 * show the progress.
	 */
	if (strcmp(session->data->path, "-") && session->data->file.size) {
		ui_progress__init_size(&prog, session->data->file.size,
				       "Processing events...");
		update_prog = true;
	}

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = perf_data__read(session->data, event,
			      sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = perf_data__read(session->data, p,
				      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out_err;

	if (update_prog)
		ui_progress__update(&prog, size);

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = session__flush_deferred_samples(session, tool);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (update_prog)
		ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
	       bool needs_swap, union perf_event *error)
{
	union perf_event *event;
	u16 event_size;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	event_size = event->header.size;
	if (head + event_size <= mmap_size)
		return event;

	/* We're not fetching the event so swap back again */
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	/* Check if the event fits into the next mmapped buf. */
	if (event_size <= mmap_size - head % page_size) {
		/* Remap buf and fetch again. */
		return NULL;
	}

	/* Invalid input. Event size should never exceed mmap_size. */
	pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
		 " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);

	return error;
}

static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}

static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}
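
/*
 * The two wrappers above differ only in how an event that overruns the
 * buffer is reported: fetch_mmaped_event() returns ERR_PTR(-EINVAL) so
 * file readers fail hard on corrupt input, while fetch_decomp_event()
 * returns NULL so the decompressed-buffer walk below simply stops.
 */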

static int __perf_session__process_decomp_events(struct perf_session *session)
{
	s64 skip;
	u64 size;
	struct decomp *decomp = session->active_decomp->decomp_last;

	if (!decomp)
		return 0;

	while (decomp->head < decomp->size && !session_done()) {
		union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
							     session->header.needs_swap);

		if (!event)
			break;

		size = event->header.size;

		if (size < sizeof(struct perf_event_header) ||
		    (skip = perf_session__process_event(session, event, decomp->file_pos,
							decomp->file_path)) < 0) {
			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
			       decomp->file_pos + decomp->head, event->header.size, event->header.type);
			return -EINVAL;
		}

		if (skip)
			size += skip;

		decomp->head += size;
	}

	return 0;
}

/*
 * On 64-bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32-bit we use 32 MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
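
/*
 * Worked example of the windowed-mmap arithmetic used by reader__mmap()
 * below (illustrative numbers): with page_size = 4096 and head = 0x12345,
 * page_offset = 4096 * (0x12345 / 4096) = 0x12000, so the next window is
 * mapped 0x12000 bytes further into the file and head becomes 0x345,
 * keeping the mmap offset page-aligned as mmap() requires.
 */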

struct reader;

typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset,
			   const char *file_path);

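/*
 * State for one sequential reader of event data, one instance per data
 * file: fd/path identify the file, data_offset/data_size bound the event
 * stream within it, and mmap_idx/mmap_cur/file_offset/head track the
 * window currently mapped by reader__mmap().
 */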
struct reader {
	int fd;
	const char *path;
	u64 data_size;
	u64 data_offset;
	reader_cb_t process;
	bool in_place_update;
	char *mmaps[NUM_MMAPS];
	size_t mmap_size;
	int mmap_idx;
	char *mmap_cur;
	u64 file_pos;
	u64 file_offset;
	u64 head;
	u64 size;
	bool done;
	struct zstd_data zstd_data;
	struct decomp_data decomp_data;
};

static int
reader__init(struct reader *rd, bool *one_mmap)
{
	u64 data_size = rd->data_size;
	char **mmaps = rd->mmaps;

	rd->head = rd->data_offset;
	data_size += rd->data_offset;

	rd->mmap_size = MMAP_SIZE;
	if (rd->mmap_size > data_size) {
		rd->mmap_size = data_size;
		if (one_mmap)
			*one_mmap = true;
	}

	memset(mmaps, 0, sizeof(rd->mmaps));

	if (zstd_init(&rd->zstd_data, 0))
		return -1;
	rd->decomp_data.zstd_decomp = &rd->zstd_data;

	return 0;
}

static void
reader__release_decomp(struct reader *rd)
{
	perf_decomp__release_events(rd->decomp_data.decomp);
	zstd_fini(&rd->zstd_data);
}

static int
reader__mmap(struct reader *rd, struct perf_session *session)
{
	int mmap_prot, mmap_flags;
	char *buf, **mmaps = rd->mmaps;
	u64 page_offset;

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (rd->in_place_update) {
		mmap_prot |= PROT_WRITE;
	} else if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}

	if (mmaps[rd->mmap_idx]) {
		munmap(mmaps[rd->mmap_idx], rd->mmap_size);
		mmaps[rd->mmap_idx] = NULL;
	}

	page_offset = page_size * (rd->head / page_size);
	rd->file_offset += page_offset;
	rd->head -= page_offset;

	buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd,
		   rd->file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		return -errno;
	}
	mmaps[rd->mmap_idx] = rd->mmap_cur = buf;
	rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1);
	rd->file_pos = rd->file_offset + rd->head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = rd->file_offset;
	}

	return 0;
}

enum {
	READER_OK,
	READER_NODATA,
};
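
/*
 * reader__read_event() below returns READER_OK after consuming one
 * event, READER_NODATA when the next event does not fit in the current
 * mmap window (the caller remaps and retries), or a negative errno.
 */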

static int
reader__read_event(struct reader *rd, struct perf_session *session,
		   struct ui_progress *prog)
{
	u64 size;
	int err = READER_OK;
	union perf_event *event;
	s64 skip;

	event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur,
				   session->header.needs_swap);
	if (IS_ERR(event))
		return PTR_ERR(event);

	if (!event)
		return READER_NODATA;

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       rd->file_offset + rd->head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	rd->size += size;
	rd->head += size;
	rd->file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

out:
	return err;
}

static inline bool
reader__eof(struct reader *rd)
{
	return (rd->file_pos >= rd->data_size + rd->data_offset);
}

static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	int err;

	err = reader__init(rd, &session->one_mmap);
	if (err)
		goto out;

	session->active_decomp = &rd->decomp_data;

remap:
	err = reader__mmap(rd, session);
	if (err)
		goto out;

more:
	err = reader__read_event(rd, session, prog);
	if (err < 0)
		goto out;
	else if (err == READER_NODATA)
		goto remap;

	if (session_done())
		goto out;

	if (!reader__eof(rd))
		goto more;

out:
	session->active_decomp = &session->decomp_data;
	return err;
}

static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset,
			  const char *file_path)
{
	return perf_session__process_event(session, event, file_offset, file_path);
}

static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		 = perf_data__fd(session->data),
		.path		 = session->data->file.path,
		.data_size	 = session->header.data_size,
		.data_offset	 = session->header.data_offset,
		.process	 = process_simple,
		.in_place_update = session->data->in_place_update,
	};
	struct ordered_events *oe = &session->ordered_events;
	const struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = session__flush_deferred_samples(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output, so make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	reader__release_decomp(&rd);
	session->one_mmap = false;
	return err;
}

/*
 * Process 2 MB of data from each reader in sequence, because that is
 * the way the ordered-events sorting works most efficiently.
 */
#define READER_MAX_SIZE (2 * 1024 * 1024)

/*
 * This function reads, merges and processes directory data.
 * It assumes version 1 of directory data, where each
 * data file holds per-cpu data, already sorted by the kernel.
 */
static int __perf_session__process_dir_events(struct perf_session *session)
{
	struct perf_data *data = session->data;
	const struct perf_tool *tool = session->tool;
	int i, ret, readers, nr_readers;
	struct ui_progress prog;
	u64 total_size = perf_data__size(session->data);
	struct reader *rd;

	ui_progress__init_size(&prog, total_size, "Processing events...");

	nr_readers = 1;
	for (i = 0; i < data->dir.nr; i++) {
		if (data->dir.files[i].size)
			nr_readers++;
	}

	rd = zalloc(nr_readers * sizeof(struct reader));
	if (!rd)
		return -ENOMEM;

	rd[0] = (struct reader) {
		.fd		 = perf_data__fd(session->data),
		.path		 = session->data->file.path,
		.data_size	 = session->header.data_size,
		.data_offset	 = session->header.data_offset,
		.process	 = process_simple,
		.in_place_update = session->data->in_place_update,
	};
	ret = reader__init(&rd[0], NULL);
	if (ret)
		goto out_err;
	ret = reader__mmap(&rd[0], session);
	if (ret)
		goto out_err;
	readers = 1;

	for (i = 0; i < data->dir.nr; i++) {
		if (!data->dir.files[i].size)
			continue;
		rd[readers] = (struct reader) {
			.fd		 = data->dir.files[i].fd,
			.path		 = data->dir.files[i].path,
			.data_size	 = data->dir.files[i].size,
			.data_offset	 = 0,
			.process	 = process_simple,
			.in_place_update = session->data->in_place_update,
		};
		ret = reader__init(&rd[readers], NULL);
		if (ret)
			goto out_err;
		ret = reader__mmap(&rd[readers], session);
		if (ret)
			goto out_err;
		readers++;
	}

	i = 0;
	while (readers) {
		if (session_done())
			break;

		if (rd[i].done) {
			i = (i + 1) % nr_readers;
			continue;
		}
		if (reader__eof(&rd[i])) {
			rd[i].done = true;
			readers--;
			continue;
		}

		session->active_decomp = &rd[i].decomp_data;
		ret = reader__read_event(&rd[i], session, &prog);
		if (ret < 0) {
			goto out_err;
		} else if (ret == READER_NODATA) {
			ret = reader__mmap(&rd[i], session);
			if (ret)
				goto out_err;
		}

		if (rd[i].size >= READER_MAX_SIZE) {
			rd[i].size = 0;
			i = (i + 1) % nr_readers;
		}
	}

	ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL);
	if (ret)
		goto out_err;

	ret = session__flush_deferred_samples(session, tool);
	if (ret)
		goto out_err;

	ret = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();

	if (!tool->no_warn)
		perf_session__warn_about_errors(session);

	/*
	 * We may be switching perf.data output, so make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);

	session->one_mmap = false;

	session->active_decomp = &session->decomp_data;
	for (i = 0; i < nr_readers; i++)
		reader__release_decomp(&rd[i]);
	zfree(&rd);

	return ret;
}

int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	if (perf_data__is_dir(session->data) && session->data->dir.nr)
		return __perf_session__process_dir_events(session);

	return __perf_session__process_events(session);
}
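
/*
 * Typical top-level flow (sketch; assumes a session already created with
 * its tool callbacks wired up by the caller):
 *
 *	if (perf_session__has_traces(session, "record -R"))
 *		err = perf_session__process_events(session);
 */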

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

bool perf_session__has_switch_events(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.context_switch)
			return true;
	}

	return false;
}

int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

void perf_session__dump_kmaps(struct perf_session *session)
{
	int save_verbose = verbose;

	fflush(stdout);
	fprintf(stderr, "Kernel and module maps:\n");
	verbose = 0; /* Suppress verbose to print a summary only */
	maps__fprintf(machine__kernel_maps(&session->machines.host), stderr);
	verbose = save_verbose;
}

struct evsel *perf_session__find_first_evtype(struct perf_session *session,
					      unsigned int type)
{
	struct evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->core.attr.type == type)
			return pos;
	}
	return NULL;
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct perf_cpu_map *map;
	int nr_cpus = min(perf_session__env(session)->nr_cpus_avail, MAX_NR_CPUS);
	struct perf_cpu cpu;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = perf_cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	perf_cpu_map__for_each_cpu(cpu, i, map) {
		if (cpu.cpu >= nr_cpus) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu.cpu);
			goto out_delete_map;
		}

		__set_bit(cpu.cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	perf_cpu_map__put(map);
	return err;
}
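
/*
 * Usage sketch (hypothetical caller, not part of this file): build a
 * bitmap for the CPU list "0-3" and test membership. DECLARE_BITMAP,
 * bitmap_zero and test_bit come from tools/include/linux/bitmap.h
 * (assumption: the caller pulls that header in, as real users of
 * perf_session__cpu_bitmap() do).
 */
#if 0
static int example__cpu_filter(struct perf_session *session)
{
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
	int err;

	bitmap_zero(cpu_bitmap, MAX_NR_CPUS);
	err = perf_session__cpu_bitmap(session, "0-3", cpu_bitmap);
	if (err)
		return err;

	return test_bit(2, cpu_bitmap) ? 0 : -1;
}
#endif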

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

static int perf_session__register_guest(struct perf_session *session, pid_t machine_pid)
{
	struct machine *machine = machines__findnew(&session->machines, machine_pid);
	struct thread *thread;

	if (!machine)
		return -ENOMEM;

	machine->single_address_space = session->machines.host.single_address_space;

	thread = machine__idle_thread(machine);
	if (!thread)
		return -ENOMEM;
	thread__put(thread);

	machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid);

	return 0;
}

static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
				       pid_t tid, int guest_cpu)
{
	struct machine *machine = &session->machines.host;
	struct thread *thread = machine__findnew_thread(machine, pid, tid);

	if (!thread)
		return -ENOMEM;
	thread__set_guest_cpu(thread, guest_cpu);
	thread__put(thread);

	return 0;
}

int perf_event__process_id_index(const struct perf_tool *tool __maybe_unused,
				 struct perf_session *session,
				 union perf_event *event)
{
	struct evlist *evlist = session->evlist;
	struct perf_record_id_index *ie = &event->id_index;
	size_t sz = ie->header.size - sizeof(*ie);
	size_t i, nr, max_nr;
	size_t e1_sz = sizeof(struct id_index_entry);
	size_t e2_sz = sizeof(struct id_index_entry_2);
	size_t etot_sz = e1_sz + e2_sz;
	struct id_index_entry_2 *e2;
	pid_t last_pid = 0;

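	/*
	 * The record carries nr id_index_entry structs; newer files append
	 * a parallel array of id_index_entry_2 (machine_pid/vcpu) after
	 * them, detected below purely from the record size.
	 */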
	max_nr = sz / e1_sz;
	nr = ie->nr;
	if (nr > max_nr) {
		printf("Too big: nr %zu max_nr %zu\n", nr, max_nr);
		return -EINVAL;
	}

	if (sz >= nr * etot_sz) {
		max_nr = sz / etot_sz;
		if (nr > max_nr) {
			printf("Too big2: nr %zu max_nr %zu\n", nr, max_nr);
			return -EINVAL;
		}
		e2 = (void *)ie + sizeof(*ie) + nr * e1_sz;
	} else {
		e2 = NULL;
	}

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++, (e2 ? e2++ : 0)) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;
		int ret;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRI_lu64, e->id);
			fprintf(stdout, " idx: %"PRI_lu64, e->idx);
			fprintf(stdout, " cpu: %"PRI_ld64, e->cpu);
			fprintf(stdout, " tid: %"PRI_ld64, e->tid);
			if (e2) {
				fprintf(stdout, " machine_pid: %"PRI_ld64, e2->machine_pid);
				fprintf(stdout, " vcpu: %"PRI_lu64"\n", e2->vcpu);
			} else {
				fprintf(stdout, "\n");
			}
		}

		sid = evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;

		sid->idx = e->idx;
		sid->cpu.cpu = e->cpu;
		sid->tid = e->tid;

		if (!e2)
			continue;

		sid->machine_pid = e2->machine_pid;
		sid->vcpu.cpu = e2->vcpu;

		if (!sid->machine_pid)
			continue;

		if (sid->machine_pid != last_pid) {
			ret = perf_session__register_guest(session, sid->machine_pid);
			if (ret)
				return ret;
			last_pid = sid->machine_pid;
			perf_guest = true;
		}

		ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu);
		if (ret)
			return ret;
	}
	return 0;
}

int perf_session__dsos_hit_all(struct perf_session *session)
{
	struct rb_node *nd;
	int err;

	err = machine__hit_all_dsos(&session->machines.host);
	if (err)
		return err;

	for (nd = rb_first_cached(&session->machines.guests); nd;
	     nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		err = machine__hit_all_dsos(pos);
		if (err)
			return err;
	}

	return 0;
}

struct perf_env *perf_session__env(struct perf_session *session)
{
	return &session->header.env;
}