1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <signal.h> 4 #include <inttypes.h> 5 #include <linux/err.h> 6 #include <linux/kernel.h> 7 #include <linux/zalloc.h> 8 #include <api/fs/fs.h> 9 10 #include <byteswap.h> 11 #include <unistd.h> 12 #include <sys/types.h> 13 #include <sys/mman.h> 14 #include <perf/cpumap.h> 15 #include <perf/event.h> 16 17 #include "map_symbol.h" 18 #include "branch.h" 19 #include "debug.h" 20 #include "env.h" 21 #include "evlist.h" 22 #include "evsel.h" 23 #include "memswap.h" 24 #include "map.h" 25 #include "symbol.h" 26 #include "session.h" 27 #include "tool.h" 28 #include "perf_regs.h" 29 #include "asm/bug.h" 30 #include "auxtrace.h" 31 #include "thread.h" 32 #include "thread-stack.h" 33 #include "sample-raw.h" 34 #include "stat.h" 35 #include "tsc.h" 36 #include "ui/progress.h" 37 #include "util.h" 38 #include "arch/common.h" 39 #include "units.h" 40 #include "annotate.h" 41 #include "perf.h" 42 #include <internal/lib.h> 43 44 static int perf_session__deliver_event(struct perf_session *session, 45 union perf_event *event, 46 const struct perf_tool *tool, 47 u64 file_offset, 48 const char *file_path); 49 50 static int perf_session__open(struct perf_session *session) 51 { 52 struct perf_data *data = session->data; 53 54 if (perf_session__read_header(session) < 0) { 55 pr_err("incompatible file format (rerun with -v to learn more)\n"); 56 return -1; 57 } 58 59 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) { 60 /* Auxiliary events may reference exited threads, hold onto dead ones. */ 61 symbol_conf.keep_exited_threads = true; 62 } 63 64 if (perf_data__is_pipe(data)) 65 return 0; 66 67 if (perf_header__has_feat(&session->header, HEADER_STAT)) 68 return 0; 69 70 if (!evlist__valid_sample_type(session->evlist)) { 71 pr_err("non matching sample_type\n"); 72 return -1; 73 } 74 75 if (!evlist__valid_sample_id_all(session->evlist)) { 76 pr_err("non matching sample_id_all\n"); 77 return -1; 78 } 79 80 if (!evlist__valid_read_format(session->evlist)) { 81 pr_err("non matching read_format\n"); 82 return -1; 83 } 84 85 return 0; 86 } 87 88 void perf_session__set_id_hdr_size(struct perf_session *session) 89 { 90 u16 id_hdr_size = evlist__id_hdr_size(session->evlist); 91 92 machines__set_id_hdr_size(&session->machines, id_hdr_size); 93 } 94 95 int perf_session__create_kernel_maps(struct perf_session *session) 96 { 97 int ret = machine__create_kernel_maps(&session->machines.host); 98 99 if (ret >= 0) 100 ret = machines__create_guest_kernel_maps(&session->machines); 101 return ret; 102 } 103 104 static void perf_session__destroy_kernel_maps(struct perf_session *session) 105 { 106 machines__destroy_kernel_maps(&session->machines); 107 } 108 109 static bool perf_session__has_comm_exec(struct perf_session *session) 110 { 111 struct evsel *evsel; 112 113 evlist__for_each_entry(session->evlist, evsel) { 114 if (evsel->core.attr.comm_exec) 115 return true; 116 } 117 118 return false; 119 } 120 121 static void perf_session__set_comm_exec(struct perf_session *session) 122 { 123 bool comm_exec = perf_session__has_comm_exec(session); 124 125 machines__set_comm_exec(&session->machines, comm_exec); 126 } 127 128 static int ordered_events__deliver_event(struct ordered_events *oe, 129 struct ordered_event *event) 130 { 131 struct perf_session *session = container_of(oe, struct perf_session, 132 ordered_events); 133 134 return perf_session__deliver_event(session, event->event, 135 session->tool, event->file_offset, 136 event->file_path); 137 } 138 139 struct perf_session *__perf_session__new(struct perf_data *data, 140 struct perf_tool *tool, 141 bool trace_event_repipe, 142 struct perf_env *host_env) 143 { 144 int ret = -ENOMEM; 145 struct perf_session *session = zalloc(sizeof(*session)); 146 147 if (!session) 148 goto out; 149 150 session->trace_event_repipe = trace_event_repipe; 151 session->tool = tool; 152 session->decomp_data.zstd_decomp = &session->zstd_data; 153 session->active_decomp = &session->decomp_data; 154 INIT_LIST_HEAD(&session->auxtrace_index); 155 machines__init(&session->machines); 156 ordered_events__init(&session->ordered_events, 157 ordered_events__deliver_event, NULL); 158 159 perf_env__init(&session->header.env); 160 if (data) { 161 ret = perf_data__open(data); 162 if (ret < 0) 163 goto out_delete; 164 165 session->data = data; 166 167 if (perf_data__is_read(data)) { 168 ret = perf_session__open(session); 169 if (ret < 0) 170 goto out_delete; 171 172 /* 173 * set session attributes that are present in perf.data 174 * but not in pipe-mode. 175 */ 176 if (!data->is_pipe) { 177 perf_session__set_id_hdr_size(session); 178 perf_session__set_comm_exec(session); 179 } 180 181 evlist__init_trace_event_sample_raw(session->evlist, &session->header.env); 182 183 /* Open the directory data. */ 184 if (data->is_dir) { 185 ret = perf_data__open_dir(data); 186 if (ret) 187 goto out_delete; 188 } 189 190 if (!symbol_conf.kallsyms_name && 191 !symbol_conf.vmlinux_name) 192 symbol_conf.kallsyms_name = perf_data__kallsyms_name(data); 193 } 194 } else { 195 assert(host_env != NULL); 196 session->machines.host.env = host_env; 197 } 198 if (session->evlist) 199 session->evlist->session = session; 200 201 session->machines.host.single_address_space = 202 perf_env__single_address_space(session->machines.host.env); 203 204 if (!data || perf_data__is_write(data)) { 205 /* 206 * In O_RDONLY mode this will be performed when reading the 207 * kernel MMAP event, in perf_event__process_mmap(). 208 */ 209 if (perf_session__create_kernel_maps(session) < 0) 210 pr_warning("Cannot read kernel map\n"); 211 } 212 213 /* 214 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is 215 * processed, so evlist__sample_id_all is not meaningful here. 216 */ 217 if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps && 218 tool->ordered_events && !evlist__sample_id_all(session->evlist)) { 219 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 220 tool->ordered_events = false; 221 } 222 223 return session; 224 225 out_delete: 226 perf_session__delete(session); 227 out: 228 return ERR_PTR(ret); 229 } 230 231 static void perf_decomp__release_events(struct decomp *next) 232 { 233 struct decomp *decomp; 234 size_t mmap_len; 235 236 do { 237 decomp = next; 238 if (decomp == NULL) 239 break; 240 next = decomp->next; 241 mmap_len = decomp->mmap_len; 242 munmap(decomp, mmap_len); 243 } while (1); 244 } 245 246 void perf_session__delete(struct perf_session *session) 247 { 248 if (session == NULL) 249 return; 250 auxtrace__free(session); 251 auxtrace_index__free(&session->auxtrace_index); 252 debuginfo_cache__delete(); 253 perf_session__destroy_kernel_maps(session); 254 perf_decomp__release_events(session->decomp_data.decomp); 255 perf_env__exit(&session->header.env); 256 machines__exit(&session->machines); 257 if (session->data) { 258 if (perf_data__is_read(session->data)) 259 evlist__delete(session->evlist); 260 perf_data__close(session->data); 261 } 262 #ifdef HAVE_LIBTRACEEVENT 263 trace_event__cleanup(&session->tevent); 264 #endif 265 free(session); 266 } 267 268 static void swap_sample_id_all(union perf_event *event, void *data) 269 { 270 void *end = (void *) event + event->header.size; 271 int size = end - data; 272 273 BUG_ON(size % sizeof(u64)); 274 mem_bswap_64(data, size); 275 } 276 277 static void perf_event__all64_swap(union perf_event *event, 278 bool sample_id_all __maybe_unused) 279 { 280 struct perf_event_header *hdr = &event->header; 281 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 282 } 283 284 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) 285 { 286 event->comm.pid = bswap_32(event->comm.pid); 287 event->comm.tid = bswap_32(event->comm.tid); 288 289 if (sample_id_all) { 290 void *data = &event->comm.comm; 291 292 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 293 swap_sample_id_all(event, data); 294 } 295 } 296 297 static void perf_event__mmap_swap(union perf_event *event, 298 bool sample_id_all) 299 { 300 event->mmap.pid = bswap_32(event->mmap.pid); 301 event->mmap.tid = bswap_32(event->mmap.tid); 302 event->mmap.start = bswap_64(event->mmap.start); 303 event->mmap.len = bswap_64(event->mmap.len); 304 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 305 306 if (sample_id_all) { 307 void *data = &event->mmap.filename; 308 309 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 310 swap_sample_id_all(event, data); 311 } 312 } 313 314 static void perf_event__mmap2_swap(union perf_event *event, 315 bool sample_id_all) 316 { 317 event->mmap2.pid = bswap_32(event->mmap2.pid); 318 event->mmap2.tid = bswap_32(event->mmap2.tid); 319 event->mmap2.start = bswap_64(event->mmap2.start); 320 event->mmap2.len = bswap_64(event->mmap2.len); 321 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); 322 323 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) { 324 event->mmap2.maj = bswap_32(event->mmap2.maj); 325 event->mmap2.min = bswap_32(event->mmap2.min); 326 event->mmap2.ino = bswap_64(event->mmap2.ino); 327 event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation); 328 } 329 330 if (sample_id_all) { 331 void *data = &event->mmap2.filename; 332 333 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 334 swap_sample_id_all(event, data); 335 } 336 } 337 static void perf_event__task_swap(union perf_event *event, bool sample_id_all) 338 { 339 event->fork.pid = bswap_32(event->fork.pid); 340 event->fork.tid = bswap_32(event->fork.tid); 341 event->fork.ppid = bswap_32(event->fork.ppid); 342 event->fork.ptid = bswap_32(event->fork.ptid); 343 event->fork.time = bswap_64(event->fork.time); 344 345 if (sample_id_all) 346 swap_sample_id_all(event, &event->fork + 1); 347 } 348 349 static void perf_event__read_swap(union perf_event *event, bool sample_id_all) 350 { 351 event->read.pid = bswap_32(event->read.pid); 352 event->read.tid = bswap_32(event->read.tid); 353 event->read.value = bswap_64(event->read.value); 354 event->read.time_enabled = bswap_64(event->read.time_enabled); 355 event->read.time_running = bswap_64(event->read.time_running); 356 event->read.id = bswap_64(event->read.id); 357 358 if (sample_id_all) 359 swap_sample_id_all(event, &event->read + 1); 360 } 361 362 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all) 363 { 364 event->aux.aux_offset = bswap_64(event->aux.aux_offset); 365 event->aux.aux_size = bswap_64(event->aux.aux_size); 366 event->aux.flags = bswap_64(event->aux.flags); 367 368 if (sample_id_all) 369 swap_sample_id_all(event, &event->aux + 1); 370 } 371 372 static void perf_event__itrace_start_swap(union perf_event *event, 373 bool sample_id_all) 374 { 375 event->itrace_start.pid = bswap_32(event->itrace_start.pid); 376 event->itrace_start.tid = bswap_32(event->itrace_start.tid); 377 378 if (sample_id_all) 379 swap_sample_id_all(event, &event->itrace_start + 1); 380 } 381 382 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all) 383 { 384 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) { 385 event->context_switch.next_prev_pid = 386 bswap_32(event->context_switch.next_prev_pid); 387 event->context_switch.next_prev_tid = 388 bswap_32(event->context_switch.next_prev_tid); 389 } 390 391 if (sample_id_all) 392 swap_sample_id_all(event, &event->context_switch + 1); 393 } 394 395 static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all) 396 { 397 event->text_poke.addr = bswap_64(event->text_poke.addr); 398 event->text_poke.old_len = bswap_16(event->text_poke.old_len); 399 event->text_poke.new_len = bswap_16(event->text_poke.new_len); 400 401 if (sample_id_all) { 402 size_t len = sizeof(event->text_poke.old_len) + 403 sizeof(event->text_poke.new_len) + 404 event->text_poke.old_len + 405 event->text_poke.new_len; 406 void *data = &event->text_poke.old_len; 407 408 data += PERF_ALIGN(len, sizeof(u64)); 409 swap_sample_id_all(event, data); 410 } 411 } 412 413 static void perf_event__throttle_swap(union perf_event *event, 414 bool sample_id_all) 415 { 416 event->throttle.time = bswap_64(event->throttle.time); 417 event->throttle.id = bswap_64(event->throttle.id); 418 event->throttle.stream_id = bswap_64(event->throttle.stream_id); 419 420 if (sample_id_all) 421 swap_sample_id_all(event, &event->throttle + 1); 422 } 423 424 static void perf_event__namespaces_swap(union perf_event *event, 425 bool sample_id_all) 426 { 427 u64 i; 428 429 event->namespaces.pid = bswap_32(event->namespaces.pid); 430 event->namespaces.tid = bswap_32(event->namespaces.tid); 431 event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces); 432 433 for (i = 0; i < event->namespaces.nr_namespaces; i++) { 434 struct perf_ns_link_info *ns = &event->namespaces.link_info[i]; 435 436 ns->dev = bswap_64(ns->dev); 437 ns->ino = bswap_64(ns->ino); 438 } 439 440 if (sample_id_all) 441 swap_sample_id_all(event, &event->namespaces.link_info[i]); 442 } 443 444 static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all) 445 { 446 event->cgroup.id = bswap_64(event->cgroup.id); 447 448 if (sample_id_all) { 449 void *data = &event->cgroup.path; 450 451 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 452 swap_sample_id_all(event, data); 453 } 454 } 455 456 static u8 revbyte(u8 b) 457 { 458 int rev = (b >> 4) | ((b & 0xf) << 4); 459 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2); 460 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1); 461 return (u8) rev; 462 } 463 464 /* 465 * XXX this is hack in attempt to carry flags bitfield 466 * through endian village. ABI says: 467 * 468 * Bit-fields are allocated from right to left (least to most significant) 469 * on little-endian implementations and from left to right (most to least 470 * significant) on big-endian implementations. 471 * 472 * The above seems to be byte specific, so we need to reverse each 473 * byte of the bitfield. 'Internet' also says this might be implementation 474 * specific and we probably need proper fix and carry perf_event_attr 475 * bitfield flags in separate data file FEAT_ section. Thought this seems 476 * to work for now. 477 */ 478 static void swap_bitfield(u8 *p, unsigned len) 479 { 480 unsigned i; 481 482 for (i = 0; i < len; i++) { 483 *p = revbyte(*p); 484 p++; 485 } 486 } 487 488 /* exported for swapping attributes in file header */ 489 void perf_event__attr_swap(struct perf_event_attr *attr) 490 { 491 attr->type = bswap_32(attr->type); 492 attr->size = bswap_32(attr->size); 493 494 #define bswap_safe(f, n) \ 495 (attr->size > (offsetof(struct perf_event_attr, f) + \ 496 sizeof(attr->f) * (n))) 497 #define bswap_field(f, sz) \ 498 do { \ 499 if (bswap_safe(f, 0)) \ 500 attr->f = bswap_##sz(attr->f); \ 501 } while(0) 502 #define bswap_field_16(f) bswap_field(f, 16) 503 #define bswap_field_32(f) bswap_field(f, 32) 504 #define bswap_field_64(f) bswap_field(f, 64) 505 506 bswap_field_64(config); 507 bswap_field_64(sample_period); 508 bswap_field_64(sample_type); 509 bswap_field_64(read_format); 510 bswap_field_32(wakeup_events); 511 bswap_field_32(bp_type); 512 bswap_field_64(bp_addr); 513 bswap_field_64(bp_len); 514 bswap_field_64(branch_sample_type); 515 bswap_field_64(sample_regs_user); 516 bswap_field_32(sample_stack_user); 517 bswap_field_32(aux_watermark); 518 bswap_field_16(sample_max_stack); 519 bswap_field_32(aux_sample_size); 520 521 /* 522 * After read_format are bitfields. Check read_format because 523 * we are unable to use offsetof on bitfield. 524 */ 525 if (bswap_safe(read_format, 1)) 526 swap_bitfield((u8 *) (&attr->read_format + 1), 527 sizeof(u64)); 528 #undef bswap_field_64 529 #undef bswap_field_32 530 #undef bswap_field 531 #undef bswap_safe 532 } 533 534 static void perf_event__hdr_attr_swap(union perf_event *event, 535 bool sample_id_all __maybe_unused) 536 { 537 size_t size; 538 539 perf_event__attr_swap(&event->attr.attr); 540 541 size = event->header.size; 542 size -= perf_record_header_attr_id(event) - (void *)event; 543 mem_bswap_64(perf_record_header_attr_id(event), size); 544 } 545 546 static void perf_event__event_update_swap(union perf_event *event, 547 bool sample_id_all __maybe_unused) 548 { 549 event->event_update.type = bswap_64(event->event_update.type); 550 event->event_update.id = bswap_64(event->event_update.id); 551 } 552 553 static void perf_event__event_type_swap(union perf_event *event, 554 bool sample_id_all __maybe_unused) 555 { 556 event->event_type.event_type.event_id = 557 bswap_64(event->event_type.event_type.event_id); 558 } 559 560 static void perf_event__tracing_data_swap(union perf_event *event, 561 bool sample_id_all __maybe_unused) 562 { 563 event->tracing_data.size = bswap_32(event->tracing_data.size); 564 } 565 566 static void perf_event__auxtrace_info_swap(union perf_event *event, 567 bool sample_id_all __maybe_unused) 568 { 569 size_t size; 570 571 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type); 572 573 size = event->header.size; 574 size -= (void *)&event->auxtrace_info.priv - (void *)event; 575 mem_bswap_64(event->auxtrace_info.priv, size); 576 } 577 578 static void perf_event__auxtrace_swap(union perf_event *event, 579 bool sample_id_all __maybe_unused) 580 { 581 event->auxtrace.size = bswap_64(event->auxtrace.size); 582 event->auxtrace.offset = bswap_64(event->auxtrace.offset); 583 event->auxtrace.reference = bswap_64(event->auxtrace.reference); 584 event->auxtrace.idx = bswap_32(event->auxtrace.idx); 585 event->auxtrace.tid = bswap_32(event->auxtrace.tid); 586 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu); 587 } 588 589 static void perf_event__auxtrace_error_swap(union perf_event *event, 590 bool sample_id_all __maybe_unused) 591 { 592 event->auxtrace_error.type = bswap_32(event->auxtrace_error.type); 593 event->auxtrace_error.code = bswap_32(event->auxtrace_error.code); 594 event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu); 595 event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid); 596 event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid); 597 event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt); 598 event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip); 599 if (event->auxtrace_error.fmt) 600 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time); 601 if (event->auxtrace_error.fmt >= 2) { 602 event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid); 603 event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu); 604 } 605 } 606 607 static void perf_event__thread_map_swap(union perf_event *event, 608 bool sample_id_all __maybe_unused) 609 { 610 unsigned i; 611 612 event->thread_map.nr = bswap_64(event->thread_map.nr); 613 614 for (i = 0; i < event->thread_map.nr; i++) 615 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid); 616 } 617 618 static void perf_event__cpu_map_swap(union perf_event *event, 619 bool sample_id_all __maybe_unused) 620 { 621 struct perf_record_cpu_map_data *data = &event->cpu_map.data; 622 623 data->type = bswap_16(data->type); 624 625 switch (data->type) { 626 case PERF_CPU_MAP__CPUS: 627 data->cpus_data.nr = bswap_16(data->cpus_data.nr); 628 629 for (unsigned i = 0; i < data->cpus_data.nr; i++) 630 data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]); 631 break; 632 case PERF_CPU_MAP__MASK: 633 data->mask32_data.long_size = bswap_16(data->mask32_data.long_size); 634 635 switch (data->mask32_data.long_size) { 636 case 4: 637 data->mask32_data.nr = bswap_16(data->mask32_data.nr); 638 for (unsigned i = 0; i < data->mask32_data.nr; i++) 639 data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]); 640 break; 641 case 8: 642 data->mask64_data.nr = bswap_16(data->mask64_data.nr); 643 for (unsigned i = 0; i < data->mask64_data.nr; i++) 644 data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]); 645 break; 646 default: 647 pr_err("cpu_map swap: unsupported long size\n"); 648 } 649 break; 650 case PERF_CPU_MAP__RANGE_CPUS: 651 data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu); 652 data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu); 653 break; 654 default: 655 break; 656 } 657 } 658 659 static void perf_event__stat_config_swap(union perf_event *event, 660 bool sample_id_all __maybe_unused) 661 { 662 u64 size; 663 664 size = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]); 665 size += 1; /* nr item itself */ 666 mem_bswap_64(&event->stat_config.nr, size); 667 } 668 669 static void perf_event__stat_swap(union perf_event *event, 670 bool sample_id_all __maybe_unused) 671 { 672 event->stat.id = bswap_64(event->stat.id); 673 event->stat.thread = bswap_32(event->stat.thread); 674 event->stat.cpu = bswap_32(event->stat.cpu); 675 event->stat.val = bswap_64(event->stat.val); 676 event->stat.ena = bswap_64(event->stat.ena); 677 event->stat.run = bswap_64(event->stat.run); 678 } 679 680 static void perf_event__stat_round_swap(union perf_event *event, 681 bool sample_id_all __maybe_unused) 682 { 683 event->stat_round.type = bswap_64(event->stat_round.type); 684 event->stat_round.time = bswap_64(event->stat_round.time); 685 } 686 687 static void perf_event__time_conv_swap(union perf_event *event, 688 bool sample_id_all __maybe_unused) 689 { 690 event->time_conv.time_shift = bswap_64(event->time_conv.time_shift); 691 event->time_conv.time_mult = bswap_64(event->time_conv.time_mult); 692 event->time_conv.time_zero = bswap_64(event->time_conv.time_zero); 693 694 if (event_contains(event->time_conv, time_cycles)) { 695 event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles); 696 event->time_conv.time_mask = bswap_64(event->time_conv.time_mask); 697 } 698 } 699 700 typedef void (*perf_event__swap_op)(union perf_event *event, 701 bool sample_id_all); 702 703 static perf_event__swap_op perf_event__swap_ops[] = { 704 [PERF_RECORD_MMAP] = perf_event__mmap_swap, 705 [PERF_RECORD_MMAP2] = perf_event__mmap2_swap, 706 [PERF_RECORD_COMM] = perf_event__comm_swap, 707 [PERF_RECORD_FORK] = perf_event__task_swap, 708 [PERF_RECORD_EXIT] = perf_event__task_swap, 709 [PERF_RECORD_LOST] = perf_event__all64_swap, 710 [PERF_RECORD_READ] = perf_event__read_swap, 711 [PERF_RECORD_THROTTLE] = perf_event__throttle_swap, 712 [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap, 713 [PERF_RECORD_SAMPLE] = perf_event__all64_swap, 714 [PERF_RECORD_AUX] = perf_event__aux_swap, 715 [PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap, 716 [PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap, 717 [PERF_RECORD_SWITCH] = perf_event__switch_swap, 718 [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap, 719 [PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap, 720 [PERF_RECORD_CGROUP] = perf_event__cgroup_swap, 721 [PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap, 722 [PERF_RECORD_AUX_OUTPUT_HW_ID] = perf_event__all64_swap, 723 [PERF_RECORD_CALLCHAIN_DEFERRED] = perf_event__all64_swap, 724 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, 725 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 726 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, 727 [PERF_RECORD_HEADER_BUILD_ID] = NULL, 728 [PERF_RECORD_ID_INDEX] = perf_event__all64_swap, 729 [PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap, 730 [PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap, 731 [PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap, 732 [PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap, 733 [PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap, 734 [PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap, 735 [PERF_RECORD_STAT] = perf_event__stat_swap, 736 [PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap, 737 [PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap, 738 [PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap, 739 [PERF_RECORD_HEADER_MAX] = NULL, 740 }; 741 742 /* 743 * When perf record finishes a pass on every buffers, it records this pseudo 744 * event. 745 * We record the max timestamp t found in the pass n. 746 * Assuming these timestamps are monotonic across cpus, we know that if 747 * a buffer still has events with timestamps below t, they will be all 748 * available and then read in the pass n + 1. 749 * Hence when we start to read the pass n + 2, we can safely flush every 750 * events with timestamps below t. 751 * 752 * ============ PASS n ================= 753 * CPU 0 | CPU 1 754 * | 755 * cnt1 timestamps | cnt2 timestamps 756 * 1 | 2 757 * 2 | 3 758 * - | 4 <--- max recorded 759 * 760 * ============ PASS n + 1 ============== 761 * CPU 0 | CPU 1 762 * | 763 * cnt1 timestamps | cnt2 timestamps 764 * 3 | 5 765 * 4 | 6 766 * 5 | 7 <---- max recorded 767 * 768 * Flush every events below timestamp 4 769 * 770 * ============ PASS n + 2 ============== 771 * CPU 0 | CPU 1 772 * | 773 * cnt1 timestamps | cnt2 timestamps 774 * 6 | 8 775 * 7 | 9 776 * - | 10 777 * 778 * Flush every events below timestamp 7 779 * etc... 780 */ 781 int perf_event__process_finished_round(const struct perf_tool *tool __maybe_unused, 782 union perf_event *event __maybe_unused, 783 struct ordered_events *oe) 784 { 785 if (dump_trace) 786 fprintf(stdout, "\n"); 787 return ordered_events__flush(oe, OE_FLUSH__ROUND); 788 } 789 790 int perf_session__queue_event(struct perf_session *s, union perf_event *event, 791 u64 timestamp, u64 file_offset, const char *file_path) 792 { 793 return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path); 794 } 795 796 static void callchain__lbr_callstack_printf(struct perf_sample *sample) 797 { 798 struct ip_callchain *callchain = sample->callchain; 799 struct branch_stack *lbr_stack = sample->branch_stack; 800 struct branch_entry *entries = perf_sample__branch_entries(sample); 801 u64 kernel_callchain_nr = callchain->nr; 802 unsigned int i; 803 804 for (i = 0; i < kernel_callchain_nr; i++) { 805 if (callchain->ips[i] == PERF_CONTEXT_USER) 806 break; 807 } 808 809 if ((i != kernel_callchain_nr) && lbr_stack->nr) { 810 u64 total_nr; 811 /* 812 * LBR callstack can only get user call chain, 813 * i is kernel call chain number, 814 * 1 is PERF_CONTEXT_USER. 815 * 816 * The user call chain is stored in LBR registers. 817 * LBR are pair registers. The caller is stored 818 * in "from" register, while the callee is stored 819 * in "to" register. 820 * For example, there is a call stack 821 * "A"->"B"->"C"->"D". 822 * The LBR registers will be recorded like 823 * "C"->"D", "B"->"C", "A"->"B". 824 * So only the first "to" register and all "from" 825 * registers are needed to construct the whole stack. 826 */ 827 total_nr = i + 1 + lbr_stack->nr + 1; 828 kernel_callchain_nr = i + 1; 829 830 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr); 831 832 for (i = 0; i < kernel_callchain_nr; i++) 833 printf("..... %2d: %016" PRIx64 "\n", 834 i, callchain->ips[i]); 835 836 printf("..... %2d: %016" PRIx64 "\n", 837 (int)(kernel_callchain_nr), entries[0].to); 838 for (i = 0; i < lbr_stack->nr; i++) 839 printf("..... %2d: %016" PRIx64 "\n", 840 (int)(i + kernel_callchain_nr + 1), entries[i].from); 841 } 842 } 843 844 static void callchain__printf(struct evsel *evsel, 845 struct perf_sample *sample) 846 { 847 unsigned int i; 848 struct ip_callchain *callchain = sample->callchain; 849 850 if (evsel__has_branch_callstack(evsel)) 851 callchain__lbr_callstack_printf(sample); 852 853 printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr); 854 855 for (i = 0; i < callchain->nr; i++) 856 printf("..... %2d: %016" PRIx64 "\n", 857 i, callchain->ips[i]); 858 859 if (sample->deferred_callchain) 860 printf("...... (deferred)\n"); 861 } 862 863 static void branch_stack__printf(struct perf_sample *sample, 864 struct evsel *evsel) 865 { 866 struct branch_entry *entries = perf_sample__branch_entries(sample); 867 bool callstack = evsel__has_branch_callstack(evsel); 868 u64 *branch_stack_cntr = sample->branch_stack_cntr; 869 uint64_t i; 870 871 if (!callstack) { 872 printf("%s: nr:%" PRIu64 "\n", "... branch stack", sample->branch_stack->nr); 873 } else { 874 /* the reason of adding 1 to nr is because after expanding 875 * branch stack it generates nr + 1 callstack records. e.g., 876 * B()->C() 877 * A()->B() 878 * the final callstack should be: 879 * C() 880 * B() 881 * A() 882 */ 883 printf("%s: nr:%" PRIu64 "\n", "... branch callstack", sample->branch_stack->nr+1); 884 } 885 886 for (i = 0; i < sample->branch_stack->nr; i++) { 887 struct branch_entry *e = &entries[i]; 888 889 if (!callstack) { 890 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x %s %s\n", 891 i, e->from, e->to, 892 (unsigned short)e->flags.cycles, 893 e->flags.mispred ? "M" : " ", 894 e->flags.predicted ? "P" : " ", 895 e->flags.abort ? "A" : " ", 896 e->flags.in_tx ? "T" : " ", 897 (unsigned)e->flags.reserved, 898 get_branch_type(e), 899 e->flags.spec ? branch_spec_desc(e->flags.spec) : ""); 900 } else { 901 if (i == 0) { 902 printf("..... %2"PRIu64": %016" PRIx64 "\n" 903 "..... %2"PRIu64": %016" PRIx64 "\n", 904 i, e->to, i+1, e->from); 905 } else { 906 printf("..... %2"PRIu64": %016" PRIx64 "\n", i+1, e->from); 907 } 908 } 909 } 910 911 if (branch_stack_cntr) { 912 unsigned int br_cntr_width, br_cntr_nr; 913 914 perf_env__find_br_cntr_info(evsel__env(evsel), &br_cntr_nr, &br_cntr_width); 915 printf("... branch stack counters: nr:%" PRIu64 " (counter width: %u max counter nr:%u)\n", 916 sample->branch_stack->nr, br_cntr_width, br_cntr_nr); 917 for (i = 0; i < sample->branch_stack->nr; i++) 918 printf("..... %2"PRIu64": %016" PRIx64 "\n", i, branch_stack_cntr[i]); 919 } 920 } 921 922 static void regs_dump__printf(u64 mask, u64 *regs, const char *arch) 923 { 924 unsigned rid, i = 0; 925 926 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { 927 u64 val = regs[i++]; 928 929 printf(".... %-5s 0x%016" PRIx64 "\n", 930 perf_reg_name(rid, arch), val); 931 } 932 } 933 934 static const char *regs_abi[] = { 935 [PERF_SAMPLE_REGS_ABI_NONE] = "none", 936 [PERF_SAMPLE_REGS_ABI_32] = "32-bit", 937 [PERF_SAMPLE_REGS_ABI_64] = "64-bit", 938 }; 939 940 static inline const char *regs_dump_abi(struct regs_dump *d) 941 { 942 if (d->abi > PERF_SAMPLE_REGS_ABI_64) 943 return "unknown"; 944 945 return regs_abi[d->abi]; 946 } 947 948 static void regs__printf(const char *type, struct regs_dump *regs, const char *arch) 949 { 950 u64 mask = regs->mask; 951 952 printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n", 953 type, 954 mask, 955 regs_dump_abi(regs)); 956 957 regs_dump__printf(mask, regs->regs, arch); 958 } 959 960 static void regs_user__printf(struct perf_sample *sample, const char *arch) 961 { 962 struct regs_dump *user_regs; 963 964 if (!sample->user_regs) 965 return; 966 967 user_regs = perf_sample__user_regs(sample); 968 969 if (user_regs->regs) 970 regs__printf("user", user_regs, arch); 971 } 972 973 static void regs_intr__printf(struct perf_sample *sample, const char *arch) 974 { 975 struct regs_dump *intr_regs; 976 977 if (!sample->intr_regs) 978 return; 979 980 intr_regs = perf_sample__intr_regs(sample); 981 982 if (intr_regs->regs) 983 regs__printf("intr", intr_regs, arch); 984 } 985 986 static void stack_user__printf(struct stack_dump *dump) 987 { 988 printf("... ustack: size %" PRIu64 ", offset 0x%x\n", 989 dump->size, dump->offset); 990 } 991 992 static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample) 993 { 994 u64 sample_type = __evlist__combined_sample_type(evlist); 995 996 if (event->header.type != PERF_RECORD_SAMPLE && 997 !evlist__sample_id_all(evlist)) { 998 fputs("-1 -1 ", stdout); 999 return; 1000 } 1001 1002 if ((sample_type & PERF_SAMPLE_CPU)) 1003 printf("%u ", sample->cpu); 1004 1005 if (sample_type & PERF_SAMPLE_TIME) 1006 printf("%" PRIu64 " ", sample->time); 1007 } 1008 1009 static void sample_read__printf(struct perf_sample *sample, u64 read_format) 1010 { 1011 printf("... sample_read:\n"); 1012 1013 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1014 printf("...... time enabled %016" PRIx64 "\n", 1015 sample->read.time_enabled); 1016 1017 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1018 printf("...... time running %016" PRIx64 "\n", 1019 sample->read.time_running); 1020 1021 if (read_format & PERF_FORMAT_GROUP) { 1022 struct sample_read_value *value = sample->read.group.values; 1023 1024 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); 1025 1026 sample_read_group__for_each(value, sample->read.group.nr, read_format) { 1027 printf("..... id %016" PRIx64 1028 ", value %016" PRIx64, 1029 value->id, value->value); 1030 if (read_format & PERF_FORMAT_LOST) 1031 printf(", lost %" PRIu64, value->lost); 1032 printf("\n"); 1033 } 1034 } else { 1035 printf("..... id %016" PRIx64 ", value %016" PRIx64, 1036 sample->read.one.id, sample->read.one.value); 1037 if (read_format & PERF_FORMAT_LOST) 1038 printf(", lost %" PRIu64, sample->read.one.lost); 1039 printf("\n"); 1040 } 1041 } 1042 1043 static void dump_event(struct evlist *evlist, union perf_event *event, 1044 u64 file_offset, struct perf_sample *sample, 1045 const char *file_path) 1046 { 1047 if (!dump_trace) 1048 return; 1049 1050 printf("\n%#" PRIx64 "@%s [%#x]: event: %d\n", 1051 file_offset, file_path, event->header.size, event->header.type); 1052 1053 trace_event(event); 1054 if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw) 1055 evlist->trace_event_sample_raw(evlist, event, sample); 1056 1057 if (sample) 1058 evlist__print_tstamp(evlist, event, sample); 1059 1060 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, 1061 event->header.size, perf_event__name(event->header.type)); 1062 } 1063 1064 char *get_page_size_name(u64 size, char *str) 1065 { 1066 if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size)) 1067 snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A"); 1068 1069 return str; 1070 } 1071 1072 static void dump_sample(struct evsel *evsel, union perf_event *event, 1073 struct perf_sample *sample, const char *arch) 1074 { 1075 u64 sample_type; 1076 char str[PAGE_SIZE_NAME_LEN]; 1077 1078 if (!dump_trace) 1079 return; 1080 1081 printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", 1082 event->header.misc, sample->pid, sample->tid, sample->ip, 1083 sample->period, sample->addr); 1084 1085 sample_type = evsel->core.attr.sample_type; 1086 1087 if (evsel__has_callchain(evsel)) 1088 callchain__printf(evsel, sample); 1089 1090 if (evsel__has_br_stack(evsel)) 1091 branch_stack__printf(sample, evsel); 1092 1093 if (sample_type & PERF_SAMPLE_REGS_USER) 1094 regs_user__printf(sample, arch); 1095 1096 if (sample_type & PERF_SAMPLE_REGS_INTR) 1097 regs_intr__printf(sample, arch); 1098 1099 if (sample_type & PERF_SAMPLE_STACK_USER) 1100 stack_user__printf(&sample->user_stack); 1101 1102 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) { 1103 printf("... weight: %" PRIu64 "", sample->weight); 1104 if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) { 1105 printf(",0x%"PRIx16"", sample->ins_lat); 1106 printf(",0x%"PRIx16"", sample->weight3); 1107 } 1108 printf("\n"); 1109 } 1110 1111 if (sample_type & PERF_SAMPLE_DATA_SRC) 1112 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); 1113 1114 if (sample_type & PERF_SAMPLE_PHYS_ADDR) 1115 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr); 1116 1117 if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE) 1118 printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str)); 1119 1120 if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE) 1121 printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str)); 1122 1123 if (sample_type & PERF_SAMPLE_TRANSACTION) 1124 printf("... transaction: %" PRIx64 "\n", sample->transaction); 1125 1126 if (sample_type & PERF_SAMPLE_READ) 1127 sample_read__printf(sample, evsel->core.attr.read_format); 1128 } 1129 1130 static void dump_deferred_callchain(struct evsel *evsel, union perf_event *event, 1131 struct perf_sample *sample) 1132 { 1133 if (!dump_trace) 1134 return; 1135 1136 printf("(IP, 0x%x): %d/%d: %#" PRIx64 "\n", 1137 event->header.misc, sample->pid, sample->tid, sample->deferred_cookie); 1138 1139 if (evsel__has_callchain(evsel)) 1140 callchain__printf(evsel, sample); 1141 } 1142 1143 static void dump_read(struct evsel *evsel, union perf_event *event) 1144 { 1145 struct perf_record_read *read_event = &event->read; 1146 u64 read_format; 1147 1148 if (!dump_trace) 1149 return; 1150 1151 printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid, 1152 evsel__name(evsel), event->read.value); 1153 1154 if (!evsel) 1155 return; 1156 1157 read_format = evsel->core.attr.read_format; 1158 1159 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1160 printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled); 1161 1162 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1163 printf("... time running : %" PRI_lu64 "\n", read_event->time_running); 1164 1165 if (read_format & PERF_FORMAT_ID) 1166 printf("... id : %" PRI_lu64 "\n", read_event->id); 1167 1168 if (read_format & PERF_FORMAT_LOST) 1169 printf("... lost : %" PRI_lu64 "\n", read_event->lost); 1170 } 1171 1172 static struct machine *machines__find_for_cpumode(struct machines *machines, 1173 union perf_event *event, 1174 struct perf_sample *sample) 1175 { 1176 if (perf_guest && 1177 ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || 1178 (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) { 1179 u32 pid; 1180 1181 if (sample->machine_pid) 1182 pid = sample->machine_pid; 1183 else if (event->header.type == PERF_RECORD_MMAP 1184 || event->header.type == PERF_RECORD_MMAP2) 1185 pid = event->mmap.pid; 1186 else 1187 pid = sample->pid; 1188 1189 /* 1190 * Guest code machine is created as needed and does not use 1191 * DEFAULT_GUEST_KERNEL_ID. 1192 */ 1193 if (symbol_conf.guest_code) 1194 return machines__findnew(machines, pid); 1195 1196 return machines__find_guest(machines, pid); 1197 } 1198 1199 return &machines->host; 1200 } 1201 1202 static int deliver_sample_value(struct evlist *evlist, 1203 const struct perf_tool *tool, 1204 union perf_event *event, 1205 struct perf_sample *sample, 1206 struct sample_read_value *v, 1207 struct machine *machine, 1208 bool per_thread) 1209 { 1210 struct perf_sample_id *sid = evlist__id2sid(evlist, v->id); 1211 struct evsel *evsel; 1212 u64 *storage = NULL; 1213 1214 if (sid) { 1215 storage = perf_sample_id__get_period_storage(sid, sample->tid, per_thread); 1216 } 1217 1218 if (storage) { 1219 sample->id = v->id; 1220 sample->period = v->value - *storage; 1221 *storage = v->value; 1222 } 1223 1224 if (!storage || sid->evsel == NULL) { 1225 ++evlist->stats.nr_unknown_id; 1226 return 0; 1227 } 1228 1229 /* 1230 * There's no reason to deliver sample 1231 * for zero period, bail out. 1232 */ 1233 if (!sample->period) 1234 return 0; 1235 1236 evsel = container_of(sid->evsel, struct evsel, core); 1237 return tool->sample(tool, event, sample, evsel, machine); 1238 } 1239 1240 static int deliver_sample_group(struct evlist *evlist, 1241 const struct perf_tool *tool, 1242 union perf_event *event, 1243 struct perf_sample *sample, 1244 struct machine *machine, 1245 u64 read_format, 1246 bool per_thread) 1247 { 1248 int ret = -EINVAL; 1249 struct sample_read_value *v = sample->read.group.values; 1250 1251 if (tool->dont_split_sample_group) 1252 return deliver_sample_value(evlist, tool, event, sample, v, machine, 1253 per_thread); 1254 1255 sample_read_group__for_each(v, sample->read.group.nr, read_format) { 1256 ret = deliver_sample_value(evlist, tool, event, sample, v, 1257 machine, per_thread); 1258 if (ret) 1259 break; 1260 } 1261 1262 return ret; 1263 } 1264 1265 static int evlist__deliver_sample(struct evlist *evlist, const struct perf_tool *tool, 1266 union perf_event *event, struct perf_sample *sample, 1267 struct evsel *evsel, struct machine *machine) 1268 { 1269 /* We know evsel != NULL. */ 1270 u64 sample_type = evsel->core.attr.sample_type; 1271 u64 read_format = evsel->core.attr.read_format; 1272 bool per_thread = perf_evsel__attr_has_per_thread_sample_period(&evsel->core); 1273 1274 /* Standard sample delivery. */ 1275 if (!(sample_type & PERF_SAMPLE_READ)) 1276 return tool->sample(tool, event, sample, evsel, machine); 1277 1278 /* For PERF_SAMPLE_READ we have either single or group mode. */ 1279 if (read_format & PERF_FORMAT_GROUP) 1280 return deliver_sample_group(evlist, tool, event, sample, 1281 machine, read_format, per_thread); 1282 else 1283 return deliver_sample_value(evlist, tool, event, sample, 1284 &sample->read.one, machine, 1285 per_thread); 1286 } 1287 1288 static int machines__deliver_event(struct machines *machines, 1289 struct evlist *evlist, 1290 union perf_event *event, 1291 struct perf_sample *sample, 1292 const struct perf_tool *tool, u64 file_offset, 1293 const char *file_path) 1294 { 1295 struct evsel *evsel; 1296 struct machine *machine; 1297 1298 dump_event(evlist, event, file_offset, sample, file_path); 1299 1300 evsel = evlist__id2evsel(evlist, sample->id); 1301 1302 machine = machines__find_for_cpumode(machines, event, sample); 1303 1304 switch (event->header.type) { 1305 case PERF_RECORD_SAMPLE: 1306 if (evsel == NULL) { 1307 ++evlist->stats.nr_unknown_id; 1308 return 0; 1309 } 1310 if (machine == NULL) { 1311 ++evlist->stats.nr_unprocessable_samples; 1312 dump_sample(evsel, event, sample, perf_env__arch(NULL)); 1313 return 0; 1314 } 1315 dump_sample(evsel, event, sample, perf_env__arch(machine->env)); 1316 return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine); 1317 case PERF_RECORD_MMAP: 1318 return tool->mmap(tool, event, sample, machine); 1319 case PERF_RECORD_MMAP2: 1320 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT) 1321 ++evlist->stats.nr_proc_map_timeout; 1322 return tool->mmap2(tool, event, sample, machine); 1323 case PERF_RECORD_COMM: 1324 return tool->comm(tool, event, sample, machine); 1325 case PERF_RECORD_NAMESPACES: 1326 return tool->namespaces(tool, event, sample, machine); 1327 case PERF_RECORD_CGROUP: 1328 return tool->cgroup(tool, event, sample, machine); 1329 case PERF_RECORD_FORK: 1330 return tool->fork(tool, event, sample, machine); 1331 case PERF_RECORD_EXIT: 1332 return tool->exit(tool, event, sample, machine); 1333 case PERF_RECORD_LOST: 1334 if (tool->lost == perf_event__process_lost) 1335 evlist->stats.total_lost += event->lost.lost; 1336 return tool->lost(tool, event, sample, machine); 1337 case PERF_RECORD_LOST_SAMPLES: 1338 if (event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF) 1339 evlist->stats.total_dropped_samples += event->lost_samples.lost; 1340 else if (tool->lost_samples == perf_event__process_lost_samples) 1341 evlist->stats.total_lost_samples += event->lost_samples.lost; 1342 return tool->lost_samples(tool, event, sample, machine); 1343 case PERF_RECORD_READ: 1344 dump_read(evsel, event); 1345 return tool->read(tool, event, sample, evsel, machine); 1346 case PERF_RECORD_THROTTLE: 1347 return tool->throttle(tool, event, sample, machine); 1348 case PERF_RECORD_UNTHROTTLE: 1349 return tool->unthrottle(tool, event, sample, machine); 1350 case PERF_RECORD_AUX: 1351 if (tool->aux == perf_event__process_aux) { 1352 if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) 1353 evlist->stats.total_aux_lost += 1; 1354 if (event->aux.flags & PERF_AUX_FLAG_PARTIAL) 1355 evlist->stats.total_aux_partial += 1; 1356 if (event->aux.flags & PERF_AUX_FLAG_COLLISION) 1357 evlist->stats.total_aux_collision += 1; 1358 } 1359 return tool->aux(tool, event, sample, machine); 1360 case PERF_RECORD_ITRACE_START: 1361 return tool->itrace_start(tool, event, sample, machine); 1362 case PERF_RECORD_SWITCH: 1363 case PERF_RECORD_SWITCH_CPU_WIDE: 1364 return tool->context_switch(tool, event, sample, machine); 1365 case PERF_RECORD_KSYMBOL: 1366 return tool->ksymbol(tool, event, sample, machine); 1367 case PERF_RECORD_BPF_EVENT: 1368 return tool->bpf(tool, event, sample, machine); 1369 case PERF_RECORD_TEXT_POKE: 1370 return tool->text_poke(tool, event, sample, machine); 1371 case PERF_RECORD_AUX_OUTPUT_HW_ID: 1372 return tool->aux_output_hw_id(tool, event, sample, machine); 1373 case PERF_RECORD_CALLCHAIN_DEFERRED: 1374 dump_deferred_callchain(evsel, event, sample); 1375 return tool->callchain_deferred(tool, event, sample, evsel, machine); 1376 default: 1377 ++evlist->stats.nr_unknown_events; 1378 return -1; 1379 } 1380 } 1381 1382 static int perf_session__deliver_event(struct perf_session *session, 1383 union perf_event *event, 1384 const struct perf_tool *tool, 1385 u64 file_offset, 1386 const char *file_path) 1387 { 1388 struct perf_sample sample; 1389 int ret; 1390 1391 perf_sample__init(&sample, /*all=*/false); 1392 ret = evlist__parse_sample(session->evlist, event, &sample); 1393 if (ret) { 1394 pr_err("Can't parse sample, err = %d\n", ret); 1395 goto out; 1396 } 1397 1398 ret = auxtrace__process_event(session, event, &sample, tool); 1399 if (ret < 0) 1400 goto out; 1401 if (ret > 0) { 1402 ret = 0; 1403 goto out; 1404 } 1405 1406 ret = machines__deliver_event(&session->machines, session->evlist, 1407 event, &sample, tool, file_offset, file_path); 1408 1409 if (dump_trace && sample.aux_sample.size) 1410 auxtrace__dump_auxtrace_sample(session, &sample); 1411 out: 1412 perf_sample__exit(&sample); 1413 return ret; 1414 } 1415 1416 static s64 perf_session__process_user_event(struct perf_session *session, 1417 union perf_event *event, 1418 u64 file_offset, 1419 const char *file_path) 1420 { 1421 struct ordered_events *oe = &session->ordered_events; 1422 const struct perf_tool *tool = session->tool; 1423 struct perf_sample sample; 1424 int fd = perf_data__fd(session->data); 1425 s64 err; 1426 1427 perf_sample__init(&sample, /*all=*/true); 1428 if ((event->header.type != PERF_RECORD_COMPRESSED && 1429 event->header.type != PERF_RECORD_COMPRESSED2) || 1430 perf_tool__compressed_is_stub(tool)) 1431 dump_event(session->evlist, event, file_offset, &sample, file_path); 1432 1433 /* These events are processed right away */ 1434 switch (event->header.type) { 1435 case PERF_RECORD_HEADER_ATTR: 1436 err = tool->attr(tool, event, &session->evlist); 1437 if (err == 0) { 1438 perf_session__set_id_hdr_size(session); 1439 perf_session__set_comm_exec(session); 1440 } 1441 break; 1442 case PERF_RECORD_EVENT_UPDATE: 1443 err = tool->event_update(tool, event, &session->evlist); 1444 break; 1445 case PERF_RECORD_HEADER_EVENT_TYPE: 1446 /* 1447 * Deprecated, but we need to handle it for sake 1448 * of old data files create in pipe mode. 1449 */ 1450 err = 0; 1451 break; 1452 case PERF_RECORD_HEADER_TRACING_DATA: 1453 /* 1454 * Setup for reading amidst mmap, but only when we 1455 * are in 'file' mode. The 'pipe' fd is in proper 1456 * place already. 1457 */ 1458 if (!perf_data__is_pipe(session->data)) 1459 lseek(fd, file_offset, SEEK_SET); 1460 err = tool->tracing_data(tool, session, event); 1461 break; 1462 case PERF_RECORD_HEADER_BUILD_ID: 1463 err = tool->build_id(tool, session, event); 1464 break; 1465 case PERF_RECORD_FINISHED_ROUND: 1466 err = tool->finished_round(tool, event, oe); 1467 break; 1468 case PERF_RECORD_ID_INDEX: 1469 err = tool->id_index(tool, session, event); 1470 break; 1471 case PERF_RECORD_AUXTRACE_INFO: 1472 err = tool->auxtrace_info(tool, session, event); 1473 break; 1474 case PERF_RECORD_AUXTRACE: 1475 /* 1476 * Setup for reading amidst mmap, but only when we 1477 * are in 'file' mode. The 'pipe' fd is in proper 1478 * place already. 1479 */ 1480 if (!perf_data__is_pipe(session->data)) 1481 lseek(fd, file_offset + event->header.size, SEEK_SET); 1482 err = tool->auxtrace(tool, session, event); 1483 break; 1484 case PERF_RECORD_AUXTRACE_ERROR: 1485 perf_session__auxtrace_error_inc(session, event); 1486 err = tool->auxtrace_error(tool, session, event); 1487 break; 1488 case PERF_RECORD_THREAD_MAP: 1489 err = tool->thread_map(tool, session, event); 1490 break; 1491 case PERF_RECORD_CPU_MAP: 1492 err = tool->cpu_map(tool, session, event); 1493 break; 1494 case PERF_RECORD_STAT_CONFIG: 1495 err = tool->stat_config(tool, session, event); 1496 break; 1497 case PERF_RECORD_STAT: 1498 err = tool->stat(tool, session, event); 1499 break; 1500 case PERF_RECORD_STAT_ROUND: 1501 err = tool->stat_round(tool, session, event); 1502 break; 1503 case PERF_RECORD_TIME_CONV: 1504 session->time_conv = event->time_conv; 1505 err = tool->time_conv(tool, session, event); 1506 break; 1507 case PERF_RECORD_HEADER_FEATURE: 1508 err = tool->feature(tool, session, event); 1509 break; 1510 case PERF_RECORD_COMPRESSED: 1511 case PERF_RECORD_COMPRESSED2: 1512 err = tool->compressed(tool, session, event, file_offset, file_path); 1513 if (err) 1514 dump_event(session->evlist, event, file_offset, &sample, file_path); 1515 break; 1516 case PERF_RECORD_FINISHED_INIT: 1517 err = tool->finished_init(tool, session, event); 1518 break; 1519 case PERF_RECORD_BPF_METADATA: 1520 err = tool->bpf_metadata(tool, session, event); 1521 break; 1522 default: 1523 err = -EINVAL; 1524 break; 1525 } 1526 perf_sample__exit(&sample); 1527 return err; 1528 } 1529 1530 int perf_session__deliver_synth_event(struct perf_session *session, 1531 union perf_event *event, 1532 struct perf_sample *sample) 1533 { 1534 struct evlist *evlist = session->evlist; 1535 const struct perf_tool *tool = session->tool; 1536 1537 events_stats__inc(&evlist->stats, event->header.type); 1538 1539 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1540 return perf_session__process_user_event(session, event, 0, NULL); 1541 1542 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL); 1543 } 1544 1545 int perf_session__deliver_synth_attr_event(struct perf_session *session, 1546 const struct perf_event_attr *attr, 1547 u64 id) 1548 { 1549 union { 1550 struct { 1551 struct perf_record_header_attr attr; 1552 u64 ids[1]; 1553 } attr_id; 1554 union perf_event ev; 1555 } ev = { 1556 .attr_id.attr.header.type = PERF_RECORD_HEADER_ATTR, 1557 .attr_id.attr.header.size = sizeof(ev.attr_id), 1558 .attr_id.ids[0] = id, 1559 }; 1560 1561 if (attr->size != sizeof(ev.attr_id.attr.attr)) { 1562 pr_debug("Unexpected perf_event_attr size\n"); 1563 return -EINVAL; 1564 } 1565 ev.attr_id.attr.attr = *attr; 1566 return perf_session__deliver_synth_event(session, &ev.ev, NULL); 1567 } 1568 1569 static void event_swap(union perf_event *event, bool sample_id_all) 1570 { 1571 perf_event__swap_op swap; 1572 1573 swap = perf_event__swap_ops[event->header.type]; 1574 if (swap) 1575 swap(event, sample_id_all); 1576 } 1577 1578 int perf_session__peek_event(struct perf_session *session, off_t file_offset, 1579 void *buf, size_t buf_sz, 1580 union perf_event **event_ptr, 1581 struct perf_sample *sample) 1582 { 1583 union perf_event *event; 1584 size_t hdr_sz, rest; 1585 int fd; 1586 1587 if (session->one_mmap && !session->header.needs_swap) { 1588 event = file_offset - session->one_mmap_offset + 1589 session->one_mmap_addr; 1590 goto out_parse_sample; 1591 } 1592 1593 if (perf_data__is_pipe(session->data)) 1594 return -1; 1595 1596 fd = perf_data__fd(session->data); 1597 hdr_sz = sizeof(struct perf_event_header); 1598 1599 if (buf_sz < hdr_sz) 1600 return -1; 1601 1602 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 || 1603 readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz) 1604 return -1; 1605 1606 event = (union perf_event *)buf; 1607 1608 if (session->header.needs_swap) 1609 perf_event_header__bswap(&event->header); 1610 1611 if (event->header.size < hdr_sz || event->header.size > buf_sz) 1612 return -1; 1613 1614 buf += hdr_sz; 1615 rest = event->header.size - hdr_sz; 1616 1617 if (readn(fd, buf, rest) != (ssize_t)rest) 1618 return -1; 1619 1620 if (session->header.needs_swap) 1621 event_swap(event, evlist__sample_id_all(session->evlist)); 1622 1623 out_parse_sample: 1624 1625 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && 1626 evlist__parse_sample(session->evlist, event, sample)) 1627 return -1; 1628 1629 *event_ptr = event; 1630 1631 return 0; 1632 } 1633 1634 int perf_session__peek_events(struct perf_session *session, u64 offset, 1635 u64 size, peek_events_cb_t cb, void *data) 1636 { 1637 u64 max_offset = offset + size; 1638 char buf[PERF_SAMPLE_MAX_SIZE]; 1639 union perf_event *event; 1640 int err; 1641 1642 do { 1643 err = perf_session__peek_event(session, offset, buf, 1644 PERF_SAMPLE_MAX_SIZE, &event, 1645 NULL); 1646 if (err) 1647 return err; 1648 1649 err = cb(session, event, offset, data); 1650 if (err) 1651 return err; 1652 1653 offset += event->header.size; 1654 if (event->header.type == PERF_RECORD_AUXTRACE) 1655 offset += event->auxtrace.size; 1656 1657 } while (offset < max_offset); 1658 1659 return err; 1660 } 1661 1662 static s64 perf_session__process_event(struct perf_session *session, 1663 union perf_event *event, u64 file_offset, 1664 const char *file_path) 1665 { 1666 struct evlist *evlist = session->evlist; 1667 const struct perf_tool *tool = session->tool; 1668 int ret; 1669 1670 if (session->header.needs_swap) 1671 event_swap(event, evlist__sample_id_all(evlist)); 1672 1673 if (event->header.type >= PERF_RECORD_HEADER_MAX) { 1674 /* perf should not support unaligned event, stop here. */ 1675 if (event->header.size % sizeof(u64)) 1676 return -EINVAL; 1677 1678 /* This perf is outdated and does not support the latest event type. */ 1679 ui__warning("Unsupported header type %u, please consider updating perf.\n", 1680 event->header.type); 1681 /* Skip unsupported event by returning its size. */ 1682 return event->header.size; 1683 } 1684 1685 events_stats__inc(&evlist->stats, event->header.type); 1686 1687 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1688 return perf_session__process_user_event(session, event, file_offset, file_path); 1689 1690 if (tool->ordered_events) { 1691 u64 timestamp = -1ULL; 1692 1693 ret = evlist__parse_sample_timestamp(evlist, event, ×tamp); 1694 if (ret && ret != -1) 1695 return ret; 1696 1697 ret = perf_session__queue_event(session, event, timestamp, file_offset, file_path); 1698 if (ret != -ETIME) 1699 return ret; 1700 } 1701 1702 return perf_session__deliver_event(session, event, tool, file_offset, file_path); 1703 } 1704 1705 void perf_event_header__bswap(struct perf_event_header *hdr) 1706 { 1707 hdr->type = bswap_32(hdr->type); 1708 hdr->misc = bswap_16(hdr->misc); 1709 hdr->size = bswap_16(hdr->size); 1710 } 1711 1712 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 1713 { 1714 return machine__findnew_thread(&session->machines.host, -1, pid); 1715 } 1716 1717 int perf_session__register_idle_thread(struct perf_session *session) 1718 { 1719 struct thread *thread = machine__idle_thread(&session->machines.host); 1720 1721 /* machine__idle_thread() got the thread, so put it */ 1722 thread__put(thread); 1723 return thread ? 0 : -1; 1724 } 1725 1726 static void 1727 perf_session__warn_order(const struct perf_session *session) 1728 { 1729 const struct ordered_events *oe = &session->ordered_events; 1730 struct evsel *evsel; 1731 bool should_warn = true; 1732 1733 evlist__for_each_entry(session->evlist, evsel) { 1734 if (evsel->core.attr.write_backward) 1735 should_warn = false; 1736 } 1737 1738 if (!should_warn) 1739 return; 1740 if (oe->nr_unordered_events != 0) 1741 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events); 1742 } 1743 1744 static void perf_session__warn_about_errors(const struct perf_session *session) 1745 { 1746 const struct events_stats *stats = &session->evlist->stats; 1747 1748 if (session->tool->lost == perf_event__process_lost && 1749 stats->nr_events[PERF_RECORD_LOST] != 0) { 1750 ui__warning("Processed %d events and lost %d chunks!\n\n" 1751 "Check IO/CPU overload!\n\n", 1752 stats->nr_events[0], 1753 stats->nr_events[PERF_RECORD_LOST]); 1754 } 1755 1756 if (session->tool->lost_samples == perf_event__process_lost_samples) { 1757 double drop_rate; 1758 1759 drop_rate = (double)stats->total_lost_samples / 1760 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples); 1761 if (drop_rate > 0.05) { 1762 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n", 1763 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples, 1764 drop_rate * 100.0); 1765 } 1766 } 1767 1768 if (session->tool->aux == perf_event__process_aux && 1769 stats->total_aux_lost != 0) { 1770 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n", 1771 stats->total_aux_lost, 1772 stats->nr_events[PERF_RECORD_AUX]); 1773 } 1774 1775 if (session->tool->aux == perf_event__process_aux && 1776 stats->total_aux_partial != 0) { 1777 bool vmm_exclusive = false; 1778 1779 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive", 1780 &vmm_exclusive); 1781 1782 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n" 1783 "Are you running a KVM guest in the background?%s\n\n", 1784 stats->total_aux_partial, 1785 stats->nr_events[PERF_RECORD_AUX], 1786 vmm_exclusive ? 1787 "\nReloading kvm_intel module with vmm_exclusive=0\n" 1788 "will reduce the gaps to only guest's timeslices." : 1789 ""); 1790 } 1791 1792 if (session->tool->aux == perf_event__process_aux && 1793 stats->total_aux_collision != 0) { 1794 ui__warning("AUX data detected collision %" PRIu64 " times out of %u!\n\n", 1795 stats->total_aux_collision, 1796 stats->nr_events[PERF_RECORD_AUX]); 1797 } 1798 1799 if (stats->nr_unknown_events != 0) { 1800 ui__warning("Found %u unknown events!\n\n" 1801 "Is this an older tool processing a perf.data " 1802 "file generated by a more recent tool?\n\n" 1803 "If that is not the case, consider " 1804 "reporting to linux-kernel@vger.kernel.org.\n\n", 1805 stats->nr_unknown_events); 1806 } 1807 1808 if (stats->nr_unknown_id != 0) { 1809 ui__warning("%u samples with id not present in the header\n", 1810 stats->nr_unknown_id); 1811 } 1812 1813 if (stats->nr_invalid_chains != 0) { 1814 ui__warning("Found invalid callchains!\n\n" 1815 "%u out of %u events were discarded for this reason.\n\n" 1816 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1817 stats->nr_invalid_chains, 1818 stats->nr_events[PERF_RECORD_SAMPLE]); 1819 } 1820 1821 if (stats->nr_unprocessable_samples != 0) { 1822 ui__warning("%u unprocessable samples recorded.\n" 1823 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1824 stats->nr_unprocessable_samples); 1825 } 1826 1827 perf_session__warn_order(session); 1828 1829 events_stats__auxtrace_error_warn(stats); 1830 1831 if (stats->nr_proc_map_timeout != 0) { 1832 ui__warning("%d map information files for pre-existing threads were\n" 1833 "not processed, if there are samples for addresses they\n" 1834 "will not be resolved, you may find out which are these\n" 1835 "threads by running with -v and redirecting the output\n" 1836 "to a file.\n" 1837 "The time limit to process proc map is too short?\n" 1838 "Increase it by --proc-map-timeout\n", 1839 stats->nr_proc_map_timeout); 1840 } 1841 } 1842 1843 static int perf_session__flush_thread_stack(struct thread *thread, 1844 void *p __maybe_unused) 1845 { 1846 return thread_stack__flush(thread); 1847 } 1848 1849 static int perf_session__flush_thread_stacks(struct perf_session *session) 1850 { 1851 return machines__for_each_thread(&session->machines, 1852 perf_session__flush_thread_stack, 1853 NULL); 1854 } 1855 1856 volatile sig_atomic_t session_done; 1857 1858 static int __perf_session__process_decomp_events(struct perf_session *session); 1859 1860 static int __perf_session__process_pipe_events(struct perf_session *session) 1861 { 1862 struct ordered_events *oe = &session->ordered_events; 1863 const struct perf_tool *tool = session->tool; 1864 struct ui_progress prog; 1865 union perf_event *event; 1866 uint32_t size, cur_size = 0; 1867 void *buf = NULL; 1868 s64 skip = 0; 1869 u64 head; 1870 ssize_t err; 1871 void *p; 1872 bool update_prog = false; 1873 1874 /* 1875 * If it's from a file saving pipe data (by redirection), it would have 1876 * a file name other than "-". Then we can get the total size and show 1877 * the progress. 1878 */ 1879 if (strcmp(session->data->path, "-") && session->data->file.size) { 1880 ui_progress__init_size(&prog, session->data->file.size, 1881 "Processing events..."); 1882 update_prog = true; 1883 } 1884 1885 head = 0; 1886 cur_size = sizeof(union perf_event); 1887 1888 buf = malloc(cur_size); 1889 if (!buf) 1890 return -errno; 1891 ordered_events__set_copy_on_queue(oe, true); 1892 more: 1893 event = buf; 1894 err = perf_data__read(session->data, event, 1895 sizeof(struct perf_event_header)); 1896 if (err <= 0) { 1897 if (err == 0) 1898 goto done; 1899 1900 pr_err("failed to read event header\n"); 1901 goto out_err; 1902 } 1903 1904 if (session->header.needs_swap) 1905 perf_event_header__bswap(&event->header); 1906 1907 size = event->header.size; 1908 if (size < sizeof(struct perf_event_header)) { 1909 pr_err("bad event header size\n"); 1910 goto out_err; 1911 } 1912 1913 if (size > cur_size) { 1914 void *new = realloc(buf, size); 1915 if (!new) { 1916 pr_err("failed to allocate memory to read event\n"); 1917 goto out_err; 1918 } 1919 buf = new; 1920 cur_size = size; 1921 event = buf; 1922 } 1923 p = event; 1924 p += sizeof(struct perf_event_header); 1925 1926 if (size - sizeof(struct perf_event_header)) { 1927 err = perf_data__read(session->data, p, 1928 size - sizeof(struct perf_event_header)); 1929 if (err <= 0) { 1930 if (err == 0) { 1931 pr_err("unexpected end of event stream\n"); 1932 goto done; 1933 } 1934 1935 pr_err("failed to read event data\n"); 1936 goto out_err; 1937 } 1938 } 1939 1940 if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) { 1941 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1942 head, event->header.size, event->header.type); 1943 err = -EINVAL; 1944 goto out_err; 1945 } 1946 1947 head += size; 1948 1949 if (skip > 0) 1950 head += skip; 1951 1952 err = __perf_session__process_decomp_events(session); 1953 if (err) 1954 goto out_err; 1955 1956 if (update_prog) 1957 ui_progress__update(&prog, size); 1958 1959 if (!session_done()) 1960 goto more; 1961 done: 1962 /* do the final flush for ordered samples */ 1963 err = ordered_events__flush(oe, OE_FLUSH__FINAL); 1964 if (err) 1965 goto out_err; 1966 err = auxtrace__flush_events(session, tool); 1967 if (err) 1968 goto out_err; 1969 err = perf_session__flush_thread_stacks(session); 1970 out_err: 1971 free(buf); 1972 if (update_prog) 1973 ui_progress__finish(); 1974 if (!tool->no_warn) 1975 perf_session__warn_about_errors(session); 1976 ordered_events__free(&session->ordered_events); 1977 auxtrace__free_events(session); 1978 return err; 1979 } 1980 1981 static union perf_event * 1982 prefetch_event(char *buf, u64 head, size_t mmap_size, 1983 bool needs_swap, union perf_event *error) 1984 { 1985 union perf_event *event; 1986 u16 event_size; 1987 1988 /* 1989 * Ensure we have enough space remaining to read 1990 * the size of the event in the headers. 1991 */ 1992 if (head + sizeof(event->header) > mmap_size) 1993 return NULL; 1994 1995 event = (union perf_event *)(buf + head); 1996 if (needs_swap) 1997 perf_event_header__bswap(&event->header); 1998 1999 event_size = event->header.size; 2000 if (head + event_size <= mmap_size) 2001 return event; 2002 2003 /* We're not fetching the event so swap back again */ 2004 if (needs_swap) 2005 perf_event_header__bswap(&event->header); 2006 2007 /* Check if the event fits into the next mmapped buf. */ 2008 if (event_size <= mmap_size - head % page_size) { 2009 /* Remap buf and fetch again. */ 2010 return NULL; 2011 } 2012 2013 /* Invalid input. Event size should never exceed mmap_size. */ 2014 pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:" 2015 " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size); 2016 2017 return error; 2018 } 2019 2020 static union perf_event * 2021 fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap) 2022 { 2023 return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL)); 2024 } 2025 2026 static union perf_event * 2027 fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap) 2028 { 2029 return prefetch_event(buf, head, mmap_size, needs_swap, NULL); 2030 } 2031 2032 static int __perf_session__process_decomp_events(struct perf_session *session) 2033 { 2034 s64 skip; 2035 u64 size; 2036 struct decomp *decomp = session->active_decomp->decomp_last; 2037 2038 if (!decomp) 2039 return 0; 2040 2041 while (decomp->head < decomp->size && !session_done()) { 2042 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data, 2043 session->header.needs_swap); 2044 2045 if (!event) 2046 break; 2047 2048 size = event->header.size; 2049 2050 if (size < sizeof(struct perf_event_header) || 2051 (skip = perf_session__process_event(session, event, decomp->file_pos, 2052 decomp->file_path)) < 0) { 2053 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 2054 decomp->file_pos + decomp->head, event->header.size, event->header.type); 2055 return -EINVAL; 2056 } 2057 2058 if (skip) 2059 size += skip; 2060 2061 decomp->head += size; 2062 } 2063 2064 return 0; 2065 } 2066 2067 /* 2068 * On 64bit we can mmap the data file in one go. No need for tiny mmap 2069 * slices. On 32bit we use 32MB. 2070 */ 2071 #if BITS_PER_LONG == 64 2072 #define MMAP_SIZE ULLONG_MAX 2073 #define NUM_MMAPS 1 2074 #else 2075 #define MMAP_SIZE (32 * 1024 * 1024ULL) 2076 #define NUM_MMAPS 128 2077 #endif 2078 2079 struct reader; 2080 2081 typedef s64 (*reader_cb_t)(struct perf_session *session, 2082 union perf_event *event, 2083 u64 file_offset, 2084 const char *file_path); 2085 2086 struct reader { 2087 int fd; 2088 const char *path; 2089 u64 data_size; 2090 u64 data_offset; 2091 reader_cb_t process; 2092 bool in_place_update; 2093 char *mmaps[NUM_MMAPS]; 2094 size_t mmap_size; 2095 int mmap_idx; 2096 char *mmap_cur; 2097 u64 file_pos; 2098 u64 file_offset; 2099 u64 head; 2100 u64 size; 2101 bool done; 2102 struct zstd_data zstd_data; 2103 struct decomp_data decomp_data; 2104 }; 2105 2106 static int 2107 reader__init(struct reader *rd, bool *one_mmap) 2108 { 2109 u64 data_size = rd->data_size; 2110 char **mmaps = rd->mmaps; 2111 2112 rd->head = rd->data_offset; 2113 data_size += rd->data_offset; 2114 2115 rd->mmap_size = MMAP_SIZE; 2116 if (rd->mmap_size > data_size) { 2117 rd->mmap_size = data_size; 2118 if (one_mmap) 2119 *one_mmap = true; 2120 } 2121 2122 memset(mmaps, 0, sizeof(rd->mmaps)); 2123 2124 if (zstd_init(&rd->zstd_data, 0)) 2125 return -1; 2126 rd->decomp_data.zstd_decomp = &rd->zstd_data; 2127 2128 return 0; 2129 } 2130 2131 static void 2132 reader__release_decomp(struct reader *rd) 2133 { 2134 perf_decomp__release_events(rd->decomp_data.decomp); 2135 zstd_fini(&rd->zstd_data); 2136 } 2137 2138 static int 2139 reader__mmap(struct reader *rd, struct perf_session *session) 2140 { 2141 int mmap_prot, mmap_flags; 2142 char *buf, **mmaps = rd->mmaps; 2143 u64 page_offset; 2144 2145 mmap_prot = PROT_READ; 2146 mmap_flags = MAP_SHARED; 2147 2148 if (rd->in_place_update) { 2149 mmap_prot |= PROT_WRITE; 2150 } else if (session->header.needs_swap) { 2151 mmap_prot |= PROT_WRITE; 2152 mmap_flags = MAP_PRIVATE; 2153 } 2154 2155 if (mmaps[rd->mmap_idx]) { 2156 munmap(mmaps[rd->mmap_idx], rd->mmap_size); 2157 mmaps[rd->mmap_idx] = NULL; 2158 } 2159 2160 page_offset = page_size * (rd->head / page_size); 2161 rd->file_offset += page_offset; 2162 rd->head -= page_offset; 2163 2164 buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd, 2165 rd->file_offset); 2166 if (buf == MAP_FAILED) { 2167 pr_err("failed to mmap file\n"); 2168 return -errno; 2169 } 2170 mmaps[rd->mmap_idx] = rd->mmap_cur = buf; 2171 rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1); 2172 rd->file_pos = rd->file_offset + rd->head; 2173 if (session->one_mmap) { 2174 session->one_mmap_addr = buf; 2175 session->one_mmap_offset = rd->file_offset; 2176 } 2177 2178 return 0; 2179 } 2180 2181 enum { 2182 READER_OK, 2183 READER_NODATA, 2184 }; 2185 2186 static int 2187 reader__read_event(struct reader *rd, struct perf_session *session, 2188 struct ui_progress *prog) 2189 { 2190 u64 size; 2191 int err = READER_OK; 2192 union perf_event *event; 2193 s64 skip; 2194 2195 event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur, 2196 session->header.needs_swap); 2197 if (IS_ERR(event)) 2198 return PTR_ERR(event); 2199 2200 if (!event) 2201 return READER_NODATA; 2202 2203 size = event->header.size; 2204 2205 skip = -EINVAL; 2206 2207 if (size < sizeof(struct perf_event_header) || 2208 (skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) { 2209 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n", 2210 rd->file_offset + rd->head, event->header.size, 2211 event->header.type, strerror(-skip)); 2212 err = skip; 2213 goto out; 2214 } 2215 2216 if (skip) 2217 size += skip; 2218 2219 rd->size += size; 2220 rd->head += size; 2221 rd->file_pos += size; 2222 2223 err = __perf_session__process_decomp_events(session); 2224 if (err) 2225 goto out; 2226 2227 ui_progress__update(prog, size); 2228 2229 out: 2230 return err; 2231 } 2232 2233 static inline bool 2234 reader__eof(struct reader *rd) 2235 { 2236 return (rd->file_pos >= rd->data_size + rd->data_offset); 2237 } 2238 2239 static int 2240 reader__process_events(struct reader *rd, struct perf_session *session, 2241 struct ui_progress *prog) 2242 { 2243 int err; 2244 2245 err = reader__init(rd, &session->one_mmap); 2246 if (err) 2247 goto out; 2248 2249 session->active_decomp = &rd->decomp_data; 2250 2251 remap: 2252 err = reader__mmap(rd, session); 2253 if (err) 2254 goto out; 2255 2256 more: 2257 err = reader__read_event(rd, session, prog); 2258 if (err < 0) 2259 goto out; 2260 else if (err == READER_NODATA) 2261 goto remap; 2262 2263 if (session_done()) 2264 goto out; 2265 2266 if (!reader__eof(rd)) 2267 goto more; 2268 2269 out: 2270 session->active_decomp = &session->decomp_data; 2271 return err; 2272 } 2273 2274 static s64 process_simple(struct perf_session *session, 2275 union perf_event *event, 2276 u64 file_offset, 2277 const char *file_path) 2278 { 2279 return perf_session__process_event(session, event, file_offset, file_path); 2280 } 2281 2282 static int __perf_session__process_events(struct perf_session *session) 2283 { 2284 struct reader rd = { 2285 .fd = perf_data__fd(session->data), 2286 .path = session->data->file.path, 2287 .data_size = session->header.data_size, 2288 .data_offset = session->header.data_offset, 2289 .process = process_simple, 2290 .in_place_update = session->data->in_place_update, 2291 }; 2292 struct ordered_events *oe = &session->ordered_events; 2293 const struct perf_tool *tool = session->tool; 2294 struct ui_progress prog; 2295 int err; 2296 2297 if (rd.data_size == 0) 2298 return -1; 2299 2300 ui_progress__init_size(&prog, rd.data_size, "Processing events..."); 2301 2302 err = reader__process_events(&rd, session, &prog); 2303 if (err) 2304 goto out_err; 2305 /* do the final flush for ordered samples */ 2306 err = ordered_events__flush(oe, OE_FLUSH__FINAL); 2307 if (err) 2308 goto out_err; 2309 err = auxtrace__flush_events(session, tool); 2310 if (err) 2311 goto out_err; 2312 err = perf_session__flush_thread_stacks(session); 2313 out_err: 2314 ui_progress__finish(); 2315 if (!tool->no_warn) 2316 perf_session__warn_about_errors(session); 2317 /* 2318 * We may switching perf.data output, make ordered_events 2319 * reusable. 2320 */ 2321 ordered_events__reinit(&session->ordered_events); 2322 auxtrace__free_events(session); 2323 reader__release_decomp(&rd); 2324 session->one_mmap = false; 2325 return err; 2326 } 2327 2328 /* 2329 * Processing 2 MB of data from each reader in sequence, 2330 * because that's the way the ordered events sorting works 2331 * most efficiently. 2332 */ 2333 #define READER_MAX_SIZE (2 * 1024 * 1024) 2334 2335 /* 2336 * This function reads, merge and process directory data. 2337 * It assumens the version 1 of directory data, where each 2338 * data file holds per-cpu data, already sorted by kernel. 2339 */ 2340 static int __perf_session__process_dir_events(struct perf_session *session) 2341 { 2342 struct perf_data *data = session->data; 2343 const struct perf_tool *tool = session->tool; 2344 int i, ret, readers, nr_readers; 2345 struct ui_progress prog; 2346 u64 total_size = perf_data__size(session->data); 2347 struct reader *rd; 2348 2349 ui_progress__init_size(&prog, total_size, "Processing events..."); 2350 2351 nr_readers = 1; 2352 for (i = 0; i < data->dir.nr; i++) { 2353 if (data->dir.files[i].size) 2354 nr_readers++; 2355 } 2356 2357 rd = zalloc(nr_readers * sizeof(struct reader)); 2358 if (!rd) 2359 return -ENOMEM; 2360 2361 rd[0] = (struct reader) { 2362 .fd = perf_data__fd(session->data), 2363 .path = session->data->file.path, 2364 .data_size = session->header.data_size, 2365 .data_offset = session->header.data_offset, 2366 .process = process_simple, 2367 .in_place_update = session->data->in_place_update, 2368 }; 2369 ret = reader__init(&rd[0], NULL); 2370 if (ret) 2371 goto out_err; 2372 ret = reader__mmap(&rd[0], session); 2373 if (ret) 2374 goto out_err; 2375 readers = 1; 2376 2377 for (i = 0; i < data->dir.nr; i++) { 2378 if (!data->dir.files[i].size) 2379 continue; 2380 rd[readers] = (struct reader) { 2381 .fd = data->dir.files[i].fd, 2382 .path = data->dir.files[i].path, 2383 .data_size = data->dir.files[i].size, 2384 .data_offset = 0, 2385 .process = process_simple, 2386 .in_place_update = session->data->in_place_update, 2387 }; 2388 ret = reader__init(&rd[readers], NULL); 2389 if (ret) 2390 goto out_err; 2391 ret = reader__mmap(&rd[readers], session); 2392 if (ret) 2393 goto out_err; 2394 readers++; 2395 } 2396 2397 i = 0; 2398 while (readers) { 2399 if (session_done()) 2400 break; 2401 2402 if (rd[i].done) { 2403 i = (i + 1) % nr_readers; 2404 continue; 2405 } 2406 if (reader__eof(&rd[i])) { 2407 rd[i].done = true; 2408 readers--; 2409 continue; 2410 } 2411 2412 session->active_decomp = &rd[i].decomp_data; 2413 ret = reader__read_event(&rd[i], session, &prog); 2414 if (ret < 0) { 2415 goto out_err; 2416 } else if (ret == READER_NODATA) { 2417 ret = reader__mmap(&rd[i], session); 2418 if (ret) 2419 goto out_err; 2420 } 2421 2422 if (rd[i].size >= READER_MAX_SIZE) { 2423 rd[i].size = 0; 2424 i = (i + 1) % nr_readers; 2425 } 2426 } 2427 2428 ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL); 2429 if (ret) 2430 goto out_err; 2431 2432 ret = perf_session__flush_thread_stacks(session); 2433 out_err: 2434 ui_progress__finish(); 2435 2436 if (!tool->no_warn) 2437 perf_session__warn_about_errors(session); 2438 2439 /* 2440 * We may switching perf.data output, make ordered_events 2441 * reusable. 2442 */ 2443 ordered_events__reinit(&session->ordered_events); 2444 2445 session->one_mmap = false; 2446 2447 session->active_decomp = &session->decomp_data; 2448 for (i = 0; i < nr_readers; i++) 2449 reader__release_decomp(&rd[i]); 2450 zfree(&rd); 2451 2452 return ret; 2453 } 2454 2455 int perf_session__process_events(struct perf_session *session) 2456 { 2457 if (perf_session__register_idle_thread(session) < 0) 2458 return -ENOMEM; 2459 2460 if (perf_data__is_pipe(session->data)) 2461 return __perf_session__process_pipe_events(session); 2462 2463 if (perf_data__is_dir(session->data) && session->data->dir.nr) 2464 return __perf_session__process_dir_events(session); 2465 2466 return __perf_session__process_events(session); 2467 } 2468 2469 bool perf_session__has_traces(struct perf_session *session, const char *msg) 2470 { 2471 struct evsel *evsel; 2472 2473 evlist__for_each_entry(session->evlist, evsel) { 2474 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) 2475 return true; 2476 } 2477 2478 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); 2479 return false; 2480 } 2481 2482 bool perf_session__has_switch_events(struct perf_session *session) 2483 { 2484 struct evsel *evsel; 2485 2486 evlist__for_each_entry(session->evlist, evsel) { 2487 if (evsel->core.attr.context_switch) 2488 return true; 2489 } 2490 2491 return false; 2492 } 2493 2494 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr) 2495 { 2496 char *bracket; 2497 struct ref_reloc_sym *ref; 2498 struct kmap *kmap; 2499 2500 ref = zalloc(sizeof(struct ref_reloc_sym)); 2501 if (ref == NULL) 2502 return -ENOMEM; 2503 2504 ref->name = strdup(symbol_name); 2505 if (ref->name == NULL) { 2506 free(ref); 2507 return -ENOMEM; 2508 } 2509 2510 bracket = strchr(ref->name, ']'); 2511 if (bracket) 2512 *bracket = '\0'; 2513 2514 ref->addr = addr; 2515 2516 kmap = map__kmap(map); 2517 if (kmap) 2518 kmap->ref_reloc_sym = ref; 2519 2520 return 0; 2521 } 2522 2523 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp) 2524 { 2525 return machines__fprintf_dsos(&session->machines, fp); 2526 } 2527 2528 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, 2529 bool (skip)(struct dso *dso, int parm), int parm) 2530 { 2531 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm); 2532 } 2533 2534 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 2535 { 2536 size_t ret; 2537 const char *msg = ""; 2538 2539 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) 2540 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)"; 2541 2542 ret = fprintf(fp, "\nAggregated stats:%s\n", msg); 2543 2544 ret += events_stats__fprintf(&session->evlist->stats, fp); 2545 return ret; 2546 } 2547 2548 size_t perf_session__fprintf(struct perf_session *session, FILE *fp) 2549 { 2550 /* 2551 * FIXME: Here we have to actually print all the machines in this 2552 * session, not just the host... 2553 */ 2554 return machine__fprintf(&session->machines.host, fp); 2555 } 2556 2557 void perf_session__dump_kmaps(struct perf_session *session) 2558 { 2559 int save_verbose = verbose; 2560 2561 fflush(stdout); 2562 fprintf(stderr, "Kernel and module maps:\n"); 2563 verbose = 0; /* Suppress verbose to print a summary only */ 2564 maps__fprintf(machine__kernel_maps(&session->machines.host), stderr); 2565 verbose = save_verbose; 2566 } 2567 2568 struct evsel *perf_session__find_first_evtype(struct perf_session *session, 2569 unsigned int type) 2570 { 2571 struct evsel *pos; 2572 2573 evlist__for_each_entry(session->evlist, pos) { 2574 if (pos->core.attr.type == type) 2575 return pos; 2576 } 2577 return NULL; 2578 } 2579 2580 int perf_session__cpu_bitmap(struct perf_session *session, 2581 const char *cpu_list, unsigned long *cpu_bitmap) 2582 { 2583 int i, err = -1; 2584 struct perf_cpu_map *map; 2585 int nr_cpus = min(perf_session__env(session)->nr_cpus_avail, MAX_NR_CPUS); 2586 struct perf_cpu cpu; 2587 2588 for (i = 0; i < PERF_TYPE_MAX; ++i) { 2589 struct evsel *evsel; 2590 2591 evsel = perf_session__find_first_evtype(session, i); 2592 if (!evsel) 2593 continue; 2594 2595 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) { 2596 pr_err("File does not contain CPU events. " 2597 "Remove -C option to proceed.\n"); 2598 return -1; 2599 } 2600 } 2601 2602 map = perf_cpu_map__new(cpu_list); 2603 if (map == NULL) { 2604 pr_err("Invalid cpu_list\n"); 2605 return -1; 2606 } 2607 2608 perf_cpu_map__for_each_cpu(cpu, i, map) { 2609 if (cpu.cpu >= nr_cpus) { 2610 pr_err("Requested CPU %d too large. " 2611 "Consider raising MAX_NR_CPUS\n", cpu.cpu); 2612 goto out_delete_map; 2613 } 2614 2615 __set_bit(cpu.cpu, cpu_bitmap); 2616 } 2617 2618 err = 0; 2619 2620 out_delete_map: 2621 perf_cpu_map__put(map); 2622 return err; 2623 } 2624 2625 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 2626 bool full) 2627 { 2628 if (session == NULL || fp == NULL) 2629 return; 2630 2631 fprintf(fp, "# ========\n"); 2632 perf_header__fprintf_info(session, fp, full); 2633 fprintf(fp, "# ========\n#\n"); 2634 } 2635 2636 static int perf_session__register_guest(struct perf_session *session, pid_t machine_pid) 2637 { 2638 struct machine *machine = machines__findnew(&session->machines, machine_pid); 2639 struct thread *thread; 2640 2641 if (!machine) 2642 return -ENOMEM; 2643 2644 machine->single_address_space = session->machines.host.single_address_space; 2645 2646 thread = machine__idle_thread(machine); 2647 if (!thread) 2648 return -ENOMEM; 2649 thread__put(thread); 2650 2651 machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid); 2652 2653 return 0; 2654 } 2655 2656 static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid, 2657 pid_t tid, int guest_cpu) 2658 { 2659 struct machine *machine = &session->machines.host; 2660 struct thread *thread = machine__findnew_thread(machine, pid, tid); 2661 2662 if (!thread) 2663 return -ENOMEM; 2664 thread__set_guest_cpu(thread, guest_cpu); 2665 thread__put(thread); 2666 2667 return 0; 2668 } 2669 2670 int perf_event__process_id_index(const struct perf_tool *tool __maybe_unused, 2671 struct perf_session *session, 2672 union perf_event *event) 2673 { 2674 struct evlist *evlist = session->evlist; 2675 struct perf_record_id_index *ie = &event->id_index; 2676 size_t sz = ie->header.size - sizeof(*ie); 2677 size_t i, nr, max_nr; 2678 size_t e1_sz = sizeof(struct id_index_entry); 2679 size_t e2_sz = sizeof(struct id_index_entry_2); 2680 size_t etot_sz = e1_sz + e2_sz; 2681 struct id_index_entry_2 *e2; 2682 pid_t last_pid = 0; 2683 2684 max_nr = sz / e1_sz; 2685 nr = ie->nr; 2686 if (nr > max_nr) { 2687 printf("Too big: nr %zu max_nr %zu\n", nr, max_nr); 2688 return -EINVAL; 2689 } 2690 2691 if (sz >= nr * etot_sz) { 2692 max_nr = sz / etot_sz; 2693 if (nr > max_nr) { 2694 printf("Too big2: nr %zu max_nr %zu\n", nr, max_nr); 2695 return -EINVAL; 2696 } 2697 e2 = (void *)ie + sizeof(*ie) + nr * e1_sz; 2698 } else { 2699 e2 = NULL; 2700 } 2701 2702 if (dump_trace) 2703 fprintf(stdout, " nr: %zu\n", nr); 2704 2705 for (i = 0; i < nr; i++, (e2 ? e2++ : 0)) { 2706 struct id_index_entry *e = &ie->entries[i]; 2707 struct perf_sample_id *sid; 2708 int ret; 2709 2710 if (dump_trace) { 2711 fprintf(stdout, " ... id: %"PRI_lu64, e->id); 2712 fprintf(stdout, " idx: %"PRI_lu64, e->idx); 2713 fprintf(stdout, " cpu: %"PRI_ld64, e->cpu); 2714 fprintf(stdout, " tid: %"PRI_ld64, e->tid); 2715 if (e2) { 2716 fprintf(stdout, " machine_pid: %"PRI_ld64, e2->machine_pid); 2717 fprintf(stdout, " vcpu: %"PRI_lu64"\n", e2->vcpu); 2718 } else { 2719 fprintf(stdout, "\n"); 2720 } 2721 } 2722 2723 sid = evlist__id2sid(evlist, e->id); 2724 if (!sid) 2725 return -ENOENT; 2726 2727 sid->idx = e->idx; 2728 sid->cpu.cpu = e->cpu; 2729 sid->tid = e->tid; 2730 2731 if (!e2) 2732 continue; 2733 2734 sid->machine_pid = e2->machine_pid; 2735 sid->vcpu.cpu = e2->vcpu; 2736 2737 if (!sid->machine_pid) 2738 continue; 2739 2740 if (sid->machine_pid != last_pid) { 2741 ret = perf_session__register_guest(session, sid->machine_pid); 2742 if (ret) 2743 return ret; 2744 last_pid = sid->machine_pid; 2745 perf_guest = true; 2746 } 2747 2748 ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu); 2749 if (ret) 2750 return ret; 2751 } 2752 return 0; 2753 } 2754 2755 int perf_session__dsos_hit_all(struct perf_session *session) 2756 { 2757 struct rb_node *nd; 2758 int err; 2759 2760 err = machine__hit_all_dsos(&session->machines.host); 2761 if (err) 2762 return err; 2763 2764 for (nd = rb_first_cached(&session->machines.guests); nd; 2765 nd = rb_next(nd)) { 2766 struct machine *pos = rb_entry(nd, struct machine, rb_node); 2767 2768 err = machine__hit_all_dsos(pos); 2769 if (err) 2770 return err; 2771 } 2772 2773 return 0; 2774 } 2775 2776 struct perf_env *perf_session__env(struct perf_session *session) 2777 { 2778 return &session->header.env; 2779 } 2780