// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <traceevent/event-parse.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "arch/common.h"

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}
struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	if (data) {
		if (perf_data__open(data))
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			/*
			 * Set session attributes that are present in perf.data
			 * but not in pipe mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_close:
	perf_data__close(data);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}
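/*
 * Illustrative sketch (not part of this file): a typical read-side caller
 * pairs perf_session__new() with perf_session__process_events() and
 * perf_session__delete().  The perf_data setup and the sample callback are
 * placeholders; exact struct perf_data field names vary between perf
 * versions.
 *
 *	struct perf_data data = { .mode = PERF_DATA_MODE_READ };  // + input path
 *	struct perf_tool tool = { .sample = my_sample_cb,         // hypothetical
 *				  .ordered_events = true };
 *	struct perf_session *s = perf_session__new(&data, false, &tool);
 *
 *	if (s) {
 *		perf_session__process_events(s);
 *		perf_session__delete(s);
 *	}
 */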
static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

/* Read and discard n bytes from fd, e.g. to drain an event from a pipe. */
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf_event == NULL)
		tool->bpf_event = perf_event__process_bpf_event;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
}
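/*
 * Illustrative sketch (not part of this file): a tool only needs to set the
 * callbacks it actually cares about; perf_tool__fill_defaults() fills in the
 * rest with the stubs above.  "my_sample" is a placeholder.
 *
 *	static struct perf_tool tool = {
 *		.sample		= my_sample,	// the only custom callback
 *		.ordered_events	= true,		// gets process_finished_round
 *	};
 *	perf_tool__fill_defaults(&tool);	// everything else is stubbed
 */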
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and carry perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
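/*
 * Worked example (illustrative): revbyte() mirrors the bits of one byte by
 * swapping nibbles, then bit pairs, then neighbouring bits:
 *
 *	revbyte(0x2c)	// 0010 1100
 *	  -> 0xc2	// nibbles swapped:   1100 0010
 *	  -> 0x38	// bit pairs swapped: 0011 1000
 *	  -> 0x34	// neighbours:        0011 0100
 *
 * So swap_bitfield() applied to an already byte-swapped bitfield word
 * restores the bit-level layout expected on the other endianness.
 */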
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}
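/*
 * Illustrative note: bswap_safe() is what keeps old perf.data files working.
 * For example, an attr written with PERF_ATTR_SIZE_VER0 (64 bytes) predates
 * sample_max_stack, so bswap_field_16(sample_max_stack) above evaluates
 * bswap_safe(sample_max_stack, 0) as false and skips the (nonexistent)
 * field instead of swapping bytes past the end of the record.
 */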
static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct cpu_map_mask *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct cpu_map_mask *) data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP]		  = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG]	  = perf_event__stat_config_swap,
	[PERF_RECORD_STAT]		  = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND]	  = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE]	  = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}
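/*
 * Illustrative sketch (not part of this file): the round-based machinery
 * described above boils down to
 *
 *	perf_session__queue_event(s, event, timestamp, offset);
 *	...
 *	// on PERF_RECORD_FINISHED_ROUND:
 *	ordered_events__flush(&s->ordered_events, OE_FLUSH__ROUND);
 *	// at end of input:
 *	ordered_events__flush(&s->ordered_events, OE_FLUSH__FINAL);
 *
 * A ROUND flush delivers, in timestamp order, everything known to be
 * complete per the previous round's max timestamp; FINAL drains the rest.
 */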
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * LBR callstack can only get the user call chain,
		 * i is the kernel call chain number,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBR are pair registers. The caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, for a call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will record
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}
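/*
 * Worked example for the comment above: with the call stack
 * "A"->"B"->"C"->"D" the LBR entries are "C"->"D", "B"->"C", "A"->"B",
 * so after the kernel part the function prints entries[0].to (D) followed
 * by entries[0..2].from (C, B, A), i.e. the user stack from the leaf up.
 */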
static void callchain__printf(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (perf_evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			i, e->from, e->to,
			(unsigned short)e->flags.cycles,
			e->flags.mispred ? "M" : " ",
			e->flags.predicted ? "P" : " ",
			e->flags.abort ? "A" : " ",
			e->flags.in_tx ? "T" : " ",
			(unsigned)e->flags.reserved);
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}
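/*
 * Illustrative note: sampled registers are stored densely, not indexed by
 * register number.  With mask 0x30 (bits 4 and 5 set), regs[0] holds the
 * value for register id 4 and regs[1] the value for id 5;
 * regs_dump__printf() above walks the set mask bits in ascending order and
 * consumes regs[] one slot at a time.
 */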
static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
			sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static void dump_read(struct perf_evsel *evsel, union perf_event *event)
{
	struct read_event *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
	       evsel ? perf_evsel__name(evsel) : "FAIL",
	       event->read.value);

	/* Guard against a NULL evsel (unknown id): nothing more to print. */
	if (!evsel)
		return;

	read_format = evsel->attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRIu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id           : %" PRIu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}
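/*
 * Illustrative note: for PERF_SAMPLE_READ the kernel reports cumulative
 * counter values, so deliver_sample_value() above turns them into deltas.
 * E.g. if the first sample carries v->value == 100 and the next one 250,
 * the delivered sample->period values are 100 and then 150, with
 * sid->period remembering the previous cumulative count.
 */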
static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf_event(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}
pr_err("Can't parse sample, err = %d\n", ret); 1338 return ret; 1339 } 1340 1341 ret = auxtrace__process_event(session, event, &sample, tool); 1342 if (ret < 0) 1343 return ret; 1344 if (ret > 0) 1345 return 0; 1346 1347 return machines__deliver_event(&session->machines, session->evlist, 1348 event, &sample, tool, file_offset); 1349 } 1350 1351 static s64 perf_session__process_user_event(struct perf_session *session, 1352 union perf_event *event, 1353 u64 file_offset) 1354 { 1355 struct ordered_events *oe = &session->ordered_events; 1356 struct perf_tool *tool = session->tool; 1357 struct perf_sample sample = { .time = 0, }; 1358 int fd = perf_data__fd(session->data); 1359 int err; 1360 1361 dump_event(session->evlist, event, file_offset, &sample); 1362 1363 /* These events are processed right away */ 1364 switch (event->header.type) { 1365 case PERF_RECORD_HEADER_ATTR: 1366 err = tool->attr(tool, event, &session->evlist); 1367 if (err == 0) { 1368 perf_session__set_id_hdr_size(session); 1369 perf_session__set_comm_exec(session); 1370 } 1371 return err; 1372 case PERF_RECORD_EVENT_UPDATE: 1373 return tool->event_update(tool, event, &session->evlist); 1374 case PERF_RECORD_HEADER_EVENT_TYPE: 1375 /* 1376 * Depreceated, but we need to handle it for sake 1377 * of old data files create in pipe mode. 1378 */ 1379 return 0; 1380 case PERF_RECORD_HEADER_TRACING_DATA: 1381 /* setup for reading amidst mmap */ 1382 lseek(fd, file_offset, SEEK_SET); 1383 return tool->tracing_data(session, event); 1384 case PERF_RECORD_HEADER_BUILD_ID: 1385 return tool->build_id(session, event); 1386 case PERF_RECORD_FINISHED_ROUND: 1387 return tool->finished_round(tool, event, oe); 1388 case PERF_RECORD_ID_INDEX: 1389 return tool->id_index(session, event); 1390 case PERF_RECORD_AUXTRACE_INFO: 1391 return tool->auxtrace_info(session, event); 1392 case PERF_RECORD_AUXTRACE: 1393 /* setup for reading amidst mmap */ 1394 lseek(fd, file_offset + event->header.size, SEEK_SET); 1395 return tool->auxtrace(session, event); 1396 case PERF_RECORD_AUXTRACE_ERROR: 1397 perf_session__auxtrace_error_inc(session, event); 1398 return tool->auxtrace_error(session, event); 1399 case PERF_RECORD_THREAD_MAP: 1400 return tool->thread_map(session, event); 1401 case PERF_RECORD_CPU_MAP: 1402 return tool->cpu_map(session, event); 1403 case PERF_RECORD_STAT_CONFIG: 1404 return tool->stat_config(session, event); 1405 case PERF_RECORD_STAT: 1406 return tool->stat(session, event); 1407 case PERF_RECORD_STAT_ROUND: 1408 return tool->stat_round(session, event); 1409 case PERF_RECORD_TIME_CONV: 1410 session->time_conv = event->time_conv; 1411 return tool->time_conv(session, event); 1412 case PERF_RECORD_HEADER_FEATURE: 1413 return tool->feature(session, event); 1414 default: 1415 return -EINVAL; 1416 } 1417 } 1418 1419 int perf_session__deliver_synth_event(struct perf_session *session, 1420 union perf_event *event, 1421 struct perf_sample *sample) 1422 { 1423 struct perf_evlist *evlist = session->evlist; 1424 struct perf_tool *tool = session->tool; 1425 1426 events_stats__inc(&evlist->stats, event->header.type); 1427 1428 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1429 return perf_session__process_user_event(session, event, 0); 1430 1431 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0); 1432 } 1433 1434 static void event_swap(union perf_event *event, bool sample_id_all) 1435 { 1436 perf_event__swap_op swap; 1437 1438 swap = perf_event__swap_ops[event->header.type]; 1439 if (swap) 1440 swap(event, 
static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	/* Read the payload right after the header we already read. */
	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		/* -ETIME: below the last flush, so deliver it directly. */
		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only 1. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);
	return err;
}

static void
perf_session__warn_order(const struct perf_session *session)
{
	const struct ordered_events *oe = &session->ordered_events;
	struct perf_evsel *evsel;
	bool should_warn = true;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.write_backward)
			should_warn = false;
	}

	if (!should_warn)
		return;
	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
}
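/*
 * Illustrative note for the lost-samples warning below: the drop rate is
 * total_lost_samples / (samples + total_lost_samples).  With 950 processed
 * samples and 50 lost ones that is 50 / 1000 = 5%, right at the threshold;
 * only a rate above 5% triggers the warning.
 */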
static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only guest's timeslices." :
			    "");
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which are these\n"
			    "threads by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "The time limit to process proc map is too short?\n"
			    "Increase it by --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile int session_done;
static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}
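/*
 * Illustrative note: when fetch_mmaped_event() returns NULL because an
 * event straddles the end of the current window, reader__process_events()
 * below remaps starting at the page that contains the partial event.  E.g.
 * with 4 KiB pages and head == 0x2100c, it computes page_offset = 0x21000,
 * advances file_offset by that amount and rewinds head to 0xc, so the event
 * lands fully inside the new mapping.
 */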
/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

struct reader {
	int	fd;
	u64	data_size;
	u64	data_offset;
};

static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	s64 skip;

	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = rd->data_offset - page_offset;

	ui_progress__init_size(prog, data_size, "Processing events...");

	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < data_size)
		goto more;

out:
	return err;
}
static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		= perf_data__fd(session->data),
		.data_size	= session->header.data_size,
		.data_offset	= session->header.data_offset,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output, so make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}
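/*
 * Minimal illustrative consumer of the entry point above (the function
 * name is hypothetical and nothing in perf calls it): open a perf.data
 * file read-only, replay its events through the tool's callbacks, then
 * tear the session down. Callbacks the tool leaves NULL fall back to
 * the stubs installed by perf_tool__fill_defaults().
 */
static int __maybe_unused process_perf_data_file(const char *path,
						 struct perf_tool *tool)
{
	struct perf_data data = {
		.file = { .path = path, },
		.mode = PERF_DATA_MODE_READ,
	};
	struct perf_session *session;
	int err;

	session = perf_session__new(&data, false, tool);
	if (session == NULL)
		return -1;

	err = perf_session__process_events(session);

	perf_session__delete(session);
	return err;
}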
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
{
	char *bracket;
	struct ref_reloc_sym *ref;
	struct kmap *kmap;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	kmap = map__kmap(map);
	if (kmap)
		kmap->ref_reloc_sym = ref;

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -C option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__put(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	if (session == NULL || fp == NULL)
		return;

	fprintf(fp, "# ========\n");
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * If this handler's event is not present in the
		 * session, just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}
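/*
 * Illustrative sketch of the association above (the handler and array
 * names are hypothetical): a tool declares name/handler pairs and
 * registers them in one call, typically through the
 * perf_session__set_tracepoints_handlers() convenience macro in
 * session.h, which supplies ARRAY_SIZE(). Since evsel->handler is a
 * plain 'void *', each tool casts it back to its own signature when
 * processing samples.
 */
static int example_sched_switch_handler(struct perf_evsel *evsel __maybe_unused,
					struct perf_sample *sample __maybe_unused)
{
	/* A real handler would decode the tracepoint payload here. */
	return 0;
}

static const struct perf_evsel_str_handler example_assocs[] __maybe_unused = {
	{ "sched:sched_switch", example_sched_switch_handler, },
};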
int perf_event__process_id_index(struct perf_session *session,
				 union perf_event *event)
{
	struct perf_evlist *evlist = session->evlist;
	struct id_index_event *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRIu64, e->id);
			fprintf(stdout, "  idx: %"PRIu64, e->idx);
			fprintf(stdout, "  cpu: %"PRId64, e->cpu);
			fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}

int perf_event__synthesize_id_index(struct perf_tool *tool,
				    perf_event__handler_t process,
				    struct perf_evlist *evlist,
				    struct machine *machine)
{
	union perf_event *ev;
	struct perf_evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);

	evlist__for_each_entry(evlist, evsel)
		nr += evsel->ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}
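/*
 * Worked example of the chunking above, assuming the usual layouts
 * (struct id_index_event is an 8-byte perf_event_header plus a u64
 * count, i.e. 16 bytes, and each id_index_entry is four u64s, i.e.
 * 32 bytes): header.size is a u16, so one PERF_RECORD_ID_INDEX record
 * can carry at most (65535 - 16) / 32 = 2047 entries. A session with,
 * say, 5000 ids would thus be emitted as two full records of 2047
 * entries followed by a final record holding the remaining 906.
 */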