1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * intel_pt.c: Intel Processor Trace support 4 * Copyright (c) 2013-2015, Intel Corporation. 5 */ 6 7 #include <inttypes.h> 8 #include <linux/perf_event.h> 9 #include <stdio.h> 10 #include <stdbool.h> 11 #include <errno.h> 12 #include <linux/kernel.h> 13 #include <linux/string.h> 14 #include <linux/types.h> 15 #include <linux/zalloc.h> 16 17 #include "session.h" 18 #include "machine.h" 19 #include "memswap.h" 20 #include "sort.h" 21 #include "tool.h" 22 #include "event.h" 23 #include "evlist.h" 24 #include "evsel.h" 25 #include "map.h" 26 #include "color.h" 27 #include "thread.h" 28 #include "thread-stack.h" 29 #include "symbol.h" 30 #include "callchain.h" 31 #include "dso.h" 32 #include "debug.h" 33 #include "auxtrace.h" 34 #include "tsc.h" 35 #include "intel-pt.h" 36 #include "config.h" 37 #include "util/perf_api_probe.h" 38 #include "util/synthetic-events.h" 39 #include "time-utils.h" 40 41 #include "../arch/x86/include/uapi/asm/perf_regs.h" 42 43 #include "intel-pt-decoder/intel-pt-log.h" 44 #include "intel-pt-decoder/intel-pt-decoder.h" 45 #include "intel-pt-decoder/intel-pt-insn-decoder.h" 46 #include "intel-pt-decoder/intel-pt-pkt-decoder.h" 47 48 #define MAX_TIMESTAMP (~0ULL) 49 50 #define INTEL_PT_CFG_PASS_THRU BIT_ULL(0) 51 #define INTEL_PT_CFG_PWR_EVT_EN BIT_ULL(4) 52 #define INTEL_PT_CFG_BRANCH_EN BIT_ULL(13) 53 #define INTEL_PT_CFG_EVT_EN BIT_ULL(31) 54 #define INTEL_PT_CFG_TNT_DIS BIT_ULL(55) 55 56 struct range { 57 u64 start; 58 u64 end; 59 }; 60 61 struct intel_pt { 62 struct auxtrace auxtrace; 63 struct auxtrace_queues queues; 64 struct auxtrace_heap heap; 65 u32 auxtrace_type; 66 struct perf_session *session; 67 struct machine *machine; 68 struct evsel *switch_evsel; 69 struct thread *unknown_thread; 70 bool timeless_decoding; 71 bool sampling_mode; 72 bool snapshot_mode; 73 bool per_cpu_mmaps; 74 bool have_tsc; 75 bool data_queued; 76 bool est_tsc; 77 bool sync_switch; 78 bool sync_switch_not_supported; 79 bool mispred_all; 80 bool use_thread_stack; 81 bool callstack; 82 bool cap_event_trace; 83 bool have_guest_sideband; 84 unsigned int br_stack_sz; 85 unsigned int br_stack_sz_plus; 86 int have_sched_switch; 87 u32 pmu_type; 88 u64 kernel_start; 89 u64 switch_ip; 90 u64 ptss_ip; 91 u64 first_timestamp; 92 93 struct perf_tsc_conversion tc; 94 bool cap_user_time_zero; 95 96 struct itrace_synth_opts synth_opts; 97 98 bool sample_instructions; 99 u64 instructions_sample_type; 100 u64 instructions_id; 101 102 bool sample_cycles; 103 u64 cycles_sample_type; 104 u64 cycles_id; 105 106 bool sample_branches; 107 u32 branches_filter; 108 u64 branches_sample_type; 109 u64 branches_id; 110 111 bool sample_transactions; 112 u64 transactions_sample_type; 113 u64 transactions_id; 114 115 bool sample_ptwrites; 116 u64 ptwrites_sample_type; 117 u64 ptwrites_id; 118 119 bool sample_pwr_events; 120 u64 pwr_events_sample_type; 121 u64 mwait_id; 122 u64 pwre_id; 123 u64 exstop_id; 124 u64 pwrx_id; 125 u64 cbr_id; 126 u64 psb_id; 127 128 bool single_pebs; 129 bool sample_pebs; 130 struct evsel *pebs_evsel; 131 132 u64 evt_sample_type; 133 u64 evt_id; 134 135 u64 iflag_chg_sample_type; 136 u64 iflag_chg_id; 137 138 u64 tsc_bit; 139 u64 mtc_bit; 140 u64 mtc_freq_bits; 141 u32 tsc_ctc_ratio_n; 142 u32 tsc_ctc_ratio_d; 143 u64 cyc_bit; 144 u64 noretcomp_bit; 145 unsigned max_non_turbo_ratio; 146 unsigned cbr2khz; 147 int max_loops; 148 149 unsigned long num_events; 150 151 char *filter; 152 struct addr_filters filts; 153 154 struct range 
*time_ranges; 155 unsigned int range_cnt; 156 157 struct ip_callchain *chain; 158 struct branch_stack *br_stack; 159 160 u64 dflt_tsc_offset; 161 struct rb_root vmcs_info; 162 }; 163 164 enum switch_state { 165 INTEL_PT_SS_NOT_TRACING, 166 INTEL_PT_SS_UNKNOWN, 167 INTEL_PT_SS_TRACING, 168 INTEL_PT_SS_EXPECTING_SWITCH_EVENT, 169 INTEL_PT_SS_EXPECTING_SWITCH_IP, 170 }; 171 172 /* applicable_counters is 64-bits */ 173 #define INTEL_PT_MAX_PEBS 64 174 175 struct intel_pt_pebs_event { 176 struct evsel *evsel; 177 u64 id; 178 }; 179 180 struct intel_pt_queue { 181 struct intel_pt *pt; 182 unsigned int queue_nr; 183 struct auxtrace_buffer *buffer; 184 struct auxtrace_buffer *old_buffer; 185 void *decoder; 186 const struct intel_pt_state *state; 187 struct ip_callchain *chain; 188 struct branch_stack *last_branch; 189 union perf_event *event_buf; 190 bool on_heap; 191 bool stop; 192 bool step_through_buffers; 193 bool use_buffer_pid_tid; 194 bool sync_switch; 195 bool sample_ipc; 196 pid_t pid, tid; 197 int cpu; 198 int switch_state; 199 pid_t next_tid; 200 struct thread *thread; 201 struct machine *guest_machine; 202 struct thread *guest_thread; 203 struct thread *unknown_guest_thread; 204 pid_t guest_machine_pid; 205 pid_t guest_pid; 206 pid_t guest_tid; 207 int vcpu; 208 bool exclude_kernel; 209 bool have_sample; 210 u64 time; 211 u64 timestamp; 212 u64 sel_timestamp; 213 bool sel_start; 214 unsigned int sel_idx; 215 u32 flags; 216 u16 insn_len; 217 u64 last_insn_cnt; 218 u64 ipc_insn_cnt; 219 u64 ipc_cyc_cnt; 220 u64 last_in_insn_cnt; 221 u64 last_in_cyc_cnt; 222 u64 last_cy_insn_cnt; 223 u64 last_cy_cyc_cnt; 224 u64 last_br_insn_cnt; 225 u64 last_br_cyc_cnt; 226 unsigned int cbr_seen; 227 char insn[INTEL_PT_INSN_BUF_SZ]; 228 struct intel_pt_pebs_event pebs[INTEL_PT_MAX_PEBS]; 229 }; 230 231 static void intel_pt_dump(struct intel_pt *pt __maybe_unused, 232 unsigned char *buf, size_t len) 233 { 234 struct intel_pt_pkt packet; 235 size_t pos = 0; 236 int ret, pkt_len, i; 237 char desc[INTEL_PT_PKT_DESC_MAX]; 238 const char *color = PERF_COLOR_BLUE; 239 enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX; 240 241 color_fprintf(stdout, color, 242 ". ... 
Intel Processor Trace data: size %zu bytes\n", 243 len); 244 245 while (len) { 246 ret = intel_pt_get_packet(buf, len, &packet, &ctx); 247 if (ret > 0) 248 pkt_len = ret; 249 else 250 pkt_len = 1; 251 printf("."); 252 color_fprintf(stdout, color, " %08x: ", pos); 253 for (i = 0; i < pkt_len; i++) 254 color_fprintf(stdout, color, " %02x", buf[i]); 255 for (; i < 16; i++) 256 color_fprintf(stdout, color, " "); 257 if (ret > 0) { 258 ret = intel_pt_pkt_desc(&packet, desc, 259 INTEL_PT_PKT_DESC_MAX); 260 if (ret > 0) 261 color_fprintf(stdout, color, " %s\n", desc); 262 } else { 263 color_fprintf(stdout, color, " Bad packet!\n"); 264 } 265 pos += pkt_len; 266 buf += pkt_len; 267 len -= pkt_len; 268 } 269 } 270 271 static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf, 272 size_t len) 273 { 274 printf(".\n"); 275 intel_pt_dump(pt, buf, len); 276 } 277 278 static void intel_pt_log_event(union perf_event *event) 279 { 280 FILE *f = intel_pt_log_fp(); 281 282 if (!intel_pt_enable_logging || !f) 283 return; 284 285 perf_event__fprintf(event, NULL, f); 286 } 287 288 static void intel_pt_dump_sample(struct perf_session *session, 289 struct perf_sample *sample) 290 { 291 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 292 auxtrace); 293 294 printf("\n"); 295 intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size); 296 } 297 298 static bool intel_pt_log_events(struct intel_pt *pt, u64 tm) 299 { 300 struct perf_time_interval *range = pt->synth_opts.ptime_range; 301 int n = pt->synth_opts.range_num; 302 303 if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS) 304 return true; 305 306 if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS) 307 return false; 308 309 /* perf_time__ranges_skip_sample does not work if time is zero */ 310 if (!tm) 311 tm = 1; 312 313 return !n || !perf_time__ranges_skip_sample(range, n, tm); 314 } 315 316 static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root, 317 u64 vmcs, 318 u64 dflt_tsc_offset) 319 { 320 struct rb_node **p = &rb_root->rb_node; 321 struct rb_node *parent = NULL; 322 struct intel_pt_vmcs_info *v; 323 324 while (*p) { 325 parent = *p; 326 v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node); 327 328 if (v->vmcs == vmcs) 329 return v; 330 331 if (vmcs < v->vmcs) 332 p = &(*p)->rb_left; 333 else 334 p = &(*p)->rb_right; 335 } 336 337 v = zalloc(sizeof(*v)); 338 if (v) { 339 v->vmcs = vmcs; 340 v->tsc_offset = dflt_tsc_offset; 341 v->reliable = dflt_tsc_offset; 342 343 rb_link_node(&v->rb_node, parent, p); 344 rb_insert_color(&v->rb_node, rb_root); 345 } 346 347 return v; 348 } 349 350 static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs) 351 { 352 struct intel_pt_queue *ptq = data; 353 struct intel_pt *pt = ptq->pt; 354 355 if (!vmcs && !pt->dflt_tsc_offset) 356 return NULL; 357 358 return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset); 359 } 360 361 static void intel_pt_free_vmcs_info(struct intel_pt *pt) 362 { 363 struct intel_pt_vmcs_info *v; 364 struct rb_node *n; 365 366 n = rb_first(&pt->vmcs_info); 367 while (n) { 368 v = rb_entry(n, struct intel_pt_vmcs_info, rb_node); 369 n = rb_next(n); 370 rb_erase(&v->rb_node, &pt->vmcs_info); 371 free(v); 372 } 373 } 374 375 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a, 376 struct auxtrace_buffer *b) 377 { 378 bool consecutive = false; 379 void *start; 380 381 start = intel_pt_find_overlap(a->data, a->size, b->data, 
b->size, 382 pt->have_tsc, &consecutive, 383 pt->synth_opts.vm_time_correlation); 384 if (!start) 385 return -EINVAL; 386 /* 387 * In the case of vm_time_correlation, the overlap might contain TSC 388 * packets that will not be fixed, and that will then no longer work for 389 * overlap detection. Avoid that by zeroing out the overlap. 390 */ 391 if (pt->synth_opts.vm_time_correlation) 392 memset(b->data, 0, start - b->data); 393 b->use_size = b->data + b->size - start; 394 b->use_data = start; 395 if (b->use_size && consecutive) 396 b->consecutive = true; 397 return 0; 398 } 399 400 static int intel_pt_get_buffer(struct intel_pt_queue *ptq, 401 struct auxtrace_buffer *buffer, 402 struct auxtrace_buffer *old_buffer, 403 struct intel_pt_buffer *b) 404 { 405 bool might_overlap; 406 407 if (!buffer->data) { 408 int fd = perf_data__fd(ptq->pt->session->data); 409 410 buffer->data = auxtrace_buffer__get_data(buffer, fd); 411 if (!buffer->data) 412 return -ENOMEM; 413 } 414 415 might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode; 416 if (might_overlap && !buffer->consecutive && old_buffer && 417 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer)) 418 return -ENOMEM; 419 420 if (buffer->use_data) { 421 b->len = buffer->use_size; 422 b->buf = buffer->use_data; 423 } else { 424 b->len = buffer->size; 425 b->buf = buffer->data; 426 } 427 b->ref_timestamp = buffer->reference; 428 429 if (!old_buffer || (might_overlap && !buffer->consecutive)) { 430 b->consecutive = false; 431 b->trace_nr = buffer->buffer_nr + 1; 432 } else { 433 b->consecutive = true; 434 } 435 436 return 0; 437 } 438 439 /* Do not drop buffers with references - refer intel_pt_get_trace() */ 440 static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq, 441 struct auxtrace_buffer *buffer) 442 { 443 if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer) 444 return; 445 446 auxtrace_buffer__drop_data(buffer); 447 } 448 449 /* Must be serialized with respect to intel_pt_get_trace() */ 450 static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb, 451 void *cb_data) 452 { 453 struct intel_pt_queue *ptq = data; 454 struct auxtrace_buffer *buffer = ptq->buffer; 455 struct auxtrace_buffer *old_buffer = ptq->old_buffer; 456 struct auxtrace_queue *queue; 457 int err = 0; 458 459 queue = &ptq->pt->queues.queue_array[ptq->queue_nr]; 460 461 while (1) { 462 struct intel_pt_buffer b = { .len = 0 }; 463 464 buffer = auxtrace_buffer__next(queue, buffer); 465 if (!buffer) 466 break; 467 468 err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b); 469 if (err) 470 break; 471 472 if (b.len) { 473 intel_pt_lookahead_drop_buffer(ptq, old_buffer); 474 old_buffer = buffer; 475 } else { 476 intel_pt_lookahead_drop_buffer(ptq, buffer); 477 continue; 478 } 479 480 err = cb(&b, cb_data); 481 if (err) 482 break; 483 } 484 485 if (buffer != old_buffer) 486 intel_pt_lookahead_drop_buffer(ptq, buffer); 487 intel_pt_lookahead_drop_buffer(ptq, old_buffer); 488 489 return err; 490 } 491 492 /* 493 * This function assumes data is processed sequentially only. 
494 * Must be serialized with respect to intel_pt_lookahead() 495 */ 496 static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) 497 { 498 struct intel_pt_queue *ptq = data; 499 struct auxtrace_buffer *buffer = ptq->buffer; 500 struct auxtrace_buffer *old_buffer = ptq->old_buffer; 501 struct auxtrace_queue *queue; 502 int err; 503 504 if (ptq->stop) { 505 b->len = 0; 506 return 0; 507 } 508 509 queue = &ptq->pt->queues.queue_array[ptq->queue_nr]; 510 511 buffer = auxtrace_buffer__next(queue, buffer); 512 if (!buffer) { 513 if (old_buffer) 514 auxtrace_buffer__drop_data(old_buffer); 515 b->len = 0; 516 return 0; 517 } 518 519 ptq->buffer = buffer; 520 521 err = intel_pt_get_buffer(ptq, buffer, old_buffer, b); 522 if (err) 523 return err; 524 525 if (ptq->step_through_buffers) 526 ptq->stop = true; 527 528 if (b->len) { 529 if (old_buffer) 530 auxtrace_buffer__drop_data(old_buffer); 531 ptq->old_buffer = buffer; 532 } else { 533 auxtrace_buffer__drop_data(buffer); 534 return intel_pt_get_trace(b, data); 535 } 536 537 return 0; 538 } 539 540 struct intel_pt_cache_entry { 541 struct auxtrace_cache_entry entry; 542 u64 insn_cnt; 543 u64 byte_cnt; 544 enum intel_pt_insn_op op; 545 enum intel_pt_insn_branch branch; 546 bool emulated_ptwrite; 547 int length; 548 int32_t rel; 549 char insn[INTEL_PT_INSN_BUF_SZ]; 550 }; 551 552 static int intel_pt_config_div(const char *var, const char *value, void *data) 553 { 554 int *d = data; 555 long val; 556 557 if (!strcmp(var, "intel-pt.cache-divisor")) { 558 val = strtol(value, NULL, 0); 559 if (val > 0 && val <= INT_MAX) 560 *d = val; 561 } 562 563 return 0; 564 } 565 566 static int intel_pt_cache_divisor(void) 567 { 568 static int d; 569 570 if (d) 571 return d; 572 573 perf_config(intel_pt_config_div, &d); 574 575 if (!d) 576 d = 64; 577 578 return d; 579 } 580 581 static unsigned int intel_pt_cache_size(struct dso *dso, 582 struct machine *machine) 583 { 584 off_t size; 585 586 size = dso__data_size(dso, machine); 587 size /= intel_pt_cache_divisor(); 588 if (size < 1000) 589 return 10; 590 if (size > (1 << 21)) 591 return 21; 592 return 32 - __builtin_clz(size); 593 } 594 595 static struct auxtrace_cache *intel_pt_cache(struct dso *dso, 596 struct machine *machine) 597 { 598 struct auxtrace_cache *c; 599 unsigned int bits; 600 601 if (dso__auxtrace_cache(dso)) 602 return dso__auxtrace_cache(dso); 603 604 bits = intel_pt_cache_size(dso, machine); 605 606 /* Ignoring cache creation failure */ 607 c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200); 608 609 dso__set_auxtrace_cache(dso, c); 610 611 return c; 612 } 613 614 static int intel_pt_cache_add(struct dso *dso, struct machine *machine, 615 u64 offset, u64 insn_cnt, u64 byte_cnt, 616 struct intel_pt_insn *intel_pt_insn) 617 { 618 struct auxtrace_cache *c = intel_pt_cache(dso, machine); 619 struct intel_pt_cache_entry *e; 620 int err; 621 622 if (!c) 623 return -ENOMEM; 624 625 e = auxtrace_cache__alloc_entry(c); 626 if (!e) 627 return -ENOMEM; 628 629 e->insn_cnt = insn_cnt; 630 e->byte_cnt = byte_cnt; 631 e->op = intel_pt_insn->op; 632 e->branch = intel_pt_insn->branch; 633 e->emulated_ptwrite = intel_pt_insn->emulated_ptwrite; 634 e->length = intel_pt_insn->length; 635 e->rel = intel_pt_insn->rel; 636 memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ); 637 638 err = auxtrace_cache__add(c, offset, &e->entry); 639 if (err) 640 auxtrace_cache__free_entry(c, e); 641 642 return err; 643 } 644 645 static struct intel_pt_cache_entry * 646 
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset) 647 { 648 struct auxtrace_cache *c = intel_pt_cache(dso, machine); 649 650 if (!c) 651 return NULL; 652 653 return auxtrace_cache__lookup(dso__auxtrace_cache(dso), offset); 654 } 655 656 static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine, 657 u64 offset) 658 { 659 struct auxtrace_cache *c = intel_pt_cache(dso, machine); 660 661 if (!c) 662 return; 663 664 auxtrace_cache__remove(dso__auxtrace_cache(dso), offset); 665 } 666 667 static inline bool intel_pt_guest_kernel_ip(uint64_t ip) 668 { 669 /* Assumes 64-bit kernel */ 670 return ip & (1ULL << 63); 671 } 672 673 static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr) 674 { 675 if (nr) { 676 return intel_pt_guest_kernel_ip(ip) ? 677 PERF_RECORD_MISC_GUEST_KERNEL : 678 PERF_RECORD_MISC_GUEST_USER; 679 } 680 681 return ip >= ptq->pt->kernel_start ? 682 PERF_RECORD_MISC_KERNEL : 683 PERF_RECORD_MISC_USER; 684 } 685 686 static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip) 687 { 688 /* No support for non-zero CS base */ 689 if (from_ip) 690 return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr); 691 return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr); 692 } 693 694 static int intel_pt_get_guest(struct intel_pt_queue *ptq) 695 { 696 struct machines *machines = &ptq->pt->session->machines; 697 struct machine *machine; 698 pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid; 699 700 if (ptq->guest_machine && pid == ptq->guest_machine->pid) 701 return 0; 702 703 ptq->guest_machine = NULL; 704 thread__zput(ptq->unknown_guest_thread); 705 706 if (symbol_conf.guest_code) { 707 thread__zput(ptq->guest_thread); 708 ptq->guest_thread = machines__findnew_guest_code(machines, pid); 709 } 710 711 machine = machines__find_guest(machines, pid); 712 if (!machine) 713 return -1; 714 715 ptq->unknown_guest_thread = machine__idle_thread(machine); 716 if (!ptq->unknown_guest_thread) 717 return -1; 718 719 ptq->guest_machine = machine; 720 721 return 0; 722 } 723 724 static inline bool intel_pt_jmp_16(struct intel_pt_insn *intel_pt_insn) 725 { 726 return intel_pt_insn->rel == 16 && intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL; 727 } 728 729 #define PTWRITE_MAGIC "\x0f\x0bperf,ptwrite " 730 #define PTWRITE_MAGIC_LEN 16 731 732 static bool intel_pt_emulated_ptwrite(struct dso *dso, struct machine *machine, u64 offset) 733 { 734 unsigned char buf[PTWRITE_MAGIC_LEN]; 735 ssize_t len; 736 737 len = dso__data_read_offset(dso, machine, offset, buf, PTWRITE_MAGIC_LEN); 738 if (len == PTWRITE_MAGIC_LEN && !memcmp(buf, PTWRITE_MAGIC, PTWRITE_MAGIC_LEN)) { 739 intel_pt_log("Emulated ptwrite signature found\n"); 740 return true; 741 } 742 intel_pt_log("Emulated ptwrite signature not found\n"); 743 return false; 744 } 745 746 static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, 747 uint64_t *insn_cnt_ptr, uint64_t *ip, 748 uint64_t to_ip, uint64_t max_insn_cnt, 749 void *data) 750 { 751 struct intel_pt_queue *ptq = data; 752 struct machine *machine = ptq->pt->machine; 753 struct thread *thread; 754 struct addr_location al; 755 unsigned char buf[INTEL_PT_INSN_BUF_SZ]; 756 ssize_t len; 757 int x86_64, ret = 0; 758 u8 cpumode; 759 u64 offset, start_offset, start_ip; 760 u64 insn_cnt = 0; 761 bool one_map = true; 762 bool nr; 763 764 765 addr_location__init(&al); 766 intel_pt_insn->length = 0; 767 intel_pt_insn->op = INTEL_PT_OP_OTHER; 768 769 if (to_ip && *ip 
== to_ip) 770 goto out_no_cache; 771 772 nr = ptq->state->to_nr; 773 cpumode = intel_pt_nr_cpumode(ptq, *ip, nr); 774 775 if (nr) { 776 if (ptq->pt->have_guest_sideband) { 777 if (!ptq->guest_machine || ptq->guest_machine_pid != ptq->pid) { 778 intel_pt_log("ERROR: guest sideband but no guest machine\n"); 779 ret = -EINVAL; 780 goto out_ret; 781 } 782 } else if ((!symbol_conf.guest_code && cpumode != PERF_RECORD_MISC_GUEST_KERNEL) || 783 intel_pt_get_guest(ptq)) { 784 intel_pt_log("ERROR: no guest machine\n"); 785 ret = -EINVAL; 786 goto out_ret; 787 } 788 machine = ptq->guest_machine; 789 thread = ptq->guest_thread; 790 if (!thread) { 791 if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL) { 792 intel_pt_log("ERROR: no guest thread\n"); 793 ret = -EINVAL; 794 goto out_ret; 795 } 796 thread = ptq->unknown_guest_thread; 797 } 798 } else { 799 thread = ptq->thread; 800 if (!thread) { 801 if (cpumode != PERF_RECORD_MISC_KERNEL) { 802 intel_pt_log("ERROR: no thread\n"); 803 ret = -EINVAL; 804 goto out_ret; 805 } 806 thread = ptq->pt->unknown_thread; 807 } 808 } 809 810 while (1) { 811 struct dso *dso; 812 813 if (!thread__find_map(thread, cpumode, *ip, &al) || !map__dso(al.map)) { 814 if (al.map) 815 intel_pt_log("ERROR: thread has no dso for %#" PRIx64 "\n", *ip); 816 else 817 intel_pt_log("ERROR: thread has no map for %#" PRIx64 "\n", *ip); 818 addr_location__exit(&al); 819 ret = -EINVAL; 820 goto out_ret; 821 } 822 dso = map__dso(al.map); 823 824 if (dso__data(dso)->status == DSO_DATA_STATUS_ERROR && 825 dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE)) { 826 ret = -ENOENT; 827 goto out_ret; 828 } 829 830 offset = map__map_ip(al.map, *ip); 831 832 if (!to_ip && one_map) { 833 struct intel_pt_cache_entry *e; 834 835 e = intel_pt_cache_lookup(dso, machine, offset); 836 if (e && 837 (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) { 838 *insn_cnt_ptr = e->insn_cnt; 839 *ip += e->byte_cnt; 840 intel_pt_insn->op = e->op; 841 intel_pt_insn->branch = e->branch; 842 intel_pt_insn->emulated_ptwrite = e->emulated_ptwrite; 843 intel_pt_insn->length = e->length; 844 intel_pt_insn->rel = e->rel; 845 memcpy(intel_pt_insn->buf, e->insn, INTEL_PT_INSN_BUF_SZ); 846 intel_pt_log_insn_no_data(intel_pt_insn, *ip); 847 ret = 0; 848 goto out_ret; 849 } 850 } 851 852 start_offset = offset; 853 start_ip = *ip; 854 855 /* Load maps to ensure dso->is_64_bit has been updated */ 856 map__load(al.map); 857 858 x86_64 = dso__is_64_bit(dso); 859 860 while (1) { 861 len = dso__data_read_offset(dso, machine, 862 offset, buf, 863 INTEL_PT_INSN_BUF_SZ); 864 if (len <= 0) { 865 intel_pt_log("ERROR: failed to read at offset %#" PRIx64 " ", 866 offset); 867 if (intel_pt_enable_logging) 868 dso__fprintf(dso, intel_pt_log_fp()); 869 ret = -EINVAL; 870 goto out_ret; 871 } 872 873 if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn)) { 874 ret = -EINVAL; 875 goto out_ret; 876 } 877 878 intel_pt_log_insn(intel_pt_insn, *ip); 879 880 insn_cnt += 1; 881 882 if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH) { 883 bool eptw; 884 u64 offs; 885 886 if (!intel_pt_jmp_16(intel_pt_insn)) 887 goto out; 888 /* Check for emulated ptwrite */ 889 offs = offset + intel_pt_insn->length; 890 eptw = intel_pt_emulated_ptwrite(dso, machine, offs); 891 intel_pt_insn->emulated_ptwrite = eptw; 892 goto out; 893 } 894 895 if (max_insn_cnt && insn_cnt >= max_insn_cnt) 896 goto out_no_cache; 897 898 *ip += intel_pt_insn->length; 899 900 if (to_ip && *ip == to_ip) { 901 intel_pt_insn->length = 0; 902 intel_pt_insn->op = INTEL_PT_OP_OTHER; 903 goto 
out_no_cache; 904 } 905 906 if (*ip >= map__end(al.map)) 907 break; 908 909 offset += intel_pt_insn->length; 910 } 911 one_map = false; 912 } 913 out: 914 *insn_cnt_ptr = insn_cnt; 915 916 if (!one_map) 917 goto out_no_cache; 918 919 /* 920 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate 921 * entries. 922 */ 923 if (to_ip) { 924 struct intel_pt_cache_entry *e; 925 926 e = intel_pt_cache_lookup(map__dso(al.map), machine, start_offset); 927 if (e) 928 goto out_ret; 929 } 930 931 /* Ignore cache errors */ 932 intel_pt_cache_add(map__dso(al.map), machine, start_offset, insn_cnt, 933 *ip - start_ip, intel_pt_insn); 934 935 out_ret: 936 addr_location__exit(&al); 937 return ret; 938 939 out_no_cache: 940 *insn_cnt_ptr = insn_cnt; 941 addr_location__exit(&al); 942 return 0; 943 } 944 945 static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip, 946 uint64_t offset, const char *filename) 947 { 948 struct addr_filter *filt; 949 bool have_filter = false; 950 bool hit_tracestop = false; 951 bool hit_filter = false; 952 953 list_for_each_entry(filt, &pt->filts.head, list) { 954 if (filt->start) 955 have_filter = true; 956 957 if ((filename && !filt->filename) || 958 (!filename && filt->filename) || 959 (filename && strcmp(filename, filt->filename))) 960 continue; 961 962 if (!(offset >= filt->addr && offset < filt->addr + filt->size)) 963 continue; 964 965 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n", 966 ip, offset, filename ? filename : "[kernel]", 967 filt->start ? "filter" : "stop", 968 filt->addr, filt->size); 969 970 if (filt->start) 971 hit_filter = true; 972 else 973 hit_tracestop = true; 974 } 975 976 if (!hit_tracestop && !hit_filter) 977 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n", 978 ip, offset, filename ? 
filename : "[kernel]"); 979 980 return hit_tracestop || (have_filter && !hit_filter); 981 } 982 983 static int __intel_pt_pgd_ip(uint64_t ip, void *data) 984 { 985 struct intel_pt_queue *ptq = data; 986 struct thread *thread; 987 struct addr_location al; 988 u8 cpumode; 989 u64 offset; 990 int res; 991 992 if (ptq->state->to_nr) { 993 if (intel_pt_guest_kernel_ip(ip)) 994 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL); 995 /* No support for decoding guest user space */ 996 return -EINVAL; 997 } else if (ip >= ptq->pt->kernel_start) { 998 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL); 999 } 1000 1001 cpumode = PERF_RECORD_MISC_USER; 1002 1003 thread = ptq->thread; 1004 if (!thread) 1005 return -EINVAL; 1006 1007 addr_location__init(&al); 1008 if (!thread__find_map(thread, cpumode, ip, &al) || !map__dso(al.map)) 1009 return -EINVAL; 1010 1011 offset = map__map_ip(al.map, ip); 1012 1013 res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, dso__long_name(map__dso(al.map))); 1014 addr_location__exit(&al); 1015 return res; 1016 } 1017 1018 static bool intel_pt_pgd_ip(uint64_t ip, void *data) 1019 { 1020 return __intel_pt_pgd_ip(ip, data) > 0; 1021 } 1022 1023 static bool intel_pt_get_config(struct intel_pt *pt, 1024 struct perf_event_attr *attr, u64 *config) 1025 { 1026 if (attr->type == pt->pmu_type) { 1027 if (config) 1028 *config = attr->config; 1029 return true; 1030 } 1031 1032 return false; 1033 } 1034 1035 static bool intel_pt_exclude_kernel(struct intel_pt *pt) 1036 { 1037 struct evsel *evsel; 1038 1039 evlist__for_each_entry(pt->session->evlist, evsel) { 1040 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) && 1041 !evsel->core.attr.exclude_kernel) 1042 return false; 1043 } 1044 return true; 1045 } 1046 1047 static bool intel_pt_return_compression(struct intel_pt *pt) 1048 { 1049 struct evsel *evsel; 1050 u64 config; 1051 1052 if (!pt->noretcomp_bit) 1053 return true; 1054 1055 evlist__for_each_entry(pt->session->evlist, evsel) { 1056 if (intel_pt_get_config(pt, &evsel->core.attr, &config) && 1057 (config & pt->noretcomp_bit)) 1058 return false; 1059 } 1060 return true; 1061 } 1062 1063 static bool intel_pt_branch_enable(struct intel_pt *pt) 1064 { 1065 struct evsel *evsel; 1066 u64 config; 1067 1068 evlist__for_each_entry(pt->session->evlist, evsel) { 1069 if (intel_pt_get_config(pt, &evsel->core.attr, &config) && 1070 (config & INTEL_PT_CFG_PASS_THRU) && 1071 !(config & INTEL_PT_CFG_BRANCH_EN)) 1072 return false; 1073 } 1074 return true; 1075 } 1076 1077 static bool intel_pt_disabled_tnt(struct intel_pt *pt) 1078 { 1079 struct evsel *evsel; 1080 u64 config; 1081 1082 evlist__for_each_entry(pt->session->evlist, evsel) { 1083 if (intel_pt_get_config(pt, &evsel->core.attr, &config) && 1084 config & INTEL_PT_CFG_TNT_DIS) 1085 return true; 1086 } 1087 return false; 1088 } 1089 1090 static unsigned int intel_pt_mtc_period(struct intel_pt *pt) 1091 { 1092 struct evsel *evsel; 1093 unsigned int shift; 1094 u64 config; 1095 1096 if (!pt->mtc_freq_bits) 1097 return 0; 1098 1099 for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++) 1100 config >>= 1; 1101 1102 evlist__for_each_entry(pt->session->evlist, evsel) { 1103 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) 1104 return (config & pt->mtc_freq_bits) >> shift; 1105 } 1106 return 0; 1107 } 1108 1109 static bool intel_pt_timeless_decoding(struct intel_pt *pt) 1110 { 1111 struct evsel *evsel; 1112 bool timeless_decoding = true; 1113 u64 config; 1114 1115 if (!pt->tsc_bit || !pt->cap_user_time_zero || 
pt->synth_opts.timeless_decoding) 1116 return true; 1117 1118 evlist__for_each_entry(pt->session->evlist, evsel) { 1119 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 1120 return true; 1121 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) { 1122 if (config & pt->tsc_bit) 1123 timeless_decoding = false; 1124 else 1125 return true; 1126 } 1127 } 1128 return timeless_decoding; 1129 } 1130 1131 static bool intel_pt_tracing_kernel(struct intel_pt *pt) 1132 { 1133 struct evsel *evsel; 1134 1135 evlist__for_each_entry(pt->session->evlist, evsel) { 1136 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) && 1137 !evsel->core.attr.exclude_kernel) 1138 return true; 1139 } 1140 return false; 1141 } 1142 1143 static bool intel_pt_have_tsc(struct intel_pt *pt) 1144 { 1145 struct evsel *evsel; 1146 bool have_tsc = false; 1147 u64 config; 1148 1149 if (!pt->tsc_bit) 1150 return false; 1151 1152 evlist__for_each_entry(pt->session->evlist, evsel) { 1153 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) { 1154 if (config & pt->tsc_bit) 1155 have_tsc = true; 1156 else 1157 return false; 1158 } 1159 } 1160 return have_tsc; 1161 } 1162 1163 static bool intel_pt_have_mtc(struct intel_pt *pt) 1164 { 1165 struct evsel *evsel; 1166 u64 config; 1167 1168 evlist__for_each_entry(pt->session->evlist, evsel) { 1169 if (intel_pt_get_config(pt, &evsel->core.attr, &config) && 1170 (config & pt->mtc_bit)) 1171 return true; 1172 } 1173 return false; 1174 } 1175 1176 static bool intel_pt_sampling_mode(struct intel_pt *pt) 1177 { 1178 struct evsel *evsel; 1179 1180 evlist__for_each_entry(pt->session->evlist, evsel) { 1181 if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) && 1182 evsel->core.attr.aux_sample_size) 1183 return true; 1184 } 1185 return false; 1186 } 1187 1188 static u64 intel_pt_ctl(struct intel_pt *pt) 1189 { 1190 struct evsel *evsel; 1191 u64 config; 1192 1193 evlist__for_each_entry(pt->session->evlist, evsel) { 1194 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) 1195 return config; 1196 } 1197 return 0; 1198 } 1199 1200 static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns) 1201 { 1202 u64 quot, rem; 1203 1204 quot = ns / pt->tc.time_mult; 1205 rem = ns % pt->tc.time_mult; 1206 return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) / 1207 pt->tc.time_mult; 1208 } 1209 1210 static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt) 1211 { 1212 size_t sz = sizeof(struct ip_callchain); 1213 1214 /* Add 1 to callchain_sz for callchain context */ 1215 sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64); 1216 return zalloc(sz); 1217 } 1218 1219 static int intel_pt_callchain_init(struct intel_pt *pt) 1220 { 1221 struct evsel *evsel; 1222 1223 evlist__for_each_entry(pt->session->evlist, evsel) { 1224 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN)) 1225 evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN; 1226 } 1227 1228 pt->chain = intel_pt_alloc_chain(pt); 1229 if (!pt->chain) 1230 return -ENOMEM; 1231 1232 return 0; 1233 } 1234 1235 static void intel_pt_add_callchain(struct intel_pt *pt, 1236 struct perf_sample *sample) 1237 { 1238 struct thread *thread = machine__findnew_thread(pt->machine, 1239 sample->pid, 1240 sample->tid); 1241 1242 thread_stack__sample_late(thread, sample->cpu, pt->chain, 1243 pt->synth_opts.callchain_sz + 1, sample->ip, 1244 pt->kernel_start); 1245 1246 sample->callchain = pt->chain; 1247 } 1248 1249 static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt) 1250 { 1251 size_t sz = 
sizeof(struct branch_stack); 1252 1253 sz += entry_cnt * sizeof(struct branch_entry); 1254 return zalloc(sz); 1255 } 1256 1257 static int intel_pt_br_stack_init(struct intel_pt *pt) 1258 { 1259 struct evsel *evsel; 1260 1261 evlist__for_each_entry(pt->session->evlist, evsel) { 1262 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK)) 1263 evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK; 1264 } 1265 1266 pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz); 1267 if (!pt->br_stack) 1268 return -ENOMEM; 1269 1270 return 0; 1271 } 1272 1273 static void intel_pt_add_br_stack(struct intel_pt *pt, 1274 struct perf_sample *sample) 1275 { 1276 struct thread *thread = machine__findnew_thread(pt->machine, 1277 sample->pid, 1278 sample->tid); 1279 1280 thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack, 1281 pt->br_stack_sz, sample->ip, 1282 pt->kernel_start); 1283 1284 sample->branch_stack = pt->br_stack; 1285 thread__put(thread); 1286 } 1287 1288 /* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */ 1289 #define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U) 1290 1291 static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt, 1292 unsigned int queue_nr) 1293 { 1294 struct intel_pt_params params = { .get_trace = 0, }; 1295 struct perf_env *env = pt->machine->env; 1296 struct intel_pt_queue *ptq; 1297 1298 ptq = zalloc(sizeof(struct intel_pt_queue)); 1299 if (!ptq) 1300 return NULL; 1301 1302 if (pt->synth_opts.callchain) { 1303 ptq->chain = intel_pt_alloc_chain(pt); 1304 if (!ptq->chain) 1305 goto out_free; 1306 } 1307 1308 if (pt->synth_opts.last_branch || pt->synth_opts.other_events) { 1309 unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz); 1310 1311 ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt); 1312 if (!ptq->last_branch) 1313 goto out_free; 1314 } 1315 1316 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE); 1317 if (!ptq->event_buf) 1318 goto out_free; 1319 1320 ptq->pt = pt; 1321 ptq->queue_nr = queue_nr; 1322 ptq->exclude_kernel = intel_pt_exclude_kernel(pt); 1323 ptq->pid = -1; 1324 ptq->tid = -1; 1325 ptq->cpu = -1; 1326 ptq->next_tid = -1; 1327 1328 params.get_trace = intel_pt_get_trace; 1329 params.walk_insn = intel_pt_walk_next_insn; 1330 params.lookahead = intel_pt_lookahead; 1331 params.findnew_vmcs_info = intel_pt_findnew_vmcs_info; 1332 params.data = ptq; 1333 params.return_compression = intel_pt_return_compression(pt); 1334 params.branch_enable = intel_pt_branch_enable(pt); 1335 params.ctl = intel_pt_ctl(pt); 1336 params.max_non_turbo_ratio = pt->max_non_turbo_ratio; 1337 params.mtc_period = intel_pt_mtc_period(pt); 1338 params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n; 1339 params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d; 1340 params.quick = pt->synth_opts.quick; 1341 params.vm_time_correlation = pt->synth_opts.vm_time_correlation; 1342 params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run; 1343 params.first_timestamp = pt->first_timestamp; 1344 params.max_loops = pt->max_loops; 1345 1346 /* Cannot walk code without TNT, so force 'quick' mode */ 1347 if (params.branch_enable && intel_pt_disabled_tnt(pt) && !params.quick) 1348 params.quick = 1; 1349 1350 if (pt->filts.cnt > 0) 1351 params.pgd_ip = intel_pt_pgd_ip; 1352 1353 if (pt->synth_opts.instructions || pt->synth_opts.cycles) { 1354 if (pt->synth_opts.period) { 1355 switch (pt->synth_opts.period_type) { 1356 case PERF_ITRACE_PERIOD_INSTRUCTIONS: 1357 params.period_type = 1358 INTEL_PT_PERIOD_INSTRUCTIONS; 1359 params.period = pt->synth_opts.period; 1360 break; 1361 case 
PERF_ITRACE_PERIOD_TICKS: 1362 params.period_type = INTEL_PT_PERIOD_TICKS; 1363 params.period = pt->synth_opts.period; 1364 break; 1365 case PERF_ITRACE_PERIOD_NANOSECS: 1366 params.period_type = INTEL_PT_PERIOD_TICKS; 1367 params.period = intel_pt_ns_to_ticks(pt, 1368 pt->synth_opts.period); 1369 break; 1370 default: 1371 break; 1372 } 1373 } 1374 1375 if (!params.period) { 1376 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS; 1377 params.period = 1; 1378 } 1379 } 1380 1381 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18)) 1382 params.flags |= INTEL_PT_FUP_WITH_NLIP; 1383 1384 ptq->decoder = intel_pt_decoder_new(¶ms); 1385 if (!ptq->decoder) 1386 goto out_free; 1387 1388 return ptq; 1389 1390 out_free: 1391 zfree(&ptq->event_buf); 1392 zfree(&ptq->last_branch); 1393 zfree(&ptq->chain); 1394 free(ptq); 1395 return NULL; 1396 } 1397 1398 static void intel_pt_free_queue(void *priv) 1399 { 1400 struct intel_pt_queue *ptq = priv; 1401 1402 if (!ptq) 1403 return; 1404 thread__zput(ptq->thread); 1405 thread__zput(ptq->guest_thread); 1406 thread__zput(ptq->unknown_guest_thread); 1407 intel_pt_decoder_free(ptq->decoder); 1408 zfree(&ptq->event_buf); 1409 zfree(&ptq->last_branch); 1410 zfree(&ptq->chain); 1411 free(ptq); 1412 } 1413 1414 static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp) 1415 { 1416 unsigned int i; 1417 1418 pt->first_timestamp = timestamp; 1419 1420 for (i = 0; i < pt->queues.nr_queues; i++) { 1421 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 1422 struct intel_pt_queue *ptq = queue->priv; 1423 1424 if (ptq && ptq->decoder) 1425 intel_pt_set_first_timestamp(ptq->decoder, timestamp); 1426 } 1427 } 1428 1429 static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq) 1430 { 1431 struct machines *machines = &ptq->pt->session->machines; 1432 struct machine *machine; 1433 pid_t machine_pid = ptq->pid; 1434 pid_t tid; 1435 int vcpu; 1436 1437 if (machine_pid <= 0) 1438 return 0; /* Not a guest machine */ 1439 1440 machine = machines__find(machines, machine_pid); 1441 if (!machine) 1442 return 0; /* Not a guest machine */ 1443 1444 if (ptq->guest_machine != machine) { 1445 ptq->guest_machine = NULL; 1446 thread__zput(ptq->guest_thread); 1447 thread__zput(ptq->unknown_guest_thread); 1448 1449 ptq->unknown_guest_thread = machine__find_thread(machine, 0, 0); 1450 if (!ptq->unknown_guest_thread) 1451 return -1; 1452 ptq->guest_machine = machine; 1453 } 1454 1455 vcpu = ptq->thread ? 
thread__guest_cpu(ptq->thread) : -1; 1456 if (vcpu < 0) 1457 return -1; 1458 1459 tid = machine__get_current_tid(machine, vcpu); 1460 1461 if (ptq->guest_thread && thread__tid(ptq->guest_thread) != tid) 1462 thread__zput(ptq->guest_thread); 1463 1464 if (!ptq->guest_thread) { 1465 ptq->guest_thread = machine__find_thread(machine, -1, tid); 1466 if (!ptq->guest_thread) 1467 return -1; 1468 } 1469 1470 ptq->guest_machine_pid = machine_pid; 1471 ptq->guest_pid = thread__pid(ptq->guest_thread); 1472 ptq->guest_tid = tid; 1473 ptq->vcpu = vcpu; 1474 1475 return 0; 1476 } 1477 1478 static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt, 1479 struct auxtrace_queue *queue) 1480 { 1481 struct intel_pt_queue *ptq = queue->priv; 1482 1483 if (queue->tid == -1 || pt->have_sched_switch) { 1484 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu); 1485 if (ptq->tid == -1) 1486 ptq->pid = -1; 1487 thread__zput(ptq->thread); 1488 } 1489 1490 if (!ptq->thread && ptq->tid != -1) 1491 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid); 1492 1493 if (ptq->thread) { 1494 ptq->pid = thread__pid(ptq->thread); 1495 if (queue->cpu == -1) 1496 ptq->cpu = thread__cpu(ptq->thread); 1497 } 1498 1499 if (pt->have_guest_sideband && intel_pt_get_guest_from_sideband(ptq)) { 1500 ptq->guest_machine_pid = 0; 1501 ptq->guest_pid = -1; 1502 ptq->guest_tid = -1; 1503 ptq->vcpu = -1; 1504 } 1505 } 1506 1507 static void intel_pt_sample_flags(struct intel_pt_queue *ptq) 1508 { 1509 struct intel_pt *pt = ptq->pt; 1510 1511 ptq->insn_len = 0; 1512 if (ptq->state->flags & INTEL_PT_ABORT_TX) { 1513 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT; 1514 } else if (ptq->state->flags & INTEL_PT_ASYNC) { 1515 if (!ptq->state->to_ip) 1516 ptq->flags = PERF_IP_FLAG_BRANCH | 1517 PERF_IP_FLAG_ASYNC | 1518 PERF_IP_FLAG_TRACE_END; 1519 else if (ptq->state->from_nr && !ptq->state->to_nr) 1520 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | 1521 PERF_IP_FLAG_ASYNC | 1522 PERF_IP_FLAG_VMEXIT; 1523 else 1524 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | 1525 PERF_IP_FLAG_ASYNC | 1526 PERF_IP_FLAG_INTERRUPT; 1527 } else { 1528 if (ptq->state->from_ip) 1529 ptq->flags = intel_pt_insn_type(ptq->state->insn_op); 1530 else 1531 ptq->flags = PERF_IP_FLAG_BRANCH | 1532 PERF_IP_FLAG_TRACE_BEGIN; 1533 if (ptq->state->flags & INTEL_PT_IN_TX) 1534 ptq->flags |= PERF_IP_FLAG_IN_TX; 1535 ptq->insn_len = ptq->state->insn_len; 1536 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ); 1537 } 1538 1539 if (ptq->state->type & INTEL_PT_TRACE_BEGIN) 1540 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN; 1541 if (ptq->state->type & INTEL_PT_TRACE_END) 1542 ptq->flags |= PERF_IP_FLAG_TRACE_END; 1543 1544 if (pt->cap_event_trace) { 1545 if (ptq->state->type & INTEL_PT_IFLAG_CHG) { 1546 if (!ptq->state->from_iflag) 1547 ptq->flags |= PERF_IP_FLAG_INTR_DISABLE; 1548 if (ptq->state->from_iflag != ptq->state->to_iflag) 1549 ptq->flags |= PERF_IP_FLAG_INTR_TOGGLE; 1550 } else if (!ptq->state->to_iflag) { 1551 ptq->flags |= PERF_IP_FLAG_INTR_DISABLE; 1552 } 1553 } 1554 } 1555 1556 static void intel_pt_setup_time_range(struct intel_pt *pt, 1557 struct intel_pt_queue *ptq) 1558 { 1559 if (!pt->range_cnt) 1560 return; 1561 1562 ptq->sel_timestamp = pt->time_ranges[0].start; 1563 ptq->sel_idx = 0; 1564 1565 if (ptq->sel_timestamp) { 1566 ptq->sel_start = true; 1567 } else { 1568 ptq->sel_timestamp = pt->time_ranges[0].end; 1569 ptq->sel_start = false; 1570 } 1571 } 1572 1573 static int intel_pt_setup_queue(struct intel_pt *pt, 1574 struct 
auxtrace_queue *queue, 1575 unsigned int queue_nr) 1576 { 1577 struct intel_pt_queue *ptq = queue->priv; 1578 1579 if (list_empty(&queue->head)) 1580 return 0; 1581 1582 if (!ptq) { 1583 ptq = intel_pt_alloc_queue(pt, queue_nr); 1584 if (!ptq) 1585 return -ENOMEM; 1586 queue->priv = ptq; 1587 1588 if (queue->cpu != -1) 1589 ptq->cpu = queue->cpu; 1590 ptq->tid = queue->tid; 1591 1592 ptq->cbr_seen = UINT_MAX; 1593 1594 if (pt->sampling_mode && !pt->snapshot_mode && 1595 pt->timeless_decoding) 1596 ptq->step_through_buffers = true; 1597 1598 ptq->sync_switch = pt->sync_switch; 1599 1600 intel_pt_setup_time_range(pt, ptq); 1601 } 1602 1603 if (!ptq->on_heap && 1604 (!ptq->sync_switch || 1605 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) { 1606 const struct intel_pt_state *state; 1607 int ret; 1608 1609 if (pt->timeless_decoding) 1610 return 0; 1611 1612 intel_pt_log("queue %u getting timestamp\n", queue_nr); 1613 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n", 1614 queue_nr, ptq->cpu, ptq->pid, ptq->tid); 1615 1616 if (ptq->sel_start && ptq->sel_timestamp) { 1617 ret = intel_pt_fast_forward(ptq->decoder, 1618 ptq->sel_timestamp); 1619 if (ret) 1620 return ret; 1621 } 1622 1623 while (1) { 1624 state = intel_pt_decode(ptq->decoder); 1625 if (state->err) { 1626 if (state->err == INTEL_PT_ERR_NODATA) { 1627 intel_pt_log("queue %u has no timestamp\n", 1628 queue_nr); 1629 return 0; 1630 } 1631 continue; 1632 } 1633 if (state->timestamp) 1634 break; 1635 } 1636 1637 ptq->timestamp = state->timestamp; 1638 intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n", 1639 queue_nr, ptq->timestamp); 1640 ptq->state = state; 1641 ptq->have_sample = true; 1642 if (ptq->sel_start && ptq->sel_timestamp && 1643 ptq->timestamp < ptq->sel_timestamp) 1644 ptq->have_sample = false; 1645 intel_pt_sample_flags(ptq); 1646 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp); 1647 if (ret) 1648 return ret; 1649 ptq->on_heap = true; 1650 } 1651 1652 return 0; 1653 } 1654 1655 static int intel_pt_setup_queues(struct intel_pt *pt) 1656 { 1657 unsigned int i; 1658 int ret; 1659 1660 for (i = 0; i < pt->queues.nr_queues; i++) { 1661 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i); 1662 if (ret) 1663 return ret; 1664 } 1665 return 0; 1666 } 1667 1668 static inline bool intel_pt_skip_event(struct intel_pt *pt) 1669 { 1670 return pt->synth_opts.initial_skip && 1671 pt->num_events++ < pt->synth_opts.initial_skip; 1672 } 1673 1674 /* 1675 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen. 1676 * Also ensure CBR is first non-skipped event by allowing for 4 more samples 1677 * from this decoder state. 
1678 */ 1679 static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt) 1680 { 1681 return pt->synth_opts.initial_skip && 1682 pt->num_events + 4 < pt->synth_opts.initial_skip; 1683 } 1684 1685 static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq, 1686 union perf_event *event, 1687 struct perf_sample *sample) 1688 { 1689 event->sample.header.type = PERF_RECORD_SAMPLE; 1690 event->sample.header.size = sizeof(struct perf_event_header); 1691 1692 sample->pid = ptq->pid; 1693 sample->tid = ptq->tid; 1694 1695 if (ptq->pt->have_guest_sideband) { 1696 if ((ptq->state->from_ip && ptq->state->from_nr) || 1697 (ptq->state->to_ip && ptq->state->to_nr)) { 1698 sample->pid = ptq->guest_pid; 1699 sample->tid = ptq->guest_tid; 1700 sample->machine_pid = ptq->guest_machine_pid; 1701 sample->vcpu = ptq->vcpu; 1702 } 1703 } 1704 1705 sample->cpu = ptq->cpu; 1706 sample->insn_len = ptq->insn_len; 1707 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ); 1708 } 1709 1710 static void intel_pt_prep_b_sample(struct intel_pt *pt, 1711 struct intel_pt_queue *ptq, 1712 union perf_event *event, 1713 struct perf_sample *sample) 1714 { 1715 intel_pt_prep_a_sample(ptq, event, sample); 1716 1717 if (!pt->timeless_decoding) 1718 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc); 1719 1720 sample->ip = ptq->state->from_ip; 1721 sample->addr = ptq->state->to_ip; 1722 sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr); 1723 sample->period = 1; 1724 sample->flags = ptq->flags; 1725 1726 event->sample.header.misc = sample->cpumode; 1727 } 1728 1729 static int intel_pt_inject_event(union perf_event *event, 1730 struct perf_sample *sample, u64 type) 1731 { 1732 event->header.size = perf_event__sample_event_size(sample, type, 0); 1733 return perf_event__synthesize_sample(event, type, 0, sample); 1734 } 1735 1736 static inline int intel_pt_opt_inject(struct intel_pt *pt, 1737 union perf_event *event, 1738 struct perf_sample *sample, u64 type) 1739 { 1740 if (!pt->synth_opts.inject) 1741 return 0; 1742 1743 return intel_pt_inject_event(event, sample, type); 1744 } 1745 1746 static int intel_pt_deliver_synth_event(struct intel_pt *pt, 1747 union perf_event *event, 1748 struct perf_sample *sample, u64 type) 1749 { 1750 int ret; 1751 1752 ret = intel_pt_opt_inject(pt, event, sample, type); 1753 if (ret) 1754 return ret; 1755 1756 ret = perf_session__deliver_synth_event(pt->session, event, sample); 1757 if (ret) 1758 pr_err("Intel PT: failed to deliver event, error %d\n", ret); 1759 1760 return ret; 1761 } 1762 1763 static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq) 1764 { 1765 struct intel_pt *pt = ptq->pt; 1766 union perf_event *event = ptq->event_buf; 1767 struct perf_sample sample = { .ip = 0, }; 1768 struct dummy_branch_stack { 1769 u64 nr; 1770 u64 hw_idx; 1771 struct branch_entry entries; 1772 } dummy_bs; 1773 1774 if (pt->branches_filter && !(pt->branches_filter & ptq->flags)) 1775 return 0; 1776 1777 if (intel_pt_skip_event(pt)) 1778 return 0; 1779 1780 intel_pt_prep_b_sample(pt, ptq, event, &sample); 1781 1782 sample.id = ptq->pt->branches_id; 1783 sample.stream_id = ptq->pt->branches_id; 1784 1785 /* 1786 * perf report cannot handle events without a branch stack when using 1787 * SORT_MODE__BRANCH so make a dummy one. 
1788 */ 1789 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) { 1790 dummy_bs = (struct dummy_branch_stack){ 1791 .nr = 1, 1792 .hw_idx = -1ULL, 1793 .entries = { 1794 .from = sample.ip, 1795 .to = sample.addr, 1796 }, 1797 }; 1798 sample.branch_stack = (struct branch_stack *)&dummy_bs; 1799 } 1800 1801 if (ptq->sample_ipc) 1802 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt; 1803 if (sample.cyc_cnt) { 1804 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt; 1805 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt; 1806 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt; 1807 } 1808 1809 return intel_pt_deliver_synth_event(pt, event, &sample, 1810 pt->branches_sample_type); 1811 } 1812 1813 static void intel_pt_prep_sample(struct intel_pt *pt, 1814 struct intel_pt_queue *ptq, 1815 union perf_event *event, 1816 struct perf_sample *sample) 1817 { 1818 intel_pt_prep_b_sample(pt, ptq, event, sample); 1819 1820 if (pt->synth_opts.callchain) { 1821 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain, 1822 pt->synth_opts.callchain_sz + 1, 1823 sample->ip, pt->kernel_start); 1824 sample->callchain = ptq->chain; 1825 } 1826 1827 if (pt->synth_opts.last_branch) { 1828 thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch, 1829 pt->br_stack_sz); 1830 sample->branch_stack = ptq->last_branch; 1831 } 1832 } 1833 1834 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq) 1835 { 1836 struct intel_pt *pt = ptq->pt; 1837 union perf_event *event = ptq->event_buf; 1838 struct perf_sample sample = { .ip = 0, }; 1839 1840 if (intel_pt_skip_event(pt)) 1841 return 0; 1842 1843 intel_pt_prep_sample(pt, ptq, event, &sample); 1844 1845 sample.id = ptq->pt->instructions_id; 1846 sample.stream_id = ptq->pt->instructions_id; 1847 if (pt->synth_opts.quick) 1848 sample.period = 1; 1849 else 1850 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt; 1851 1852 if (ptq->sample_ipc) 1853 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt; 1854 if (sample.cyc_cnt) { 1855 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt; 1856 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt; 1857 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt; 1858 } 1859 1860 ptq->last_insn_cnt = ptq->state->tot_insn_cnt; 1861 1862 return intel_pt_deliver_synth_event(pt, event, &sample, 1863 pt->instructions_sample_type); 1864 } 1865 1866 static int intel_pt_synth_cycle_sample(struct intel_pt_queue *ptq) 1867 { 1868 struct intel_pt *pt = ptq->pt; 1869 union perf_event *event = ptq->event_buf; 1870 struct perf_sample sample = { .ip = 0, }; 1871 u64 period = 0; 1872 1873 if (ptq->sample_ipc) 1874 period = ptq->ipc_cyc_cnt - ptq->last_cy_cyc_cnt; 1875 1876 if (!period || intel_pt_skip_event(pt)) 1877 return 0; 1878 1879 intel_pt_prep_sample(pt, ptq, event, &sample); 1880 1881 sample.id = ptq->pt->cycles_id; 1882 sample.stream_id = ptq->pt->cycles_id; 1883 sample.period = period; 1884 1885 sample.cyc_cnt = period; 1886 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_cy_insn_cnt; 1887 ptq->last_cy_insn_cnt = ptq->ipc_insn_cnt; 1888 ptq->last_cy_cyc_cnt = ptq->ipc_cyc_cnt; 1889 1890 return intel_pt_deliver_synth_event(pt, event, &sample, pt->cycles_sample_type); 1891 } 1892 1893 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq) 1894 { 1895 struct intel_pt *pt = ptq->pt; 1896 union perf_event *event = ptq->event_buf; 1897 struct perf_sample sample = { .ip = 0, }; 1898 1899 if (intel_pt_skip_event(pt)) 1900 return 0; 1901 1902 intel_pt_prep_sample(pt, ptq, event, &sample); 1903 1904 
sample.id = ptq->pt->transactions_id; 1905 sample.stream_id = ptq->pt->transactions_id; 1906 1907 return intel_pt_deliver_synth_event(pt, event, &sample, 1908 pt->transactions_sample_type); 1909 } 1910 1911 static void intel_pt_prep_p_sample(struct intel_pt *pt, 1912 struct intel_pt_queue *ptq, 1913 union perf_event *event, 1914 struct perf_sample *sample) 1915 { 1916 intel_pt_prep_sample(pt, ptq, event, sample); 1917 1918 /* 1919 * Zero IP is used to mean "trace start" but that is not the case for 1920 * power or PTWRITE events with no IP, so clear the flags. 1921 */ 1922 if (!sample->ip) 1923 sample->flags = 0; 1924 } 1925 1926 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq) 1927 { 1928 struct intel_pt *pt = ptq->pt; 1929 union perf_event *event = ptq->event_buf; 1930 struct perf_sample sample = { .ip = 0, }; 1931 struct perf_synth_intel_ptwrite raw; 1932 1933 if (intel_pt_skip_event(pt)) 1934 return 0; 1935 1936 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1937 1938 sample.id = ptq->pt->ptwrites_id; 1939 sample.stream_id = ptq->pt->ptwrites_id; 1940 1941 raw.flags = 0; 1942 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); 1943 raw.payload = cpu_to_le64(ptq->state->ptw_payload); 1944 1945 sample.raw_size = perf_synth__raw_size(raw); 1946 sample.raw_data = perf_synth__raw_data(&raw); 1947 1948 return intel_pt_deliver_synth_event(pt, event, &sample, 1949 pt->ptwrites_sample_type); 1950 } 1951 1952 static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq) 1953 { 1954 struct intel_pt *pt = ptq->pt; 1955 union perf_event *event = ptq->event_buf; 1956 struct perf_sample sample = { .ip = 0, }; 1957 struct perf_synth_intel_cbr raw; 1958 u32 flags; 1959 1960 if (intel_pt_skip_cbr_event(pt)) 1961 return 0; 1962 1963 ptq->cbr_seen = ptq->state->cbr; 1964 1965 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1966 1967 sample.id = ptq->pt->cbr_id; 1968 sample.stream_id = ptq->pt->cbr_id; 1969 1970 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16); 1971 raw.flags = cpu_to_le32(flags); 1972 raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz); 1973 raw.reserved3 = 0; 1974 1975 sample.raw_size = perf_synth__raw_size(raw); 1976 sample.raw_data = perf_synth__raw_data(&raw); 1977 1978 return intel_pt_deliver_synth_event(pt, event, &sample, 1979 pt->pwr_events_sample_type); 1980 } 1981 1982 static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq) 1983 { 1984 struct intel_pt *pt = ptq->pt; 1985 union perf_event *event = ptq->event_buf; 1986 struct perf_sample sample = { .ip = 0, }; 1987 struct perf_synth_intel_psb raw; 1988 1989 if (intel_pt_skip_event(pt)) 1990 return 0; 1991 1992 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1993 1994 sample.id = ptq->pt->psb_id; 1995 sample.stream_id = ptq->pt->psb_id; 1996 sample.flags = 0; 1997 1998 raw.reserved = 0; 1999 raw.offset = ptq->state->psb_offset; 2000 2001 sample.raw_size = perf_synth__raw_size(raw); 2002 sample.raw_data = perf_synth__raw_data(&raw); 2003 2004 return intel_pt_deliver_synth_event(pt, event, &sample, 2005 pt->pwr_events_sample_type); 2006 } 2007 2008 static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq) 2009 { 2010 struct intel_pt *pt = ptq->pt; 2011 union perf_event *event = ptq->event_buf; 2012 struct perf_sample sample = { .ip = 0, }; 2013 struct perf_synth_intel_mwait raw; 2014 2015 if (intel_pt_skip_event(pt)) 2016 return 0; 2017 2018 intel_pt_prep_p_sample(pt, ptq, event, &sample); 2019 2020 sample.id = ptq->pt->mwait_id; 2021 sample.stream_id = ptq->pt->mwait_id; 
2022 2023 raw.reserved = 0; 2024 raw.payload = cpu_to_le64(ptq->state->mwait_payload); 2025 2026 sample.raw_size = perf_synth__raw_size(raw); 2027 sample.raw_data = perf_synth__raw_data(&raw); 2028 2029 return intel_pt_deliver_synth_event(pt, event, &sample, 2030 pt->pwr_events_sample_type); 2031 } 2032 2033 static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq) 2034 { 2035 struct intel_pt *pt = ptq->pt; 2036 union perf_event *event = ptq->event_buf; 2037 struct perf_sample sample = { .ip = 0, }; 2038 struct perf_synth_intel_pwre raw; 2039 2040 if (intel_pt_skip_event(pt)) 2041 return 0; 2042 2043 intel_pt_prep_p_sample(pt, ptq, event, &sample); 2044 2045 sample.id = ptq->pt->pwre_id; 2046 sample.stream_id = ptq->pt->pwre_id; 2047 2048 raw.reserved = 0; 2049 raw.payload = cpu_to_le64(ptq->state->pwre_payload); 2050 2051 sample.raw_size = perf_synth__raw_size(raw); 2052 sample.raw_data = perf_synth__raw_data(&raw); 2053 2054 return intel_pt_deliver_synth_event(pt, event, &sample, 2055 pt->pwr_events_sample_type); 2056 } 2057 2058 static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq) 2059 { 2060 struct intel_pt *pt = ptq->pt; 2061 union perf_event *event = ptq->event_buf; 2062 struct perf_sample sample = { .ip = 0, }; 2063 struct perf_synth_intel_exstop raw; 2064 2065 if (intel_pt_skip_event(pt)) 2066 return 0; 2067 2068 intel_pt_prep_p_sample(pt, ptq, event, &sample); 2069 2070 sample.id = ptq->pt->exstop_id; 2071 sample.stream_id = ptq->pt->exstop_id; 2072 2073 raw.flags = 0; 2074 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); 2075 2076 sample.raw_size = perf_synth__raw_size(raw); 2077 sample.raw_data = perf_synth__raw_data(&raw); 2078 2079 return intel_pt_deliver_synth_event(pt, event, &sample, 2080 pt->pwr_events_sample_type); 2081 } 2082 2083 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq) 2084 { 2085 struct intel_pt *pt = ptq->pt; 2086 union perf_event *event = ptq->event_buf; 2087 struct perf_sample sample = { .ip = 0, }; 2088 struct perf_synth_intel_pwrx raw; 2089 2090 if (intel_pt_skip_event(pt)) 2091 return 0; 2092 2093 intel_pt_prep_p_sample(pt, ptq, event, &sample); 2094 2095 sample.id = ptq->pt->pwrx_id; 2096 sample.stream_id = ptq->pt->pwrx_id; 2097 2098 raw.reserved = 0; 2099 raw.payload = cpu_to_le64(ptq->state->pwrx_payload); 2100 2101 sample.raw_size = perf_synth__raw_size(raw); 2102 sample.raw_data = perf_synth__raw_data(&raw); 2103 2104 return intel_pt_deliver_synth_event(pt, event, &sample, 2105 pt->pwr_events_sample_type); 2106 } 2107 2108 /* 2109 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer 2110 * intel_pt_add_gp_regs(). 
2111 */ 2112 static const int pebs_gp_regs[] = { 2113 [PERF_REG_X86_FLAGS] = 1, 2114 [PERF_REG_X86_IP] = 2, 2115 [PERF_REG_X86_AX] = 3, 2116 [PERF_REG_X86_CX] = 4, 2117 [PERF_REG_X86_DX] = 5, 2118 [PERF_REG_X86_BX] = 6, 2119 [PERF_REG_X86_SP] = 7, 2120 [PERF_REG_X86_BP] = 8, 2121 [PERF_REG_X86_SI] = 9, 2122 [PERF_REG_X86_DI] = 10, 2123 [PERF_REG_X86_R8] = 11, 2124 [PERF_REG_X86_R9] = 12, 2125 [PERF_REG_X86_R10] = 13, 2126 [PERF_REG_X86_R11] = 14, 2127 [PERF_REG_X86_R12] = 15, 2128 [PERF_REG_X86_R13] = 16, 2129 [PERF_REG_X86_R14] = 17, 2130 [PERF_REG_X86_R15] = 18, 2131 }; 2132 2133 static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos, 2134 const struct intel_pt_blk_items *items, 2135 u64 regs_mask) 2136 { 2137 const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS]; 2138 u32 mask = items->mask[INTEL_PT_GP_REGS_POS]; 2139 u32 bit; 2140 int i; 2141 2142 for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) { 2143 /* Get the PEBS gp_regs array index */ 2144 int n = pebs_gp_regs[i] - 1; 2145 2146 if (n < 0) 2147 continue; 2148 /* 2149 * Add only registers that were requested (i.e. 'regs_mask') and 2150 * that were provided (i.e. 'mask'), and update the resulting 2151 * mask (i.e. 'intr_regs->mask') accordingly. 2152 */ 2153 if (mask & 1 << n && regs_mask & bit) { 2154 intr_regs->mask |= bit; 2155 *pos++ = gp_regs[n]; 2156 } 2157 } 2158 2159 return pos; 2160 } 2161 2162 #ifndef PERF_REG_X86_XMM0 2163 #define PERF_REG_X86_XMM0 32 2164 #endif 2165 2166 static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos, 2167 const struct intel_pt_blk_items *items, 2168 u64 regs_mask) 2169 { 2170 u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0); 2171 const u64 *xmm = items->xmm; 2172 2173 /* 2174 * If there are any XMM registers, then there should be all of them. 2175 * Nevertheless, follow the logic to add only registers that were 2176 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'), 2177 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly. 
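* Bit 0 of 'mask' corresponds to XMM0, bit 1 to XMM1, and so on, hence the shift by PERF_REG_X86_XMM0.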
2178 */ 2179 intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0; 2180 2181 for (; mask; mask >>= 1, xmm++) { 2182 if (mask & 1) 2183 *pos++ = *xmm; 2184 } 2185 } 2186 2187 #define LBR_INFO_MISPRED (1ULL << 63) 2188 #define LBR_INFO_IN_TX (1ULL << 62) 2189 #define LBR_INFO_ABORT (1ULL << 61) 2190 #define LBR_INFO_CYCLES 0xffff 2191 2192 /* Refer kernel's intel_pmu_store_pebs_lbrs() */ 2193 static u64 intel_pt_lbr_flags(u64 info) 2194 { 2195 union { 2196 struct branch_flags flags; 2197 u64 result; 2198 } u; 2199 2200 u.result = 0; 2201 u.flags.mispred = !!(info & LBR_INFO_MISPRED); 2202 u.flags.predicted = !(info & LBR_INFO_MISPRED); 2203 u.flags.in_tx = !!(info & LBR_INFO_IN_TX); 2204 u.flags.abort = !!(info & LBR_INFO_ABORT); 2205 u.flags.cycles = info & LBR_INFO_CYCLES; 2206 2207 return u.result; 2208 } 2209 2210 static void intel_pt_add_lbrs(struct branch_stack *br_stack, 2211 const struct intel_pt_blk_items *items) 2212 { 2213 u64 *to; 2214 int i; 2215 2216 br_stack->nr = 0; 2217 2218 to = &br_stack->entries[0].from; 2219 2220 for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) { 2221 u32 mask = items->mask[i]; 2222 const u64 *from = items->val[i]; 2223 2224 for (; mask; mask >>= 3, from += 3) { 2225 if ((mask & 7) == 7) { 2226 *to++ = from[0]; 2227 *to++ = from[1]; 2228 *to++ = intel_pt_lbr_flags(from[2]); 2229 br_stack->nr += 1; 2230 } 2231 } 2232 } 2233 } 2234 2235 static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id) 2236 { 2237 const struct intel_pt_blk_items *items = &ptq->state->items; 2238 struct perf_sample sample = { .ip = 0, }; 2239 union perf_event *event = ptq->event_buf; 2240 struct intel_pt *pt = ptq->pt; 2241 u64 sample_type = evsel->core.attr.sample_type; 2242 u8 cpumode; 2243 u64 regs[8 * sizeof(sample.intr_regs.mask)]; 2244 2245 if (intel_pt_skip_event(pt)) 2246 return 0; 2247 2248 intel_pt_prep_a_sample(ptq, event, &sample); 2249 2250 sample.id = id; 2251 sample.stream_id = id; 2252 2253 if (!evsel->core.attr.freq) 2254 sample.period = evsel->core.attr.sample_period; 2255 2256 /* No support for non-zero CS base */ 2257 if (items->has_ip) 2258 sample.ip = items->ip; 2259 else if (items->has_rip) 2260 sample.ip = items->rip; 2261 else 2262 sample.ip = ptq->state->from_ip; 2263 2264 cpumode = intel_pt_cpumode(ptq, sample.ip, 0); 2265 2266 event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP; 2267 2268 sample.cpumode = cpumode; 2269 2270 if (sample_type & PERF_SAMPLE_TIME) { 2271 u64 timestamp = 0; 2272 2273 if (items->has_timestamp) 2274 timestamp = items->timestamp; 2275 else if (!pt->timeless_decoding) 2276 timestamp = ptq->timestamp; 2277 if (timestamp) 2278 sample.time = tsc_to_perf_time(timestamp, &pt->tc); 2279 } 2280 2281 if (sample_type & PERF_SAMPLE_CALLCHAIN && 2282 pt->synth_opts.callchain) { 2283 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain, 2284 pt->synth_opts.callchain_sz, sample.ip, 2285 pt->kernel_start); 2286 sample.callchain = ptq->chain; 2287 } 2288 2289 if (sample_type & PERF_SAMPLE_REGS_INTR && 2290 (items->mask[INTEL_PT_GP_REGS_POS] || 2291 items->mask[INTEL_PT_XMM_POS])) { 2292 u64 regs_mask = evsel->core.attr.sample_regs_intr; 2293 u64 *pos; 2294 2295 sample.intr_regs.abi = items->is_32_bit ? 
2296 PERF_SAMPLE_REGS_ABI_32 : 2297 PERF_SAMPLE_REGS_ABI_64; 2298 sample.intr_regs.regs = regs; 2299 2300 pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask); 2301 2302 intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask); 2303 } 2304 2305 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { 2306 if (items->mask[INTEL_PT_LBR_0_POS] || 2307 items->mask[INTEL_PT_LBR_1_POS] || 2308 items->mask[INTEL_PT_LBR_2_POS]) { 2309 intel_pt_add_lbrs(ptq->last_branch, items); 2310 } else if (pt->synth_opts.last_branch) { 2311 thread_stack__br_sample(ptq->thread, ptq->cpu, 2312 ptq->last_branch, 2313 pt->br_stack_sz); 2314 } else { 2315 ptq->last_branch->nr = 0; 2316 } 2317 sample.branch_stack = ptq->last_branch; 2318 } 2319 2320 if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address) 2321 sample.addr = items->mem_access_address; 2322 2323 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) { 2324 /* 2325 * Refer kernel's setup_pebs_adaptive_sample_data() and 2326 * intel_hsw_weight(). 2327 */ 2328 if (items->has_mem_access_latency) { 2329 u64 weight = items->mem_access_latency >> 32; 2330 2331 /* 2332 * Starting from SPR, the mem access latency field 2333 * contains both cache latency [47:32] and instruction 2334 * latency [15:0]. The cache latency is the same as the 2335 * mem access latency on previous platforms. 2336 * 2337 * In practice, no memory access could last longer than 4G 2338 * cycles. Use latency >> 32 to distinguish the 2339 * different formats of the mem access latency field. 2340 */ 2341 if (weight > 0) { 2342 sample.weight = weight & 0xffff; 2343 sample.ins_lat = items->mem_access_latency & 0xffff; 2344 } else 2345 sample.weight = items->mem_access_latency; 2346 } 2347 if (!sample.weight && items->has_tsx_aux_info) { 2348 /* Cycles last block */ 2349 sample.weight = (u32)items->tsx_aux_info; 2350 } 2351 } 2352 2353 if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) { 2354 u64 ax = items->has_rax ?
items->rax : 0; 2355 /* Refer kernel's intel_hsw_transaction() */ 2356 u64 txn = (u8)(items->tsx_aux_info >> 32); 2357 2358 /* For RTM XABORTs also log the abort code from AX */ 2359 if (txn & PERF_TXN_TRANSACTION && ax & 1) 2360 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT; 2361 sample.transaction = txn; 2362 } 2363 2364 return intel_pt_deliver_synth_event(pt, event, &sample, sample_type); 2365 } 2366 2367 static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq) 2368 { 2369 struct intel_pt *pt = ptq->pt; 2370 struct evsel *evsel = pt->pebs_evsel; 2371 u64 id = evsel->core.id[0]; 2372 2373 return intel_pt_do_synth_pebs_sample(ptq, evsel, id); 2374 } 2375 2376 static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq) 2377 { 2378 const struct intel_pt_blk_items *items = &ptq->state->items; 2379 struct intel_pt_pebs_event *pe; 2380 struct intel_pt *pt = ptq->pt; 2381 int err = -EINVAL; 2382 int hw_id; 2383 2384 if (!items->has_applicable_counters || !items->applicable_counters) { 2385 if (!pt->single_pebs) 2386 pr_err("PEBS-via-PT record with no applicable_counters\n"); 2387 return intel_pt_synth_single_pebs_sample(ptq); 2388 } 2389 2390 for_each_set_bit(hw_id, (unsigned long *)&items->applicable_counters, INTEL_PT_MAX_PEBS) { 2391 pe = &ptq->pebs[hw_id]; 2392 if (!pe->evsel) { 2393 if (!pt->single_pebs) 2394 pr_err("PEBS-via-PT record with no matching event, hw_id %d\n", 2395 hw_id); 2396 return intel_pt_synth_single_pebs_sample(ptq); 2397 } 2398 err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id); 2399 if (err) 2400 return err; 2401 } 2402 2403 return err; 2404 } 2405 2406 static int intel_pt_synth_events_sample(struct intel_pt_queue *ptq) 2407 { 2408 struct intel_pt *pt = ptq->pt; 2409 union perf_event *event = ptq->event_buf; 2410 struct perf_sample sample = { .ip = 0, }; 2411 struct { 2412 struct perf_synth_intel_evt cfe; 2413 struct perf_synth_intel_evd evd[INTEL_PT_MAX_EVDS]; 2414 } raw; 2415 int i; 2416 2417 if (intel_pt_skip_event(pt)) 2418 return 0; 2419 2420 intel_pt_prep_p_sample(pt, ptq, event, &sample); 2421 2422 sample.id = ptq->pt->evt_id; 2423 sample.stream_id = ptq->pt->evt_id; 2424 2425 raw.cfe.type = ptq->state->cfe_type; 2426 raw.cfe.reserved = 0; 2427 raw.cfe.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); 2428 raw.cfe.vector = ptq->state->cfe_vector; 2429 raw.cfe.evd_cnt = ptq->state->evd_cnt; 2430 2431 for (i = 0; i < ptq->state->evd_cnt; i++) { 2432 raw.evd[i].et = 0; 2433 raw.evd[i].evd_type = ptq->state->evd[i].type; 2434 raw.evd[i].payload = ptq->state->evd[i].payload; 2435 } 2436 2437 sample.raw_size = perf_synth__raw_size(raw) + 2438 ptq->state->evd_cnt * sizeof(struct perf_synth_intel_evd); 2439 sample.raw_data = perf_synth__raw_data(&raw); 2440 2441 return intel_pt_deliver_synth_event(pt, event, &sample, 2442 pt->evt_sample_type); 2443 } 2444 2445 static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq) 2446 { 2447 struct intel_pt *pt = ptq->pt; 2448 union perf_event *event = ptq->event_buf; 2449 struct perf_sample sample = { .ip = 0, }; 2450 struct perf_synth_intel_iflag_chg raw; 2451 2452 if (intel_pt_skip_event(pt)) 2453 return 0; 2454 2455 intel_pt_prep_p_sample(pt, ptq, event, &sample); 2456 2457 sample.id = ptq->pt->iflag_chg_id; 2458 sample.stream_id = ptq->pt->iflag_chg_id; 2459 2460 raw.flags = 0; 2461 raw.iflag = ptq->state->to_iflag; 2462 2463 if (ptq->state->type & INTEL_PT_BRANCH) { 2464 raw.via_branch = 1; 2465 raw.branch_ip = ptq->state->to_ip; 2466 } else { 2467 sample.addr = 0; 2468 } 2469 
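/* Propagate the decoder's flags for this sample (e.g. branch type) so that tools such as perf script can display them */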
sample.flags = ptq->flags; 2470 2471 sample.raw_size = perf_synth__raw_size(raw); 2472 sample.raw_data = perf_synth__raw_data(&raw); 2473 2474 return intel_pt_deliver_synth_event(pt, event, &sample, 2475 pt->iflag_chg_sample_type); 2476 } 2477 2478 static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu, 2479 pid_t pid, pid_t tid, u64 ip, u64 timestamp, 2480 pid_t machine_pid, int vcpu) 2481 { 2482 bool dump_log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR; 2483 bool log_on_stdout = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT; 2484 union perf_event event; 2485 char msg[MAX_AUXTRACE_ERROR_MSG]; 2486 int err; 2487 2488 if (pt->synth_opts.error_minus_flags) { 2489 if (code == INTEL_PT_ERR_OVR && 2490 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW) 2491 return 0; 2492 if (code == INTEL_PT_ERR_LOST && 2493 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST) 2494 return 0; 2495 } 2496 2497 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG); 2498 2499 auxtrace_synth_guest_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, 2500 code, cpu, pid, tid, ip, msg, timestamp, 2501 machine_pid, vcpu); 2502 2503 if (intel_pt_enable_logging && !log_on_stdout) { 2504 FILE *fp = intel_pt_log_fp(); 2505 2506 if (fp) 2507 perf_event__fprintf_auxtrace_error(&event, fp); 2508 } 2509 2510 if (code != INTEL_PT_ERR_LOST && dump_log_on_error) 2511 intel_pt_log_dump_buf(); 2512 2513 err = perf_session__deliver_synth_event(pt->session, &event, NULL); 2514 if (err) 2515 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n", 2516 err); 2517 2518 return err; 2519 } 2520 2521 static int intel_ptq_synth_error(struct intel_pt_queue *ptq, 2522 const struct intel_pt_state *state) 2523 { 2524 struct intel_pt *pt = ptq->pt; 2525 u64 tm = ptq->timestamp; 2526 pid_t machine_pid = 0; 2527 pid_t pid = ptq->pid; 2528 pid_t tid = ptq->tid; 2529 int vcpu = -1; 2530 2531 tm = pt->timeless_decoding ? 
0 : tsc_to_perf_time(tm, &pt->tc); 2532 2533 if (pt->have_guest_sideband && state->from_nr) { 2534 machine_pid = ptq->guest_machine_pid; 2535 vcpu = ptq->vcpu; 2536 pid = ptq->guest_pid; 2537 tid = ptq->guest_tid; 2538 } 2539 2540 return intel_pt_synth_error(pt, state->err, ptq->cpu, pid, tid, 2541 state->from_ip, tm, machine_pid, vcpu); 2542 } 2543 2544 static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq) 2545 { 2546 struct auxtrace_queue *queue; 2547 pid_t tid = ptq->next_tid; 2548 int err; 2549 2550 if (tid == -1) 2551 return 0; 2552 2553 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid); 2554 2555 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid); 2556 2557 queue = &pt->queues.queue_array[ptq->queue_nr]; 2558 intel_pt_set_pid_tid_cpu(pt, queue); 2559 2560 ptq->next_tid = -1; 2561 2562 return err; 2563 } 2564 2565 static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip) 2566 { 2567 struct intel_pt *pt = ptq->pt; 2568 2569 return ip == pt->switch_ip && 2570 (ptq->flags & PERF_IP_FLAG_BRANCH) && 2571 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC | 2572 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT)); 2573 } 2574 2575 #define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \ 2576 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT) 2577 2578 static int intel_pt_sample(struct intel_pt_queue *ptq) 2579 { 2580 const struct intel_pt_state *state = ptq->state; 2581 struct intel_pt *pt = ptq->pt; 2582 int err; 2583 2584 if (!ptq->have_sample) 2585 return 0; 2586 2587 ptq->have_sample = false; 2588 2589 if (pt->synth_opts.approx_ipc) { 2590 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt; 2591 ptq->ipc_cyc_cnt = ptq->state->cycles; 2592 ptq->sample_ipc = true; 2593 } else { 2594 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt; 2595 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt; 2596 ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC; 2597 } 2598 2599 /* Ensure guest code maps are set up */ 2600 if (symbol_conf.guest_code && (state->from_nr || state->to_nr)) 2601 intel_pt_get_guest(ptq); 2602 2603 /* 2604 * Do PEBS first to allow for the possibility that the PEBS timestamp 2605 * precedes the current timestamp. 
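* A PEBS record can carry its own TSC value, which the synthesized sample uses in preference to the decoder's current timestamp.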
2606 */ 2607 if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) { 2608 err = intel_pt_synth_pebs_sample(ptq); 2609 if (err) 2610 return err; 2611 } 2612 2613 if (pt->synth_opts.intr_events) { 2614 if (state->type & INTEL_PT_EVT) { 2615 err = intel_pt_synth_events_sample(ptq); 2616 if (err) 2617 return err; 2618 } 2619 if (state->type & INTEL_PT_IFLAG_CHG) { 2620 err = intel_pt_synth_iflag_chg_sample(ptq); 2621 if (err) 2622 return err; 2623 } 2624 } 2625 2626 if (pt->sample_pwr_events) { 2627 if (state->type & INTEL_PT_PSB_EVT) { 2628 err = intel_pt_synth_psb_sample(ptq); 2629 if (err) 2630 return err; 2631 } 2632 if (ptq->state->cbr != ptq->cbr_seen) { 2633 err = intel_pt_synth_cbr_sample(ptq); 2634 if (err) 2635 return err; 2636 } 2637 if (state->type & INTEL_PT_PWR_EVT) { 2638 if (state->type & INTEL_PT_MWAIT_OP) { 2639 err = intel_pt_synth_mwait_sample(ptq); 2640 if (err) 2641 return err; 2642 } 2643 if (state->type & INTEL_PT_PWR_ENTRY) { 2644 err = intel_pt_synth_pwre_sample(ptq); 2645 if (err) 2646 return err; 2647 } 2648 if (state->type & INTEL_PT_EX_STOP) { 2649 err = intel_pt_synth_exstop_sample(ptq); 2650 if (err) 2651 return err; 2652 } 2653 if (state->type & INTEL_PT_PWR_EXIT) { 2654 err = intel_pt_synth_pwrx_sample(ptq); 2655 if (err) 2656 return err; 2657 } 2658 } 2659 } 2660 2661 if (state->type & INTEL_PT_INSTRUCTION) { 2662 if (pt->sample_instructions) { 2663 err = intel_pt_synth_instruction_sample(ptq); 2664 if (err) 2665 return err; 2666 } 2667 if (pt->sample_cycles) { 2668 err = intel_pt_synth_cycle_sample(ptq); 2669 if (err) 2670 return err; 2671 } 2672 } 2673 2674 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) { 2675 err = intel_pt_synth_transaction_sample(ptq); 2676 if (err) 2677 return err; 2678 } 2679 2680 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) { 2681 err = intel_pt_synth_ptwrite_sample(ptq); 2682 if (err) 2683 return err; 2684 } 2685 2686 if (!(state->type & INTEL_PT_BRANCH)) 2687 return 0; 2688 2689 if (pt->use_thread_stack) { 2690 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags, 2691 state->from_ip, state->to_ip, ptq->insn_len, 2692 state->trace_nr, pt->callstack, 2693 pt->br_stack_sz_plus, 2694 pt->mispred_all); 2695 } else { 2696 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr); 2697 } 2698 2699 if (pt->sample_branches) { 2700 if (state->from_nr != state->to_nr && 2701 state->from_ip && state->to_ip) { 2702 struct intel_pt_state *st = (struct intel_pt_state *)state; 2703 u64 to_ip = st->to_ip; 2704 u64 from_ip = st->from_ip; 2705 2706 /* 2707 * perf cannot handle having different machines for ip 2708 * and addr, so create 2 branches. 
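* One branch ends where tracing leaves the first machine (to_ip zeroed) and the other begins where it enters the second (from_ip zeroed).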
2709 */ 2710 st->to_ip = 0; 2711 err = intel_pt_synth_branch_sample(ptq); 2712 if (err) 2713 return err; 2714 st->from_ip = 0; 2715 st->to_ip = to_ip; 2716 err = intel_pt_synth_branch_sample(ptq); 2717 st->from_ip = from_ip; 2718 } else { 2719 err = intel_pt_synth_branch_sample(ptq); 2720 } 2721 if (err) 2722 return err; 2723 } 2724 2725 if (!ptq->sync_switch) 2726 return 0; 2727 2728 if (intel_pt_is_switch_ip(ptq, state->to_ip)) { 2729 switch (ptq->switch_state) { 2730 case INTEL_PT_SS_NOT_TRACING: 2731 case INTEL_PT_SS_UNKNOWN: 2732 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 2733 err = intel_pt_next_tid(pt, ptq); 2734 if (err) 2735 return err; 2736 ptq->switch_state = INTEL_PT_SS_TRACING; 2737 break; 2738 default: 2739 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT; 2740 return 1; 2741 } 2742 } else if (!state->to_ip) { 2743 ptq->switch_state = INTEL_PT_SS_NOT_TRACING; 2744 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) { 2745 ptq->switch_state = INTEL_PT_SS_UNKNOWN; 2746 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN && 2747 state->to_ip == pt->ptss_ip && 2748 (ptq->flags & PERF_IP_FLAG_CALL)) { 2749 ptq->switch_state = INTEL_PT_SS_TRACING; 2750 } 2751 2752 return 0; 2753 } 2754 2755 static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip) 2756 { 2757 struct machine *machine = pt->machine; 2758 struct map *map; 2759 struct symbol *sym, *start; 2760 u64 ip, switch_ip = 0; 2761 const char *ptss; 2762 2763 if (ptss_ip) 2764 *ptss_ip = 0; 2765 2766 map = machine__kernel_map(machine); 2767 if (!map) 2768 return 0; 2769 2770 if (map__load(map)) 2771 return 0; 2772 2773 start = dso__first_symbol(map__dso(map)); 2774 2775 for (sym = start; sym; sym = dso__next_symbol(sym)) { 2776 if (sym->binding == STB_GLOBAL && 2777 !strcmp(sym->name, "__switch_to")) { 2778 ip = map__unmap_ip(map, sym->start); 2779 if (ip >= map__start(map) && ip < map__end(map)) { 2780 switch_ip = ip; 2781 break; 2782 } 2783 } 2784 } 2785 2786 if (!switch_ip || !ptss_ip) 2787 return 0; 2788 2789 if (pt->have_sched_switch == 1) 2790 ptss = "perf_trace_sched_switch"; 2791 else 2792 ptss = "__perf_event_task_sched_out"; 2793 2794 for (sym = start; sym; sym = dso__next_symbol(sym)) { 2795 if (!strcmp(sym->name, ptss)) { 2796 ip = map__unmap_ip(map, sym->start); 2797 if (ip >= map__start(map) && ip < map__end(map)) { 2798 *ptss_ip = ip; 2799 break; 2800 } 2801 } 2802 } 2803 2804 return switch_ip; 2805 } 2806 2807 static void intel_pt_enable_sync_switch(struct intel_pt *pt) 2808 { 2809 unsigned int i; 2810 2811 if (pt->sync_switch_not_supported) 2812 return; 2813 2814 pt->sync_switch = true; 2815 2816 for (i = 0; i < pt->queues.nr_queues; i++) { 2817 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 2818 struct intel_pt_queue *ptq = queue->priv; 2819 2820 if (ptq) 2821 ptq->sync_switch = true; 2822 } 2823 } 2824 2825 static void intel_pt_disable_sync_switch(struct intel_pt *pt) 2826 { 2827 unsigned int i; 2828 2829 pt->sync_switch = false; 2830 2831 for (i = 0; i < pt->queues.nr_queues; i++) { 2832 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 2833 struct intel_pt_queue *ptq = queue->priv; 2834 2835 if (ptq) { 2836 ptq->sync_switch = false; 2837 intel_pt_next_tid(pt, ptq); 2838 } 2839 } 2840 } 2841 2842 /* 2843 * To filter against time ranges, it is only necessary to look at the next start 2844 * or end time. 
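* Each queue therefore tracks a single boundary, sel_timestamp, together with sel_start which says whether that boundary is a range start or a range end.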
2845 */ 2846 static bool intel_pt_next_time(struct intel_pt_queue *ptq) 2847 { 2848 struct intel_pt *pt = ptq->pt; 2849 2850 if (ptq->sel_start) { 2851 /* Next time is an end time */ 2852 ptq->sel_start = false; 2853 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end; 2854 return true; 2855 } else if (ptq->sel_idx + 1 < pt->range_cnt) { 2856 /* Next time is a start time */ 2857 ptq->sel_start = true; 2858 ptq->sel_idx += 1; 2859 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start; 2860 return true; 2861 } 2862 2863 /* No next time */ 2864 return false; 2865 } 2866 2867 static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp) 2868 { 2869 int err; 2870 2871 while (1) { 2872 if (ptq->sel_start) { 2873 if (ptq->timestamp >= ptq->sel_timestamp) { 2874 /* After start time, so consider next time */ 2875 intel_pt_next_time(ptq); 2876 if (!ptq->sel_timestamp) { 2877 /* No end time */ 2878 return 0; 2879 } 2880 /* Check against end time */ 2881 continue; 2882 } 2883 /* Before start time, so fast forward */ 2884 ptq->have_sample = false; 2885 if (ptq->sel_timestamp > *ff_timestamp) { 2886 if (ptq->sync_switch) { 2887 intel_pt_next_tid(ptq->pt, ptq); 2888 ptq->switch_state = INTEL_PT_SS_UNKNOWN; 2889 } 2890 *ff_timestamp = ptq->sel_timestamp; 2891 err = intel_pt_fast_forward(ptq->decoder, 2892 ptq->sel_timestamp); 2893 if (err) 2894 return err; 2895 } 2896 return 0; 2897 } else if (ptq->timestamp > ptq->sel_timestamp) { 2898 /* After end time, so consider next time */ 2899 if (!intel_pt_next_time(ptq)) { 2900 /* No next time range, so stop decoding */ 2901 ptq->have_sample = false; 2902 ptq->switch_state = INTEL_PT_SS_NOT_TRACING; 2903 return 1; 2904 } 2905 /* Check against next start time */ 2906 continue; 2907 } else { 2908 /* Before end time */ 2909 return 0; 2910 } 2911 } 2912 } 2913 2914 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) 2915 { 2916 const struct intel_pt_state *state = ptq->state; 2917 struct intel_pt *pt = ptq->pt; 2918 u64 ff_timestamp = 0; 2919 int err; 2920 2921 if (!pt->kernel_start) { 2922 pt->kernel_start = machine__kernel_start(pt->machine); 2923 if (pt->per_cpu_mmaps && 2924 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) && 2925 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) && 2926 !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) { 2927 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip); 2928 if (pt->switch_ip) { 2929 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n", 2930 pt->switch_ip, pt->ptss_ip); 2931 intel_pt_enable_sync_switch(pt); 2932 } 2933 } 2934 } 2935 2936 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n", 2937 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid); 2938 while (1) { 2939 err = intel_pt_sample(ptq); 2940 if (err) 2941 return err; 2942 2943 state = intel_pt_decode(ptq->decoder); 2944 if (state->err) { 2945 if (state->err == INTEL_PT_ERR_NODATA) 2946 return 1; 2947 if (ptq->sync_switch && 2948 state->from_ip >= pt->kernel_start) { 2949 ptq->sync_switch = false; 2950 intel_pt_next_tid(pt, ptq); 2951 } 2952 ptq->timestamp = state->est_timestamp; 2953 if (pt->synth_opts.errors) { 2954 err = intel_ptq_synth_error(ptq, state); 2955 if (err) 2956 return err; 2957 } 2958 continue; 2959 } 2960 2961 ptq->state = state; 2962 ptq->have_sample = true; 2963 intel_pt_sample_flags(ptq); 2964 2965 /* Use estimated TSC upon return to user space */ 2966 if (pt->est_tsc && 2967 (state->from_ip >= pt->kernel_start || !state->from_ip) && 2968 state->to_ip && state->to_ip < 
pt->kernel_start) { 2969 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n", 2970 state->timestamp, state->est_timestamp); 2971 ptq->timestamp = state->est_timestamp; 2972 /* Use estimated TSC in unknown switch state */ 2973 } else if (ptq->sync_switch && 2974 ptq->switch_state == INTEL_PT_SS_UNKNOWN && 2975 intel_pt_is_switch_ip(ptq, state->to_ip) && 2976 ptq->next_tid == -1) { 2977 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n", 2978 state->timestamp, state->est_timestamp); 2979 ptq->timestamp = state->est_timestamp; 2980 } else if (state->timestamp > ptq->timestamp) { 2981 ptq->timestamp = state->timestamp; 2982 } 2983 2984 if (ptq->sel_timestamp) { 2985 err = intel_pt_time_filter(ptq, &ff_timestamp); 2986 if (err) 2987 return err; 2988 } 2989 2990 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) { 2991 *timestamp = ptq->timestamp; 2992 return 0; 2993 } 2994 } 2995 return 0; 2996 } 2997 2998 static inline int intel_pt_update_queues(struct intel_pt *pt) 2999 { 3000 if (pt->queues.new_data) { 3001 pt->queues.new_data = false; 3002 return intel_pt_setup_queues(pt); 3003 } 3004 return 0; 3005 } 3006 3007 static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp) 3008 { 3009 unsigned int queue_nr; 3010 u64 ts; 3011 int ret; 3012 3013 while (1) { 3014 struct auxtrace_queue *queue; 3015 struct intel_pt_queue *ptq; 3016 3017 if (!pt->heap.heap_cnt) 3018 return 0; 3019 3020 if (pt->heap.heap_array[0].ordinal >= timestamp) 3021 return 0; 3022 3023 queue_nr = pt->heap.heap_array[0].queue_nr; 3024 queue = &pt->queues.queue_array[queue_nr]; 3025 ptq = queue->priv; 3026 3027 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n", 3028 queue_nr, pt->heap.heap_array[0].ordinal, 3029 timestamp); 3030 3031 auxtrace_heap__pop(&pt->heap); 3032 3033 if (pt->heap.heap_cnt) { 3034 ts = pt->heap.heap_array[0].ordinal + 1; 3035 if (ts > timestamp) 3036 ts = timestamp; 3037 } else { 3038 ts = timestamp; 3039 } 3040 3041 intel_pt_set_pid_tid_cpu(pt, queue); 3042 3043 ret = intel_pt_run_decoder(ptq, &ts); 3044 3045 if (ret < 0) { 3046 auxtrace_heap__add(&pt->heap, queue_nr, ts); 3047 return ret; 3048 } 3049 3050 if (!ret) { 3051 ret = auxtrace_heap__add(&pt->heap, queue_nr, ts); 3052 if (ret < 0) 3053 return ret; 3054 } else { 3055 ptq->on_heap = false; 3056 } 3057 } 3058 3059 return 0; 3060 } 3061 3062 static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid, 3063 u64 time_) 3064 { 3065 struct auxtrace_queues *queues = &pt->queues; 3066 unsigned int i; 3067 u64 ts = 0; 3068 3069 for (i = 0; i < queues->nr_queues; i++) { 3070 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 3071 struct intel_pt_queue *ptq = queue->priv; 3072 3073 if (ptq && (tid == -1 || ptq->tid == tid)) { 3074 ptq->time = time_; 3075 intel_pt_set_pid_tid_cpu(pt, queue); 3076 intel_pt_run_decoder(ptq, &ts); 3077 } 3078 } 3079 return 0; 3080 } 3081 3082 static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq, 3083 struct auxtrace_queue *queue, 3084 struct perf_sample *sample) 3085 { 3086 struct machine *m = ptq->pt->machine; 3087 3088 ptq->pid = sample->pid; 3089 ptq->tid = sample->tid; 3090 ptq->cpu = queue->cpu; 3091 3092 intel_pt_log("queue %u cpu %d pid %d tid %d\n", 3093 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid); 3094 3095 thread__zput(ptq->thread); 3096 3097 if (ptq->tid == -1) 3098 return; 3099 3100 if (ptq->pid == -1) { 3101 ptq->thread = machine__find_thread(m, -1, ptq->tid); 3102 if (ptq->thread) 3103 ptq->pid = thread__pid(ptq->thread); 3104 return; 3105 
} 3106 3107 ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid); 3108 } 3109 3110 static int intel_pt_process_timeless_sample(struct intel_pt *pt, 3111 struct perf_sample *sample) 3112 { 3113 struct auxtrace_queue *queue; 3114 struct intel_pt_queue *ptq; 3115 u64 ts = 0; 3116 3117 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session); 3118 if (!queue) 3119 return -EINVAL; 3120 3121 ptq = queue->priv; 3122 if (!ptq) 3123 return 0; 3124 3125 ptq->stop = false; 3126 ptq->time = sample->time; 3127 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample); 3128 intel_pt_run_decoder(ptq, &ts); 3129 return 0; 3130 } 3131 3132 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample) 3133 { 3134 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu, 3135 sample->pid, sample->tid, 0, sample->time, 3136 sample->machine_pid, sample->vcpu); 3137 } 3138 3139 static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu) 3140 { 3141 unsigned i, j; 3142 3143 if (cpu < 0 || !pt->queues.nr_queues) 3144 return NULL; 3145 3146 if ((unsigned)cpu >= pt->queues.nr_queues) 3147 i = pt->queues.nr_queues - 1; 3148 else 3149 i = cpu; 3150 3151 if (pt->queues.queue_array[i].cpu == cpu) 3152 return pt->queues.queue_array[i].priv; 3153 3154 for (j = 0; i > 0; j++) { 3155 if (pt->queues.queue_array[--i].cpu == cpu) 3156 return pt->queues.queue_array[i].priv; 3157 } 3158 3159 for (; j < pt->queues.nr_queues; j++) { 3160 if (pt->queues.queue_array[j].cpu == cpu) 3161 return pt->queues.queue_array[j].priv; 3162 } 3163 3164 return NULL; 3165 } 3166 3167 static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid, 3168 u64 timestamp) 3169 { 3170 struct intel_pt_queue *ptq; 3171 int err; 3172 3173 if (!pt->sync_switch) 3174 return 1; 3175 3176 ptq = intel_pt_cpu_to_ptq(pt, cpu); 3177 if (!ptq || !ptq->sync_switch) 3178 return 1; 3179 3180 switch (ptq->switch_state) { 3181 case INTEL_PT_SS_NOT_TRACING: 3182 break; 3183 case INTEL_PT_SS_UNKNOWN: 3184 case INTEL_PT_SS_TRACING: 3185 ptq->next_tid = tid; 3186 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP; 3187 return 0; 3188 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT: 3189 if (!ptq->on_heap) { 3190 ptq->timestamp = perf_time_to_tsc(timestamp, 3191 &pt->tc); 3192 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr, 3193 ptq->timestamp); 3194 if (err) 3195 return err; 3196 ptq->on_heap = true; 3197 } 3198 ptq->switch_state = INTEL_PT_SS_TRACING; 3199 break; 3200 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 3201 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu); 3202 break; 3203 default: 3204 break; 3205 } 3206 3207 ptq->next_tid = -1; 3208 3209 return 1; 3210 } 3211 3212 #ifdef HAVE_LIBTRACEEVENT 3213 static int intel_pt_process_switch(struct intel_pt *pt, 3214 struct perf_sample *sample) 3215 { 3216 pid_t tid; 3217 int cpu, ret; 3218 struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id); 3219 3220 if (evsel != pt->switch_evsel) 3221 return 0; 3222 3223 tid = evsel__intval(evsel, sample, "next_pid"); 3224 cpu = sample->cpu; 3225 3226 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", 3227 cpu, tid, sample->time, perf_time_to_tsc(sample->time, 3228 &pt->tc)); 3229 3230 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); 3231 if (ret <= 0) 3232 return ret; 3233 3234 return machine__set_current_tid(pt->machine, cpu, -1, tid); 3235 } 3236 #endif /* HAVE_LIBTRACEEVENT */ 3237 3238 static int intel_pt_context_switch_in(struct intel_pt *pt, 3239 struct perf_sample 
*sample) 3240 { 3241 pid_t pid = sample->pid; 3242 pid_t tid = sample->tid; 3243 int cpu = sample->cpu; 3244 3245 if (pt->sync_switch) { 3246 struct intel_pt_queue *ptq; 3247 3248 ptq = intel_pt_cpu_to_ptq(pt, cpu); 3249 if (ptq && ptq->sync_switch) { 3250 ptq->next_tid = -1; 3251 switch (ptq->switch_state) { 3252 case INTEL_PT_SS_NOT_TRACING: 3253 case INTEL_PT_SS_UNKNOWN: 3254 case INTEL_PT_SS_TRACING: 3255 break; 3256 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT: 3257 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 3258 ptq->switch_state = INTEL_PT_SS_TRACING; 3259 break; 3260 default: 3261 break; 3262 } 3263 } 3264 } 3265 3266 /* 3267 * If the current tid has not been updated yet, ensure it is now that 3268 * a "switch in" event has occurred. 3269 */ 3270 if (machine__get_current_tid(pt->machine, cpu) == tid) 3271 return 0; 3272 3273 return machine__set_current_tid(pt->machine, cpu, pid, tid); 3274 } 3275 3276 static int intel_pt_guest_context_switch(struct intel_pt *pt, 3277 union perf_event *event, 3278 struct perf_sample *sample) 3279 { 3280 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; 3281 struct machines *machines = &pt->session->machines; 3282 struct machine *machine = machines__find(machines, sample->machine_pid); 3283 3284 pt->have_guest_sideband = true; 3285 3286 /* 3287 * sync_switch cannot handle guest machines at present, so just disable 3288 * it. 3289 */ 3290 pt->sync_switch_not_supported = true; 3291 if (pt->sync_switch) 3292 intel_pt_disable_sync_switch(pt); 3293 3294 if (out) 3295 return 0; 3296 3297 if (!machine) 3298 return -EINVAL; 3299 3300 return machine__set_current_tid(machine, sample->vcpu, sample->pid, sample->tid); 3301 } 3302 3303 static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event, 3304 struct perf_sample *sample) 3305 { 3306 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; 3307 pid_t pid, tid; 3308 int cpu, ret; 3309 3310 if (perf_event__is_guest(event)) 3311 return intel_pt_guest_context_switch(pt, event, sample); 3312 3313 cpu = sample->cpu; 3314 3315 if (pt->have_sched_switch == 3) { 3316 if (!out) 3317 return intel_pt_context_switch_in(pt, sample); 3318 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) { 3319 pr_err("Expecting CPU-wide context switch event\n"); 3320 return -EINVAL; 3321 } 3322 pid = event->context_switch.next_prev_pid; 3323 tid = event->context_switch.next_prev_tid; 3324 } else { 3325 if (out) 3326 return 0; 3327 pid = sample->pid; 3328 tid = sample->tid; 3329 } 3330 3331 if (tid == -1) 3332 intel_pt_log("context_switch event has no tid\n"); 3333 3334 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); 3335 if (ret <= 0) 3336 return ret; 3337 3338 return machine__set_current_tid(pt->machine, cpu, pid, tid); 3339 } 3340 3341 static int intel_pt_process_itrace_start(struct intel_pt *pt, 3342 union perf_event *event, 3343 struct perf_sample *sample) 3344 { 3345 if (!pt->per_cpu_mmaps) 3346 return 0; 3347 3348 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", 3349 sample->cpu, event->itrace_start.pid, 3350 event->itrace_start.tid, sample->time, 3351 perf_time_to_tsc(sample->time, &pt->tc)); 3352 3353 return machine__set_current_tid(pt->machine, sample->cpu, 3354 event->itrace_start.pid, 3355 event->itrace_start.tid); 3356 } 3357 3358 static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt, 3359 union perf_event *event, 3360 struct perf_sample *sample) 3361 { 3362 u64 hw_id = event->aux_output_hw_id.hw_id; 3363 struct auxtrace_queue *queue; 3364 
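/* PERF_RECORD_AUX_OUTPUT_HW_ID ties a PEBS hardware counter index (hw_id) to the event using it, so that PEBS-via-PT records can later be attributed to the correct evsel */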
struct intel_pt_queue *ptq; 3365 struct evsel *evsel; 3366 3367 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session); 3368 evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id); 3369 if (!queue || !queue->priv || !evsel || hw_id >= INTEL_PT_MAX_PEBS) { 3370 pr_err("Bad AUX output hardware ID\n"); 3371 return -EINVAL; 3372 } 3373 3374 ptq = queue->priv; 3375 3376 ptq->pebs[hw_id].evsel = evsel; 3377 ptq->pebs[hw_id].id = sample->id; 3378 3379 return 0; 3380 } 3381 3382 static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr, 3383 struct addr_location *al) 3384 { 3385 if (!al->map || addr < map__start(al->map) || addr >= map__end(al->map)) { 3386 if (!thread__find_map(thread, cpumode, addr, al)) 3387 return -1; 3388 } 3389 3390 return 0; 3391 } 3392 3393 /* Invalidate all instruction cache entries that overlap the text poke */ 3394 static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event) 3395 { 3396 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 3397 u64 addr = event->text_poke.addr + event->text_poke.new_len - 1; 3398 /* Assume the text poke begins in a basic block of no more than 4096 bytes */ 3399 int cnt = 4096 + event->text_poke.new_len; 3400 struct thread *thread = pt->unknown_thread; 3401 struct addr_location al; 3402 struct machine *machine = pt->machine; 3403 struct intel_pt_cache_entry *e; 3404 u64 offset; 3405 int ret = 0; 3406 3407 addr_location__init(&al); 3408 if (!event->text_poke.new_len) 3409 goto out; 3410 3411 for (; cnt; cnt--, addr--) { 3412 struct dso *dso; 3413 3414 if (intel_pt_find_map(thread, cpumode, addr, &al)) { 3415 if (addr < event->text_poke.addr) 3416 goto out; 3417 continue; 3418 } 3419 3420 dso = map__dso(al.map); 3421 if (!dso || !dso__auxtrace_cache(dso)) 3422 continue; 3423 3424 offset = map__map_ip(al.map, addr); 3425 3426 e = intel_pt_cache_lookup(dso, machine, offset); 3427 if (!e) 3428 continue; 3429 3430 if (addr + e->byte_cnt + e->length <= event->text_poke.addr) { 3431 /* 3432 * No overlap. Working backwards there cannot be another 3433 * basic block that overlaps the text poke if there is a 3434 * branch instruction before the text poke address.
3435 */ 3436 if (e->branch != INTEL_PT_BR_NO_BRANCH) 3437 goto out; 3438 } else { 3439 intel_pt_cache_invalidate(dso, machine, offset); 3440 intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n", 3441 dso__long_name(dso), addr); 3442 } 3443 } 3444 out: 3445 addr_location__exit(&al); 3446 return ret; 3447 } 3448 3449 static int intel_pt_process_event(struct perf_session *session, 3450 union perf_event *event, 3451 struct perf_sample *sample, 3452 const struct perf_tool *tool) 3453 { 3454 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3455 auxtrace); 3456 u64 timestamp; 3457 int err = 0; 3458 3459 if (dump_trace) 3460 return 0; 3461 3462 if (!tool->ordered_events) { 3463 pr_err("Intel Processor Trace requires ordered events\n"); 3464 return -EINVAL; 3465 } 3466 3467 if (sample->time && sample->time != (u64)-1) 3468 timestamp = perf_time_to_tsc(sample->time, &pt->tc); 3469 else 3470 timestamp = 0; 3471 3472 if (timestamp || pt->timeless_decoding) { 3473 err = intel_pt_update_queues(pt); 3474 if (err) 3475 return err; 3476 } 3477 3478 if (pt->timeless_decoding) { 3479 if (pt->sampling_mode) { 3480 if (sample->aux_sample.size) 3481 err = intel_pt_process_timeless_sample(pt, 3482 sample); 3483 } else if (event->header.type == PERF_RECORD_EXIT) { 3484 err = intel_pt_process_timeless_queues(pt, 3485 event->fork.tid, 3486 sample->time); 3487 } 3488 } else if (timestamp) { 3489 if (!pt->first_timestamp) 3490 intel_pt_first_timestamp(pt, timestamp); 3491 err = intel_pt_process_queues(pt, timestamp); 3492 } 3493 if (err) 3494 return err; 3495 3496 if (event->header.type == PERF_RECORD_SAMPLE) { 3497 if (pt->synth_opts.add_callchain && !sample->callchain) 3498 intel_pt_add_callchain(pt, sample); 3499 if (pt->synth_opts.add_last_branch && !sample->branch_stack) 3500 intel_pt_add_br_stack(pt, sample); 3501 } 3502 3503 if (event->header.type == PERF_RECORD_AUX && 3504 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) && 3505 pt->synth_opts.errors) { 3506 err = intel_pt_lost(pt, sample); 3507 if (err) 3508 return err; 3509 } 3510 3511 #ifdef HAVE_LIBTRACEEVENT 3512 if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE) 3513 err = intel_pt_process_switch(pt, sample); 3514 else 3515 #endif 3516 if (event->header.type == PERF_RECORD_ITRACE_START) 3517 err = intel_pt_process_itrace_start(pt, event, sample); 3518 else if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID) 3519 err = intel_pt_process_aux_output_hw_id(pt, event, sample); 3520 else if (event->header.type == PERF_RECORD_SWITCH || 3521 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) 3522 err = intel_pt_context_switch(pt, event, sample); 3523 3524 if (!err && event->header.type == PERF_RECORD_TEXT_POKE) 3525 err = intel_pt_text_poke(pt, event); 3526 3527 if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) { 3528 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ", 3529 event->header.type, sample->cpu, sample->time, timestamp); 3530 intel_pt_log_event(event); 3531 } 3532 3533 return err; 3534 } 3535 3536 static int intel_pt_flush(struct perf_session *session, const struct perf_tool *tool) 3537 { 3538 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3539 auxtrace); 3540 int ret; 3541 3542 if (dump_trace) 3543 return 0; 3544 3545 if (!tool->ordered_events) 3546 return -EINVAL; 3547 3548 ret = intel_pt_update_queues(pt); 3549 if (ret < 0) 3550 return ret; 3551 3552 if (pt->timeless_decoding) 3553 return intel_pt_process_timeless_queues(pt, -1, 3554 
MAX_TIMESTAMP - 1); 3555 3556 return intel_pt_process_queues(pt, MAX_TIMESTAMP); 3557 } 3558 3559 static void intel_pt_free_events(struct perf_session *session) 3560 { 3561 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3562 auxtrace); 3563 struct auxtrace_queues *queues = &pt->queues; 3564 unsigned int i; 3565 3566 for (i = 0; i < queues->nr_queues; i++) { 3567 intel_pt_free_queue(queues->queue_array[i].priv); 3568 queues->queue_array[i].priv = NULL; 3569 } 3570 intel_pt_log_disable(); 3571 auxtrace_queues__free(queues); 3572 } 3573 3574 static void intel_pt_free(struct perf_session *session) 3575 { 3576 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3577 auxtrace); 3578 3579 auxtrace_heap__free(&pt->heap); 3580 intel_pt_free_events(session); 3581 session->auxtrace = NULL; 3582 intel_pt_free_vmcs_info(pt); 3583 thread__put(pt->unknown_thread); 3584 addr_filters__exit(&pt->filts); 3585 zfree(&pt->chain); 3586 zfree(&pt->filter); 3587 zfree(&pt->time_ranges); 3588 zfree(&pt->br_stack); 3589 free(pt); 3590 } 3591 3592 static bool intel_pt_evsel_is_auxtrace(struct perf_session *session, 3593 struct evsel *evsel) 3594 { 3595 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3596 auxtrace); 3597 3598 return evsel->core.attr.type == pt->pmu_type; 3599 } 3600 3601 static int intel_pt_process_auxtrace_event(struct perf_session *session, 3602 union perf_event *event, 3603 const struct perf_tool *tool __maybe_unused) 3604 { 3605 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3606 auxtrace); 3607 3608 if (!pt->data_queued) { 3609 struct auxtrace_buffer *buffer; 3610 off_t data_offset; 3611 int fd = perf_data__fd(session->data); 3612 int err; 3613 3614 if (perf_data__is_pipe(session->data)) { 3615 data_offset = 0; 3616 } else { 3617 data_offset = lseek(fd, 0, SEEK_CUR); 3618 if (data_offset == -1) 3619 return -errno; 3620 } 3621 3622 err = auxtrace_queues__add_event(&pt->queues, session, event, 3623 data_offset, &buffer); 3624 if (err) 3625 return err; 3626 3627 /* Dump here now we have copied a piped trace out of the pipe */ 3628 if (dump_trace) { 3629 if (auxtrace_buffer__get_data(buffer, fd)) { 3630 intel_pt_dump_event(pt, buffer->data, 3631 buffer->size); 3632 auxtrace_buffer__put_data(buffer); 3633 } 3634 } 3635 } 3636 3637 return 0; 3638 } 3639 3640 static int intel_pt_queue_data(struct perf_session *session, 3641 struct perf_sample *sample, 3642 union perf_event *event, u64 data_offset) 3643 { 3644 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3645 auxtrace); 3646 u64 timestamp; 3647 3648 if (event) { 3649 return auxtrace_queues__add_event(&pt->queues, session, event, 3650 data_offset, NULL); 3651 } 3652 3653 if (sample->time && sample->time != (u64)-1) 3654 timestamp = perf_time_to_tsc(sample->time, &pt->tc); 3655 else 3656 timestamp = 0; 3657 3658 return auxtrace_queues__add_sample(&pt->queues, session, sample, 3659 data_offset, timestamp); 3660 } 3661 3662 static int intel_pt_synth_event(struct perf_session *session, const char *name, 3663 struct perf_event_attr *attr, u64 id) 3664 { 3665 int err; 3666 3667 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 3668 name, id, (u64)attr->sample_type); 3669 3670 err = perf_session__deliver_synth_attr_event(session, attr, id); 3671 if (err) 3672 pr_err("%s: failed to synthesize '%s' event type\n", 3673 __func__, name); 3674 3675 return err; 3676 } 3677 3678 static void intel_pt_set_event_name(struct 
evlist *evlist, u64 id, 3679 const char *name) 3680 { 3681 struct evsel *evsel; 3682 3683 evlist__for_each_entry(evlist, evsel) { 3684 if (evsel->core.id && evsel->core.id[0] == id) { 3685 if (evsel->name) 3686 zfree(&evsel->name); 3687 evsel->name = strdup(name); 3688 break; 3689 } 3690 } 3691 } 3692 3693 static struct evsel *intel_pt_evsel(struct intel_pt *pt, 3694 struct evlist *evlist) 3695 { 3696 struct evsel *evsel; 3697 3698 evlist__for_each_entry(evlist, evsel) { 3699 if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids) 3700 return evsel; 3701 } 3702 3703 return NULL; 3704 } 3705 3706 static int intel_pt_synth_events(struct intel_pt *pt, 3707 struct perf_session *session) 3708 { 3709 struct evlist *evlist = session->evlist; 3710 struct evsel *evsel = intel_pt_evsel(pt, evlist); 3711 struct perf_event_attr attr; 3712 u64 id; 3713 int err; 3714 3715 if (!evsel) { 3716 pr_debug("There are no selected events with Intel Processor Trace data\n"); 3717 return 0; 3718 } 3719 3720 memset(&attr, 0, sizeof(struct perf_event_attr)); 3721 attr.size = sizeof(struct perf_event_attr); 3722 attr.type = PERF_TYPE_HARDWARE; 3723 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK; 3724 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | 3725 PERF_SAMPLE_PERIOD; 3726 if (pt->timeless_decoding) 3727 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME; 3728 else 3729 attr.sample_type |= PERF_SAMPLE_TIME; 3730 if (!pt->per_cpu_mmaps) 3731 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU; 3732 attr.exclude_user = evsel->core.attr.exclude_user; 3733 attr.exclude_kernel = evsel->core.attr.exclude_kernel; 3734 attr.exclude_hv = evsel->core.attr.exclude_hv; 3735 attr.exclude_host = evsel->core.attr.exclude_host; 3736 attr.exclude_guest = evsel->core.attr.exclude_guest; 3737 attr.sample_id_all = evsel->core.attr.sample_id_all; 3738 attr.read_format = evsel->core.attr.read_format; 3739 3740 id = evsel->core.id[0] + 1000000000; 3741 if (!id) 3742 id = 1; 3743 3744 if (pt->synth_opts.branches) { 3745 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS; 3746 attr.sample_period = 1; 3747 attr.sample_type |= PERF_SAMPLE_ADDR; 3748 err = intel_pt_synth_event(session, "branches", &attr, id); 3749 if (err) 3750 return err; 3751 pt->sample_branches = true; 3752 pt->branches_sample_type = attr.sample_type; 3753 pt->branches_id = id; 3754 id += 1; 3755 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR; 3756 } 3757 3758 if (pt->synth_opts.callchain) 3759 attr.sample_type |= PERF_SAMPLE_CALLCHAIN; 3760 if (pt->synth_opts.last_branch) { 3761 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK; 3762 /* 3763 * We don't use the hardware index, but the sample generation 3764 * code uses the new format branch_stack with this field, 3765 * so the event attributes must indicate that it's present. 
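* Without it, consumers of the synthesized samples would decode the branch_stack entries at the wrong offset.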
3766 */ 3767 attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX; 3768 } 3769 3770 if (pt->synth_opts.instructions) { 3771 attr.config = PERF_COUNT_HW_INSTRUCTIONS; 3772 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS) 3773 attr.sample_period = 3774 intel_pt_ns_to_ticks(pt, pt->synth_opts.period); 3775 else 3776 attr.sample_period = pt->synth_opts.period; 3777 err = intel_pt_synth_event(session, "instructions", &attr, id); 3778 if (err) 3779 return err; 3780 pt->sample_instructions = true; 3781 pt->instructions_sample_type = attr.sample_type; 3782 pt->instructions_id = id; 3783 id += 1; 3784 } 3785 3786 if (pt->synth_opts.cycles) { 3787 attr.config = PERF_COUNT_HW_CPU_CYCLES; 3788 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS) 3789 attr.sample_period = 3790 intel_pt_ns_to_ticks(pt, pt->synth_opts.period); 3791 else 3792 attr.sample_period = pt->synth_opts.period; 3793 err = intel_pt_synth_event(session, "cycles", &attr, id); 3794 if (err) 3795 return err; 3796 pt->sample_cycles = true; 3797 pt->cycles_sample_type = attr.sample_type; 3798 pt->cycles_id = id; 3799 id += 1; 3800 } 3801 3802 attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD; 3803 attr.sample_period = 1; 3804 3805 if (pt->synth_opts.transactions) { 3806 attr.config = PERF_COUNT_HW_INSTRUCTIONS; 3807 err = intel_pt_synth_event(session, "transactions", &attr, id); 3808 if (err) 3809 return err; 3810 pt->sample_transactions = true; 3811 pt->transactions_sample_type = attr.sample_type; 3812 pt->transactions_id = id; 3813 intel_pt_set_event_name(evlist, id, "transactions"); 3814 id += 1; 3815 } 3816 3817 attr.type = PERF_TYPE_SYNTH; 3818 attr.sample_type |= PERF_SAMPLE_RAW; 3819 3820 if (pt->synth_opts.ptwrites) { 3821 attr.config = PERF_SYNTH_INTEL_PTWRITE; 3822 err = intel_pt_synth_event(session, "ptwrite", &attr, id); 3823 if (err) 3824 return err; 3825 pt->sample_ptwrites = true; 3826 pt->ptwrites_sample_type = attr.sample_type; 3827 pt->ptwrites_id = id; 3828 intel_pt_set_event_name(evlist, id, "ptwrite"); 3829 id += 1; 3830 } 3831 3832 if (pt->synth_opts.pwr_events) { 3833 pt->sample_pwr_events = true; 3834 pt->pwr_events_sample_type = attr.sample_type; 3835 3836 attr.config = PERF_SYNTH_INTEL_CBR; 3837 err = intel_pt_synth_event(session, "cbr", &attr, id); 3838 if (err) 3839 return err; 3840 pt->cbr_id = id; 3841 intel_pt_set_event_name(evlist, id, "cbr"); 3842 id += 1; 3843 3844 attr.config = PERF_SYNTH_INTEL_PSB; 3845 err = intel_pt_synth_event(session, "psb", &attr, id); 3846 if (err) 3847 return err; 3848 pt->psb_id = id; 3849 intel_pt_set_event_name(evlist, id, "psb"); 3850 id += 1; 3851 } 3852 3853 if (pt->synth_opts.pwr_events && (evsel->core.attr.config & INTEL_PT_CFG_PWR_EVT_EN)) { 3854 attr.config = PERF_SYNTH_INTEL_MWAIT; 3855 err = intel_pt_synth_event(session, "mwait", &attr, id); 3856 if (err) 3857 return err; 3858 pt->mwait_id = id; 3859 intel_pt_set_event_name(evlist, id, "mwait"); 3860 id += 1; 3861 3862 attr.config = PERF_SYNTH_INTEL_PWRE; 3863 err = intel_pt_synth_event(session, "pwre", &attr, id); 3864 if (err) 3865 return err; 3866 pt->pwre_id = id; 3867 intel_pt_set_event_name(evlist, id, "pwre"); 3868 id += 1; 3869 3870 attr.config = PERF_SYNTH_INTEL_EXSTOP; 3871 err = intel_pt_synth_event(session, "exstop", &attr, id); 3872 if (err) 3873 return err; 3874 pt->exstop_id = id; 3875 intel_pt_set_event_name(evlist, id, "exstop"); 3876 id += 1; 3877 3878 attr.config = PERF_SYNTH_INTEL_PWRX; 3879 err = intel_pt_synth_event(session, "pwrx", &attr, id); 3880 if (err) 3881 return err; 
3882 pt->pwrx_id = id; 3883 intel_pt_set_event_name(evlist, id, "pwrx"); 3884 id += 1; 3885 } 3886 3887 if (pt->synth_opts.intr_events && (evsel->core.attr.config & INTEL_PT_CFG_EVT_EN)) { 3888 attr.config = PERF_SYNTH_INTEL_EVT; 3889 err = intel_pt_synth_event(session, "evt", &attr, id); 3890 if (err) 3891 return err; 3892 pt->evt_sample_type = attr.sample_type; 3893 pt->evt_id = id; 3894 intel_pt_set_event_name(evlist, id, "evt"); 3895 id += 1; 3896 } 3897 3898 if (pt->synth_opts.intr_events && pt->cap_event_trace) { 3899 attr.config = PERF_SYNTH_INTEL_IFLAG_CHG; 3900 err = intel_pt_synth_event(session, "iflag", &attr, id); 3901 if (err) 3902 return err; 3903 pt->iflag_chg_sample_type = attr.sample_type; 3904 pt->iflag_chg_id = id; 3905 intel_pt_set_event_name(evlist, id, "iflag"); 3906 id += 1; 3907 } 3908 3909 return 0; 3910 } 3911 3912 static void intel_pt_setup_pebs_events(struct intel_pt *pt) 3913 { 3914 struct evsel *evsel; 3915 3916 if (!pt->synth_opts.other_events) 3917 return; 3918 3919 evlist__for_each_entry(pt->session->evlist, evsel) { 3920 if (evsel->core.attr.aux_output && evsel->core.id) { 3921 if (pt->single_pebs) { 3922 pt->single_pebs = false; 3923 return; 3924 } 3925 pt->single_pebs = true; 3926 pt->sample_pebs = true; 3927 pt->pebs_evsel = evsel; 3928 } 3929 } 3930 } 3931 3932 static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist) 3933 { 3934 struct evsel *evsel; 3935 3936 evlist__for_each_entry_reverse(evlist, evsel) { 3937 const char *name = evsel__name(evsel); 3938 3939 if (!strcmp(name, "sched:sched_switch")) 3940 return evsel; 3941 } 3942 3943 return NULL; 3944 } 3945 3946 static bool intel_pt_find_switch(struct evlist *evlist) 3947 { 3948 struct evsel *evsel; 3949 3950 evlist__for_each_entry(evlist, evsel) { 3951 if (evsel->core.attr.context_switch) 3952 return true; 3953 } 3954 3955 return false; 3956 } 3957 3958 static int intel_pt_perf_config(const char *var, const char *value, void *data) 3959 { 3960 struct intel_pt *pt = data; 3961 3962 if (!strcmp(var, "intel-pt.mispred-all")) 3963 pt->mispred_all = perf_config_bool(var, value); 3964 3965 if (!strcmp(var, "intel-pt.max-loops")) 3966 perf_config_int(&pt->max_loops, var, value); 3967 3968 return 0; 3969 } 3970 3971 /* Find least TSC which converts to ns or later */ 3972 static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt) 3973 { 3974 u64 tsc, tm; 3975 3976 tsc = perf_time_to_tsc(ns, &pt->tc); 3977 3978 while (1) { 3979 tm = tsc_to_perf_time(tsc, &pt->tc); 3980 if (tm < ns) 3981 break; 3982 tsc -= 1; 3983 } 3984 3985 while (tm < ns) 3986 tm = tsc_to_perf_time(++tsc, &pt->tc); 3987 3988 return tsc; 3989 } 3990 3991 /* Find greatest TSC which converts to ns or earlier */ 3992 static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt) 3993 { 3994 u64 tsc, tm; 3995 3996 tsc = perf_time_to_tsc(ns, &pt->tc); 3997 3998 while (1) { 3999 tm = tsc_to_perf_time(tsc, &pt->tc); 4000 if (tm > ns) 4001 break; 4002 tsc += 1; 4003 } 4004 4005 while (tm > ns) 4006 tm = tsc_to_perf_time(--tsc, &pt->tc); 4007 4008 return tsc; 4009 } 4010 4011 static int intel_pt_setup_time_ranges(struct intel_pt *pt, 4012 struct itrace_synth_opts *opts) 4013 { 4014 struct perf_time_interval *p = opts->ptime_range; 4015 int n = opts->range_num; 4016 int i; 4017 4018 if (!n || !p || pt->timeless_decoding) 4019 return 0; 4020 4021 pt->time_ranges = calloc(n, sizeof(struct range)); 4022 if (!pt->time_ranges) 4023 return -ENOMEM; 4024 4025 pt->range_cnt = n; 4026 4027 intel_pt_log("%s: %u range(s)\n", __func__, n); 4028 4029 for 
(i = 0; i < n; i++) { 4030 struct range *r = &pt->time_ranges[i]; 4031 u64 ts = p[i].start; 4032 u64 te = p[i].end; 4033 4034 /* 4035 * Take care to ensure the TSC range matches the perf-time range 4036 * when converted back to perf-time. 4037 */ 4038 r->start = ts ? intel_pt_tsc_start(ts, pt) : 0; 4039 r->end = te ? intel_pt_tsc_end(te, pt) : 0; 4040 4041 intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n", 4042 i, ts, te); 4043 intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n", 4044 i, r->start, r->end); 4045 } 4046 4047 return 0; 4048 } 4049 4050 static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args) 4051 { 4052 struct intel_pt_vmcs_info *vmcs_info; 4053 u64 tsc_offset, vmcs; 4054 char *p = *args; 4055 4056 errno = 0; 4057 4058 p = skip_spaces(p); 4059 if (!*p) 4060 return 1; 4061 4062 tsc_offset = strtoull(p, &p, 0); 4063 if (errno) 4064 return -errno; 4065 p = skip_spaces(p); 4066 if (*p != ':') { 4067 pt->dflt_tsc_offset = tsc_offset; 4068 *args = p; 4069 return 0; 4070 } 4071 p += 1; 4072 while (1) { 4073 vmcs = strtoull(p, &p, 0); 4074 if (errno) 4075 return -errno; 4076 if (!vmcs) 4077 return -EINVAL; 4078 vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset); 4079 if (!vmcs_info) 4080 return -ENOMEM; 4081 p = skip_spaces(p); 4082 if (*p != ',') 4083 break; 4084 p += 1; 4085 } 4086 *args = p; 4087 return 0; 4088 } 4089 4090 static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt) 4091 { 4092 char *args = pt->synth_opts.vm_tm_corr_args; 4093 int ret; 4094 4095 if (!args) 4096 return 0; 4097 4098 do { 4099 ret = intel_pt_parse_vm_tm_corr_arg(pt, &args); 4100 } while (!ret); 4101 4102 if (ret < 0) { 4103 pr_err("Failed to parse VM Time Correlation options\n"); 4104 return ret; 4105 } 4106 4107 return 0; 4108 } 4109 4110 static const char * const intel_pt_info_fmts[] = { 4111 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n", 4112 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n", 4113 [INTEL_PT_TIME_MULT] = " Time Muliplier %"PRIu64"\n", 4114 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n", 4115 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n", 4116 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n", 4117 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n", 4118 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n", 4119 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n", 4120 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n", 4121 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n", 4122 [INTEL_PT_MTC_FREQ_BITS] = " MTC freq bits %#"PRIx64"\n", 4123 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n", 4124 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n", 4125 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n", 4126 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n", 4127 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n", 4128 }; 4129 4130 static void intel_pt_print_info(__u64 *arr, int start, int finish) 4131 { 4132 int i; 4133 4134 if (!dump_trace) 4135 return; 4136 4137 for (i = start; i <= finish; i++) { 4138 const char *fmt = intel_pt_info_fmts[i]; 4139 4140 if (fmt) 4141 fprintf(stdout, fmt, arr[i]); 4142 } 4143 } 4144 4145 static void intel_pt_print_info_str(const char *name, const char *str) 4146 { 4147 if (!dump_trace) 4148 return; 4149 4150 fprintf(stdout, " %-20s%s\n", name, str ? 
str : ""); 4151 } 4152 4153 static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos) 4154 { 4155 return auxtrace_info->header.size >= 4156 sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1)); 4157 } 4158 4159 int intel_pt_process_auxtrace_info(union perf_event *event, 4160 struct perf_session *session) 4161 { 4162 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; 4163 size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS; 4164 struct intel_pt *pt; 4165 void *info_end; 4166 __u64 *info; 4167 int err; 4168 4169 if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) + 4170 min_sz) 4171 return -EINVAL; 4172 4173 pt = zalloc(sizeof(struct intel_pt)); 4174 if (!pt) 4175 return -ENOMEM; 4176 4177 pt->vmcs_info = RB_ROOT; 4178 4179 addr_filters__init(&pt->filts); 4180 4181 err = perf_config(intel_pt_perf_config, pt); 4182 if (err) 4183 goto err_free; 4184 4185 err = auxtrace_queues__init(&pt->queues); 4186 if (err) 4187 goto err_free; 4188 4189 if (session->itrace_synth_opts->set) { 4190 pt->synth_opts = *session->itrace_synth_opts; 4191 } else { 4192 struct itrace_synth_opts *opts = session->itrace_synth_opts; 4193 4194 itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample); 4195 if (!opts->default_no_sample && !opts->inject) { 4196 pt->synth_opts.branches = false; 4197 pt->synth_opts.callchain = true; 4198 pt->synth_opts.add_callchain = true; 4199 } 4200 pt->synth_opts.thread_stack = opts->thread_stack; 4201 } 4202 4203 if (!(pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT)) 4204 intel_pt_log_set_name(INTEL_PT_PMU_NAME); 4205 4206 pt->session = session; 4207 pt->machine = &session->machines.host; /* No kvm support */ 4208 pt->auxtrace_type = auxtrace_info->type; 4209 pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE]; 4210 pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT]; 4211 pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT]; 4212 pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO]; 4213 pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO]; 4214 pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT]; 4215 pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT]; 4216 pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH]; 4217 pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE]; 4218 pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS]; 4219 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE, 4220 INTEL_PT_PER_CPU_MMAPS); 4221 4222 if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) { 4223 pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT]; 4224 pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS]; 4225 pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N]; 4226 pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D]; 4227 pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT]; 4228 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT, 4229 INTEL_PT_CYC_BIT); 4230 } 4231 4232 if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) { 4233 pt->max_non_turbo_ratio = 4234 auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO]; 4235 intel_pt_print_info(&auxtrace_info->priv[0], 4236 INTEL_PT_MAX_NONTURBO_RATIO, 4237 INTEL_PT_MAX_NONTURBO_RATIO); 4238 } 4239 4240 info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1; 4241 info_end = (void *)auxtrace_info + auxtrace_info->header.size; 4242 4243 if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) 
int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	void *info_end;
	__u64 *info;
	int err;

	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	pt->vmcs_info = RB_ROOT;

	addr_filters__init(&pt->filts);

	err = perf_config(intel_pt_perf_config, pt);
	if (err)
		goto err_free;

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	if (session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		struct itrace_synth_opts *opts = session->itrace_synth_opts;

		itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
		if (!opts->default_no_sample && !opts->inject) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
			pt->synth_opts.add_callchain = true;
		}
		pt->synth_opts.thread_stack = opts->thread_stack;
	}

	if (!(pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT))
		intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
		pt->max_non_turbo_ratio =
			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_MAX_NONTURBO_RATIO,
				    INTEL_PT_MAX_NONTURBO_RATIO);
	}

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
	info_end = (void *)auxtrace_info + auxtrace_info->header.size;

	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
		size_t len;

		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}

	if ((void *)info < info_end) {
		pt->cap_event_trace = *info++;
		if (dump_trace)
			fprintf(stdout, "  Cap Event Trace     %d\n",
				pt->cap_event_trace);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	if (pt->timeless_decoding && !pt->tc.time_mult)
		pt->tc.time_mult = 1;
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = intel_pt_sampling_mode(pt);
	pt->est_tsc = !pt->timeless_decoding;

	if (pt->synth_opts.vm_time_correlation) {
		if (pt->timeless_decoding) {
			pr_err("Intel PT has no time information for VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		if (session->itrace_synth_opts->ptime_range) {
			pr_err("Time ranges cannot be specified with VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		/* Currently TSC Offset is calculated using MTC packets */
		if (!intel_pt_have_mtc(pt)) {
			pr_err("MTC packets must have been enabled for VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		err = intel_pt_parse_vm_tm_corr_args(pt);
		if (err)
			goto err_free_queues;
	}

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_maps(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.queue_data = intel_pt_queue_data;
	pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;
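
	/*
	 * have_sched_switch comes from the auxtrace info
	 * (INTEL_PT_HAVE_SCHED_SWITCH above). As used here: 1 means a
	 * sched:sched_switch tracepoint is expected in the evlist, 2 means
	 * an event with attr.context_switch set is expected; any other
	 * values are assumed to be handled by the switch-event code
	 * elsewhere in this file.
	 */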
	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}

	if (pt->synth_opts.log) {
		bool log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR;
		unsigned int log_on_error_size = pt->synth_opts.log_on_error_size;

		intel_pt_log_enable(log_on_error, log_on_error_size);
	}

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
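	/*
	 * Worked example with illustrative numbers only: a 2.8 GHz TSC gives
	 * max_non_turbo_ratio = (2800000000 + 50000000) / 100000000 = 28 and
	 * cbr2khz = 2800000000 / 28 / 1000 = 100000, i.e. one CBR unit
	 * corresponds to 100 MHz.
	 */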
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
	}

	err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
	if (err)
		goto err_delete_thread;

	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
	    !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
			pt->synth_opts.add_callchain = false;
		}
	}

	if (pt->synth_opts.add_callchain) {
		err = intel_pt_callchain_init(pt);
		if (err)
			goto err_delete_thread;
	}

	if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
		pt->br_stack_sz = pt->synth_opts.last_branch_sz;
		pt->br_stack_sz_plus = pt->br_stack_sz;
	}

	if (pt->synth_opts.add_last_branch) {
		err = intel_pt_br_stack_init(pt);
		if (err)
			goto err_delete_thread;
		/*
		 * Additional branch stack size to cater for tracing from the
		 * actual sample ip to where the sample time is recorded.
		 * Measured at about 200 branches, but generously set to 1024.
		 * If kernel space is not being traced, then add just 1 for the
		 * branch to kernel space.
		 */
		if (intel_pt_tracing_kernel(pt))
			pt->br_stack_sz_plus += 1024;
		else
			pt->br_stack_sz_plus += 1;
	}

	pt->use_thread_stack = pt->synth_opts.callchain ||
			       pt->synth_opts.add_callchain ||
			       pt->synth_opts.thread_stack ||
			       pt->synth_opts.last_branch ||
			       pt->synth_opts.add_last_branch;

	pt->callstack = pt->synth_opts.callchain ||
			pt->synth_opts.add_callchain ||
			pt->synth_opts.thread_stack;

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	intel_pt_setup_pebs_events(pt);

	if (perf_data__is_pipe(session->data)) {
		pr_warning("WARNING: Intel PT with pipe mode is not recommended.\n"
			   "         The output cannot be relied upon. In particular,\n"
			   "         timestamps and the order of events may be incorrect.\n");
	}

	if (pt->sampling_mode || list_empty(&session->auxtrace_index))
		err = auxtrace_queue_data(session, true, true);
	else
		err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	zfree(&pt->chain);
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	zfree(&pt->time_ranges);
	free(pt);
	return err;
}