// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "time-utils.h"

#include "../arch/x86/include/uapi/asm/perf_regs.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

struct range {
	u64 start;
	u64 end;
};

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	bool use_thread_stack;
	bool callstack;
	unsigned int br_stack_sz;
	unsigned int br_stack_sz_plus;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;
	u64 first_timestamp;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;
	u64 psb_id;

	bool sample_pebs;
	struct evsel *pebs_evsel;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;
	int max_loops;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;

	struct range *time_ranges;
	unsigned int range_cnt;

	struct ip_callchain *chain;
	struct branch_stack *br_stack;

	u64 dflt_tsc_offset;
	struct rb_root vmcs_info;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	struct machine *guest_machine;
	struct thread *unknown_guest_thread;
	pid_t guest_machine_pid;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u64 sel_timestamp;
	bool sel_start;
	unsigned int sel_idx;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	u64 ipc_insn_cnt;
	u64 ipc_cyc_cnt;
	u64 last_in_insn_cnt;
	u64 last_in_cyc_cnt;
	u64 last_br_insn_cnt;
	u64 last_br_cyc_cnt;
	unsigned int cbr_seen;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, " %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, " ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}

static void intel_pt_log_event(union perf_event *event)
{
	FILE *f = intel_pt_log_fp();

	if (!intel_pt_enable_logging || !f)
		return;

	perf_event__fprintf(event, NULL, f);
}

static void intel_pt_dump_sample(struct perf_session *session,
				 struct perf_sample *sample)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	printf("\n");
	intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
}

static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
{
	struct perf_time_interval *range = pt->synth_opts.ptime_range;
	int n = pt->synth_opts.range_num;

	if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return true;

	if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return false;

	/* perf_time__ranges_skip_sample does not work if time is zero */
	if (!tm)
		tm = 1;

	return !n || !perf_time__ranges_skip_sample(range, n, tm);
}

static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
							u64 vmcs,
							u64 dflt_tsc_offset)
{
	struct rb_node **p = &rb_root->rb_node;
	struct rb_node *parent = NULL;
	struct intel_pt_vmcs_info *v;

	while (*p) {
		parent = *p;
		v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);

		if (v->vmcs == vmcs)
			return v;

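		/* No match at this node: descend the rb tree, which is keyed by VMCS pointer. */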
if (vmcs < v->vmcs) 295 p = &(*p)->rb_left; 296 else 297 p = &(*p)->rb_right; 298 } 299 300 v = zalloc(sizeof(*v)); 301 if (v) { 302 v->vmcs = vmcs; 303 v->tsc_offset = dflt_tsc_offset; 304 v->reliable = dflt_tsc_offset; 305 306 rb_link_node(&v->rb_node, parent, p); 307 rb_insert_color(&v->rb_node, rb_root); 308 } 309 310 return v; 311 } 312 313 static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs) 314 { 315 struct intel_pt_queue *ptq = data; 316 struct intel_pt *pt = ptq->pt; 317 318 if (!vmcs && !pt->dflt_tsc_offset) 319 return NULL; 320 321 return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset); 322 } 323 324 static void intel_pt_free_vmcs_info(struct intel_pt *pt) 325 { 326 struct intel_pt_vmcs_info *v; 327 struct rb_node *n; 328 329 n = rb_first(&pt->vmcs_info); 330 while (n) { 331 v = rb_entry(n, struct intel_pt_vmcs_info, rb_node); 332 n = rb_next(n); 333 rb_erase(&v->rb_node, &pt->vmcs_info); 334 free(v); 335 } 336 } 337 338 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a, 339 struct auxtrace_buffer *b) 340 { 341 bool consecutive = false; 342 void *start; 343 344 start = intel_pt_find_overlap(a->data, a->size, b->data, b->size, 345 pt->have_tsc, &consecutive, 346 pt->synth_opts.vm_time_correlation); 347 if (!start) 348 return -EINVAL; 349 /* 350 * In the case of vm_time_correlation, the overlap might contain TSC 351 * packets that will not be fixed, and that will then no longer work for 352 * overlap detection. Avoid that by zeroing out the overlap. 353 */ 354 if (pt->synth_opts.vm_time_correlation) 355 memset(b->data, 0, start - b->data); 356 b->use_size = b->data + b->size - start; 357 b->use_data = start; 358 if (b->use_size && consecutive) 359 b->consecutive = true; 360 return 0; 361 } 362 363 static int intel_pt_get_buffer(struct intel_pt_queue *ptq, 364 struct auxtrace_buffer *buffer, 365 struct auxtrace_buffer *old_buffer, 366 struct intel_pt_buffer *b) 367 { 368 bool might_overlap; 369 370 if (!buffer->data) { 371 int fd = perf_data__fd(ptq->pt->session->data); 372 373 buffer->data = auxtrace_buffer__get_data(buffer, fd); 374 if (!buffer->data) 375 return -ENOMEM; 376 } 377 378 might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode; 379 if (might_overlap && !buffer->consecutive && old_buffer && 380 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer)) 381 return -ENOMEM; 382 383 if (buffer->use_data) { 384 b->len = buffer->use_size; 385 b->buf = buffer->use_data; 386 } else { 387 b->len = buffer->size; 388 b->buf = buffer->data; 389 } 390 b->ref_timestamp = buffer->reference; 391 392 if (!old_buffer || (might_overlap && !buffer->consecutive)) { 393 b->consecutive = false; 394 b->trace_nr = buffer->buffer_nr + 1; 395 } else { 396 b->consecutive = true; 397 } 398 399 return 0; 400 } 401 402 /* Do not drop buffers with references - refer intel_pt_get_trace() */ 403 static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq, 404 struct auxtrace_buffer *buffer) 405 { 406 if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer) 407 return; 408 409 auxtrace_buffer__drop_data(buffer); 410 } 411 412 /* Must be serialized with respect to intel_pt_get_trace() */ 413 static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb, 414 void *cb_data) 415 { 416 struct intel_pt_queue *ptq = data; 417 struct auxtrace_buffer *buffer = ptq->buffer; 418 struct auxtrace_buffer *old_buffer = ptq->old_buffer; 419 struct auxtrace_queue *queue; 420 int err = 0; 421 422 queue = 
&ptq->pt->queues.queue_array[ptq->queue_nr]; 423 424 while (1) { 425 struct intel_pt_buffer b = { .len = 0 }; 426 427 buffer = auxtrace_buffer__next(queue, buffer); 428 if (!buffer) 429 break; 430 431 err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b); 432 if (err) 433 break; 434 435 if (b.len) { 436 intel_pt_lookahead_drop_buffer(ptq, old_buffer); 437 old_buffer = buffer; 438 } else { 439 intel_pt_lookahead_drop_buffer(ptq, buffer); 440 continue; 441 } 442 443 err = cb(&b, cb_data); 444 if (err) 445 break; 446 } 447 448 if (buffer != old_buffer) 449 intel_pt_lookahead_drop_buffer(ptq, buffer); 450 intel_pt_lookahead_drop_buffer(ptq, old_buffer); 451 452 return err; 453 } 454 455 /* 456 * This function assumes data is processed sequentially only. 457 * Must be serialized with respect to intel_pt_lookahead() 458 */ 459 static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) 460 { 461 struct intel_pt_queue *ptq = data; 462 struct auxtrace_buffer *buffer = ptq->buffer; 463 struct auxtrace_buffer *old_buffer = ptq->old_buffer; 464 struct auxtrace_queue *queue; 465 int err; 466 467 if (ptq->stop) { 468 b->len = 0; 469 return 0; 470 } 471 472 queue = &ptq->pt->queues.queue_array[ptq->queue_nr]; 473 474 buffer = auxtrace_buffer__next(queue, buffer); 475 if (!buffer) { 476 if (old_buffer) 477 auxtrace_buffer__drop_data(old_buffer); 478 b->len = 0; 479 return 0; 480 } 481 482 ptq->buffer = buffer; 483 484 err = intel_pt_get_buffer(ptq, buffer, old_buffer, b); 485 if (err) 486 return err; 487 488 if (ptq->step_through_buffers) 489 ptq->stop = true; 490 491 if (b->len) { 492 if (old_buffer) 493 auxtrace_buffer__drop_data(old_buffer); 494 ptq->old_buffer = buffer; 495 } else { 496 auxtrace_buffer__drop_data(buffer); 497 return intel_pt_get_trace(b, data); 498 } 499 500 return 0; 501 } 502 503 struct intel_pt_cache_entry { 504 struct auxtrace_cache_entry entry; 505 u64 insn_cnt; 506 u64 byte_cnt; 507 enum intel_pt_insn_op op; 508 enum intel_pt_insn_branch branch; 509 int length; 510 int32_t rel; 511 char insn[INTEL_PT_INSN_BUF_SZ]; 512 }; 513 514 static int intel_pt_config_div(const char *var, const char *value, void *data) 515 { 516 int *d = data; 517 long val; 518 519 if (!strcmp(var, "intel-pt.cache-divisor")) { 520 val = strtol(value, NULL, 0); 521 if (val > 0 && val <= INT_MAX) 522 *d = val; 523 } 524 525 return 0; 526 } 527 528 static int intel_pt_cache_divisor(void) 529 { 530 static int d; 531 532 if (d) 533 return d; 534 535 perf_config(intel_pt_config_div, &d); 536 537 if (!d) 538 d = 64; 539 540 return d; 541 } 542 543 static unsigned int intel_pt_cache_size(struct dso *dso, 544 struct machine *machine) 545 { 546 off_t size; 547 548 size = dso__data_size(dso, machine); 549 size /= intel_pt_cache_divisor(); 550 if (size < 1000) 551 return 10; 552 if (size > (1 << 21)) 553 return 21; 554 return 32 - __builtin_clz(size); 555 } 556 557 static struct auxtrace_cache *intel_pt_cache(struct dso *dso, 558 struct machine *machine) 559 { 560 struct auxtrace_cache *c; 561 unsigned int bits; 562 563 if (dso->auxtrace_cache) 564 return dso->auxtrace_cache; 565 566 bits = intel_pt_cache_size(dso, machine); 567 568 /* Ignoring cache creation failure */ 569 c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200); 570 571 dso->auxtrace_cache = c; 572 573 return c; 574 } 575 576 static int intel_pt_cache_add(struct dso *dso, struct machine *machine, 577 u64 offset, u64 insn_cnt, u64 byte_cnt, 578 struct intel_pt_insn *intel_pt_insn) 579 { 580 struct auxtrace_cache *c = 
intel_pt_cache(dso, machine); 581 struct intel_pt_cache_entry *e; 582 int err; 583 584 if (!c) 585 return -ENOMEM; 586 587 e = auxtrace_cache__alloc_entry(c); 588 if (!e) 589 return -ENOMEM; 590 591 e->insn_cnt = insn_cnt; 592 e->byte_cnt = byte_cnt; 593 e->op = intel_pt_insn->op; 594 e->branch = intel_pt_insn->branch; 595 e->length = intel_pt_insn->length; 596 e->rel = intel_pt_insn->rel; 597 memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ); 598 599 err = auxtrace_cache__add(c, offset, &e->entry); 600 if (err) 601 auxtrace_cache__free_entry(c, e); 602 603 return err; 604 } 605 606 static struct intel_pt_cache_entry * 607 intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset) 608 { 609 struct auxtrace_cache *c = intel_pt_cache(dso, machine); 610 611 if (!c) 612 return NULL; 613 614 return auxtrace_cache__lookup(dso->auxtrace_cache, offset); 615 } 616 617 static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine, 618 u64 offset) 619 { 620 struct auxtrace_cache *c = intel_pt_cache(dso, machine); 621 622 if (!c) 623 return; 624 625 auxtrace_cache__remove(dso->auxtrace_cache, offset); 626 } 627 628 static inline bool intel_pt_guest_kernel_ip(uint64_t ip) 629 { 630 /* Assumes 64-bit kernel */ 631 return ip & (1ULL << 63); 632 } 633 634 static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr) 635 { 636 if (nr) { 637 return intel_pt_guest_kernel_ip(ip) ? 638 PERF_RECORD_MISC_GUEST_KERNEL : 639 PERF_RECORD_MISC_GUEST_USER; 640 } 641 642 return ip >= ptq->pt->kernel_start ? 643 PERF_RECORD_MISC_KERNEL : 644 PERF_RECORD_MISC_USER; 645 } 646 647 static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip) 648 { 649 /* No support for non-zero CS base */ 650 if (from_ip) 651 return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr); 652 return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr); 653 } 654 655 static int intel_pt_get_guest(struct intel_pt_queue *ptq) 656 { 657 struct machines *machines = &ptq->pt->session->machines; 658 struct machine *machine; 659 pid_t pid = ptq->pid <= 0 ? 
DEFAULT_GUEST_KERNEL_ID : ptq->pid; 660 661 if (ptq->guest_machine && pid == ptq->guest_machine_pid) 662 return 0; 663 664 ptq->guest_machine = NULL; 665 thread__zput(ptq->unknown_guest_thread); 666 667 machine = machines__find_guest(machines, pid); 668 if (!machine) 669 return -1; 670 671 ptq->unknown_guest_thread = machine__idle_thread(machine); 672 if (!ptq->unknown_guest_thread) 673 return -1; 674 675 ptq->guest_machine = machine; 676 ptq->guest_machine_pid = pid; 677 678 return 0; 679 } 680 681 static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, 682 uint64_t *insn_cnt_ptr, uint64_t *ip, 683 uint64_t to_ip, uint64_t max_insn_cnt, 684 void *data) 685 { 686 struct intel_pt_queue *ptq = data; 687 struct machine *machine = ptq->pt->machine; 688 struct thread *thread; 689 struct addr_location al; 690 unsigned char buf[INTEL_PT_INSN_BUF_SZ]; 691 ssize_t len; 692 int x86_64; 693 u8 cpumode; 694 u64 offset, start_offset, start_ip; 695 u64 insn_cnt = 0; 696 bool one_map = true; 697 bool nr; 698 699 intel_pt_insn->length = 0; 700 701 if (to_ip && *ip == to_ip) 702 goto out_no_cache; 703 704 nr = ptq->state->to_nr; 705 cpumode = intel_pt_nr_cpumode(ptq, *ip, nr); 706 707 if (nr) { 708 if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL || 709 intel_pt_get_guest(ptq)) 710 return -EINVAL; 711 machine = ptq->guest_machine; 712 thread = ptq->unknown_guest_thread; 713 } else { 714 thread = ptq->thread; 715 if (!thread) { 716 if (cpumode != PERF_RECORD_MISC_KERNEL) 717 return -EINVAL; 718 thread = ptq->pt->unknown_thread; 719 } 720 } 721 722 while (1) { 723 if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso) 724 return -EINVAL; 725 726 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && 727 dso__data_status_seen(al.map->dso, 728 DSO_DATA_STATUS_SEEN_ITRACE)) 729 return -ENOENT; 730 731 offset = al.map->map_ip(al.map, *ip); 732 733 if (!to_ip && one_map) { 734 struct intel_pt_cache_entry *e; 735 736 e = intel_pt_cache_lookup(al.map->dso, machine, offset); 737 if (e && 738 (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) { 739 *insn_cnt_ptr = e->insn_cnt; 740 *ip += e->byte_cnt; 741 intel_pt_insn->op = e->op; 742 intel_pt_insn->branch = e->branch; 743 intel_pt_insn->length = e->length; 744 intel_pt_insn->rel = e->rel; 745 memcpy(intel_pt_insn->buf, e->insn, 746 INTEL_PT_INSN_BUF_SZ); 747 intel_pt_log_insn_no_data(intel_pt_insn, *ip); 748 return 0; 749 } 750 } 751 752 start_offset = offset; 753 start_ip = *ip; 754 755 /* Load maps to ensure dso->is_64_bit has been updated */ 756 map__load(al.map); 757 758 x86_64 = al.map->dso->is_64_bit; 759 760 while (1) { 761 len = dso__data_read_offset(al.map->dso, machine, 762 offset, buf, 763 INTEL_PT_INSN_BUF_SZ); 764 if (len <= 0) 765 return -EINVAL; 766 767 if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn)) 768 return -EINVAL; 769 770 intel_pt_log_insn(intel_pt_insn, *ip); 771 772 insn_cnt += 1; 773 774 if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH) 775 goto out; 776 777 if (max_insn_cnt && insn_cnt >= max_insn_cnt) 778 goto out_no_cache; 779 780 *ip += intel_pt_insn->length; 781 782 if (to_ip && *ip == to_ip) { 783 intel_pt_insn->length = 0; 784 goto out_no_cache; 785 } 786 787 if (*ip >= al.map->end) 788 break; 789 790 offset += intel_pt_insn->length; 791 } 792 one_map = false; 793 } 794 out: 795 *insn_cnt_ptr = insn_cnt; 796 797 if (!one_map) 798 goto out_no_cache; 799 800 /* 801 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate 802 * entries. 
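	 * (The cache lookup at the top of the walk is skipped when 'to_ip' is
	 * set, so an entry for 'start_offset' may already exist from an
	 * earlier walk that stopped at a branch.)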
803 */ 804 if (to_ip) { 805 struct intel_pt_cache_entry *e; 806 807 e = intel_pt_cache_lookup(al.map->dso, machine, start_offset); 808 if (e) 809 return 0; 810 } 811 812 /* Ignore cache errors */ 813 intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt, 814 *ip - start_ip, intel_pt_insn); 815 816 return 0; 817 818 out_no_cache: 819 *insn_cnt_ptr = insn_cnt; 820 return 0; 821 } 822 823 static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip, 824 uint64_t offset, const char *filename) 825 { 826 struct addr_filter *filt; 827 bool have_filter = false; 828 bool hit_tracestop = false; 829 bool hit_filter = false; 830 831 list_for_each_entry(filt, &pt->filts.head, list) { 832 if (filt->start) 833 have_filter = true; 834 835 if ((filename && !filt->filename) || 836 (!filename && filt->filename) || 837 (filename && strcmp(filename, filt->filename))) 838 continue; 839 840 if (!(offset >= filt->addr && offset < filt->addr + filt->size)) 841 continue; 842 843 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n", 844 ip, offset, filename ? filename : "[kernel]", 845 filt->start ? "filter" : "stop", 846 filt->addr, filt->size); 847 848 if (filt->start) 849 hit_filter = true; 850 else 851 hit_tracestop = true; 852 } 853 854 if (!hit_tracestop && !hit_filter) 855 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n", 856 ip, offset, filename ? filename : "[kernel]"); 857 858 return hit_tracestop || (have_filter && !hit_filter); 859 } 860 861 static int __intel_pt_pgd_ip(uint64_t ip, void *data) 862 { 863 struct intel_pt_queue *ptq = data; 864 struct thread *thread; 865 struct addr_location al; 866 u8 cpumode; 867 u64 offset; 868 869 if (ptq->state->to_nr) { 870 if (intel_pt_guest_kernel_ip(ip)) 871 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL); 872 /* No support for decoding guest user space */ 873 return -EINVAL; 874 } else if (ip >= ptq->pt->kernel_start) { 875 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL); 876 } 877 878 cpumode = PERF_RECORD_MISC_USER; 879 880 thread = ptq->thread; 881 if (!thread) 882 return -EINVAL; 883 884 if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso) 885 return -EINVAL; 886 887 offset = al.map->map_ip(al.map, ip); 888 889 return intel_pt_match_pgd_ip(ptq->pt, ip, offset, 890 al.map->dso->long_name); 891 } 892 893 static bool intel_pt_pgd_ip(uint64_t ip, void *data) 894 { 895 return __intel_pt_pgd_ip(ip, data) > 0; 896 } 897 898 static bool intel_pt_get_config(struct intel_pt *pt, 899 struct perf_event_attr *attr, u64 *config) 900 { 901 if (attr->type == pt->pmu_type) { 902 if (config) 903 *config = attr->config; 904 return true; 905 } 906 907 return false; 908 } 909 910 static bool intel_pt_exclude_kernel(struct intel_pt *pt) 911 { 912 struct evsel *evsel; 913 914 evlist__for_each_entry(pt->session->evlist, evsel) { 915 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) && 916 !evsel->core.attr.exclude_kernel) 917 return false; 918 } 919 return true; 920 } 921 922 static bool intel_pt_return_compression(struct intel_pt *pt) 923 { 924 struct evsel *evsel; 925 u64 config; 926 927 if (!pt->noretcomp_bit) 928 return true; 929 930 evlist__for_each_entry(pt->session->evlist, evsel) { 931 if (intel_pt_get_config(pt, &evsel->core.attr, &config) && 932 (config & pt->noretcomp_bit)) 933 return false; 934 } 935 return true; 936 } 937 938 static bool intel_pt_branch_enable(struct intel_pt *pt) 939 { 940 struct evsel *evsel; 941 u64 config; 942 943 
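	/*
	 * Branch tracing is off only if the event was configured with the
	 * pass-through bit (config bit 0) set and the branch-enable bit
	 * (config bit 13, i.e. 0x2000) clear.
	 */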
evlist__for_each_entry(pt->session->evlist, evsel) { 944 if (intel_pt_get_config(pt, &evsel->core.attr, &config) && 945 (config & 1) && !(config & 0x2000)) 946 return false; 947 } 948 return true; 949 } 950 951 static unsigned int intel_pt_mtc_period(struct intel_pt *pt) 952 { 953 struct evsel *evsel; 954 unsigned int shift; 955 u64 config; 956 957 if (!pt->mtc_freq_bits) 958 return 0; 959 960 for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++) 961 config >>= 1; 962 963 evlist__for_each_entry(pt->session->evlist, evsel) { 964 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) 965 return (config & pt->mtc_freq_bits) >> shift; 966 } 967 return 0; 968 } 969 970 static bool intel_pt_timeless_decoding(struct intel_pt *pt) 971 { 972 struct evsel *evsel; 973 bool timeless_decoding = true; 974 u64 config; 975 976 if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding) 977 return true; 978 979 evlist__for_each_entry(pt->session->evlist, evsel) { 980 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 981 return true; 982 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) { 983 if (config & pt->tsc_bit) 984 timeless_decoding = false; 985 else 986 return true; 987 } 988 } 989 return timeless_decoding; 990 } 991 992 static bool intel_pt_tracing_kernel(struct intel_pt *pt) 993 { 994 struct evsel *evsel; 995 996 evlist__for_each_entry(pt->session->evlist, evsel) { 997 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) && 998 !evsel->core.attr.exclude_kernel) 999 return true; 1000 } 1001 return false; 1002 } 1003 1004 static bool intel_pt_have_tsc(struct intel_pt *pt) 1005 { 1006 struct evsel *evsel; 1007 bool have_tsc = false; 1008 u64 config; 1009 1010 if (!pt->tsc_bit) 1011 return false; 1012 1013 evlist__for_each_entry(pt->session->evlist, evsel) { 1014 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) { 1015 if (config & pt->tsc_bit) 1016 have_tsc = true; 1017 else 1018 return false; 1019 } 1020 } 1021 return have_tsc; 1022 } 1023 1024 static bool intel_pt_have_mtc(struct intel_pt *pt) 1025 { 1026 struct evsel *evsel; 1027 u64 config; 1028 1029 evlist__for_each_entry(pt->session->evlist, evsel) { 1030 if (intel_pt_get_config(pt, &evsel->core.attr, &config) && 1031 (config & pt->mtc_bit)) 1032 return true; 1033 } 1034 return false; 1035 } 1036 1037 static bool intel_pt_sampling_mode(struct intel_pt *pt) 1038 { 1039 struct evsel *evsel; 1040 1041 evlist__for_each_entry(pt->session->evlist, evsel) { 1042 if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) && 1043 evsel->core.attr.aux_sample_size) 1044 return true; 1045 } 1046 return false; 1047 } 1048 1049 static u64 intel_pt_ctl(struct intel_pt *pt) 1050 { 1051 struct evsel *evsel; 1052 u64 config; 1053 1054 evlist__for_each_entry(pt->session->evlist, evsel) { 1055 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) 1056 return config; 1057 } 1058 return 0; 1059 } 1060 1061 static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns) 1062 { 1063 u64 quot, rem; 1064 1065 quot = ns / pt->tc.time_mult; 1066 rem = ns % pt->tc.time_mult; 1067 return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) / 1068 pt->tc.time_mult; 1069 } 1070 1071 static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt) 1072 { 1073 size_t sz = sizeof(struct ip_callchain); 1074 1075 /* Add 1 to callchain_sz for callchain context */ 1076 sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64); 1077 return zalloc(sz); 1078 } 1079 1080 static int intel_pt_callchain_init(struct intel_pt *pt) 
1081 { 1082 struct evsel *evsel; 1083 1084 evlist__for_each_entry(pt->session->evlist, evsel) { 1085 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN)) 1086 evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN; 1087 } 1088 1089 pt->chain = intel_pt_alloc_chain(pt); 1090 if (!pt->chain) 1091 return -ENOMEM; 1092 1093 return 0; 1094 } 1095 1096 static void intel_pt_add_callchain(struct intel_pt *pt, 1097 struct perf_sample *sample) 1098 { 1099 struct thread *thread = machine__findnew_thread(pt->machine, 1100 sample->pid, 1101 sample->tid); 1102 1103 thread_stack__sample_late(thread, sample->cpu, pt->chain, 1104 pt->synth_opts.callchain_sz + 1, sample->ip, 1105 pt->kernel_start); 1106 1107 sample->callchain = pt->chain; 1108 } 1109 1110 static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt) 1111 { 1112 size_t sz = sizeof(struct branch_stack); 1113 1114 sz += entry_cnt * sizeof(struct branch_entry); 1115 return zalloc(sz); 1116 } 1117 1118 static int intel_pt_br_stack_init(struct intel_pt *pt) 1119 { 1120 struct evsel *evsel; 1121 1122 evlist__for_each_entry(pt->session->evlist, evsel) { 1123 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK)) 1124 evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK; 1125 } 1126 1127 pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz); 1128 if (!pt->br_stack) 1129 return -ENOMEM; 1130 1131 return 0; 1132 } 1133 1134 static void intel_pt_add_br_stack(struct intel_pt *pt, 1135 struct perf_sample *sample) 1136 { 1137 struct thread *thread = machine__findnew_thread(pt->machine, 1138 sample->pid, 1139 sample->tid); 1140 1141 thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack, 1142 pt->br_stack_sz, sample->ip, 1143 pt->kernel_start); 1144 1145 sample->branch_stack = pt->br_stack; 1146 } 1147 1148 /* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */ 1149 #define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U) 1150 1151 static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt, 1152 unsigned int queue_nr) 1153 { 1154 struct intel_pt_params params = { .get_trace = 0, }; 1155 struct perf_env *env = pt->machine->env; 1156 struct intel_pt_queue *ptq; 1157 1158 ptq = zalloc(sizeof(struct intel_pt_queue)); 1159 if (!ptq) 1160 return NULL; 1161 1162 if (pt->synth_opts.callchain) { 1163 ptq->chain = intel_pt_alloc_chain(pt); 1164 if (!ptq->chain) 1165 goto out_free; 1166 } 1167 1168 if (pt->synth_opts.last_branch || pt->synth_opts.other_events) { 1169 unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz); 1170 1171 ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt); 1172 if (!ptq->last_branch) 1173 goto out_free; 1174 } 1175 1176 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE); 1177 if (!ptq->event_buf) 1178 goto out_free; 1179 1180 ptq->pt = pt; 1181 ptq->queue_nr = queue_nr; 1182 ptq->exclude_kernel = intel_pt_exclude_kernel(pt); 1183 ptq->pid = -1; 1184 ptq->tid = -1; 1185 ptq->cpu = -1; 1186 ptq->next_tid = -1; 1187 1188 params.get_trace = intel_pt_get_trace; 1189 params.walk_insn = intel_pt_walk_next_insn; 1190 params.lookahead = intel_pt_lookahead; 1191 params.findnew_vmcs_info = intel_pt_findnew_vmcs_info; 1192 params.data = ptq; 1193 params.return_compression = intel_pt_return_compression(pt); 1194 params.branch_enable = intel_pt_branch_enable(pt); 1195 params.ctl = intel_pt_ctl(pt); 1196 params.max_non_turbo_ratio = pt->max_non_turbo_ratio; 1197 params.mtc_period = intel_pt_mtc_period(pt); 1198 params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n; 1199 params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d; 1200 
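	/*
	 * tsc_ctc_ratio_{n,d} is the TSC to CTC (crystal clock) ratio,
	 * typically reported via CPUID leaf 0x15 and recorded in the auxtrace
	 * info; the decoder uses it to convert CTC/MTC timing packets to TSC.
	 */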
params.quick = pt->synth_opts.quick; 1201 params.vm_time_correlation = pt->synth_opts.vm_time_correlation; 1202 params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run; 1203 params.first_timestamp = pt->first_timestamp; 1204 params.max_loops = pt->max_loops; 1205 1206 if (pt->filts.cnt > 0) 1207 params.pgd_ip = intel_pt_pgd_ip; 1208 1209 if (pt->synth_opts.instructions) { 1210 if (pt->synth_opts.period) { 1211 switch (pt->synth_opts.period_type) { 1212 case PERF_ITRACE_PERIOD_INSTRUCTIONS: 1213 params.period_type = 1214 INTEL_PT_PERIOD_INSTRUCTIONS; 1215 params.period = pt->synth_opts.period; 1216 break; 1217 case PERF_ITRACE_PERIOD_TICKS: 1218 params.period_type = INTEL_PT_PERIOD_TICKS; 1219 params.period = pt->synth_opts.period; 1220 break; 1221 case PERF_ITRACE_PERIOD_NANOSECS: 1222 params.period_type = INTEL_PT_PERIOD_TICKS; 1223 params.period = intel_pt_ns_to_ticks(pt, 1224 pt->synth_opts.period); 1225 break; 1226 default: 1227 break; 1228 } 1229 } 1230 1231 if (!params.period) { 1232 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS; 1233 params.period = 1; 1234 } 1235 } 1236 1237 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18)) 1238 params.flags |= INTEL_PT_FUP_WITH_NLIP; 1239 1240 ptq->decoder = intel_pt_decoder_new(¶ms); 1241 if (!ptq->decoder) 1242 goto out_free; 1243 1244 return ptq; 1245 1246 out_free: 1247 zfree(&ptq->event_buf); 1248 zfree(&ptq->last_branch); 1249 zfree(&ptq->chain); 1250 free(ptq); 1251 return NULL; 1252 } 1253 1254 static void intel_pt_free_queue(void *priv) 1255 { 1256 struct intel_pt_queue *ptq = priv; 1257 1258 if (!ptq) 1259 return; 1260 thread__zput(ptq->thread); 1261 thread__zput(ptq->unknown_guest_thread); 1262 intel_pt_decoder_free(ptq->decoder); 1263 zfree(&ptq->event_buf); 1264 zfree(&ptq->last_branch); 1265 zfree(&ptq->chain); 1266 free(ptq); 1267 } 1268 1269 static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp) 1270 { 1271 unsigned int i; 1272 1273 pt->first_timestamp = timestamp; 1274 1275 for (i = 0; i < pt->queues.nr_queues; i++) { 1276 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 1277 struct intel_pt_queue *ptq = queue->priv; 1278 1279 if (ptq && ptq->decoder) 1280 intel_pt_set_first_timestamp(ptq->decoder, timestamp); 1281 } 1282 } 1283 1284 static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt, 1285 struct auxtrace_queue *queue) 1286 { 1287 struct intel_pt_queue *ptq = queue->priv; 1288 1289 if (queue->tid == -1 || pt->have_sched_switch) { 1290 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu); 1291 if (ptq->tid == -1) 1292 ptq->pid = -1; 1293 thread__zput(ptq->thread); 1294 } 1295 1296 if (!ptq->thread && ptq->tid != -1) 1297 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid); 1298 1299 if (ptq->thread) { 1300 ptq->pid = ptq->thread->pid_; 1301 if (queue->cpu == -1) 1302 ptq->cpu = ptq->thread->cpu; 1303 } 1304 } 1305 1306 static void intel_pt_sample_flags(struct intel_pt_queue *ptq) 1307 { 1308 ptq->insn_len = 0; 1309 if (ptq->state->flags & INTEL_PT_ABORT_TX) { 1310 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT; 1311 } else if (ptq->state->flags & INTEL_PT_ASYNC) { 1312 if (!ptq->state->to_ip) 1313 ptq->flags = PERF_IP_FLAG_BRANCH | 1314 PERF_IP_FLAG_TRACE_END; 1315 else if (ptq->state->from_nr && !ptq->state->to_nr) 1316 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | 1317 PERF_IP_FLAG_VMEXIT; 1318 else 1319 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | 1320 PERF_IP_FLAG_ASYNC | 1321 PERF_IP_FLAG_INTERRUPT; 1322 } else { 1323 if 
(ptq->state->from_ip) 1324 ptq->flags = intel_pt_insn_type(ptq->state->insn_op); 1325 else 1326 ptq->flags = PERF_IP_FLAG_BRANCH | 1327 PERF_IP_FLAG_TRACE_BEGIN; 1328 if (ptq->state->flags & INTEL_PT_IN_TX) 1329 ptq->flags |= PERF_IP_FLAG_IN_TX; 1330 ptq->insn_len = ptq->state->insn_len; 1331 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ); 1332 } 1333 1334 if (ptq->state->type & INTEL_PT_TRACE_BEGIN) 1335 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN; 1336 if (ptq->state->type & INTEL_PT_TRACE_END) 1337 ptq->flags |= PERF_IP_FLAG_TRACE_END; 1338 } 1339 1340 static void intel_pt_setup_time_range(struct intel_pt *pt, 1341 struct intel_pt_queue *ptq) 1342 { 1343 if (!pt->range_cnt) 1344 return; 1345 1346 ptq->sel_timestamp = pt->time_ranges[0].start; 1347 ptq->sel_idx = 0; 1348 1349 if (ptq->sel_timestamp) { 1350 ptq->sel_start = true; 1351 } else { 1352 ptq->sel_timestamp = pt->time_ranges[0].end; 1353 ptq->sel_start = false; 1354 } 1355 } 1356 1357 static int intel_pt_setup_queue(struct intel_pt *pt, 1358 struct auxtrace_queue *queue, 1359 unsigned int queue_nr) 1360 { 1361 struct intel_pt_queue *ptq = queue->priv; 1362 1363 if (list_empty(&queue->head)) 1364 return 0; 1365 1366 if (!ptq) { 1367 ptq = intel_pt_alloc_queue(pt, queue_nr); 1368 if (!ptq) 1369 return -ENOMEM; 1370 queue->priv = ptq; 1371 1372 if (queue->cpu != -1) 1373 ptq->cpu = queue->cpu; 1374 ptq->tid = queue->tid; 1375 1376 ptq->cbr_seen = UINT_MAX; 1377 1378 if (pt->sampling_mode && !pt->snapshot_mode && 1379 pt->timeless_decoding) 1380 ptq->step_through_buffers = true; 1381 1382 ptq->sync_switch = pt->sync_switch; 1383 1384 intel_pt_setup_time_range(pt, ptq); 1385 } 1386 1387 if (!ptq->on_heap && 1388 (!ptq->sync_switch || 1389 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) { 1390 const struct intel_pt_state *state; 1391 int ret; 1392 1393 if (pt->timeless_decoding) 1394 return 0; 1395 1396 intel_pt_log("queue %u getting timestamp\n", queue_nr); 1397 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n", 1398 queue_nr, ptq->cpu, ptq->pid, ptq->tid); 1399 1400 if (ptq->sel_start && ptq->sel_timestamp) { 1401 ret = intel_pt_fast_forward(ptq->decoder, 1402 ptq->sel_timestamp); 1403 if (ret) 1404 return ret; 1405 } 1406 1407 while (1) { 1408 state = intel_pt_decode(ptq->decoder); 1409 if (state->err) { 1410 if (state->err == INTEL_PT_ERR_NODATA) { 1411 intel_pt_log("queue %u has no timestamp\n", 1412 queue_nr); 1413 return 0; 1414 } 1415 continue; 1416 } 1417 if (state->timestamp) 1418 break; 1419 } 1420 1421 ptq->timestamp = state->timestamp; 1422 intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n", 1423 queue_nr, ptq->timestamp); 1424 ptq->state = state; 1425 ptq->have_sample = true; 1426 if (ptq->sel_start && ptq->sel_timestamp && 1427 ptq->timestamp < ptq->sel_timestamp) 1428 ptq->have_sample = false; 1429 intel_pt_sample_flags(ptq); 1430 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp); 1431 if (ret) 1432 return ret; 1433 ptq->on_heap = true; 1434 } 1435 1436 return 0; 1437 } 1438 1439 static int intel_pt_setup_queues(struct intel_pt *pt) 1440 { 1441 unsigned int i; 1442 int ret; 1443 1444 for (i = 0; i < pt->queues.nr_queues; i++) { 1445 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i); 1446 if (ret) 1447 return ret; 1448 } 1449 return 0; 1450 } 1451 1452 static inline bool intel_pt_skip_event(struct intel_pt *pt) 1453 { 1454 return pt->synth_opts.initial_skip && 1455 pt->num_events++ < pt->synth_opts.initial_skip; 1456 } 1457 1458 /* 1459 * Cannot count CBR as skipped because 
it won't go away until cbr == cbr_seen. 1460 * Also ensure CBR is first non-skipped event by allowing for 4 more samples 1461 * from this decoder state. 1462 */ 1463 static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt) 1464 { 1465 return pt->synth_opts.initial_skip && 1466 pt->num_events + 4 < pt->synth_opts.initial_skip; 1467 } 1468 1469 static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq, 1470 union perf_event *event, 1471 struct perf_sample *sample) 1472 { 1473 event->sample.header.type = PERF_RECORD_SAMPLE; 1474 event->sample.header.size = sizeof(struct perf_event_header); 1475 1476 sample->pid = ptq->pid; 1477 sample->tid = ptq->tid; 1478 sample->cpu = ptq->cpu; 1479 sample->insn_len = ptq->insn_len; 1480 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ); 1481 } 1482 1483 static void intel_pt_prep_b_sample(struct intel_pt *pt, 1484 struct intel_pt_queue *ptq, 1485 union perf_event *event, 1486 struct perf_sample *sample) 1487 { 1488 intel_pt_prep_a_sample(ptq, event, sample); 1489 1490 if (!pt->timeless_decoding) 1491 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc); 1492 1493 sample->ip = ptq->state->from_ip; 1494 sample->addr = ptq->state->to_ip; 1495 sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr); 1496 sample->period = 1; 1497 sample->flags = ptq->flags; 1498 1499 event->sample.header.misc = sample->cpumode; 1500 } 1501 1502 static int intel_pt_inject_event(union perf_event *event, 1503 struct perf_sample *sample, u64 type) 1504 { 1505 event->header.size = perf_event__sample_event_size(sample, type, 0); 1506 return perf_event__synthesize_sample(event, type, 0, sample); 1507 } 1508 1509 static inline int intel_pt_opt_inject(struct intel_pt *pt, 1510 union perf_event *event, 1511 struct perf_sample *sample, u64 type) 1512 { 1513 if (!pt->synth_opts.inject) 1514 return 0; 1515 1516 return intel_pt_inject_event(event, sample, type); 1517 } 1518 1519 static int intel_pt_deliver_synth_event(struct intel_pt *pt, 1520 union perf_event *event, 1521 struct perf_sample *sample, u64 type) 1522 { 1523 int ret; 1524 1525 ret = intel_pt_opt_inject(pt, event, sample, type); 1526 if (ret) 1527 return ret; 1528 1529 ret = perf_session__deliver_synth_event(pt->session, event, sample); 1530 if (ret) 1531 pr_err("Intel PT: failed to deliver event, error %d\n", ret); 1532 1533 return ret; 1534 } 1535 1536 static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq) 1537 { 1538 struct intel_pt *pt = ptq->pt; 1539 union perf_event *event = ptq->event_buf; 1540 struct perf_sample sample = { .ip = 0, }; 1541 struct dummy_branch_stack { 1542 u64 nr; 1543 u64 hw_idx; 1544 struct branch_entry entries; 1545 } dummy_bs; 1546 1547 if (pt->branches_filter && !(pt->branches_filter & ptq->flags)) 1548 return 0; 1549 1550 if (intel_pt_skip_event(pt)) 1551 return 0; 1552 1553 intel_pt_prep_b_sample(pt, ptq, event, &sample); 1554 1555 sample.id = ptq->pt->branches_id; 1556 sample.stream_id = ptq->pt->branches_id; 1557 1558 /* 1559 * perf report cannot handle events without a branch stack when using 1560 * SORT_MODE__BRANCH so make a dummy one. 
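	 * The dummy stack holds a single entry built from the sample's own
	 * from/to addresses.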
1561 */ 1562 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) { 1563 dummy_bs = (struct dummy_branch_stack){ 1564 .nr = 1, 1565 .hw_idx = -1ULL, 1566 .entries = { 1567 .from = sample.ip, 1568 .to = sample.addr, 1569 }, 1570 }; 1571 sample.branch_stack = (struct branch_stack *)&dummy_bs; 1572 } 1573 1574 if (ptq->state->flags & INTEL_PT_SAMPLE_IPC) 1575 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt; 1576 if (sample.cyc_cnt) { 1577 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt; 1578 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt; 1579 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt; 1580 } 1581 1582 return intel_pt_deliver_synth_event(pt, event, &sample, 1583 pt->branches_sample_type); 1584 } 1585 1586 static void intel_pt_prep_sample(struct intel_pt *pt, 1587 struct intel_pt_queue *ptq, 1588 union perf_event *event, 1589 struct perf_sample *sample) 1590 { 1591 intel_pt_prep_b_sample(pt, ptq, event, sample); 1592 1593 if (pt->synth_opts.callchain) { 1594 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain, 1595 pt->synth_opts.callchain_sz + 1, 1596 sample->ip, pt->kernel_start); 1597 sample->callchain = ptq->chain; 1598 } 1599 1600 if (pt->synth_opts.last_branch) { 1601 thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch, 1602 pt->br_stack_sz); 1603 sample->branch_stack = ptq->last_branch; 1604 } 1605 } 1606 1607 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq) 1608 { 1609 struct intel_pt *pt = ptq->pt; 1610 union perf_event *event = ptq->event_buf; 1611 struct perf_sample sample = { .ip = 0, }; 1612 1613 if (intel_pt_skip_event(pt)) 1614 return 0; 1615 1616 intel_pt_prep_sample(pt, ptq, event, &sample); 1617 1618 sample.id = ptq->pt->instructions_id; 1619 sample.stream_id = ptq->pt->instructions_id; 1620 if (pt->synth_opts.quick) 1621 sample.period = 1; 1622 else 1623 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt; 1624 1625 if (ptq->state->flags & INTEL_PT_SAMPLE_IPC) 1626 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt; 1627 if (sample.cyc_cnt) { 1628 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt; 1629 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt; 1630 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt; 1631 } 1632 1633 ptq->last_insn_cnt = ptq->state->tot_insn_cnt; 1634 1635 return intel_pt_deliver_synth_event(pt, event, &sample, 1636 pt->instructions_sample_type); 1637 } 1638 1639 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq) 1640 { 1641 struct intel_pt *pt = ptq->pt; 1642 union perf_event *event = ptq->event_buf; 1643 struct perf_sample sample = { .ip = 0, }; 1644 1645 if (intel_pt_skip_event(pt)) 1646 return 0; 1647 1648 intel_pt_prep_sample(pt, ptq, event, &sample); 1649 1650 sample.id = ptq->pt->transactions_id; 1651 sample.stream_id = ptq->pt->transactions_id; 1652 1653 return intel_pt_deliver_synth_event(pt, event, &sample, 1654 pt->transactions_sample_type); 1655 } 1656 1657 static void intel_pt_prep_p_sample(struct intel_pt *pt, 1658 struct intel_pt_queue *ptq, 1659 union perf_event *event, 1660 struct perf_sample *sample) 1661 { 1662 intel_pt_prep_sample(pt, ptq, event, sample); 1663 1664 /* 1665 * Zero IP is used to mean "trace start" but that is not the case for 1666 * power or PTWRITE events with no IP, so clear the flags. 
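	 * (The flags were copied from the branch state by
	 * intel_pt_prep_b_sample() and describe a branch, which does not
	 * apply to an IP-less event.)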
1667 */ 1668 if (!sample->ip) 1669 sample->flags = 0; 1670 } 1671 1672 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq) 1673 { 1674 struct intel_pt *pt = ptq->pt; 1675 union perf_event *event = ptq->event_buf; 1676 struct perf_sample sample = { .ip = 0, }; 1677 struct perf_synth_intel_ptwrite raw; 1678 1679 if (intel_pt_skip_event(pt)) 1680 return 0; 1681 1682 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1683 1684 sample.id = ptq->pt->ptwrites_id; 1685 sample.stream_id = ptq->pt->ptwrites_id; 1686 1687 raw.flags = 0; 1688 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); 1689 raw.payload = cpu_to_le64(ptq->state->ptw_payload); 1690 1691 sample.raw_size = perf_synth__raw_size(raw); 1692 sample.raw_data = perf_synth__raw_data(&raw); 1693 1694 return intel_pt_deliver_synth_event(pt, event, &sample, 1695 pt->ptwrites_sample_type); 1696 } 1697 1698 static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq) 1699 { 1700 struct intel_pt *pt = ptq->pt; 1701 union perf_event *event = ptq->event_buf; 1702 struct perf_sample sample = { .ip = 0, }; 1703 struct perf_synth_intel_cbr raw; 1704 u32 flags; 1705 1706 if (intel_pt_skip_cbr_event(pt)) 1707 return 0; 1708 1709 ptq->cbr_seen = ptq->state->cbr; 1710 1711 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1712 1713 sample.id = ptq->pt->cbr_id; 1714 sample.stream_id = ptq->pt->cbr_id; 1715 1716 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16); 1717 raw.flags = cpu_to_le32(flags); 1718 raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz); 1719 raw.reserved3 = 0; 1720 1721 sample.raw_size = perf_synth__raw_size(raw); 1722 sample.raw_data = perf_synth__raw_data(&raw); 1723 1724 return intel_pt_deliver_synth_event(pt, event, &sample, 1725 pt->pwr_events_sample_type); 1726 } 1727 1728 static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq) 1729 { 1730 struct intel_pt *pt = ptq->pt; 1731 union perf_event *event = ptq->event_buf; 1732 struct perf_sample sample = { .ip = 0, }; 1733 struct perf_synth_intel_psb raw; 1734 1735 if (intel_pt_skip_event(pt)) 1736 return 0; 1737 1738 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1739 1740 sample.id = ptq->pt->psb_id; 1741 sample.stream_id = ptq->pt->psb_id; 1742 sample.flags = 0; 1743 1744 raw.reserved = 0; 1745 raw.offset = ptq->state->psb_offset; 1746 1747 sample.raw_size = perf_synth__raw_size(raw); 1748 sample.raw_data = perf_synth__raw_data(&raw); 1749 1750 return intel_pt_deliver_synth_event(pt, event, &sample, 1751 pt->pwr_events_sample_type); 1752 } 1753 1754 static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq) 1755 { 1756 struct intel_pt *pt = ptq->pt; 1757 union perf_event *event = ptq->event_buf; 1758 struct perf_sample sample = { .ip = 0, }; 1759 struct perf_synth_intel_mwait raw; 1760 1761 if (intel_pt_skip_event(pt)) 1762 return 0; 1763 1764 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1765 1766 sample.id = ptq->pt->mwait_id; 1767 sample.stream_id = ptq->pt->mwait_id; 1768 1769 raw.reserved = 0; 1770 raw.payload = cpu_to_le64(ptq->state->mwait_payload); 1771 1772 sample.raw_size = perf_synth__raw_size(raw); 1773 sample.raw_data = perf_synth__raw_data(&raw); 1774 1775 return intel_pt_deliver_synth_event(pt, event, &sample, 1776 pt->pwr_events_sample_type); 1777 } 1778 1779 static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq) 1780 { 1781 struct intel_pt *pt = ptq->pt; 1782 union perf_event *event = ptq->event_buf; 1783 struct perf_sample sample = { .ip = 0, }; 1784 struct perf_synth_intel_pwre raw; 1785 1786 if 
(intel_pt_skip_event(pt)) 1787 return 0; 1788 1789 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1790 1791 sample.id = ptq->pt->pwre_id; 1792 sample.stream_id = ptq->pt->pwre_id; 1793 1794 raw.reserved = 0; 1795 raw.payload = cpu_to_le64(ptq->state->pwre_payload); 1796 1797 sample.raw_size = perf_synth__raw_size(raw); 1798 sample.raw_data = perf_synth__raw_data(&raw); 1799 1800 return intel_pt_deliver_synth_event(pt, event, &sample, 1801 pt->pwr_events_sample_type); 1802 } 1803 1804 static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq) 1805 { 1806 struct intel_pt *pt = ptq->pt; 1807 union perf_event *event = ptq->event_buf; 1808 struct perf_sample sample = { .ip = 0, }; 1809 struct perf_synth_intel_exstop raw; 1810 1811 if (intel_pt_skip_event(pt)) 1812 return 0; 1813 1814 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1815 1816 sample.id = ptq->pt->exstop_id; 1817 sample.stream_id = ptq->pt->exstop_id; 1818 1819 raw.flags = 0; 1820 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); 1821 1822 sample.raw_size = perf_synth__raw_size(raw); 1823 sample.raw_data = perf_synth__raw_data(&raw); 1824 1825 return intel_pt_deliver_synth_event(pt, event, &sample, 1826 pt->pwr_events_sample_type); 1827 } 1828 1829 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq) 1830 { 1831 struct intel_pt *pt = ptq->pt; 1832 union perf_event *event = ptq->event_buf; 1833 struct perf_sample sample = { .ip = 0, }; 1834 struct perf_synth_intel_pwrx raw; 1835 1836 if (intel_pt_skip_event(pt)) 1837 return 0; 1838 1839 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1840 1841 sample.id = ptq->pt->pwrx_id; 1842 sample.stream_id = ptq->pt->pwrx_id; 1843 1844 raw.reserved = 0; 1845 raw.payload = cpu_to_le64(ptq->state->pwrx_payload); 1846 1847 sample.raw_size = perf_synth__raw_size(raw); 1848 sample.raw_data = perf_synth__raw_data(&raw); 1849 1850 return intel_pt_deliver_synth_event(pt, event, &sample, 1851 pt->pwr_events_sample_type); 1852 } 1853 1854 /* 1855 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer 1856 * intel_pt_add_gp_regs(). 1857 */ 1858 static const int pebs_gp_regs[] = { 1859 [PERF_REG_X86_FLAGS] = 1, 1860 [PERF_REG_X86_IP] = 2, 1861 [PERF_REG_X86_AX] = 3, 1862 [PERF_REG_X86_CX] = 4, 1863 [PERF_REG_X86_DX] = 5, 1864 [PERF_REG_X86_BX] = 6, 1865 [PERF_REG_X86_SP] = 7, 1866 [PERF_REG_X86_BP] = 8, 1867 [PERF_REG_X86_SI] = 9, 1868 [PERF_REG_X86_DI] = 10, 1869 [PERF_REG_X86_R8] = 11, 1870 [PERF_REG_X86_R9] = 12, 1871 [PERF_REG_X86_R10] = 13, 1872 [PERF_REG_X86_R11] = 14, 1873 [PERF_REG_X86_R12] = 15, 1874 [PERF_REG_X86_R13] = 16, 1875 [PERF_REG_X86_R14] = 17, 1876 [PERF_REG_X86_R15] = 18, 1877 }; 1878 1879 static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos, 1880 const struct intel_pt_blk_items *items, 1881 u64 regs_mask) 1882 { 1883 const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS]; 1884 u32 mask = items->mask[INTEL_PT_GP_REGS_POS]; 1885 u32 bit; 1886 int i; 1887 1888 for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) { 1889 /* Get the PEBS gp_regs array index */ 1890 int n = pebs_gp_regs[i] - 1; 1891 1892 if (n < 0) 1893 continue; 1894 /* 1895 * Add only registers that were requested (i.e. 'regs_mask') and 1896 * that were provided (i.e. 'mask'), and update the resulting 1897 * mask (i.e. 'intr_regs->mask') accordingly. 
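		 * ('bit' is the perf_regs bit for register i, while 'n' is
		 * that register's position within the PEBS gp_regs block.)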
1898 */ 1899 if (mask & 1 << n && regs_mask & bit) { 1900 intr_regs->mask |= bit; 1901 *pos++ = gp_regs[n]; 1902 } 1903 } 1904 1905 return pos; 1906 } 1907 1908 #ifndef PERF_REG_X86_XMM0 1909 #define PERF_REG_X86_XMM0 32 1910 #endif 1911 1912 static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos, 1913 const struct intel_pt_blk_items *items, 1914 u64 regs_mask) 1915 { 1916 u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0); 1917 const u64 *xmm = items->xmm; 1918 1919 /* 1920 * If there are any XMM registers, then there should be all of them. 1921 * Nevertheless, follow the logic to add only registers that were 1922 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'), 1923 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly. 1924 */ 1925 intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0; 1926 1927 for (; mask; mask >>= 1, xmm++) { 1928 if (mask & 1) 1929 *pos++ = *xmm; 1930 } 1931 } 1932 1933 #define LBR_INFO_MISPRED (1ULL << 63) 1934 #define LBR_INFO_IN_TX (1ULL << 62) 1935 #define LBR_INFO_ABORT (1ULL << 61) 1936 #define LBR_INFO_CYCLES 0xffff 1937 1938 /* Refer kernel's intel_pmu_store_pebs_lbrs() */ 1939 static u64 intel_pt_lbr_flags(u64 info) 1940 { 1941 union { 1942 struct branch_flags flags; 1943 u64 result; 1944 } u; 1945 1946 u.result = 0; 1947 u.flags.mispred = !!(info & LBR_INFO_MISPRED); 1948 u.flags.predicted = !(info & LBR_INFO_MISPRED); 1949 u.flags.in_tx = !!(info & LBR_INFO_IN_TX); 1950 u.flags.abort = !!(info & LBR_INFO_ABORT); 1951 u.flags.cycles = info & LBR_INFO_CYCLES; 1952 1953 return u.result; 1954 } 1955 1956 static void intel_pt_add_lbrs(struct branch_stack *br_stack, 1957 const struct intel_pt_blk_items *items) 1958 { 1959 u64 *to; 1960 int i; 1961 1962 br_stack->nr = 0; 1963 1964 to = &br_stack->entries[0].from; 1965 1966 for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) { 1967 u32 mask = items->mask[i]; 1968 const u64 *from = items->val[i]; 1969 1970 for (; mask; mask >>= 3, from += 3) { 1971 if ((mask & 7) == 7) { 1972 *to++ = from[0]; 1973 *to++ = from[1]; 1974 *to++ = intel_pt_lbr_flags(from[2]); 1975 br_stack->nr += 1; 1976 } 1977 } 1978 } 1979 } 1980 1981 static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq) 1982 { 1983 const struct intel_pt_blk_items *items = &ptq->state->items; 1984 struct perf_sample sample = { .ip = 0, }; 1985 union perf_event *event = ptq->event_buf; 1986 struct intel_pt *pt = ptq->pt; 1987 struct evsel *evsel = pt->pebs_evsel; 1988 u64 sample_type = evsel->core.attr.sample_type; 1989 u64 id = evsel->core.id[0]; 1990 u8 cpumode; 1991 u64 regs[8 * sizeof(sample.intr_regs.mask)]; 1992 1993 if (intel_pt_skip_event(pt)) 1994 return 0; 1995 1996 intel_pt_prep_a_sample(ptq, event, &sample); 1997 1998 sample.id = id; 1999 sample.stream_id = id; 2000 2001 if (!evsel->core.attr.freq) 2002 sample.period = evsel->core.attr.sample_period; 2003 2004 /* No support for non-zero CS base */ 2005 if (items->has_ip) 2006 sample.ip = items->ip; 2007 else if (items->has_rip) 2008 sample.ip = items->rip; 2009 else 2010 sample.ip = ptq->state->from_ip; 2011 2012 cpumode = intel_pt_cpumode(ptq, sample.ip, 0); 2013 2014 event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP; 2015 2016 sample.cpumode = cpumode; 2017 2018 if (sample_type & PERF_SAMPLE_TIME) { 2019 u64 timestamp = 0; 2020 2021 if (items->has_timestamp) 2022 timestamp = items->timestamp; 2023 else if (!pt->timeless_decoding) 2024 timestamp = ptq->timestamp; 2025 if (timestamp) 2026 sample.time = 
				tsc_to_perf_time(timestamp, &pt->tc);
	}

	if (sample_type & PERF_SAMPLE_CALLCHAIN &&
	    pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip,
				     pt->kernel_start);
		sample.callchain = ptq->chain;
	}

	if (sample_type & PERF_SAMPLE_REGS_INTR &&
	    (items->mask[INTEL_PT_GP_REGS_POS] ||
	     items->mask[INTEL_PT_XMM_POS])) {
		u64 regs_mask = evsel->core.attr.sample_regs_intr;
		u64 *pos;

		sample.intr_regs.abi = items->is_32_bit ?
				       PERF_SAMPLE_REGS_ABI_32 :
				       PERF_SAMPLE_REGS_ABI_64;
		sample.intr_regs.regs = regs;

		pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);

		intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		if (items->mask[INTEL_PT_LBR_0_POS] ||
		    items->mask[INTEL_PT_LBR_1_POS] ||
		    items->mask[INTEL_PT_LBR_2_POS]) {
			intel_pt_add_lbrs(ptq->last_branch, items);
		} else if (pt->synth_opts.last_branch) {
			thread_stack__br_sample(ptq->thread, ptq->cpu,
						ptq->last_branch,
						pt->br_stack_sz);
		} else {
			ptq->last_branch->nr = 0;
		}
		sample.branch_stack = ptq->last_branch;
	}

	if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
		sample.addr = items->mem_access_address;

	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
		/*
		 * Refer kernel's setup_pebs_adaptive_sample_data() and
		 * intel_hsw_weight().
		 */
		if (items->has_mem_access_latency) {
			u64 weight = items->mem_access_latency >> 32;

			/*
			 * Starting from SPR, the mem access latency field
			 * contains both cache latency [47:32] and instruction
			 * latency [15:0]. The cache latency is the same as the
			 * mem access latency on previous platforms.
			 *
			 * In practice, no memory access could last longer than
			 * 4G cycles, so latency >> 32 distinguishes the new
			 * format of the mem access latency field from the old.
			 */
			if (weight > 0) {
				sample.weight = weight & 0xffff;
				sample.ins_lat = items->mem_access_latency & 0xffff;
			} else
				sample.weight = items->mem_access_latency;
		}
		if (!sample.weight && items->has_tsx_aux_info) {
			/* Cycles last block */
			sample.weight = (u32)items->tsx_aux_info;
		}
	}

	if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
items->rax : 0; 2103 /* Refer kernel's intel_hsw_transaction() */ 2104 u64 txn = (u8)(items->tsx_aux_info >> 32); 2105 2106 /* For RTM XABORTs also log the abort code from AX */ 2107 if (txn & PERF_TXN_TRANSACTION && ax & 1) 2108 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT; 2109 sample.transaction = txn; 2110 } 2111 2112 return intel_pt_deliver_synth_event(pt, event, &sample, sample_type); 2113 } 2114 2115 static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu, 2116 pid_t pid, pid_t tid, u64 ip, u64 timestamp) 2117 { 2118 union perf_event event; 2119 char msg[MAX_AUXTRACE_ERROR_MSG]; 2120 int err; 2121 2122 if (pt->synth_opts.error_minus_flags) { 2123 if (code == INTEL_PT_ERR_OVR && 2124 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW) 2125 return 0; 2126 if (code == INTEL_PT_ERR_LOST && 2127 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST) 2128 return 0; 2129 } 2130 2131 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG); 2132 2133 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, 2134 code, cpu, pid, tid, ip, msg, timestamp); 2135 2136 err = perf_session__deliver_synth_event(pt->session, &event, NULL); 2137 if (err) 2138 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n", 2139 err); 2140 2141 return err; 2142 } 2143 2144 static int intel_ptq_synth_error(struct intel_pt_queue *ptq, 2145 const struct intel_pt_state *state) 2146 { 2147 struct intel_pt *pt = ptq->pt; 2148 u64 tm = ptq->timestamp; 2149 2150 tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc); 2151 2152 return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid, 2153 ptq->tid, state->from_ip, tm); 2154 } 2155 2156 static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq) 2157 { 2158 struct auxtrace_queue *queue; 2159 pid_t tid = ptq->next_tid; 2160 int err; 2161 2162 if (tid == -1) 2163 return 0; 2164 2165 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid); 2166 2167 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid); 2168 2169 queue = &pt->queues.queue_array[ptq->queue_nr]; 2170 intel_pt_set_pid_tid_cpu(pt, queue); 2171 2172 ptq->next_tid = -1; 2173 2174 return err; 2175 } 2176 2177 static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip) 2178 { 2179 struct intel_pt *pt = ptq->pt; 2180 2181 return ip == pt->switch_ip && 2182 (ptq->flags & PERF_IP_FLAG_BRANCH) && 2183 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC | 2184 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT)); 2185 } 2186 2187 #define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \ 2188 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT) 2189 2190 static int intel_pt_sample(struct intel_pt_queue *ptq) 2191 { 2192 const struct intel_pt_state *state = ptq->state; 2193 struct intel_pt *pt = ptq->pt; 2194 int err; 2195 2196 if (!ptq->have_sample) 2197 return 0; 2198 2199 ptq->have_sample = false; 2200 2201 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt; 2202 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt; 2203 2204 /* 2205 * Do PEBS first to allow for the possibility that the PEBS timestamp 2206 * precedes the current timestamp. 
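	 * (intel_pt_synth_pebs_sample() takes its time from the PEBS block's
	 * own items->timestamp when that is present, which can be earlier
	 * than ptq->timestamp.)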
2207 */ 2208 if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) { 2209 err = intel_pt_synth_pebs_sample(ptq); 2210 if (err) 2211 return err; 2212 } 2213 2214 if (pt->sample_pwr_events) { 2215 if (state->type & INTEL_PT_PSB_EVT) { 2216 err = intel_pt_synth_psb_sample(ptq); 2217 if (err) 2218 return err; 2219 } 2220 if (ptq->state->cbr != ptq->cbr_seen) { 2221 err = intel_pt_synth_cbr_sample(ptq); 2222 if (err) 2223 return err; 2224 } 2225 if (state->type & INTEL_PT_PWR_EVT) { 2226 if (state->type & INTEL_PT_MWAIT_OP) { 2227 err = intel_pt_synth_mwait_sample(ptq); 2228 if (err) 2229 return err; 2230 } 2231 if (state->type & INTEL_PT_PWR_ENTRY) { 2232 err = intel_pt_synth_pwre_sample(ptq); 2233 if (err) 2234 return err; 2235 } 2236 if (state->type & INTEL_PT_EX_STOP) { 2237 err = intel_pt_synth_exstop_sample(ptq); 2238 if (err) 2239 return err; 2240 } 2241 if (state->type & INTEL_PT_PWR_EXIT) { 2242 err = intel_pt_synth_pwrx_sample(ptq); 2243 if (err) 2244 return err; 2245 } 2246 } 2247 } 2248 2249 if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) { 2250 err = intel_pt_synth_instruction_sample(ptq); 2251 if (err) 2252 return err; 2253 } 2254 2255 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) { 2256 err = intel_pt_synth_transaction_sample(ptq); 2257 if (err) 2258 return err; 2259 } 2260 2261 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) { 2262 err = intel_pt_synth_ptwrite_sample(ptq); 2263 if (err) 2264 return err; 2265 } 2266 2267 if (!(state->type & INTEL_PT_BRANCH)) 2268 return 0; 2269 2270 if (pt->use_thread_stack) { 2271 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags, 2272 state->from_ip, state->to_ip, ptq->insn_len, 2273 state->trace_nr, pt->callstack, 2274 pt->br_stack_sz_plus, 2275 pt->mispred_all); 2276 } else { 2277 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr); 2278 } 2279 2280 if (pt->sample_branches) { 2281 if (state->from_nr != state->to_nr && 2282 state->from_ip && state->to_ip) { 2283 struct intel_pt_state *st = (struct intel_pt_state *)state; 2284 u64 to_ip = st->to_ip; 2285 u64 from_ip = st->from_ip; 2286 2287 /* 2288 * perf cannot handle having different machines for ip 2289 * and addr, so create 2 branches. 
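			 * For example, when a branch crosses machines
			 * (from_nr != to_nr, e.g. a transition between host
			 * and guest), it is synthesized first with to_ip
			 * zeroed and then again with from_ip zeroed, so each
			 * sample refers to a single machine only.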
2290 */ 2291 st->to_ip = 0; 2292 err = intel_pt_synth_branch_sample(ptq); 2293 if (err) 2294 return err; 2295 st->from_ip = 0; 2296 st->to_ip = to_ip; 2297 err = intel_pt_synth_branch_sample(ptq); 2298 st->from_ip = from_ip; 2299 } else { 2300 err = intel_pt_synth_branch_sample(ptq); 2301 } 2302 if (err) 2303 return err; 2304 } 2305 2306 if (!ptq->sync_switch) 2307 return 0; 2308 2309 if (intel_pt_is_switch_ip(ptq, state->to_ip)) { 2310 switch (ptq->switch_state) { 2311 case INTEL_PT_SS_NOT_TRACING: 2312 case INTEL_PT_SS_UNKNOWN: 2313 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 2314 err = intel_pt_next_tid(pt, ptq); 2315 if (err) 2316 return err; 2317 ptq->switch_state = INTEL_PT_SS_TRACING; 2318 break; 2319 default: 2320 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT; 2321 return 1; 2322 } 2323 } else if (!state->to_ip) { 2324 ptq->switch_state = INTEL_PT_SS_NOT_TRACING; 2325 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) { 2326 ptq->switch_state = INTEL_PT_SS_UNKNOWN; 2327 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN && 2328 state->to_ip == pt->ptss_ip && 2329 (ptq->flags & PERF_IP_FLAG_CALL)) { 2330 ptq->switch_state = INTEL_PT_SS_TRACING; 2331 } 2332 2333 return 0; 2334 } 2335 2336 static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip) 2337 { 2338 struct machine *machine = pt->machine; 2339 struct map *map; 2340 struct symbol *sym, *start; 2341 u64 ip, switch_ip = 0; 2342 const char *ptss; 2343 2344 if (ptss_ip) 2345 *ptss_ip = 0; 2346 2347 map = machine__kernel_map(machine); 2348 if (!map) 2349 return 0; 2350 2351 if (map__load(map)) 2352 return 0; 2353 2354 start = dso__first_symbol(map->dso); 2355 2356 for (sym = start; sym; sym = dso__next_symbol(sym)) { 2357 if (sym->binding == STB_GLOBAL && 2358 !strcmp(sym->name, "__switch_to")) { 2359 ip = map->unmap_ip(map, sym->start); 2360 if (ip >= map->start && ip < map->end) { 2361 switch_ip = ip; 2362 break; 2363 } 2364 } 2365 } 2366 2367 if (!switch_ip || !ptss_ip) 2368 return 0; 2369 2370 if (pt->have_sched_switch == 1) 2371 ptss = "perf_trace_sched_switch"; 2372 else 2373 ptss = "__perf_event_task_sched_out"; 2374 2375 for (sym = start; sym; sym = dso__next_symbol(sym)) { 2376 if (!strcmp(sym->name, ptss)) { 2377 ip = map->unmap_ip(map, sym->start); 2378 if (ip >= map->start && ip < map->end) { 2379 *ptss_ip = ip; 2380 break; 2381 } 2382 } 2383 } 2384 2385 return switch_ip; 2386 } 2387 2388 static void intel_pt_enable_sync_switch(struct intel_pt *pt) 2389 { 2390 unsigned int i; 2391 2392 pt->sync_switch = true; 2393 2394 for (i = 0; i < pt->queues.nr_queues; i++) { 2395 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 2396 struct intel_pt_queue *ptq = queue->priv; 2397 2398 if (ptq) 2399 ptq->sync_switch = true; 2400 } 2401 } 2402 2403 /* 2404 * To filter against time ranges, it is only necessary to look at the next start 2405 * or end time. 
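 * ptq->sel_timestamp holds that boundary and ptq->sel_start says whether it
 * is a start or an end; intel_pt_next_time() below steps to the following
 * boundary, e.g. start[0] -> end[0] -> start[1] -> end[1] and so on.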
2406 */ 2407 static bool intel_pt_next_time(struct intel_pt_queue *ptq) 2408 { 2409 struct intel_pt *pt = ptq->pt; 2410 2411 if (ptq->sel_start) { 2412 /* Next time is an end time */ 2413 ptq->sel_start = false; 2414 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end; 2415 return true; 2416 } else if (ptq->sel_idx + 1 < pt->range_cnt) { 2417 /* Next time is a start time */ 2418 ptq->sel_start = true; 2419 ptq->sel_idx += 1; 2420 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start; 2421 return true; 2422 } 2423 2424 /* No next time */ 2425 return false; 2426 } 2427 2428 static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp) 2429 { 2430 int err; 2431 2432 while (1) { 2433 if (ptq->sel_start) { 2434 if (ptq->timestamp >= ptq->sel_timestamp) { 2435 /* After start time, so consider next time */ 2436 intel_pt_next_time(ptq); 2437 if (!ptq->sel_timestamp) { 2438 /* No end time */ 2439 return 0; 2440 } 2441 /* Check against end time */ 2442 continue; 2443 } 2444 /* Before start time, so fast forward */ 2445 ptq->have_sample = false; 2446 if (ptq->sel_timestamp > *ff_timestamp) { 2447 if (ptq->sync_switch) { 2448 intel_pt_next_tid(ptq->pt, ptq); 2449 ptq->switch_state = INTEL_PT_SS_UNKNOWN; 2450 } 2451 *ff_timestamp = ptq->sel_timestamp; 2452 err = intel_pt_fast_forward(ptq->decoder, 2453 ptq->sel_timestamp); 2454 if (err) 2455 return err; 2456 } 2457 return 0; 2458 } else if (ptq->timestamp > ptq->sel_timestamp) { 2459 /* After end time, so consider next time */ 2460 if (!intel_pt_next_time(ptq)) { 2461 /* No next time range, so stop decoding */ 2462 ptq->have_sample = false; 2463 ptq->switch_state = INTEL_PT_SS_NOT_TRACING; 2464 return 1; 2465 } 2466 /* Check against next start time */ 2467 continue; 2468 } else { 2469 /* Before end time */ 2470 return 0; 2471 } 2472 } 2473 } 2474 2475 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) 2476 { 2477 const struct intel_pt_state *state = ptq->state; 2478 struct intel_pt *pt = ptq->pt; 2479 u64 ff_timestamp = 0; 2480 int err; 2481 2482 if (!pt->kernel_start) { 2483 pt->kernel_start = machine__kernel_start(pt->machine); 2484 if (pt->per_cpu_mmaps && 2485 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) && 2486 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) && 2487 !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) { 2488 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip); 2489 if (pt->switch_ip) { 2490 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n", 2491 pt->switch_ip, pt->ptss_ip); 2492 intel_pt_enable_sync_switch(pt); 2493 } 2494 } 2495 } 2496 2497 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n", 2498 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid); 2499 while (1) { 2500 err = intel_pt_sample(ptq); 2501 if (err) 2502 return err; 2503 2504 state = intel_pt_decode(ptq->decoder); 2505 if (state->err) { 2506 if (state->err == INTEL_PT_ERR_NODATA) 2507 return 1; 2508 if (ptq->sync_switch && 2509 state->from_ip >= pt->kernel_start) { 2510 ptq->sync_switch = false; 2511 intel_pt_next_tid(pt, ptq); 2512 } 2513 if (pt->synth_opts.errors) { 2514 err = intel_ptq_synth_error(ptq, state); 2515 if (err) 2516 return err; 2517 } 2518 continue; 2519 } 2520 2521 ptq->state = state; 2522 ptq->have_sample = true; 2523 intel_pt_sample_flags(ptq); 2524 2525 /* Use estimated TSC upon return to user space */ 2526 if (pt->est_tsc && 2527 (state->from_ip >= pt->kernel_start || !state->from_ip) && 2528 state->to_ip && state->to_ip < pt->kernel_start) { 2529 intel_pt_log("TSC 
%"PRIx64" est. TSC %"PRIx64"\n", 2530 state->timestamp, state->est_timestamp); 2531 ptq->timestamp = state->est_timestamp; 2532 /* Use estimated TSC in unknown switch state */ 2533 } else if (ptq->sync_switch && 2534 ptq->switch_state == INTEL_PT_SS_UNKNOWN && 2535 intel_pt_is_switch_ip(ptq, state->to_ip) && 2536 ptq->next_tid == -1) { 2537 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n", 2538 state->timestamp, state->est_timestamp); 2539 ptq->timestamp = state->est_timestamp; 2540 } else if (state->timestamp > ptq->timestamp) { 2541 ptq->timestamp = state->timestamp; 2542 } 2543 2544 if (ptq->sel_timestamp) { 2545 err = intel_pt_time_filter(ptq, &ff_timestamp); 2546 if (err) 2547 return err; 2548 } 2549 2550 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) { 2551 *timestamp = ptq->timestamp; 2552 return 0; 2553 } 2554 } 2555 return 0; 2556 } 2557 2558 static inline int intel_pt_update_queues(struct intel_pt *pt) 2559 { 2560 if (pt->queues.new_data) { 2561 pt->queues.new_data = false; 2562 return intel_pt_setup_queues(pt); 2563 } 2564 return 0; 2565 } 2566 2567 static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp) 2568 { 2569 unsigned int queue_nr; 2570 u64 ts; 2571 int ret; 2572 2573 while (1) { 2574 struct auxtrace_queue *queue; 2575 struct intel_pt_queue *ptq; 2576 2577 if (!pt->heap.heap_cnt) 2578 return 0; 2579 2580 if (pt->heap.heap_array[0].ordinal >= timestamp) 2581 return 0; 2582 2583 queue_nr = pt->heap.heap_array[0].queue_nr; 2584 queue = &pt->queues.queue_array[queue_nr]; 2585 ptq = queue->priv; 2586 2587 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n", 2588 queue_nr, pt->heap.heap_array[0].ordinal, 2589 timestamp); 2590 2591 auxtrace_heap__pop(&pt->heap); 2592 2593 if (pt->heap.heap_cnt) { 2594 ts = pt->heap.heap_array[0].ordinal + 1; 2595 if (ts > timestamp) 2596 ts = timestamp; 2597 } else { 2598 ts = timestamp; 2599 } 2600 2601 intel_pt_set_pid_tid_cpu(pt, queue); 2602 2603 ret = intel_pt_run_decoder(ptq, &ts); 2604 2605 if (ret < 0) { 2606 auxtrace_heap__add(&pt->heap, queue_nr, ts); 2607 return ret; 2608 } 2609 2610 if (!ret) { 2611 ret = auxtrace_heap__add(&pt->heap, queue_nr, ts); 2612 if (ret < 0) 2613 return ret; 2614 } else { 2615 ptq->on_heap = false; 2616 } 2617 } 2618 2619 return 0; 2620 } 2621 2622 static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid, 2623 u64 time_) 2624 { 2625 struct auxtrace_queues *queues = &pt->queues; 2626 unsigned int i; 2627 u64 ts = 0; 2628 2629 for (i = 0; i < queues->nr_queues; i++) { 2630 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 2631 struct intel_pt_queue *ptq = queue->priv; 2632 2633 if (ptq && (tid == -1 || ptq->tid == tid)) { 2634 ptq->time = time_; 2635 intel_pt_set_pid_tid_cpu(pt, queue); 2636 intel_pt_run_decoder(ptq, &ts); 2637 } 2638 } 2639 return 0; 2640 } 2641 2642 static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq, 2643 struct auxtrace_queue *queue, 2644 struct perf_sample *sample) 2645 { 2646 struct machine *m = ptq->pt->machine; 2647 2648 ptq->pid = sample->pid; 2649 ptq->tid = sample->tid; 2650 ptq->cpu = queue->cpu; 2651 2652 intel_pt_log("queue %u cpu %d pid %d tid %d\n", 2653 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid); 2654 2655 thread__zput(ptq->thread); 2656 2657 if (ptq->tid == -1) 2658 return; 2659 2660 if (ptq->pid == -1) { 2661 ptq->thread = machine__find_thread(m, -1, ptq->tid); 2662 if (ptq->thread) 2663 ptq->pid = ptq->thread->pid_; 2664 return; 2665 } 2666 2667 ptq->thread = 
machine__findnew_thread(m, ptq->pid, ptq->tid); 2668 } 2669 2670 static int intel_pt_process_timeless_sample(struct intel_pt *pt, 2671 struct perf_sample *sample) 2672 { 2673 struct auxtrace_queue *queue; 2674 struct intel_pt_queue *ptq; 2675 u64 ts = 0; 2676 2677 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session); 2678 if (!queue) 2679 return -EINVAL; 2680 2681 ptq = queue->priv; 2682 if (!ptq) 2683 return 0; 2684 2685 ptq->stop = false; 2686 ptq->time = sample->time; 2687 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample); 2688 intel_pt_run_decoder(ptq, &ts); 2689 return 0; 2690 } 2691 2692 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample) 2693 { 2694 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu, 2695 sample->pid, sample->tid, 0, sample->time); 2696 } 2697 2698 static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu) 2699 { 2700 unsigned i, j; 2701 2702 if (cpu < 0 || !pt->queues.nr_queues) 2703 return NULL; 2704 2705 if ((unsigned)cpu >= pt->queues.nr_queues) 2706 i = pt->queues.nr_queues - 1; 2707 else 2708 i = cpu; 2709 2710 if (pt->queues.queue_array[i].cpu == cpu) 2711 return pt->queues.queue_array[i].priv; 2712 2713 for (j = 0; i > 0; j++) { 2714 if (pt->queues.queue_array[--i].cpu == cpu) 2715 return pt->queues.queue_array[i].priv; 2716 } 2717 2718 for (; j < pt->queues.nr_queues; j++) { 2719 if (pt->queues.queue_array[j].cpu == cpu) 2720 return pt->queues.queue_array[j].priv; 2721 } 2722 2723 return NULL; 2724 } 2725 2726 static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid, 2727 u64 timestamp) 2728 { 2729 struct intel_pt_queue *ptq; 2730 int err; 2731 2732 if (!pt->sync_switch) 2733 return 1; 2734 2735 ptq = intel_pt_cpu_to_ptq(pt, cpu); 2736 if (!ptq || !ptq->sync_switch) 2737 return 1; 2738 2739 switch (ptq->switch_state) { 2740 case INTEL_PT_SS_NOT_TRACING: 2741 break; 2742 case INTEL_PT_SS_UNKNOWN: 2743 case INTEL_PT_SS_TRACING: 2744 ptq->next_tid = tid; 2745 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP; 2746 return 0; 2747 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT: 2748 if (!ptq->on_heap) { 2749 ptq->timestamp = perf_time_to_tsc(timestamp, 2750 &pt->tc); 2751 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr, 2752 ptq->timestamp); 2753 if (err) 2754 return err; 2755 ptq->on_heap = true; 2756 } 2757 ptq->switch_state = INTEL_PT_SS_TRACING; 2758 break; 2759 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 2760 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu); 2761 break; 2762 default: 2763 break; 2764 } 2765 2766 ptq->next_tid = -1; 2767 2768 return 1; 2769 } 2770 2771 static int intel_pt_process_switch(struct intel_pt *pt, 2772 struct perf_sample *sample) 2773 { 2774 pid_t tid; 2775 int cpu, ret; 2776 struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id); 2777 2778 if (evsel != pt->switch_evsel) 2779 return 0; 2780 2781 tid = evsel__intval(evsel, sample, "next_pid"); 2782 cpu = sample->cpu; 2783 2784 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", 2785 cpu, tid, sample->time, perf_time_to_tsc(sample->time, 2786 &pt->tc)); 2787 2788 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); 2789 if (ret <= 0) 2790 return ret; 2791 2792 return machine__set_current_tid(pt->machine, cpu, -1, tid); 2793 } 2794 2795 static int intel_pt_context_switch_in(struct intel_pt *pt, 2796 struct perf_sample *sample) 2797 { 2798 pid_t pid = sample->pid; 2799 pid_t tid = sample->tid; 2800 int cpu = sample->cpu; 2801 2802 if (pt->sync_switch) { 
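		/*
		 * Handling a "switch in": the CPU is now running the next
		 * task, so clear any pending next_tid and let a queue that
		 * was waiting for a switch event or switch IP resume plain
		 * tracing.
		 */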
2803 struct intel_pt_queue *ptq; 2804 2805 ptq = intel_pt_cpu_to_ptq(pt, cpu); 2806 if (ptq && ptq->sync_switch) { 2807 ptq->next_tid = -1; 2808 switch (ptq->switch_state) { 2809 case INTEL_PT_SS_NOT_TRACING: 2810 case INTEL_PT_SS_UNKNOWN: 2811 case INTEL_PT_SS_TRACING: 2812 break; 2813 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT: 2814 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 2815 ptq->switch_state = INTEL_PT_SS_TRACING; 2816 break; 2817 default: 2818 break; 2819 } 2820 } 2821 } 2822 2823 /* 2824 * If the current tid has not been updated yet, ensure it is now that 2825 * a "switch in" event has occurred. 2826 */ 2827 if (machine__get_current_tid(pt->machine, cpu) == tid) 2828 return 0; 2829 2830 return machine__set_current_tid(pt->machine, cpu, pid, tid); 2831 } 2832 2833 static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event, 2834 struct perf_sample *sample) 2835 { 2836 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; 2837 pid_t pid, tid; 2838 int cpu, ret; 2839 2840 cpu = sample->cpu; 2841 2842 if (pt->have_sched_switch == 3) { 2843 if (!out) 2844 return intel_pt_context_switch_in(pt, sample); 2845 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) { 2846 pr_err("Expecting CPU-wide context switch event\n"); 2847 return -EINVAL; 2848 } 2849 pid = event->context_switch.next_prev_pid; 2850 tid = event->context_switch.next_prev_tid; 2851 } else { 2852 if (out) 2853 return 0; 2854 pid = sample->pid; 2855 tid = sample->tid; 2856 } 2857 2858 if (tid == -1) 2859 intel_pt_log("context_switch event has no tid\n"); 2860 2861 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); 2862 if (ret <= 0) 2863 return ret; 2864 2865 return machine__set_current_tid(pt->machine, cpu, pid, tid); 2866 } 2867 2868 static int intel_pt_process_itrace_start(struct intel_pt *pt, 2869 union perf_event *event, 2870 struct perf_sample *sample) 2871 { 2872 if (!pt->per_cpu_mmaps) 2873 return 0; 2874 2875 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", 2876 sample->cpu, event->itrace_start.pid, 2877 event->itrace_start.tid, sample->time, 2878 perf_time_to_tsc(sample->time, &pt->tc)); 2879 2880 return machine__set_current_tid(pt->machine, sample->cpu, 2881 event->itrace_start.pid, 2882 event->itrace_start.tid); 2883 } 2884 2885 static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr, 2886 struct addr_location *al) 2887 { 2888 if (!al->map || addr < al->map->start || addr >= al->map->end) { 2889 if (!thread__find_map(thread, cpumode, addr, al)) 2890 return -1; 2891 } 2892 2893 return 0; 2894 } 2895 2896 /* Invalidate all instruction cache entries that overlap the text poke */ 2897 static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event) 2898 { 2899 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 2900 u64 addr = event->text_poke.addr + event->text_poke.new_len - 1; 2901 /* Assume text poke begins in a basic block no more than 4096 bytes */ 2902 int cnt = 4096 + event->text_poke.new_len; 2903 struct thread *thread = pt->unknown_thread; 2904 struct addr_location al = { .map = NULL }; 2905 struct machine *machine = pt->machine; 2906 struct intel_pt_cache_entry *e; 2907 u64 offset; 2908 2909 if (!event->text_poke.new_len) 2910 return 0; 2911 2912 for (; cnt; cnt--, addr--) { 2913 if (intel_pt_find_map(thread, cpumode, addr, &al)) { 2914 if (addr < event->text_poke.addr) 2915 return 0; 2916 continue; 2917 } 2918 2919 if (!al.map->dso || !al.map->dso->auxtrace_cache) 2920 continue; 2921 2922 offset = 
al.map->map_ip(al.map, addr); 2923 2924 e = intel_pt_cache_lookup(al.map->dso, machine, offset); 2925 if (!e) 2926 continue; 2927 2928 if (addr + e->byte_cnt + e->length <= event->text_poke.addr) { 2929 /* 2930 * No overlap. Working backwards there cannot be another 2931 * basic block that overlaps the text poke if there is a 2932 * branch instruction before the text poke address. 2933 */ 2934 if (e->branch != INTEL_PT_BR_NO_BRANCH) 2935 return 0; 2936 } else { 2937 intel_pt_cache_invalidate(al.map->dso, machine, offset); 2938 intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n", 2939 al.map->dso->long_name, addr); 2940 } 2941 } 2942 2943 return 0; 2944 } 2945 2946 static int intel_pt_process_event(struct perf_session *session, 2947 union perf_event *event, 2948 struct perf_sample *sample, 2949 struct perf_tool *tool) 2950 { 2951 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 2952 auxtrace); 2953 u64 timestamp; 2954 int err = 0; 2955 2956 if (dump_trace) 2957 return 0; 2958 2959 if (!tool->ordered_events) { 2960 pr_err("Intel Processor Trace requires ordered events\n"); 2961 return -EINVAL; 2962 } 2963 2964 if (sample->time && sample->time != (u64)-1) 2965 timestamp = perf_time_to_tsc(sample->time, &pt->tc); 2966 else 2967 timestamp = 0; 2968 2969 if (timestamp || pt->timeless_decoding) { 2970 err = intel_pt_update_queues(pt); 2971 if (err) 2972 return err; 2973 } 2974 2975 if (pt->timeless_decoding) { 2976 if (pt->sampling_mode) { 2977 if (sample->aux_sample.size) 2978 err = intel_pt_process_timeless_sample(pt, 2979 sample); 2980 } else if (event->header.type == PERF_RECORD_EXIT) { 2981 err = intel_pt_process_timeless_queues(pt, 2982 event->fork.tid, 2983 sample->time); 2984 } 2985 } else if (timestamp) { 2986 if (!pt->first_timestamp) 2987 intel_pt_first_timestamp(pt, timestamp); 2988 err = intel_pt_process_queues(pt, timestamp); 2989 } 2990 if (err) 2991 return err; 2992 2993 if (event->header.type == PERF_RECORD_SAMPLE) { 2994 if (pt->synth_opts.add_callchain && !sample->callchain) 2995 intel_pt_add_callchain(pt, sample); 2996 if (pt->synth_opts.add_last_branch && !sample->branch_stack) 2997 intel_pt_add_br_stack(pt, sample); 2998 } 2999 3000 if (event->header.type == PERF_RECORD_AUX && 3001 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) && 3002 pt->synth_opts.errors) { 3003 err = intel_pt_lost(pt, sample); 3004 if (err) 3005 return err; 3006 } 3007 3008 if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE) 3009 err = intel_pt_process_switch(pt, sample); 3010 else if (event->header.type == PERF_RECORD_ITRACE_START) 3011 err = intel_pt_process_itrace_start(pt, event, sample); 3012 else if (event->header.type == PERF_RECORD_SWITCH || 3013 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) 3014 err = intel_pt_context_switch(pt, event, sample); 3015 3016 if (!err && event->header.type == PERF_RECORD_TEXT_POKE) 3017 err = intel_pt_text_poke(pt, event); 3018 3019 if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) { 3020 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ", 3021 event->header.type, sample->cpu, sample->time, timestamp); 3022 intel_pt_log_event(event); 3023 } 3024 3025 return err; 3026 } 3027 3028 static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool) 3029 { 3030 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3031 auxtrace); 3032 int ret; 3033 3034 if (dump_trace) 3035 return 0; 3036 3037 if (!tool->ordered_events) 3038 return -EINVAL; 3039 3040 ret = 
intel_pt_update_queues(pt); 3041 if (ret < 0) 3042 return ret; 3043 3044 if (pt->timeless_decoding) 3045 return intel_pt_process_timeless_queues(pt, -1, 3046 MAX_TIMESTAMP - 1); 3047 3048 return intel_pt_process_queues(pt, MAX_TIMESTAMP); 3049 } 3050 3051 static void intel_pt_free_events(struct perf_session *session) 3052 { 3053 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3054 auxtrace); 3055 struct auxtrace_queues *queues = &pt->queues; 3056 unsigned int i; 3057 3058 for (i = 0; i < queues->nr_queues; i++) { 3059 intel_pt_free_queue(queues->queue_array[i].priv); 3060 queues->queue_array[i].priv = NULL; 3061 } 3062 intel_pt_log_disable(); 3063 auxtrace_queues__free(queues); 3064 } 3065 3066 static void intel_pt_free(struct perf_session *session) 3067 { 3068 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3069 auxtrace); 3070 3071 auxtrace_heap__free(&pt->heap); 3072 intel_pt_free_events(session); 3073 session->auxtrace = NULL; 3074 intel_pt_free_vmcs_info(pt); 3075 thread__put(pt->unknown_thread); 3076 addr_filters__exit(&pt->filts); 3077 zfree(&pt->chain); 3078 zfree(&pt->filter); 3079 zfree(&pt->time_ranges); 3080 free(pt); 3081 } 3082 3083 static bool intel_pt_evsel_is_auxtrace(struct perf_session *session, 3084 struct evsel *evsel) 3085 { 3086 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3087 auxtrace); 3088 3089 return evsel->core.attr.type == pt->pmu_type; 3090 } 3091 3092 static int intel_pt_process_auxtrace_event(struct perf_session *session, 3093 union perf_event *event, 3094 struct perf_tool *tool __maybe_unused) 3095 { 3096 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3097 auxtrace); 3098 3099 if (!pt->data_queued) { 3100 struct auxtrace_buffer *buffer; 3101 off_t data_offset; 3102 int fd = perf_data__fd(session->data); 3103 int err; 3104 3105 if (perf_data__is_pipe(session->data)) { 3106 data_offset = 0; 3107 } else { 3108 data_offset = lseek(fd, 0, SEEK_CUR); 3109 if (data_offset == -1) 3110 return -errno; 3111 } 3112 3113 err = auxtrace_queues__add_event(&pt->queues, session, event, 3114 data_offset, &buffer); 3115 if (err) 3116 return err; 3117 3118 /* Dump here now we have copied a piped trace out of the pipe */ 3119 if (dump_trace) { 3120 if (auxtrace_buffer__get_data(buffer, fd)) { 3121 intel_pt_dump_event(pt, buffer->data, 3122 buffer->size); 3123 auxtrace_buffer__put_data(buffer); 3124 } 3125 } 3126 } 3127 3128 return 0; 3129 } 3130 3131 static int intel_pt_queue_data(struct perf_session *session, 3132 struct perf_sample *sample, 3133 union perf_event *event, u64 data_offset) 3134 { 3135 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3136 auxtrace); 3137 u64 timestamp; 3138 3139 if (event) { 3140 return auxtrace_queues__add_event(&pt->queues, session, event, 3141 data_offset, NULL); 3142 } 3143 3144 if (sample->time && sample->time != (u64)-1) 3145 timestamp = perf_time_to_tsc(sample->time, &pt->tc); 3146 else 3147 timestamp = 0; 3148 3149 return auxtrace_queues__add_sample(&pt->queues, session, sample, 3150 data_offset, timestamp); 3151 } 3152 3153 struct intel_pt_synth { 3154 struct perf_tool dummy_tool; 3155 struct perf_session *session; 3156 }; 3157 3158 static int intel_pt_event_synth(struct perf_tool *tool, 3159 union perf_event *event, 3160 struct perf_sample *sample __maybe_unused, 3161 struct machine *machine __maybe_unused) 3162 { 3163 struct intel_pt_synth *intel_pt_synth = 3164 container_of(tool, struct intel_pt_synth, dummy_tool); 
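	/*
	 * dummy_tool is embedded in struct intel_pt_synth purely so that this
	 * callback, invoked via perf_event__synthesize_attr() in
	 * intel_pt_synth_event() below, can recover the session with
	 * container_of().
	 */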
3165 3166 return perf_session__deliver_synth_event(intel_pt_synth->session, event, 3167 NULL); 3168 } 3169 3170 static int intel_pt_synth_event(struct perf_session *session, const char *name, 3171 struct perf_event_attr *attr, u64 id) 3172 { 3173 struct intel_pt_synth intel_pt_synth; 3174 int err; 3175 3176 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 3177 name, id, (u64)attr->sample_type); 3178 3179 memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth)); 3180 intel_pt_synth.session = session; 3181 3182 err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1, 3183 &id, intel_pt_event_synth); 3184 if (err) 3185 pr_err("%s: failed to synthesize '%s' event type\n", 3186 __func__, name); 3187 3188 return err; 3189 } 3190 3191 static void intel_pt_set_event_name(struct evlist *evlist, u64 id, 3192 const char *name) 3193 { 3194 struct evsel *evsel; 3195 3196 evlist__for_each_entry(evlist, evsel) { 3197 if (evsel->core.id && evsel->core.id[0] == id) { 3198 if (evsel->name) 3199 zfree(&evsel->name); 3200 evsel->name = strdup(name); 3201 break; 3202 } 3203 } 3204 } 3205 3206 static struct evsel *intel_pt_evsel(struct intel_pt *pt, 3207 struct evlist *evlist) 3208 { 3209 struct evsel *evsel; 3210 3211 evlist__for_each_entry(evlist, evsel) { 3212 if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids) 3213 return evsel; 3214 } 3215 3216 return NULL; 3217 } 3218 3219 static int intel_pt_synth_events(struct intel_pt *pt, 3220 struct perf_session *session) 3221 { 3222 struct evlist *evlist = session->evlist; 3223 struct evsel *evsel = intel_pt_evsel(pt, evlist); 3224 struct perf_event_attr attr; 3225 u64 id; 3226 int err; 3227 3228 if (!evsel) { 3229 pr_debug("There are no selected events with Intel Processor Trace data\n"); 3230 return 0; 3231 } 3232 3233 memset(&attr, 0, sizeof(struct perf_event_attr)); 3234 attr.size = sizeof(struct perf_event_attr); 3235 attr.type = PERF_TYPE_HARDWARE; 3236 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK; 3237 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | 3238 PERF_SAMPLE_PERIOD; 3239 if (pt->timeless_decoding) 3240 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME; 3241 else 3242 attr.sample_type |= PERF_SAMPLE_TIME; 3243 if (!pt->per_cpu_mmaps) 3244 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU; 3245 attr.exclude_user = evsel->core.attr.exclude_user; 3246 attr.exclude_kernel = evsel->core.attr.exclude_kernel; 3247 attr.exclude_hv = evsel->core.attr.exclude_hv; 3248 attr.exclude_host = evsel->core.attr.exclude_host; 3249 attr.exclude_guest = evsel->core.attr.exclude_guest; 3250 attr.sample_id_all = evsel->core.attr.sample_id_all; 3251 attr.read_format = evsel->core.attr.read_format; 3252 3253 id = evsel->core.id[0] + 1000000000; 3254 if (!id) 3255 id = 1; 3256 3257 if (pt->synth_opts.branches) { 3258 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS; 3259 attr.sample_period = 1; 3260 attr.sample_type |= PERF_SAMPLE_ADDR; 3261 err = intel_pt_synth_event(session, "branches", &attr, id); 3262 if (err) 3263 return err; 3264 pt->sample_branches = true; 3265 pt->branches_sample_type = attr.sample_type; 3266 pt->branches_id = id; 3267 id += 1; 3268 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR; 3269 } 3270 3271 if (pt->synth_opts.callchain) 3272 attr.sample_type |= PERF_SAMPLE_CALLCHAIN; 3273 if (pt->synth_opts.last_branch) { 3274 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK; 3275 /* 3276 * We don't use the hardware index, but the sample generation 3277 * code uses the new format branch_stack with 
this field, 3278 * so the event attributes must indicate that it's present. 3279 */ 3280 attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX; 3281 } 3282 3283 if (pt->synth_opts.instructions) { 3284 attr.config = PERF_COUNT_HW_INSTRUCTIONS; 3285 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS) 3286 attr.sample_period = 3287 intel_pt_ns_to_ticks(pt, pt->synth_opts.period); 3288 else 3289 attr.sample_period = pt->synth_opts.period; 3290 err = intel_pt_synth_event(session, "instructions", &attr, id); 3291 if (err) 3292 return err; 3293 pt->sample_instructions = true; 3294 pt->instructions_sample_type = attr.sample_type; 3295 pt->instructions_id = id; 3296 id += 1; 3297 } 3298 3299 attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD; 3300 attr.sample_period = 1; 3301 3302 if (pt->synth_opts.transactions) { 3303 attr.config = PERF_COUNT_HW_INSTRUCTIONS; 3304 err = intel_pt_synth_event(session, "transactions", &attr, id); 3305 if (err) 3306 return err; 3307 pt->sample_transactions = true; 3308 pt->transactions_sample_type = attr.sample_type; 3309 pt->transactions_id = id; 3310 intel_pt_set_event_name(evlist, id, "transactions"); 3311 id += 1; 3312 } 3313 3314 attr.type = PERF_TYPE_SYNTH; 3315 attr.sample_type |= PERF_SAMPLE_RAW; 3316 3317 if (pt->synth_opts.ptwrites) { 3318 attr.config = PERF_SYNTH_INTEL_PTWRITE; 3319 err = intel_pt_synth_event(session, "ptwrite", &attr, id); 3320 if (err) 3321 return err; 3322 pt->sample_ptwrites = true; 3323 pt->ptwrites_sample_type = attr.sample_type; 3324 pt->ptwrites_id = id; 3325 intel_pt_set_event_name(evlist, id, "ptwrite"); 3326 id += 1; 3327 } 3328 3329 if (pt->synth_opts.pwr_events) { 3330 pt->sample_pwr_events = true; 3331 pt->pwr_events_sample_type = attr.sample_type; 3332 3333 attr.config = PERF_SYNTH_INTEL_CBR; 3334 err = intel_pt_synth_event(session, "cbr", &attr, id); 3335 if (err) 3336 return err; 3337 pt->cbr_id = id; 3338 intel_pt_set_event_name(evlist, id, "cbr"); 3339 id += 1; 3340 3341 attr.config = PERF_SYNTH_INTEL_PSB; 3342 err = intel_pt_synth_event(session, "psb", &attr, id); 3343 if (err) 3344 return err; 3345 pt->psb_id = id; 3346 intel_pt_set_event_name(evlist, id, "psb"); 3347 id += 1; 3348 } 3349 3350 if (pt->synth_opts.pwr_events && (evsel->core.attr.config & 0x10)) { 3351 attr.config = PERF_SYNTH_INTEL_MWAIT; 3352 err = intel_pt_synth_event(session, "mwait", &attr, id); 3353 if (err) 3354 return err; 3355 pt->mwait_id = id; 3356 intel_pt_set_event_name(evlist, id, "mwait"); 3357 id += 1; 3358 3359 attr.config = PERF_SYNTH_INTEL_PWRE; 3360 err = intel_pt_synth_event(session, "pwre", &attr, id); 3361 if (err) 3362 return err; 3363 pt->pwre_id = id; 3364 intel_pt_set_event_name(evlist, id, "pwre"); 3365 id += 1; 3366 3367 attr.config = PERF_SYNTH_INTEL_EXSTOP; 3368 err = intel_pt_synth_event(session, "exstop", &attr, id); 3369 if (err) 3370 return err; 3371 pt->exstop_id = id; 3372 intel_pt_set_event_name(evlist, id, "exstop"); 3373 id += 1; 3374 3375 attr.config = PERF_SYNTH_INTEL_PWRX; 3376 err = intel_pt_synth_event(session, "pwrx", &attr, id); 3377 if (err) 3378 return err; 3379 pt->pwrx_id = id; 3380 intel_pt_set_event_name(evlist, id, "pwrx"); 3381 id += 1; 3382 } 3383 3384 return 0; 3385 } 3386 3387 static void intel_pt_setup_pebs_events(struct intel_pt *pt) 3388 { 3389 struct evsel *evsel; 3390 3391 if (!pt->synth_opts.other_events) 3392 return; 3393 3394 evlist__for_each_entry(pt->session->evlist, evsel) { 3395 if (evsel->core.attr.aux_output && evsel->core.id) { 3396 pt->sample_pebs = true; 3397 pt->pebs_evsel = 
evsel; 3398 return; 3399 } 3400 } 3401 } 3402 3403 static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist) 3404 { 3405 struct evsel *evsel; 3406 3407 evlist__for_each_entry_reverse(evlist, evsel) { 3408 const char *name = evsel__name(evsel); 3409 3410 if (!strcmp(name, "sched:sched_switch")) 3411 return evsel; 3412 } 3413 3414 return NULL; 3415 } 3416 3417 static bool intel_pt_find_switch(struct evlist *evlist) 3418 { 3419 struct evsel *evsel; 3420 3421 evlist__for_each_entry(evlist, evsel) { 3422 if (evsel->core.attr.context_switch) 3423 return true; 3424 } 3425 3426 return false; 3427 } 3428 3429 static int intel_pt_perf_config(const char *var, const char *value, void *data) 3430 { 3431 struct intel_pt *pt = data; 3432 3433 if (!strcmp(var, "intel-pt.mispred-all")) 3434 pt->mispred_all = perf_config_bool(var, value); 3435 3436 if (!strcmp(var, "intel-pt.max-loops")) 3437 perf_config_int(&pt->max_loops, var, value); 3438 3439 return 0; 3440 } 3441 3442 /* Find least TSC which converts to ns or later */ 3443 static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt) 3444 { 3445 u64 tsc, tm; 3446 3447 tsc = perf_time_to_tsc(ns, &pt->tc); 3448 3449 while (1) { 3450 tm = tsc_to_perf_time(tsc, &pt->tc); 3451 if (tm < ns) 3452 break; 3453 tsc -= 1; 3454 } 3455 3456 while (tm < ns) 3457 tm = tsc_to_perf_time(++tsc, &pt->tc); 3458 3459 return tsc; 3460 } 3461 3462 /* Find greatest TSC which converts to ns or earlier */ 3463 static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt) 3464 { 3465 u64 tsc, tm; 3466 3467 tsc = perf_time_to_tsc(ns, &pt->tc); 3468 3469 while (1) { 3470 tm = tsc_to_perf_time(tsc, &pt->tc); 3471 if (tm > ns) 3472 break; 3473 tsc += 1; 3474 } 3475 3476 while (tm > ns) 3477 tm = tsc_to_perf_time(--tsc, &pt->tc); 3478 3479 return tsc; 3480 } 3481 3482 static int intel_pt_setup_time_ranges(struct intel_pt *pt, 3483 struct itrace_synth_opts *opts) 3484 { 3485 struct perf_time_interval *p = opts->ptime_range; 3486 int n = opts->range_num; 3487 int i; 3488 3489 if (!n || !p || pt->timeless_decoding) 3490 return 0; 3491 3492 pt->time_ranges = calloc(n, sizeof(struct range)); 3493 if (!pt->time_ranges) 3494 return -ENOMEM; 3495 3496 pt->range_cnt = n; 3497 3498 intel_pt_log("%s: %u range(s)\n", __func__, n); 3499 3500 for (i = 0; i < n; i++) { 3501 struct range *r = &pt->time_ranges[i]; 3502 u64 ts = p[i].start; 3503 u64 te = p[i].end; 3504 3505 /* 3506 * Take care to ensure the TSC range matches the perf-time range 3507 * when converted back to perf-time. 3508 */ 3509 r->start = ts ? intel_pt_tsc_start(ts, pt) : 0; 3510 r->end = te ? 
intel_pt_tsc_end(te, pt) : 0; 3511 3512 intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n", 3513 i, ts, te); 3514 intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n", 3515 i, r->start, r->end); 3516 } 3517 3518 return 0; 3519 } 3520 3521 static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args) 3522 { 3523 struct intel_pt_vmcs_info *vmcs_info; 3524 u64 tsc_offset, vmcs; 3525 char *p = *args; 3526 3527 errno = 0; 3528 3529 p = skip_spaces(p); 3530 if (!*p) 3531 return 1; 3532 3533 tsc_offset = strtoull(p, &p, 0); 3534 if (errno) 3535 return -errno; 3536 p = skip_spaces(p); 3537 if (*p != ':') { 3538 pt->dflt_tsc_offset = tsc_offset; 3539 *args = p; 3540 return 0; 3541 } 3542 while (1) { 3543 vmcs = strtoull(p, &p, 0); 3544 if (errno) 3545 return -errno; 3546 if (!vmcs) 3547 return -EINVAL; 3548 vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset); 3549 if (!vmcs_info) 3550 return -ENOMEM; 3551 p = skip_spaces(p); 3552 if (*p != ',') 3553 break; 3554 p += 1; 3555 } 3556 *args = p; 3557 return 0; 3558 } 3559 3560 static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt) 3561 { 3562 char *args = pt->synth_opts.vm_tm_corr_args; 3563 int ret; 3564 3565 if (!args) 3566 return 0; 3567 3568 do { 3569 ret = intel_pt_parse_vm_tm_corr_arg(pt, &args); 3570 } while (!ret); 3571 3572 if (ret < 0) { 3573 pr_err("Failed to parse VM Time Correlation options\n"); 3574 return ret; 3575 } 3576 3577 return 0; 3578 } 3579 3580 static const char * const intel_pt_info_fmts[] = { 3581 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n", 3582 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n", 3583 [INTEL_PT_TIME_MULT] = " Time Muliplier %"PRIu64"\n", 3584 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n", 3585 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n", 3586 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n", 3587 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n", 3588 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n", 3589 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n", 3590 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n", 3591 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n", 3592 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n", 3593 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n", 3594 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n", 3595 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n", 3596 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. %"PRIu64"\n", 3597 }; 3598 3599 static void intel_pt_print_info(__u64 *arr, int start, int finish) 3600 { 3601 int i; 3602 3603 if (!dump_trace) 3604 return; 3605 3606 for (i = start; i <= finish; i++) 3607 fprintf(stdout, intel_pt_info_fmts[i], arr[i]); 3608 } 3609 3610 static void intel_pt_print_info_str(const char *name, const char *str) 3611 { 3612 if (!dump_trace) 3613 return; 3614 3615 fprintf(stdout, " %-20s%s\n", name, str ? 
str : ""); 3616 } 3617 3618 static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos) 3619 { 3620 return auxtrace_info->header.size >= 3621 sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1)); 3622 } 3623 3624 int intel_pt_process_auxtrace_info(union perf_event *event, 3625 struct perf_session *session) 3626 { 3627 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; 3628 size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS; 3629 struct intel_pt *pt; 3630 void *info_end; 3631 __u64 *info; 3632 int err; 3633 3634 if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) + 3635 min_sz) 3636 return -EINVAL; 3637 3638 pt = zalloc(sizeof(struct intel_pt)); 3639 if (!pt) 3640 return -ENOMEM; 3641 3642 pt->vmcs_info = RB_ROOT; 3643 3644 addr_filters__init(&pt->filts); 3645 3646 err = perf_config(intel_pt_perf_config, pt); 3647 if (err) 3648 goto err_free; 3649 3650 err = auxtrace_queues__init(&pt->queues); 3651 if (err) 3652 goto err_free; 3653 3654 intel_pt_log_set_name(INTEL_PT_PMU_NAME); 3655 3656 if (session->itrace_synth_opts->set) { 3657 pt->synth_opts = *session->itrace_synth_opts; 3658 } else { 3659 struct itrace_synth_opts *opts = session->itrace_synth_opts; 3660 3661 itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample); 3662 if (!opts->default_no_sample && !opts->inject) { 3663 pt->synth_opts.branches = false; 3664 pt->synth_opts.callchain = true; 3665 pt->synth_opts.add_callchain = true; 3666 } 3667 pt->synth_opts.thread_stack = opts->thread_stack; 3668 } 3669 3670 pt->session = session; 3671 pt->machine = &session->machines.host; /* No kvm support */ 3672 pt->auxtrace_type = auxtrace_info->type; 3673 pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE]; 3674 pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT]; 3675 pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT]; 3676 pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO]; 3677 pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO]; 3678 pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT]; 3679 pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT]; 3680 pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH]; 3681 pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE]; 3682 pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS]; 3683 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE, 3684 INTEL_PT_PER_CPU_MMAPS); 3685 3686 if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) { 3687 pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT]; 3688 pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS]; 3689 pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N]; 3690 pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D]; 3691 pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT]; 3692 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT, 3693 INTEL_PT_CYC_BIT); 3694 } 3695 3696 if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) { 3697 pt->max_non_turbo_ratio = 3698 auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO]; 3699 intel_pt_print_info(&auxtrace_info->priv[0], 3700 INTEL_PT_MAX_NONTURBO_RATIO, 3701 INTEL_PT_MAX_NONTURBO_RATIO); 3702 } 3703 3704 info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1; 3705 info_end = (void *)info + auxtrace_info->header.size; 3706 3707 if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) { 3708 size_t len; 3709 3710 len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN]; 
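		/*
		 * The filter string, if any, follows the fixed priv[] entries:
		 * it is padded to a multiple of 8 bytes, must be NUL
		 * terminated, and may need byte swapping when
		 * session->header.needs_swap is set.
		 */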
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	if (pt->timeless_decoding && !pt->tc.time_mult)
		pt->tc.time_mult = 1;
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = intel_pt_sampling_mode(pt);
	pt->est_tsc = !pt->timeless_decoding;

	if (pt->synth_opts.vm_time_correlation) {
		if (pt->timeless_decoding) {
			pr_err("Intel PT has no time information for VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		if (session->itrace_synth_opts->ptime_range) {
			pr_err("Time ranges cannot be specified with VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		/* Currently TSC Offset is calculated using MTC packets */
		if (!intel_pt_have_mtc(pt)) {
			pr_err("MTC packets must have been enabled for VM Time Correlation\n");
			err = -EINVAL;
			goto err_free_queues;
		}
		err = intel_pt_parse_vm_tm_corr_args(pt);
		if (err)
			goto err_free_queues;
	}

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree nor in a
	 * list, initialize its list node so that at thread__put() the
	 * current thread lifetime assumption is kept and we don't segfault
	 * at list_del_init().
3784 */ 3785 INIT_LIST_HEAD(&pt->unknown_thread->node); 3786 3787 err = thread__set_comm(pt->unknown_thread, "unknown", 0); 3788 if (err) 3789 goto err_delete_thread; 3790 if (thread__init_maps(pt->unknown_thread, pt->machine)) { 3791 err = -ENOMEM; 3792 goto err_delete_thread; 3793 } 3794 3795 pt->auxtrace.process_event = intel_pt_process_event; 3796 pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event; 3797 pt->auxtrace.queue_data = intel_pt_queue_data; 3798 pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample; 3799 pt->auxtrace.flush_events = intel_pt_flush; 3800 pt->auxtrace.free_events = intel_pt_free_events; 3801 pt->auxtrace.free = intel_pt_free; 3802 pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace; 3803 session->auxtrace = &pt->auxtrace; 3804 3805 if (dump_trace) 3806 return 0; 3807 3808 if (pt->have_sched_switch == 1) { 3809 pt->switch_evsel = intel_pt_find_sched_switch(session->evlist); 3810 if (!pt->switch_evsel) { 3811 pr_err("%s: missing sched_switch event\n", __func__); 3812 err = -EINVAL; 3813 goto err_delete_thread; 3814 } 3815 } else if (pt->have_sched_switch == 2 && 3816 !intel_pt_find_switch(session->evlist)) { 3817 pr_err("%s: missing context_switch attribute flag\n", __func__); 3818 err = -EINVAL; 3819 goto err_delete_thread; 3820 } 3821 3822 if (pt->synth_opts.log) 3823 intel_pt_log_enable(); 3824 3825 /* Maximum non-turbo ratio is TSC freq / 100 MHz */ 3826 if (pt->tc.time_mult) { 3827 u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000); 3828 3829 if (!pt->max_non_turbo_ratio) 3830 pt->max_non_turbo_ratio = 3831 (tsc_freq + 50000000) / 100000000; 3832 intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq); 3833 intel_pt_log("Maximum non-turbo ratio %u\n", 3834 pt->max_non_turbo_ratio); 3835 pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000; 3836 } 3837 3838 err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts); 3839 if (err) 3840 goto err_delete_thread; 3841 3842 if (pt->synth_opts.calls) 3843 pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | 3844 PERF_IP_FLAG_TRACE_END; 3845 if (pt->synth_opts.returns) 3846 pt->branches_filter |= PERF_IP_FLAG_RETURN | 3847 PERF_IP_FLAG_TRACE_BEGIN; 3848 3849 if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) && 3850 !symbol_conf.use_callchain) { 3851 symbol_conf.use_callchain = true; 3852 if (callchain_register_param(&callchain_param) < 0) { 3853 symbol_conf.use_callchain = false; 3854 pt->synth_opts.callchain = false; 3855 pt->synth_opts.add_callchain = false; 3856 } 3857 } 3858 3859 if (pt->synth_opts.add_callchain) { 3860 err = intel_pt_callchain_init(pt); 3861 if (err) 3862 goto err_delete_thread; 3863 } 3864 3865 if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) { 3866 pt->br_stack_sz = pt->synth_opts.last_branch_sz; 3867 pt->br_stack_sz_plus = pt->br_stack_sz; 3868 } 3869 3870 if (pt->synth_opts.add_last_branch) { 3871 err = intel_pt_br_stack_init(pt); 3872 if (err) 3873 goto err_delete_thread; 3874 /* 3875 * Additional branch stack size to cater for tracing from the 3876 * actual sample ip to where the sample time is recorded. 3877 * Measured at about 200 branches, but generously set to 1024. 3878 * If kernel space is not being traced, then add just 1 for the 3879 * branch to kernel space. 
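		 * In other words, br_stack_sz_plus ends up as last_branch_sz
		 * plus 1024 when kernel space is traced, or last_branch_sz
		 * plus 1 when it is not.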
3880 */ 3881 if (intel_pt_tracing_kernel(pt)) 3882 pt->br_stack_sz_plus += 1024; 3883 else 3884 pt->br_stack_sz_plus += 1; 3885 } 3886 3887 pt->use_thread_stack = pt->synth_opts.callchain || 3888 pt->synth_opts.add_callchain || 3889 pt->synth_opts.thread_stack || 3890 pt->synth_opts.last_branch || 3891 pt->synth_opts.add_last_branch; 3892 3893 pt->callstack = pt->synth_opts.callchain || 3894 pt->synth_opts.add_callchain || 3895 pt->synth_opts.thread_stack; 3896 3897 err = intel_pt_synth_events(pt, session); 3898 if (err) 3899 goto err_delete_thread; 3900 3901 intel_pt_setup_pebs_events(pt); 3902 3903 if (pt->sampling_mode || list_empty(&session->auxtrace_index)) 3904 err = auxtrace_queue_data(session, true, true); 3905 else 3906 err = auxtrace_queues__process_index(&pt->queues, session); 3907 if (err) 3908 goto err_delete_thread; 3909 3910 if (pt->queues.populated) 3911 pt->data_queued = true; 3912 3913 if (pt->timeless_decoding) 3914 pr_debug2("Intel PT decoding without timestamps\n"); 3915 3916 return 0; 3917 3918 err_delete_thread: 3919 zfree(&pt->chain); 3920 thread__zput(pt->unknown_thread); 3921 err_free_queues: 3922 intel_pt_log_disable(); 3923 auxtrace_queues__free(&pt->queues); 3924 session->auxtrace = NULL; 3925 err_free: 3926 addr_filters__exit(&pt->filts); 3927 zfree(&pt->filter); 3928 zfree(&pt->time_ranges); 3929 free(pt); 3930 return err; 3931 } 3932
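
/*
 * For reference, a rough sketch of the perf_record_auxtrace_info priv[]
 * layout consumed by intel_pt_process_auxtrace_info() above. The names are
 * the INTEL_PT_* indices used in the code; entries beyond the first group are
 * optional and are guarded by intel_pt_has():
 *
 *   priv[INTEL_PT_PMU_TYPE .. INTEL_PT_PER_CPU_MMAPS]  always present
 *   priv[INTEL_PT_MTC_BIT .. INTEL_PT_CYC_BIT]         MTC/TSC:CTC/CYC parameters
 *   priv[INTEL_PT_MAX_NONTURBO_RATIO]                  max non-turbo ratio
 *   priv[INTEL_PT_FILTER_STR_LEN]                      filter string length,
 *                                                      followed by the string
 *                                                      itself, padded to a
 *                                                      multiple of 8 bytes
 */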