// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <errno.h>
#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <linux/err.h>
#include <cpuid.h>

#include "../../../util/session.h"
#include "../../../util/event.h"
#include "../../../util/evlist.h"
#include "../../../util/evsel.h"
#include "../../../util/evsel_config.h"
#include "../../../util/cpumap.h"
#include "../../../util/mmap.h"
#include <subcmd/parse-options.h>
#include "../../../util/parse-events.h"
#include "../../../util/pmu.h"
#include "../../../util/debug.h"
#include "../../../util/auxtrace.h"
#include "../../../util/perf_api_probe.h"
#include "../../../util/record.h"
#include "../../../util/target.h"
#include "../../../util/tsc.h"
#include <internal/lib.h> // page_size
#include "../../../util/intel-pt.h"

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)

#define INTEL_PT_PSB_PERIOD_NEAR	256

struct intel_pt_snapshot_ref {
	void *ref_buf;
	size_t ref_offset;
	bool wrapped;
};

struct intel_pt_recording {
	struct auxtrace_record itr;
	struct perf_pmu *intel_pt_pmu;
	int have_sched_switch;
	struct evlist *evlist;
	bool snapshot_mode;
	bool snapshot_init_done;
	size_t snapshot_size;
	size_t snapshot_ref_buf_size;
	int snapshot_ref_cnt;
	struct intel_pt_snapshot_ref *snapshot_refs;
	size_t priv_size;
};

static int intel_pt_parse_terms_with_default(const char *pmu_name,
					     struct list_head *formats,
					     const char *str,
					     u64 *config)
{
	struct list_head *terms;
	struct perf_event_attr attr = { .size = 0, };
	int err;

	terms = malloc(sizeof(struct list_head));
	if (!terms)
		return -ENOMEM;

	INIT_LIST_HEAD(terms);

	err = parse_events_terms(terms, str);
	if (err)
		goto out_free;

	attr.config = *config;
	err = perf_pmu__config_terms(pmu_name, formats, &attr, terms, true,
				     NULL);
	if (err)
		goto out_free;

	*config = attr.config;
out_free:
	parse_events_terms__delete(terms);
	return err;
}

static int intel_pt_parse_terms(const char *pmu_name, struct list_head *formats,
				const char *str, u64 *config)
{
	*config = 0;
	return intel_pt_parse_terms_with_default(pmu_name, formats, str,
						 config);
}

static u64 intel_pt_masked_bits(u64 mask, u64 bits)
{
	const u64 top_bit = 1ULL << 63;
	u64 res = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & top_bit) {
			res <<= 1;
			if (bits & top_bit)
				res |= 1;
		}
		mask <<= 1;
		bits <<= 1;
	}

	return res;
}

static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
				struct evlist *evlist, u64 *res)
{
	struct evsel *evsel;
	u64 mask;

	*res = 0;

	mask = perf_pmu__format_bits(&intel_pt_pmu->format, str);
	if (!mask)
		return -EINVAL;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type) {
			*res = intel_pt_masked_bits(mask, evsel->core.attr.config);
			return 0;
		}
	}

	return -EINVAL;
}

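/*
 * The PSB (Packet Stream Boundary) period is 2KiB << psb_period, i.e.
 * 2^(psb_period + 11) bytes: psb_period=0 gives 2KiB and the default of 3
 * gives 16KiB.  Early hardware without multiple ToPA entries issued PSBs
 * much more frequently, hence the fixed 256-byte estimate below.
 */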
static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
				  struct evlist *evlist)
{
	u64 val;
	int err, topa_multiple_entries;
	size_t psb_period;

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
				"%d", &topa_multiple_entries) != 1)
		topa_multiple_entries = 0;

	/*
	 * Use caps/topa_multiple_entries to indicate early hardware that had
	 * extra frequent PSBs.
	 */
	if (!topa_multiple_entries) {
		psb_period = 256;
		goto out;
	}

	err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
	if (err)
		val = 0;

	psb_period = 1 << (val + 11);
out:
	pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
	return psb_period;
}

static int intel_pt_pick_bit(int bits, int target)
{
	int pos, pick = -1;

	for (pos = 0; bits; bits >>= 1, pos++) {
		if (bits & 1) {
			if (pos <= target || pick < 0)
				pick = pos;
			if (pos >= target)
				break;
		}
	}

	return pick;
}

static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
{
	char buf[256];
	int mtc, mtc_periods = 0, mtc_period;
	int psb_cyc, psb_periods, psb_period;
	int pos = 0;
	u64 config;
	char c;

	pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
				&mtc) != 1)
		mtc = 1;

	if (mtc) {
		if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
					&mtc_periods) != 1)
			mtc_periods = 0;
		if (mtc_periods) {
			mtc_period = intel_pt_pick_bit(mtc_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",mtc,mtc_period=%d", mtc_period);
		}
	}

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
				&psb_cyc) != 1)
		psb_cyc = 1;

	if (psb_cyc && mtc_periods) {
		if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
					&psb_periods) != 1)
			psb_periods = 0;
		if (psb_periods) {
			psb_period = intel_pt_pick_bit(psb_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",psb_period=%d", psb_period);
		}
	}

	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
	    perf_pmu__scan_file(intel_pt_pmu, "format/branch", "%c", &c) == 1)
		pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");

	pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);

	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, buf,
			     &config);

	return config;
}

static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
					   struct record_opts *opts,
					   const char *str)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	ptr->snapshot_size = snapshot_size;

	return 0;
}

struct perf_event_attr *
intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
{
	struct perf_event_attr *attr;

	attr = zalloc(sizeof(struct perf_event_attr));
	if (!attr)
		return NULL;

	attr->config = intel_pt_default_config(intel_pt_pmu);

	intel_pt_pmu->selectable = true;

	return attr;
}

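/*
 * An address filter string set on the Intel PT event (see perf record's
 * --filter option) is passed to the decoder via the auxtrace info priv area:
 * it is NUL terminated and padded to a multiple of 8 bytes (see
 * intel_pt_filter_bytes() and intel_pt_info_fill()).
 */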
static const char *intel_pt_find_filter(struct evlist *evlist,
					struct perf_pmu *intel_pt_pmu)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type)
			return evsel->filter;
	}

	return NULL;
}

static size_t intel_pt_filter_bytes(const char *filter)
{
	size_t len = filter ? strlen(filter) : 0;

	return len ? roundup(len + 1, 8) : 0;
}

static size_t
intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	const char *filter = intel_pt_find_filter(evlist, ptr->intel_pt_pmu);

	ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) +
			 intel_pt_filter_bytes(filter);
	ptr->priv_size += sizeof(u64); /* Cap Event Trace */

	return ptr->priv_size;
}

static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	/* CPUID leaf 0x15: EBX/EAX is the TSC to core crystal clock ratio */
	__get_cpuid(0x15, &eax, &ebx, &ecx, &edx);
	*n = ebx;
	*d = eax;
}

static int intel_pt_info_fill(struct auxtrace_record *itr,
			      struct perf_session *session,
			      struct perf_record_auxtrace_info *auxtrace_info,
			      size_t priv_size)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	struct perf_event_mmap_page *pc;
	struct perf_tsc_conversion tc = { .time_mult = 0, };
	bool cap_user_time_zero = false, per_cpu_mmaps;
	u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
	u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
	unsigned long max_non_turbo_ratio;
	size_t filter_str_len;
	const char *filter;
	int event_trace;
	__u64 *info;
	int err;

	if (priv_size != ptr->priv_size)
		return -EINVAL;

	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
			     "tsc", &tsc_bit);
	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
			     "noretcomp", &noretcomp_bit);
	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
			     "mtc", &mtc_bit);
	mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
					      "mtc_period");
	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
			     "cyc", &cyc_bit);

	intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);

	if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio",
				"%lu", &max_non_turbo_ratio) != 1)
		max_non_turbo_ratio = 0;
	if (perf_pmu__scan_file(intel_pt_pmu, "caps/event_trace",
				"%d", &event_trace) != 1)
		event_trace = 0;

	filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
	filter_str_len = filter ? strlen(filter) : 0;

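	/*
	 * The TSC-to-perf-time conversion parameters (time_shift, time_mult,
	 * time_zero) are read from the perf_event_mmap_page of the first
	 * mmap.  If cap_user_time_zero is not available, the decoder cannot
	 * use TSC, hence the warning below.
	 */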
	if (!session->evlist->core.nr_mmaps)
		return -EINVAL;

	pc = session->evlist->mmap[0].core.base;
	if (pc) {
		err = perf_read_tsc_conversion(pc, &tc);
		if (err) {
			if (err != -EOPNOTSUPP)
				return err;
		} else {
			cap_user_time_zero = tc.time_mult != 0;
		}
		if (!cap_user_time_zero)
			ui__warning("Intel Processor Trace: TSC not available\n");
	}

	per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);

	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
	auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
	auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
	auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
	auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
	auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
	auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
	auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
	auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
	auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
	auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
	auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
	auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;
	auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO] = max_non_turbo_ratio;
	auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] = filter_str_len;

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;

	if (filter_str_len) {
		size_t len = intel_pt_filter_bytes(filter);

		strncpy((char *)info, filter, len);
		info += len >> 3;
	}

	*info++ = event_trace;

	return 0;
}

static int intel_pt_track_switches(struct evlist *evlist)
{
	const char *sched_switch = "sched:sched_switch";
	struct evsel *evsel;
	int err;

	if (!evlist__can_select_event(evlist, sched_switch))
		return -EPERM;

	evsel = evlist__add_sched_switch(evlist, true);
	if (IS_ERR(evsel)) {
		err = PTR_ERR(evsel);
		pr_debug2("%s: failed to create %s, error = %d\n",
			  __func__, sched_switch, err);
		return err;
	}

	evsel->immediate = true;

	return 0;
}

static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
	unsigned int val, last = 0, state = 1;
	int p = 0;

	str[0] = '\0';

	for (val = 0; val <= 64; val++, valid >>= 1) {
		if (valid & 1) {
			last = val;
			switch (state) {
			case 0:
				p += scnprintf(str + p, len - p, ",");
				/* Fall through */
			case 1:
				p += scnprintf(str + p, len - p, "%u", val);
				state = 2;
				break;
			case 2:
				state = 3;
				break;
			case 3:
				state = 4;
				break;
			default:
				break;
			}
		} else {
			switch (state) {
			case 3:
				p += scnprintf(str + p, len - p, ",%u", last);
				state = 0;
				break;
			case 4:
				p += scnprintf(str + p, len - p, "-%u", last);
				state = 0;
				break;
			default:
				break;
			}
			if (state != 1)
				state = 0;
		}
	}
}

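/*
 * Validate one config term (e.g. mtc_period) against the PMU caps file: the
 * term's value is extracted from @config using the format's bit position,
 * then checked against the caps bitmask in which bit N set means value N is
 * supported.  Value 0 is always accepted.
 */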
static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
				    const char *caps, const char *name,
				    const char *supported, u64 config)
{
	char valid_str[256];
	unsigned int shift;
	unsigned long long valid;
	u64 bits;
	int ok;

	if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
		valid = 0;

	if (supported &&
	    perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
		valid = 0;

	valid |= 1;

	bits = perf_pmu__format_bits(&intel_pt_pmu->format, name);

	config &= bits;

	for (shift = 0; bits && !(bits & 1); shift++)
		bits >>= 1;

	config >>= shift;

	if (config > 63)
		goto out_err;

	if (valid & (1ULL << config))
		return 0;
out_err:
	intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
	pr_err("Invalid %s for %s. Valid values are: %s\n",
	       name, INTEL_PT_PMU_NAME, valid_str);
	return -EINVAL;
}

static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
				    struct evsel *evsel)
{
	int err;
	char c;

	if (!evsel)
		return 0;

	/*
	 * If supported, force pass-through config term (pt=1) even if user
	 * sets pt=0, which avoids senseless kernel errors.
	 */
	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
	    !(evsel->core.attr.config & 1)) {
		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
		evsel->core.attr.config |= 1;
	}

	err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
				       "cyc_thresh", "caps/psb_cyc",
				       evsel->core.attr.config);
	if (err)
		return err;

	err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
				       "mtc_period", "caps/mtc",
				       evsel->core.attr.config);
	if (err)
		return err;

	return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
					"psb_period", "caps/psb_cyc",
					evsel->core.attr.config);
}

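/*
 * For AUX area sampling, request the smallest PSB period (psb_period=0, i.e.
 * 2KiB) unless the user explicitly configured one: decoding synchronizes at
 * PSB packets, so even small samples should contain at least one (see also
 * the default_aux_sample_size comment at the end of this file).
 */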
static void intel_pt_config_sample_mode(struct perf_pmu *intel_pt_pmu,
					struct evsel *evsel)
{
	u64 user_bits = 0, bits;
	struct evsel_config_term *term = evsel__get_config_term(evsel, CFG_CHG);

	if (term)
		user_bits = term->val.cfg_chg;

	bits = perf_pmu__format_bits(&intel_pt_pmu->format, "psb_period");

	/* Did user change psb_period */
	if (bits & user_bits)
		return;

	/* Set psb_period to 0 */
	evsel->core.attr.config &= ~bits;
}

static void intel_pt_min_max_sample_sz(struct evlist *evlist,
				       size_t *min_sz, size_t *max_sz)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		size_t sz = evsel->core.attr.aux_sample_size;

		if (!sz)
			continue;
		if (min_sz && (sz < *min_sz || !*min_sz))
			*min_sz = sz;
		if (max_sz && sz > *max_sz)
			*max_sz = sz;
	}
}

/*
 * Currently, there is not enough information to disambiguate different PEBS
 * events, so only allow one.
 */
static bool intel_pt_too_many_aux_output(struct evlist *evlist)
{
	struct evsel *evsel;
	int aux_output_cnt = 0;

	evlist__for_each_entry(evlist, evsel)
		aux_output_cnt += !!evsel->core.attr.aux_output;

	if (aux_output_cnt > 1) {
		pr_err(INTEL_PT_PMU_NAME " supports at most one event with aux-output\n");
		return true;
	}

	return false;
}

static int intel_pt_recording_options(struct auxtrace_record *itr,
				      struct evlist *evlist,
				      struct record_opts *opts)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	bool have_timing_info, need_immediate = false;
	struct evsel *evsel, *intel_pt_evsel = NULL;
	const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
	bool privileged = perf_event_paranoid_check(-1);
	u64 tsc_bit;
	int err;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == intel_pt_pmu->type) {
			if (intel_pt_evsel) {
				pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
				return -EINVAL;
			}
			evsel->core.attr.freq = 0;
			evsel->core.attr.sample_period = 1;
			evsel->no_aux_samples = true;
			evsel->needs_auxtrace_mmap = true;
			intel_pt_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
		pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
		return -EINVAL;
	}

	if (opts->auxtrace_snapshot_mode && opts->auxtrace_sample_mode) {
		pr_err("Snapshot mode (" INTEL_PT_PMU_NAME " PMU) and sample trace cannot be used together\n");
		return -EINVAL;
	}

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
		return -EINVAL;
	}

	if (intel_pt_too_many_aux_output(evlist))
		return -EINVAL;

	if (!opts->full_auxtrace)
		return 0;

	if (opts->auxtrace_sample_mode)
		intel_pt_config_sample_mode(intel_pt_pmu, intel_pt_evsel);

	err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
	if (err)
		return err;

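	/*
	 * For example (assuming 4KiB pages and no user-specified sizes): a
	 * privileged user gets a 4MiB AUX area and hence a 4MiB snapshot
	 * size, while an unprivileged user gets a 128KiB AUX area, a 128KiB
	 * snapshot size and a 256KiB perf data mmap.
	 */
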
	/* Set default sizes for snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);

		if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages = KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}
		if (!opts->auxtrace_snapshot_size)
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
		pr_debug2("Intel PT snapshot size: %zu\n",
			  opts->auxtrace_snapshot_size);
		if (psb_period &&
		    opts->auxtrace_snapshot_size <= psb_period +
						    INTEL_PT_PSB_PERIOD_NEAR)
			ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
				    opts->auxtrace_snapshot_size, psb_period);
	}

	/* Set default sizes for sample mode */
	if (opts->auxtrace_sample_mode) {
		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);
		size_t min_sz = 0, max_sz = 0;

		intel_pt_min_max_sample_sz(evlist, &min_sz, &max_sz);
		if (!opts->auxtrace_mmap_pages && !privileged &&
		    opts->mmap_pages == UINT_MAX)
			opts->mmap_pages = KiB(256) / page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = round_up(max_sz, page_size) / page_size;

			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (max_sz > opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Sample size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       max_sz,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		pr_debug2("Intel PT min. sample size: %zu max. sample size: %zu\n",
			  min_sz, max_sz);
		if (psb_period &&
		    min_sz <= psb_period + INTEL_PT_PSB_PERIOD_NEAR)
			ui__warning("Intel PT sample size (%zu) may be too small for PSB period (%zu)\n",
				    min_sz, psb_period);
	}

	/* Set default sizes for full trace mode */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz;

		if (opts->auxtrace_snapshot_mode || opts->auxtrace_sample_mode)
			min_sz = KiB(4);
		else
			min_sz = KiB(8);

		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}

	if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
		u32 aux_watermark = opts->auxtrace_mmap_pages * page_size / 4;

		intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
	}

	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
			     "tsc", &tsc_bit);

	if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
		have_timing_info = true;
	else
		have_timing_info = false;

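	/*
	 * ptr->have_sched_switch records how context switches will be
	 * identified (see the assignments below):
	 *   1 - sched:sched_switch tracepoint
	 *   2 - PERF_RECORD_SWITCH events
	 *   3 - CPU-wide PERF_RECORD_SWITCH_CPU_WIDE events
	 */
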
	/*
	 * Per-cpu recording needs sched_switch events to distinguish different
	 * threads.
	 */
	if (have_timing_info && !perf_cpu_map__empty(cpus) &&
	    !record_opts__no_switch_events(opts)) {
		if (perf_can_record_switch_events()) {
			bool cpu_wide = !target__none(&opts->target) &&
					!target__has_task(&opts->target);

			if (!cpu_wide && perf_can_record_cpu_wide()) {
				struct evsel *switch_evsel;

				switch_evsel = evlist__add_dummy_on_all_cpus(evlist);
				if (!switch_evsel)
					return -ENOMEM;

				switch_evsel->core.attr.context_switch = 1;
				switch_evsel->immediate = true;

				evsel__set_sample_bit(switch_evsel, TID);
				evsel__set_sample_bit(switch_evsel, TIME);
				evsel__set_sample_bit(switch_evsel, CPU);
				evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);

				opts->record_switch_events = false;
				ptr->have_sched_switch = 3;
			} else {
				opts->record_switch_events = true;
				need_immediate = true;
				if (cpu_wide)
					ptr->have_sched_switch = 3;
				else
					ptr->have_sched_switch = 2;
			}
		} else {
			err = intel_pt_track_switches(evlist);
			if (err == -EPERM)
				pr_debug2("Unable to select sched:sched_switch\n");
			else if (err)
				return err;
			else
				ptr->have_sched_switch = 1;
		}
	}

	if (have_timing_info && !intel_pt_evsel->core.attr.exclude_kernel &&
	    perf_can_record_text_poke_events() && perf_can_record_cpu_wide())
		opts->text_poke = true;

	if (intel_pt_evsel) {
		/*
		 * To obtain the auxtrace buffer file descriptor, the auxtrace
		 * event must come first.
		 */
		evlist__to_front(evlist, intel_pt_evsel);
		/*
		 * In the case of per-cpu mmaps, we need the CPU on the
		 * AUX event.
		 */
		if (!perf_cpu_map__empty(cpus))
			evsel__set_sample_bit(intel_pt_evsel, CPU);
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		bool need_system_wide_tracking;
		struct evsel *tracking_evsel;

		/*
		 * User space tasks can migrate between CPUs, so when tracing
		 * selected CPUs, sideband for all CPUs is still needed.
		 */
		need_system_wide_tracking = opts->target.cpu_list &&
					    !intel_pt_evsel->core.attr.exclude_user;

		tracking_evsel = evlist__add_aux_dummy(evlist, need_system_wide_tracking);
		if (!tracking_evsel)
			return -ENOMEM;

		evlist__set_tracking_event(evlist, tracking_evsel);

		if (need_immediate)
			tracking_evsel->immediate = true;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!perf_cpu_map__empty(cpus)) {
			evsel__set_sample_bit(tracking_evsel, TIME);
			/* And the CPU for switch events */
			evsel__set_sample_bit(tracking_evsel, CPU);
		}
		evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
	}

	/*
	 * Warn the user when we do not have enough information to decode i.e.
	 * per-cpu with no sched_switch (except workload-only).
	 */
	if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
	    !target__none(&opts->target) &&
	    !intel_pt_evsel->core.attr.exclude_user)
		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");

	return 0;
}

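/*
 * Snapshot mode: the Intel PT event is disabled while user space reads the
 * AUX buffer, and re-enabled afterwards, so that new trace data does not
 * overwrite the snapshot as it is being copied out.
 */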
static int intel_pt_snapshot_start(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
			return evsel__disable(evsel);
	}
	return -EINVAL;
}

static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
			return evsel__enable(evsel);
	}
	return -EINVAL;
}

static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
{
	const size_t sz = sizeof(struct intel_pt_snapshot_ref);
	int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
	struct intel_pt_snapshot_ref *refs;

	if (!new_cnt)
		new_cnt = 16;

	while (new_cnt <= idx)
		new_cnt *= 2;

	refs = calloc(new_cnt, sz);
	if (!refs)
		return -ENOMEM;

	memcpy(refs, ptr->snapshot_refs, cnt * sz);
	zfree(&ptr->snapshot_refs);

	ptr->snapshot_refs = refs;
	ptr->snapshot_ref_cnt = new_cnt;

	return 0;
}

static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
{
	int i;

	for (i = 0; i < ptr->snapshot_ref_cnt; i++)
		zfree(&ptr->snapshot_refs[i].ref_buf);
	zfree(&ptr->snapshot_refs);
}

static void intel_pt_recording_free(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);

	intel_pt_free_snapshot_refs(ptr);
	free(ptr);
}

static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
				       size_t snapshot_buf_size)
{
	size_t ref_buf_size = ptr->snapshot_ref_buf_size;
	void *ref_buf;

	ref_buf = zalloc(ref_buf_size);
	if (!ref_buf)
		return -ENOMEM;

	ptr->snapshot_refs[idx].ref_buf = ref_buf;
	ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;

	return 0;
}

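/*
 * Wrap detection for larger snapshots works by keeping a "reference" copy of
 * the data at the tail of the AUX buffer from one snapshot to the next: if
 * those bytes have changed, the buffer must have wrapped around.  The
 * reference buffer is sized to roughly 2 PSB periods, capped at 256KiB.  For
 * small (<= 64KiB) snapshots no reference buffer is used and a simple check
 * for non-zero data suffices (see intel_pt_first_wrap()).
 */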
static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
					     size_t snapshot_buf_size)
{
	const size_t max_size = 256 * 1024;
	size_t buf_size = 0, psb_period;

	if (ptr->snapshot_size <= 64 * 1024)
		return 0;

	psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
	if (psb_period)
		buf_size = psb_period * 2;

	if (!buf_size || buf_size > max_size)
		buf_size = max_size;

	if (buf_size >= snapshot_buf_size)
		return 0;

	if (buf_size >= ptr->snapshot_size / 2)
		return 0;

	return buf_size;
}

static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
				  size_t snapshot_buf_size)
{
	if (ptr->snapshot_init_done)
		return 0;

	ptr->snapshot_init_done = true;

	ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
							snapshot_buf_size);

	return 0;
}

/**
 * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
 * @buf1: first buffer
 * @compare_size: number of bytes to compare
 * @buf2: second buffer (a circular buffer)
 * @offs2: offset in second buffer
 * @buf2_size: size of second buffer
 *
 * The comparison allows for the possibility that the bytes to compare in the
 * circular buffer are not contiguous.  It is assumed that @compare_size <=
 * @buf2_size.  This function returns %false if the bytes are identical, %true
 * otherwise.
 */
static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
				     void *buf2, size_t offs2, size_t buf2_size)
{
	size_t end2 = offs2 + compare_size, part_size;

	if (end2 <= buf2_size)
		return memcmp(buf1, buf2 + offs2, compare_size);

	part_size = end2 - buf2_size;
	if (memcmp(buf1, buf2 + offs2, part_size))
		return true;

	compare_size -= part_size;

	return memcmp(buf1 + part_size, buf2, compare_size);
}

static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
				 size_t ref_size, size_t buf_size,
				 void *data, size_t head)
{
	size_t ref_end = ref_offset + ref_size;

	if (ref_end > buf_size) {
		if (head > ref_offset || head < ref_end - buf_size)
			return true;
	} else if (head > ref_offset && head < ref_end) {
		return true;
	}

	return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
					buf_size);
}

static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
			      void *data, size_t head)
{
	if (head >= ref_size) {
		memcpy(ref_buf, data + head - ref_size, ref_size);
	} else {
		memcpy(ref_buf, data, head);
		ref_size -= head;
		memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
	}
}

static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 head)
{
	struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
	bool wrapped;

	wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
				       ptr->snapshot_ref_buf_size, mm->len,
				       data, head);

	intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
			  data, head);

	return wrapped;
}

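/*
 * Heuristic used when there is no reference buffer: AUX buffer pages start
 * out zero-filled and Intel PT trace data is never all zeroes, so any
 * non-zero data in the last 4KiB (512 u64s) of the buffer means it has been
 * written to, i.e. it has wrapped at least once.
 */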
static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
{
	int i, a, b;

	b = buf_size >> 3;
	a = b - 512;
	if (a < 0)
		a = 0;

	for (i = a; i < b; i++) {
		if (data[i])
			return true;
	}

	return false;
}

static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
				  struct auxtrace_mmap *mm, unsigned char *data,
				  u64 *head, u64 *old)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	bool wrapped;
	int err;

	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head);

	err = intel_pt_snapshot_init(ptr, mm->len);
	if (err)
		goto out_err;

	if (idx >= ptr->snapshot_ref_cnt) {
		err = intel_pt_alloc_snapshot_refs(ptr, idx);
		if (err)
			goto out_err;
	}

	if (ptr->snapshot_ref_buf_size) {
		if (!ptr->snapshot_refs[idx].ref_buf) {
			err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
			if (err)
				goto out_err;
		}
		wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
	} else {
		wrapped = ptr->snapshot_refs[idx].wrapped;
		if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
			ptr->snapshot_refs[idx].wrapped = true;
			wrapped = true;
		}
	}

	/*
	 * In full trace mode 'head' continually increases.  However in snapshot
	 * mode 'head' is an offset within the buffer.  Here 'old' and 'head'
	 * are adjusted to match the full trace case which expects that 'old' is
	 * always less than 'head'.
	 */
	if (wrapped) {
		*old = *head;
		*head += mm->len;
	} else {
		if (mm->mask)
			*old &= mm->mask;
		else
			*old %= mm->len;
		if (*old > *head)
			*head += mm->len;
	}

	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

	return 0;

out_err:
	pr_err("%s: failed, error %d\n", __func__, err);
	return err;
}

static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
{
	return rdtsc();
}

struct auxtrace_record *intel_pt_recording_init(int *err)
{
	struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
	struct intel_pt_recording *ptr;

	if (!intel_pt_pmu)
		return NULL;

	if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
		*err = -errno;
		return NULL;
	}

	ptr = zalloc(sizeof(struct intel_pt_recording));
	if (!ptr) {
		*err = -ENOMEM;
		return NULL;
	}

	ptr->intel_pt_pmu = intel_pt_pmu;
	ptr->itr.pmu = intel_pt_pmu;
	ptr->itr.recording_options = intel_pt_recording_options;
	ptr->itr.info_priv_size = intel_pt_info_priv_size;
	ptr->itr.info_fill = intel_pt_info_fill;
	ptr->itr.free = intel_pt_recording_free;
	ptr->itr.snapshot_start = intel_pt_snapshot_start;
	ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
	ptr->itr.find_snapshot = intel_pt_find_snapshot;
	ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
	ptr->itr.reference = intel_pt_reference;
	ptr->itr.read_finish = auxtrace_record__read_finish;
	/*
	 * Decoding starts at a PSB packet. Minimum PSB period is 2K so 4K
	 * should give at least 1 PSB per sample.
	 */
	ptr->itr.default_aux_sample_size = 4096;
	return &ptr->itr;
}