// SPDX-License-Identifier: GPL-2.0-only
/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <errno.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/time64.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <linux/list.h>
#include <linux/zalloc.h>

#include "evlist.h"
#include "dso.h"
#include "map.h"
#include "pmu.h"
#include "evsel.h"
#include "evsel_config.h"
#include "symbol.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "record.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>

#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
#include "arm-spe.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"

#include <linux/ctype.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>

/*
 * Make a group from 'leader' to 'last', requiring that the events were not
 * already grouped to a different leader.
 */
static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct evsel *last)
{
	struct evsel *evsel;
	bool grp;

	if (!evsel__is_group_leader(leader))
		return -EINVAL;

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!(evsel__leader(evsel) == leader ||
			      (evsel__leader(evsel) == evsel &&
			       evsel->core.nr_members <= 1)))
				return -EINVAL;
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!evsel__has_leader(evsel, leader)) {
				evsel__set_leader(evsel, leader);
				if (leader->core.nr_members < 1)
					leader->core.nr_members = 1;
				leader->core.nr_members += 1;
			}
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	return 0;
}

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu.cpu;

	if (!mp->len) {
		mm->base = NULL;
		return 0;
	}

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}
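
/*
 * A power-of-2 AUX buffer length allows offsets to be wrapped with a simple
 * mask, so a mask is only set up here when the length is a power of 2;
 * otherwise readers must wrap using the length itself.
 */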
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}

void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist, int idx,
				   bool per_cpu)
{
	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, idx);
		if (evlist->core.threads)
			mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu.cpu = -1;
		mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].tid = queues->queue_array[i].tid;
		queue_array[i].cpu = queues->queue_array[i].cpu;
		queue_array[i].set = queues->queue_array[i].set;
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data__fd(session->data);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];
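
	/* The first buffer queued determines the queue's tid and cpu */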
	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu.cpu;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__queue_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}

static bool filter_cpu(struct perf_session *session, struct perf_cpu cpu)
{
	unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;

	return cpu_bitmap && cpu.cpu != -1 && !test_bit(cpu.cpu, cpu_bitmap);
}

static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       struct perf_session *session,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer,
				       struct auxtrace_buffer **buffer_ptr)
{
	int err = -ENOMEM;

	if (filter_cpu(session, buffer->cpu))
		return 0;

	buffer = memdup(buffer, sizeof(*buffer));
	if (!buffer)
		return -ENOMEM;

	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data__is_pipe(session->data)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			goto out_free;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			goto out_free;
	}

	err = auxtrace_queues__queue_buffer(queues, idx, buffer);
	if (err)
		goto out_free;

	/* FIXME: Doesn't work for split buffer */
	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_free:
	auxtrace_buffer__free(buffer);
	return err;
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.tid = event->auxtrace.tid,
		.cpu = { event->auxtrace.cpu },
		.data_offset = data_offset,
		.offset = event->auxtrace.offset,
		.reference = event->auxtrace.reference,
		.size = event->auxtrace.size,
	};
	unsigned int idx = event->auxtrace.idx;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
					   buffer_ptr);
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;
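
	/*
	 * The AUX data for an indexed PERF_RECORD_AUXTRACE event immediately
	 * follows the event record in the file, so step past the event header
	 * before queueing the data buffer.
	 */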
	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del_init(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			return;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;
	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}

size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}
void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
{
	if (!on_exit && itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts)
{
	if (itr) {
		itr->evlist = evlist;
		return itr->recording_options(itr, evlist, opts);
	}
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	/* PMU-agnostic options */
	switch (*str) {
	case 'e':
		opts->auxtrace_snapshot_on_exit = true;
		str++;
		break;
	default:
		break;
	}

	if (itr && itr->parse_snapshot_options)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
{
	struct evsel *evsel;

	if (!itr->evlist || !itr->pmu)
		return -EINVAL;

	evlist__for_each_entry(itr->evlist, evsel) {
		if (evsel->core.attr.type == itr->pmu->type) {
			if (evsel->disabled)
				return 0;
			return evlist__enable_event_idx(itr->evlist, evsel, idx);
		}
	}
	return -EINVAL;
}

/*
 * Event record size is 16-bit which results in a maximum size of about 64KiB.
 * Allow about 4KiB for the rest of the sample record, to give a maximum
 * AUX area sample size of 60KiB.
 */
#define MAX_AUX_SAMPLE_SIZE (60 * 1024)

/* Arbitrary default size if no other default provided */
#define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)

static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
					     struct record_opts *opts)
{
	struct evsel *evsel;
	bool has_aux_leader = false;
	u32 sz;

	evlist__for_each_entry(evlist, evsel) {
		sz = evsel->core.attr.aux_sample_size;
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
			if (sz) {
				if (has_aux_leader)
					pr_err("Cannot add AUX area sampling to an AUX area event\n");
				else
					pr_err("Cannot add AUX area sampling to a group leader\n");
				return -EINVAL;
			}
		}
		if (sz > MAX_AUX_SAMPLE_SIZE) {
			pr_err("AUX area sample size %u too big, max. %d\n",
			       sz, MAX_AUX_SAMPLE_SIZE);
			return -EINVAL;
		}
		if (sz) {
			if (!has_aux_leader) {
				pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
				return -EINVAL;
			}
			evsel__set_sample_bit(evsel, AUX);
			opts->auxtrace_sample_mode = true;
		} else {
			evsel__reset_sample_bit(evsel, AUX);
		}
	}

	if (!opts->auxtrace_sample_mode) {
		pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
		return -EINVAL;
	}

	if (!perf_can_aux_sample()) {
		pr_err("AUX area sampling is not supported by kernel\n");
		return -EINVAL;
	}

	return 0;
}

int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str)
{
	struct evsel_config_term *term;
	struct evsel *aux_evsel;
	bool has_aux_sample_size = false;
	bool has_aux_leader = false;
	struct evsel *evsel;
	char *endptr;
	unsigned long sz;

	if (!str)
		goto no_opt;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	sz = strtoul(str, &endptr, 0);
	if (*endptr || sz > UINT_MAX) {
		pr_err("Bad AUX area sampling option: '%s'\n", str);
		return -EINVAL;
	}

	if (!sz)
		sz = itr->default_aux_sample_size;

	if (!sz)
		sz = DEFAULT_AUX_SAMPLE_SIZE;

	/* Set aux_sample_size based on --aux-sample option */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
		} else if (has_aux_leader) {
			evsel->core.attr.aux_sample_size = sz;
		}
	}
no_opt:
	aux_evsel = NULL;
	/* Override with aux_sample_size from config term */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
		if (term) {
			has_aux_sample_size = true;
			evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
			/* If possible, group with the AUX event */
			if (aux_evsel && evsel->core.attr.aux_sample_size)
				evlist__regroup(evlist, aux_evsel, evsel);
		}
	}

	if (!str && !has_aux_sample_size)
		return 0;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	return auxtrace_validate_aux_sample_size(evlist, opts);
}

void auxtrace_regroup_aux_output(struct evlist *evlist)
{
	struct evsel *evsel, *aux_evsel = NULL;
	struct evsel_config_term *term;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_OUTPUT);
		/* If possible, group with the AUX event */
		if (term && aux_evsel)
			evlist__regroup(evlist, aux_evsel, evsel);
	}
}

struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}
void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del_init(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}
static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	if (auxtrace__dont_decode(session))
		return 0;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
						     struct perf_sample *sample,
						     struct perf_session *session)
{
	struct perf_sample_id *sid;
	unsigned int idx;
	u64 id;

	id = sample->id;
	if (!id)
		return NULL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return NULL;

	idx = sid->idx;

	if (idx >= queues->nr_queues)
		return NULL;

	return &queues->queue_array[idx];
}

int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.data_offset = data_offset,
		.reference = reference,
		.size = sample->aux_sample.size,
	};
	struct perf_sample_id *sid;
	u64 id = sample->id;
	unsigned int idx;

	if (!id)
		return -EINVAL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return -ENOENT;

	idx = sid->idx;
	buffer.tid = sid->tid;
	buffer.cpu = sid->cpu;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
}

struct queue_data {
	bool samples;
	bool events;
};

static int auxtrace_queue_data_cb(struct perf_session *session,
				  union perf_event *event, u64 offset,
				  void *data)
{
	struct queue_data *qd = data;
	struct perf_sample sample;
	int err;

	if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace))
			return -EINVAL;
		offset += event->header.size;
		return session->auxtrace->queue_data(session, NULL, event,
						     offset);
	}

	if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
		return 0;

	err = evlist__parse_sample(session->evlist, event, &sample);
	if (err)
		return err;

	if (!sample.aux_sample.size)
		return 0;

	offset += sample.aux_sample.data - (void *)event;

	return session->auxtrace->queue_data(session, &sample, NULL, offset);
}
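
/*
 * Scan the whole data section and queue the AUX data carried by
 * PERF_RECORD_AUXTRACE events and/or AUX area samples, so that it can all be
 * decoded in a single pass.
 */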
int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
{
	struct queue_data qd = {
		.samples = samples,
		.events = events,
	};

	if (auxtrace__dont_decode(session))
		return 0;

	if (!session->auxtrace || !session->auxtrace->queue_data)
		return -EINVAL;

	return perf_session__peek_events(session, session->header.data_offset,
					 session->header.data_size,
					 auxtrace_queue_data_cb, &qd);
}

void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
{
	int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}

void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->fmt = 1;
	auxtrace_error->ip = ip;
	auxtrace_error->time = timestamp;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}
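
/*
 * Dissolve the group led by 'leader': the next group member becomes the new
 * leader and inherits the group name and remaining member count.
 */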
static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
{
	struct evsel *new_leader = NULL;
	struct evsel *evsel;

	/* Find new leader for the group */
	evlist__for_each_entry(evlist, evsel) {
		if (!evsel__has_leader(evsel, leader) || evsel == leader)
			continue;
		if (!new_leader)
			new_leader = evsel;
		evsel__set_leader(evsel, new_leader);
	}

	/* Update group information */
	if (new_leader) {
		zfree(&new_leader->group_name);
		new_leader->group_name = leader->group_name;
		leader->group_name = NULL;

		new_leader->core.nr_members = leader->core.nr_members - 1;
		leader->core.nr_members = 1;
	}
}

static void unleader_auxtrace(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (auxtrace__evsel_is_auxtrace(session, evsel) &&
		    evsel__is_group_leader(evsel)) {
			unleader_evsel(session->evlist, evsel);
		}
	}
}

int perf_event__process_auxtrace_info(struct perf_session *session,
				      union perf_event *event)
{
	enum auxtrace_type type = event->auxtrace_info.type;
	int err;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		err = intel_pt_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_INTEL_BTS:
		err = intel_bts_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_ARM_SPE:
		err = arm_spe_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_CS_ETM:
		err = cs_etm__process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_S390_CPUMSF:
		err = s390_cpumsf_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	unleader_auxtrace(session);

	return 0;
}

s64 perf_event__process_auxtrace(struct perf_session *session,
				 union perf_event *event)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample)
{
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->ptwrites = true;
	synth_opts->pwr_events = true;
	synth_opts->other_events = true;
	synth_opts->intr_events = true;
	synth_opts->errors = true;
	synth_opts->flc = true;
	synth_opts->llc = true;
	synth_opts->tlb = true;
	synth_opts->mem = true;
	synth_opts->remote_access = true;
	if (no_sample) {
		synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
		synth_opts->period = 1;
		synth_opts->calls = true;
	} else {
		synth_opts->instructions = true;
		synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
	synth_opts->initial_skip = 0;
}

static int get_flag(const char **ptr, unsigned int *flags)
{
	while (1) {
		char c = **ptr;

		if (c >= 'a' && c <= 'z') {
			*flags |= 1 << (c - 'a');
			++*ptr;
			return 0;
		} else if (c == ' ') {
			++*ptr;
			continue;
		} else {
			return -1;
		}
	}
}

static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
{
	while (1) {
		switch (**ptr) {
		case '+':
			++*ptr;
			if (get_flag(ptr, plus_flags))
				return -1;
			break;
		case '-':
			++*ptr;
			if (get_flag(ptr, minus_flags))
				return -1;
			break;
		case ' ':
			++*ptr;
			break;
		default:
			return 0;
		}
	}
}

/*
 * See tools/perf/Documentation/perf-script.txt for information about the
 * options parsed here; that documentation was added, along with support for
 * these options in 'perf script', after this cset.
 */
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset)
{
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts,
					       synth_opts->default_no_sample);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'o':
			synth_opts->other_events = true;
			break;
		case 'I':
			synth_opts->intr_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			if (get_flags(&p, &synth_opts->error_plus_flags,
				      &synth_opts->error_minus_flags))
				goto out_err;
			break;
		case 'd':
			synth_opts->log = true;
			if (get_flags(&p, &synth_opts->log_plus_flags,
				      &synth_opts->log_minus_flags))
				goto out_err;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'G':
		case 'g':
			if (p[-1] == 'G')
				synth_opts->add_callchain = true;
			else
				synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'L':
		case 'l':
			if (p[-1] == 'L')
				synth_opts->add_last_branch = true;
			else
				synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case 'f':
			synth_opts->flc = true;
			break;
		case 'm':
			synth_opts->llc = true;
			break;
		case 't':
			synth_opts->tlb = true;
			break;
		case 'a':
			synth_opts->remote_access = true;
			break;
		case 'M':
			synth_opts->mem = true;
			break;
		case 'q':
			synth_opts->quick += 1;
			break;
		case 'A':
			synth_opts->approx_ipc = true;
			break;
		case 'Z':
			synth_opts->timeless_decoding = true;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
{
	return itrace_do_parse_synth_opts(opt->value, str, unset);
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;
	unsigned long long nsecs = e->time;
	const char *msg = e->msg;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);

	if (e->fmt && nsecs) {
		unsigned long secs = nsecs / NSEC_PER_SEC;

		nsecs -= secs * NSEC_PER_SEC;
		ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
	} else {
		ret += fprintf(fp, " time 0");
	}
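
	/*
	 * Unformatted (fmt == 0) records have no 'time' or 'ip' fields; the
	 * message begins where 'time' would otherwise be.
	 */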
	if (!e->fmt)
		msg = (const char *)&e->time;

	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_session *session,
				       union perf_event *event)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

/*
 * In compat mode, the kernel runs in 64-bit mode while the perf tool runs in
 * 32-bit mode. A 32-bit perf tool cannot access a 64-bit value atomically,
 * which can lead to the problem shown in the sequence below on multiple CPUs:
 * on some architectures a 64-bit load or store is split into two
 * instructions, one accessing the low 32 bits and one accessing the high
 * 32 bits, so the kernel can update the 64-bit value between the two
 * user-space accesses, and the tool can load an unexpected, torn value.
 *
 *   kernel (64-bit)                       user (32-bit)
 *
 *   if (LOAD ->aux_tail) {        --,     LOAD ->aux_head_lo
 *       STORE $aux_data            |  ,--->
 *       FLUSH $aux_data            |  |   LOAD ->aux_head_hi
 *       STORE ->aux_head         --|--'   smp_rmb()
 *   }                              |      LOAD $data
 *                                  |      smp_mb()
 *                                  |      STORE ->aux_tail_lo
 *                                  `----------->
 *                                         STORE ->aux_tail_hi
 *
 * For this reason, the perf tool cannot work correctly when the AUX head or
 * tail grows beyond 4GB (more than 32 bits), and simply limiting the AUX ring
 * buffer to less than 4GB does not help: the pointers increase monotonically,
 * so whatever the buffer size, the head and tail can eventually exceed 4GB
 * and carry into the high 32 bits.
 *
 * To mitigate these issues and improve the user experience, the perf tool is
 * allowed to work under certain conditions and bails out with an error if it
 * detects an overflow that cannot be handled.
 *
 * To read the AUX head, the value is loaded three times and the high 4 bytes
 * of the first and last values are compared; if the kernel did not change the
 * high 4 bytes during the read sequence, it is safe to use the second value.
 *
 * When compat_auxtrace_mmap__write_tail() detects a carry into the high
 * 32 bits, two store operations would be needed in user space, so the
 * atomicity of the 64-bit write cannot be guaranteed; '-1' is returned in
 * that case to tell the caller that an overflow error has happened.
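 *
 * For example, if a kernel update of the head crosses the 4GB boundary
 * between the first and third reads (say 0x0_ffff_f000 -> 0x1_0000_1000),
 * the high 32 bits of the first and last values differ, so the loop below
 * retries rather than returning a torn value such as 0x0_0000_1000.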
 */
u64 __weak compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 first, second, last;
	u64 mask = (u64)(UINT32_MAX) << 32;

	do {
		first = READ_ONCE(pc->aux_head);
		/* Ensure all reads are done after we read the head */
		smp_rmb();
		second = READ_ONCE(pc->aux_head);
		/* Ensure all reads are done after we read the head */
		smp_rmb();
		last = READ_ONCE(pc->aux_head);
	} while ((first & mask) != (last & mask));

	return second;
}

int __weak compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 mask = (u64)(UINT32_MAX) << 32;

	if (tail & mask)
		return -1;

	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}

static int __auxtrace_mmap__read(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	struct auxtrace_mmap *mm = &map->auxtrace_mmap;
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;
	int kernel_is_64_bit = perf_env__kernel_is_64_bit(evsel__env(NULL));

	head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);

	if (snapshot &&
	    auxtrace_record__find_snapshot(itr, mm->idx, mm, data, &head, &old))
		return -1;

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}
	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
	if (padding)
		padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, map, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		int err;

		err = auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
		if (err < 0)
			return err;

		if (itr->read_finish) {
			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size);
}

/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	zfree(&c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}
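
/*
 * Adding an entry can drop the whole cache once @limit is exceeded, so the
 * typical usage pattern is to look up first, then allocate, fill and add on
 * a miss, e.g.:
 *
 *	entry = auxtrace_cache__lookup(c, key);
 *	if (!entry) {
 *		entry = auxtrace_cache__alloc_entry(c);
 *		if (entry) {
 *			... fill in entry ...
 *			auxtrace_cache__add(c, key, entry);
 *		}
 *	}
 */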
int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
						       u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;
	struct hlist_node *n;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry_safe(entry, n, hlist, hash) {
		if (entry->key == key) {
			hlist_del(&entry->hash);
			return entry;
		}
	}

	return NULL;
}

void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);

	auxtrace_cache__free_entry(c, entry);
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}

static void addr_filter__free_str(struct addr_filter *filt)
{
	zfree(&filt->str);
	filt->action = NULL;
	filt->sym_from = NULL;
	filt->sym_to = NULL;
	filt->filename = NULL;
}

static struct addr_filter *addr_filter__new(void)
{
	struct addr_filter *filt = zalloc(sizeof(*filt));

	if (filt)
		INIT_LIST_HEAD(&filt->list);

	return filt;
}

static void addr_filter__free(struct addr_filter *filt)
{
	if (filt)
		addr_filter__free_str(filt);
	free(filt);
}

static void addr_filters__add(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_add_tail(&filt->list, &filts->head);
	filts->cnt += 1;
}

static void addr_filters__del(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_del_init(&filt->list);
	filts->cnt -= 1;
}

void addr_filters__init(struct addr_filters *filts)
{
	INIT_LIST_HEAD(&filts->head);
	filts->cnt = 0;
}

void addr_filters__exit(struct addr_filters *filts)
{
	struct addr_filter *filt, *n;

	list_for_each_entry_safe(filt, n, &filts->head, list) {
		addr_filters__del(filts, filt);
		addr_filter__free(filt);
	}
}

static int parse_num_or_str(char **inp, u64 *num, const char **str,
			    const char *str_delim)
{
	*inp += strspn(*inp, " ");

	if (isdigit(**inp)) {
		char *endptr;

		if (!num)
			return -EINVAL;
		errno = 0;
		*num = strtoull(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp)
			return -EINVAL;
		*inp = endptr;
	} else {
		size_t n;

		if (!str)
			return -EINVAL;
		*inp += strspn(*inp, " ");
		*str = *inp;
		n = strcspn(*inp, str_delim);
		if (!n)
			return -EINVAL;
		*inp += n;
		if (**inp) {
			**inp = '\0';
			*inp += 1;
		}
	}
	return 0;
}
static int parse_action(struct addr_filter *filt)
{
	if (!strcmp(filt->action, "filter")) {
		filt->start = true;
		filt->range = true;
	} else if (!strcmp(filt->action, "start")) {
		filt->start = true;
	} else if (!strcmp(filt->action, "stop")) {
		filt->start = false;
	} else if (!strcmp(filt->action, "tracestop")) {
		filt->start = false;
		filt->range = true;
		filt->action += 5; /* Change 'tracestop' to 'stop' */
	} else {
		return -EINVAL;
	}
	return 0;
}

static int parse_sym_idx(char **inp, int *idx)
{
	*idx = -1;

	*inp += strspn(*inp, " ");

	if (**inp != '#')
		return 0;

	*inp += 1;

	if (**inp == 'g' || **inp == 'G') {
		*inp += 1;
		*idx = 0;
	} else {
		unsigned long num;
		char *endptr;

		errno = 0;
		num = strtoul(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp || num > INT_MAX)
			return -EINVAL;
		*inp = endptr;
		*idx = num;
	}

	return 0;
}

static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
{
	int err = parse_num_or_str(inp, num, str, " ");

	if (!err && *str)
		err = parse_sym_idx(inp, idx);

	return err;
}

static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}

int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter)
{
	struct addr_filter *filt;
	const char *fstr = filter;
	int err;

	while (*fstr) {
		filt = addr_filter__new();
		if (!filt) {
			addr_filters__exit(filts);
			return -ENOMEM;
		}
		err = parse_one_filter(filt, &fstr);
		if (err) {
			addr_filter__free(filt);
			addr_filters__exit(filts);
			return err;
		}
		addr_filters__add(filts, filt);
	}

	return 0;
}

struct sym_args {
	const char *name;
	u64 start;
	u64 size;
	int idx;
	int cnt;
	bool started;
	bool global;
	bool selected;
	bool duplicate;
	bool near;
};

static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/* A function with the same name, and global or the n'th found or any */
	return kallsyms__is_function(type) &&
	       !strcmp(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}

static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;
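
	/*
	 * Once a match has started, its size is the distance from its start
	 * to the next symbol in kallsyms.
	 */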
static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1;
		} else if (kern_sym_match(args, name, type)) {
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}

static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (kern_sym_match(args, name, type)) {
		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
		       ++args->cnt, start, type, name);
		args->near = true;
	} else if (args->near) {
		args->near = false;
		pr_err("\t\twhich is near\t\t%s\n", name);
	}

	return 0;
}

static int sym_not_found_error(const char *sym_name, int idx)
{
	if (idx > 0) {
		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
		       idx, sym_name);
	} else if (!idx) {
		pr_err("Global symbol '%s' not found.\n", sym_name);
	} else {
		pr_err("Symbol '%s' not found.\n", sym_name);
	}
	pr_err("Note that symbols must be functions.\n");

	return -EINVAL;
}
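
/*
 * Resolve a kernel symbol name to (start, size) via /proc/kallsyms.
 * idx == 0 selects the global symbol, idx > 0 the n'th occurrence and
 * idx < 0 any occurrence; ambiguous matches are listed so the user can
 * disambiguate with the "#<n>" suffix.
 */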
static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}

static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;

	if (!kallsyms__is_function(type))
		return 0;

	if (!args->started) {
		args->started = true;
		args->start = start;
	}
	/* Don't know exactly where the kernel ends, so we add a page */
	args->size = round_up(start, page_size) + page_size - args->start;

	return 0;
}

static int addr_filter__entire_kernel(struct addr_filter *filt)
{
	struct sym_args args = { .started = false };
	int err;

	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
	if (err < 0 || !args.started) {
		pr_err("Failed to parse /proc/kallsyms\n");
		/* Do not report success when no function symbols were seen */
		return err < 0 ? err : -EINVAL;
	}

	filt->addr = args.start;
	filt->size = args.size;

	return 0;
}

static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
	if (start + size >= filt->addr)
		return 0;

	if (filt->sym_from) {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
		       filt->sym_to, start, filt->sym_from, filt->addr);
	} else {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
		       filt->sym_to, start, filt->addr);
	}

	return -EINVAL;
}
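
/*
 * Resolve the kernel-side symbols of a filter to addresses. For example
 * (symbol and addresses illustrative only), "filter do_sys_open" would
 * become the equivalent of "filter 0x<start>/0x<size>", while a sym_from
 * of "*" covers the entire kernel text.
 */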
static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms does not imply a particular size */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}

static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	if (map__load(map) < 0)
		pr_err("File '%s' not found or has no symbols.\n", name);

	dso = dso__get(map->dso);

	map__put(map);

	return dso;
}

static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/* Same name, and global or the n'th found or any */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}

static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso);
	while (sym) {
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}

static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso);
	while (sym) {
		if (*start) {
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				/* n'th symbol found and sized: success */
				if (*size)
					return 0;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}

static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
	if (dso__data_file_size(dso, NULL)) {
		pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
		       filt->filename);
		return -EINVAL;
	}

	filt->addr = 0;
	filt->size = dso->data.file_size;

	return 0;
}
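
/*
 * Resolve a filter's symbols to addresses: kernel symbols are looked up
 * via kallsyms when no file name was given, otherwise symbols come from
 * the named DSO's symbol table. A sym_from of "*" covers the whole file.
 */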
static int addr_filter__resolve_syms(struct addr_filter *filt)
{
	u64 start, size;
	struct dso *dso;
	int err = 0;

	if (!filt->sym_from && !filt->sym_to)
		return 0;

	if (!filt->filename)
		return addr_filter__resolve_kernel_syms(filt);

	dso = load_dso(filt->filename);
	if (!dso) {
		pr_err("Failed to load symbols from: %s\n", filt->filename);
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
		err = addr_filter__entire_dso(filt, dso);
		goto put_dso;
	}

	if (filt->sym_from) {
		err = find_dso_sym(dso, filt->sym_from, &start, &size,
				   filt->sym_from_idx);
		if (err)
			goto put_dso;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to)
			filt->size = size;
	}

	if (filt->sym_to) {
		err = find_dso_sym(dso, filt->sym_to, &start, &size,
				   filt->sym_to_idx);
		if (err)
			goto put_dso;

		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso; /* Do not leak the dso reference */

		filt->size = start + size - filt->addr;
	}

put_dso:
	dso__put(dso);

	return err;
}

static char *addr_filter__to_str(struct addr_filter *filt)
{
	char filename_buf[PATH_MAX];
	const char *at = "";
	const char *fn = "";
	char *filter;
	int err;

	if (filt->filename) {
		at = "@";
		fn = realpath(filt->filename, filename_buf);
		if (!fn)
			return NULL;
	}

	if (filt->range) {
		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
			       filt->action, filt->addr, filt->size, at, fn);
	} else {
		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
			       filt->action, filt->addr, at, fn);
	}

	return err < 0 ? NULL : filter;
}

static int parse_addr_filter(struct evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		if (evsel__append_addr_filter(evsel, new_filter)) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}

static int evsel__nr_addr_filter(struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (!pmu)
		return 0;

	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);

	return nr_addr_filters;
}

int auxtrace_parse_filters(struct evlist *evlist)
{
	struct evsel *evsel;
	char *filter;
	int err, max_nr;

	evlist__for_each_entry(evlist, evsel) {
		filter = evsel->filter;
		max_nr = evsel__nr_addr_filter(evsel);
		if (!filter || !max_nr)
			continue;
		evsel->filter = NULL;
		err = parse_addr_filter(evsel, filter, max_nr);
		free(filter);
		if (err)
			return err;
		pr_debug("Address filter: %s\n", evsel->filter);
	}

	return 0;
}
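
/*
 * The wrappers below delegate to the AUX area decoder callbacks hung off
 * session->auxtrace (e.g. Intel PT or CoreSight ETM), doing nothing when
 * no decoder has been set up for the session.
 */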
int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->process_event(session, event, sample, tool);
}

void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample)
{
	if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
	    auxtrace__dont_decode(session))
		return;

	session->auxtrace->dump_auxtrace_sample(session, sample);
}

int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->flush_events(session, tool);
}

void auxtrace__free_events(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	session->auxtrace->free_events(session);
}

void auxtrace__free(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	session->auxtrace->free(session);
}

bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel)
{
	if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
		return false;

	return session->auxtrace->evsel_is_auxtrace(session, evsel);
}