// SPDX-License-Identifier: GPL-2.0-only
/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <errno.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/time64.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <linux/list.h>
#include <linux/zalloc.h>

#include "config.h"
#include "evlist.h"
#include "dso.h"
#include "map.h"
#include "pmu.h"
#include "evsel.h"
#include "evsel_config.h"
#include "symbol.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "record.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>

#include "cs-etm.h"
#include "intel-pt.h"
#include "intel-bts.h"
#include "arm-spe.h"
#include "hisi-ptt.h"
#include "s390-cpumsf.h"
#include "util/mmap.h"
#include "powerpc-vpadtl.h"

#include <linux/ctype.h>
#include "symbol/kallsyms.h"
#include <internal/lib.h>
#include "util/sample.h"

#define AUXTRACE_SYNTH_EVENT_ID_OFFSET 1000000000ULL

/*
 * Event IDs are allocated sequentially, so a big offset from any
 * existing ID will reach an unused range.
 */
u64 auxtrace_synth_id_range_start(struct evsel *evsel)
{
	u64 id = evsel->core.id[0] + AUXTRACE_SYNTH_EVENT_ID_OFFSET;

	if (!id)
		id = 1;

	return id;
}
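/*
 * Usage sketch (hypothetical decoder set-up, not code from this file):
 * a decoder that synthesizes its own events can take consecutive IDs
 * from this range, e.g.
 *
 *	u64 id = auxtrace_synth_id_range_start(evsel);
 *
 *	instructions_id = id++;
 *	branches_id = id++;
 *
 * where 'instructions_id' and 'branches_id' are illustrative names only.
 */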
/*
 * Make a group from 'leader' to 'last', requiring that the events were not
 * already grouped to a different leader.
 */
static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct evsel *last)
{
	struct evsel *evsel;
	bool grp;

	if (!evsel__is_group_leader(leader))
		return -EINVAL;

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!(evsel__leader(evsel) == leader ||
			      (evsel__leader(evsel) == evsel &&
			       evsel->core.nr_members <= 1)))
				return -EINVAL;
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	grp = false;
	evlist__for_each_entry(evlist, evsel) {
		if (grp) {
			if (!evsel__has_leader(evsel, leader)) {
				evsel__set_leader(evsel, leader);
				if (leader->core.nr_members < 1)
					leader->core.nr_members = 1;
				leader->core.nr_members += 1;
			}
		} else if (evsel == leader) {
			grp = true;
		}
		if (evsel == last)
			break;
	}

	return 0;
}

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu.cpu;

	if (!mp->len || !mp->mmap_needed) {
		mm->base = NULL;
		return 0;
	}

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}
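/*
 * Call-order sketch for the mmap API above and below (how callers in the
 * evlist mmap code are expected to use it; the loop is illustrative):
 *
 *	auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *				   auxtrace_overwrite);
 *	// for each mmap index 'idx':
 *	auxtrace_mmap_params__set_idx(&mp, evlist, evsel, idx);
 *	auxtrace_mmap__mmap(&map->auxtrace_mmap, &mp, userpg, fd);
 */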
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}

void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct evlist *evlist,
				   struct evsel *evsel, int idx)
{
	bool per_cpu = !perf_cpu_map__has_any_cpu(evlist->core.user_requested_cpus);

	mp->mmap_needed = evsel->needs_auxtrace_mmap;

	if (!mp->mmap_needed)
		return;

	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
		mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
	} else {
		mp->cpu.cpu = -1;
		mp->tid = perf_thread_map__pid(evlist->core.threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES 32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init_nr(struct auxtrace_queues *queues, int nr_queues)
{
	queues->nr_queues = nr_queues;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	return auxtrace_queues__init_nr(queues, AUXTRACE_INIT_NR_QUEUES);
}

static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].tid = queues->queue_array[i].tid;
		queue_array[i].cpu = queues->queue_array[i].cpu;
		queue_array[i].set = queues->queue_array[i].set;
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data__fd(session->data);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}
static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu.cpu;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__queue_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}
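/*
 * Worked example for the splitting above (illustrative numbers): a 100MiB
 * buffer is queued as three 32MiB chunks, the first with ->consecutive set
 * to false and the rest to true, and the original buffer is left holding
 * the final 4MiB, also marked consecutive:
 *
 *	100MiB -> 32MiB + 32MiB + 32MiB + 4MiB remainder
 *
 * so a 32-bit perf tool never has to map more than 32MiB at once.
 */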
static bool filter_cpu(struct perf_session *session, struct perf_cpu cpu)
{
	unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;

	return cpu_bitmap && cpu.cpu != -1 && !test_bit(cpu.cpu, cpu_bitmap);
}

static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       struct perf_session *session,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer,
				       struct auxtrace_buffer **buffer_ptr)
{
	int err = -ENOMEM;

	if (filter_cpu(session, buffer->cpu))
		return 0;

	buffer = memdup(buffer, sizeof(*buffer));
	if (!buffer)
		return -ENOMEM;

	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data__is_pipe(session->data)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			goto out_free;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			goto out_free;
	}

	err = auxtrace_queues__queue_buffer(queues, idx, buffer);
	if (err)
		goto out_free;

	/* FIXME: Doesn't work for split buffer */
	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_free:
	auxtrace_buffer__free(buffer);
	return err;
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.tid = event->auxtrace.tid,
		.cpu = { event->auxtrace.cpu },
		.data_offset = data_offset,
		.offset = event->auxtrace.offset,
		.reference = event->auxtrace.reference,
		.size = event->auxtrace.size,
	};
	unsigned int idx = event->auxtrace.idx;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer,
					   buffer_ptr);
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del_init(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			return;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;
	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}
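/*
 * Usage sketch for the min-heap above (hypothetical decoder main loop, not
 * code from this file): the heap keeps the queue with the smallest
 * timestamp ('ordinal') on top, so buffers from many queues can be
 * processed in global time order:
 *
 *	while (heap.heap_cnt) {
 *		unsigned int queue_nr = heap.heap_array[0].queue_nr;
 *		u64 ts;
 *
 *		auxtrace_heap__pop(&heap);
 *		ts = decode_queue(queue_nr);	// hypothetical helper
 *		auxtrace_heap__add(&heap, queue_nr, ts);
 *	}
 */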
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct perf_record_auxtrace_info *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit)
{
	if (!on_exit && itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct evlist *evlist,
			     struct record_opts *opts)
{
	if (itr) {
		itr->evlist = evlist;
		return itr->recording_options(itr, evlist, opts);
	}
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	/* PMU-agnostic options */
	switch (*str) {
	case 'e':
		opts->auxtrace_snapshot_on_exit = true;
		str++;
		break;
	default:
		break;
	}

	if (itr && itr->parse_snapshot_options)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu(evlist->core.user_requested_cpus);

	if (per_cpu_mmaps) {
		struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx);
		int cpu_map_idx = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);

		if (cpu_map_idx == -1)
			return -EINVAL;
		return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
	}

	return perf_evsel__enable_thread(&evsel->core, idx);
}

int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
{
	struct evsel *evsel;

	if (!itr->evlist)
		return -EINVAL;

	evlist__for_each_entry(itr->evlist, evsel) {
		if (evsel__is_aux_event(evsel)) {
			if (evsel->disabled)
				return 0;
			return evlist__enable_event_idx(itr->evlist, evsel, idx);
		}
	}
	return -EINVAL;
}
/*
 * Event record size is 16-bit which results in a maximum size of about 64KiB.
 * Allow about 4KiB for the rest of the sample record, to give a maximum
 * AUX area sample size of 60KiB.
 */
#define MAX_AUX_SAMPLE_SIZE (60 * 1024)

/* Arbitrary default size if no other default provided */
#define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024)

static int auxtrace_validate_aux_sample_size(struct evlist *evlist,
					     struct record_opts *opts)
{
	struct evsel *evsel;
	bool has_aux_leader = false;
	u32 sz;

	evlist__for_each_entry(evlist, evsel) {
		sz = evsel->core.attr.aux_sample_size;
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
			if (sz) {
				if (has_aux_leader)
					pr_err("Cannot add AUX area sampling to an AUX area event\n");
				else
					pr_err("Cannot add AUX area sampling to a group leader\n");
				return -EINVAL;
			}
		}
		if (sz > MAX_AUX_SAMPLE_SIZE) {
			pr_err("AUX area sample size %u too big, max. %d\n",
			       sz, MAX_AUX_SAMPLE_SIZE);
			return -EINVAL;
		}
		if (sz) {
			if (!has_aux_leader) {
				pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n");
				return -EINVAL;
			}
			evsel__set_sample_bit(evsel, AUX);
			opts->auxtrace_sample_mode = true;
		} else {
			evsel__reset_sample_bit(evsel, AUX);
		}
	}

	if (!opts->auxtrace_sample_mode) {
		pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n");
		return -EINVAL;
	}

	if (!perf_can_aux_sample()) {
		pr_err("AUX area sampling is not supported by kernel\n");
		return -EINVAL;
	}

	return 0;
}

int auxtrace_parse_sample_options(struct auxtrace_record *itr,
				  struct evlist *evlist,
				  struct record_opts *opts, const char *str)
{
	struct evsel_config_term *term;
	struct evsel *aux_evsel;
	bool has_aux_sample_size = false;
	bool has_aux_leader = false;
	struct evsel *evsel;
	char *endptr;
	unsigned long sz;

	if (!str)
		goto no_opt;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	sz = strtoul(str, &endptr, 0);
	if (*endptr || sz > UINT_MAX) {
		pr_err("Bad AUX area sampling option: '%s'\n", str);
		return -EINVAL;
	}

	if (!sz)
		sz = itr->default_aux_sample_size;

	if (!sz)
		sz = DEFAULT_AUX_SAMPLE_SIZE;

	/* Set aux_sample_size based on --aux-sample option */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_group_leader(evsel)) {
			has_aux_leader = evsel__is_aux_event(evsel);
		} else if (has_aux_leader) {
			evsel->core.attr.aux_sample_size = sz;
		}
	}
no_opt:
	aux_evsel = NULL;
	/* Override with aux_sample_size from config term */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel))
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE);
		if (term) {
			has_aux_sample_size = true;
			evsel->core.attr.aux_sample_size = term->val.aux_sample_size;
			/* If possible, group with the AUX event */
			if (aux_evsel && evsel->core.attr.aux_sample_size)
				evlist__regroup(evlist, aux_evsel, evsel);
		}
	}

	if (!str && !has_aux_sample_size)
		return 0;

	if (!itr) {
		pr_err("No AUX area event to sample\n");
		return -EINVAL;
	}

	return auxtrace_validate_aux_sample_size(evlist, opts);
}

static struct aux_action_opt {
	const char *str;
	u32 aux_action;
	bool aux_event_opt;
} aux_action_opts[] = {
	{"start-paused", BIT(0), true},
	{"pause", BIT(1), false},
	{"resume", BIT(2), false},
	{.str = NULL},
};
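/*
 * Command-line sketch for the aux-action config terms above (illustrative;
 * see the perf-record documentation for the authoritative syntax):
 *
 *	perf record -e intel_pt/aux-action=start-paused/u \
 *		    -e probe:foo/aux-action=resume/ \
 *		    -e probe:bar/aux-action=pause/ -- workload
 *
 * i.e. "start-paused" applies to the AUX area event itself, while "pause"
 * and "resume" apply to other events grouped with it.
 */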
static const struct aux_action_opt *auxtrace_parse_aux_action_str(const char *str)
{
	const struct aux_action_opt *opt;

	if (!str)
		return NULL;

	for (opt = aux_action_opts; opt->str; opt++)
		if (!strcmp(str, opt->str))
			return opt;

	return NULL;
}

int auxtrace_parse_aux_action(struct evlist *evlist)
{
	struct evsel_config_term *term;
	struct evsel *aux_evsel = NULL;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		bool is_aux_event = evsel__is_aux_event(evsel);
		const struct aux_action_opt *opt;

		if (is_aux_event)
			aux_evsel = evsel;
		term = evsel__get_config_term(evsel, AUX_ACTION);
		if (!term) {
			if (evsel__get_config_term(evsel, AUX_OUTPUT))
				goto regroup;
			continue;
		}
		opt = auxtrace_parse_aux_action_str(term->val.str);
		if (!opt) {
			pr_err("Bad aux-action '%s'\n", term->val.str);
			return -EINVAL;
		}
		if (opt->aux_event_opt && !is_aux_event) {
			pr_err("aux-action '%s' can only be used with AUX area event\n",
			       term->val.str);
			return -EINVAL;
		}
		if (!opt->aux_event_opt && is_aux_event) {
			pr_err("aux-action '%s' cannot be used for AUX area event itself\n",
			       term->val.str);
			return -EINVAL;
		}
		evsel->core.attr.aux_action = opt->aux_action;
regroup:
		/* If possible, group with the AUX event */
		if (aux_evsel)
			evlist__regroup(evlist, aux_evsel, evsel);
		if (!evsel__is_aux_event(evsel__leader(evsel))) {
			pr_err("Events with aux-action must have AUX area event group leader\n");
			return -EINVAL;
		}
	}

	return 0;
}

struct auxtrace_record *__weak
auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del_init(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}
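/*
 * On-disk layout produced by auxtrace_index__write() below (sketch derived
 * from the write/read code; all values are u64 and are byte-swapped on read
 * if the file endianness differs):
 *
 *	u64 total;			// total number of entries
 *	struct auxtrace_index_entry {	// repeated 'total' times
 *		u64 file_offset;
 *		u64 sz;
 *	};
 */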
static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	if (auxtrace__dont_decode(session))
		return 0;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}
struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues,
						     struct perf_sample *sample,
						     struct perf_session *session)
{
	struct perf_sample_id *sid;
	unsigned int idx;
	u64 id;

	id = sample->id;
	if (!id)
		return NULL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return NULL;

	idx = sid->idx;

	if (idx >= queues->nr_queues)
		return NULL;

	return &queues->queue_array[idx];
}

int auxtrace_queues__add_sample(struct auxtrace_queues *queues,
				struct perf_session *session,
				struct perf_sample *sample, u64 data_offset,
				u64 reference)
{
	struct auxtrace_buffer buffer = {
		.pid = -1,
		.data_offset = data_offset,
		.reference = reference,
		.size = sample->aux_sample.size,
	};
	struct perf_sample_id *sid;
	u64 id = sample->id;
	unsigned int idx;

	if (!id)
		return -EINVAL;

	sid = evlist__id2sid(session->evlist, id);
	if (!sid)
		return -ENOENT;

	idx = sid->idx;
	buffer.tid = sid->tid;
	buffer.cpu = sid->cpu;

	return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL);
}

struct queue_data {
	bool samples;
	bool events;
};

static int auxtrace_queue_data_cb(struct perf_session *session,
				  union perf_event *event, u64 offset,
				  void *data)
{
	struct queue_data *qd = data;
	struct perf_sample sample;
	int err;

	if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct perf_record_auxtrace))
			return -EINVAL;
		offset += event->header.size;
		return session->auxtrace->queue_data(session, NULL, event,
						     offset);
	}

	if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE)
		return 0;

	perf_sample__init(&sample, /*all=*/false);
	err = evlist__parse_sample(session->evlist, event, &sample);
	if (err)
		goto out;

	if (sample.aux_sample.size) {
		offset += sample.aux_sample.data - (void *)event;

		err = session->auxtrace->queue_data(session, &sample, NULL, offset);
	}
out:
	perf_sample__exit(&sample);
	return err;
}

int auxtrace_queue_data(struct perf_session *session, bool samples, bool events)
{
	struct queue_data qd = {
		.samples = samples,
		.events = events,
	};

	if (auxtrace__dont_decode(session))
		return 0;

	if (perf_data__is_pipe(session->data))
		return 0;

	if (!session->auxtrace || !session->auxtrace->queue_data)
		return -EINVAL;

	return perf_session__peek_events(session, session->header.data_offset,
					 session->header.data_size,
					 auxtrace_queue_data_cb, &qd);
}
void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw)
{
	int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ;
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}
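/*
 * Worked example for the alignment above (illustrative numbers, 4KiB
 * pages): data_offset 0x12345 gives adj = 0x345, so the mmap() starts at
 * the page-aligned file_offset 0x12000, maps size + 0x345 bytes, and
 * ->data points 0x345 bytes into the mapping, back at the real data.
 */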
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_guest_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
				int code, int cpu, pid_t pid, pid_t tid, u64 ip,
				const char *msg, u64 timestamp,
				pid_t machine_pid, int vcpu)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->fmt = 1;
	auxtrace_error->ip = ip;
	auxtrace_error->time = timestamp;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
	if (machine_pid) {
		auxtrace_error->fmt = 2;
		auxtrace_error->machine_pid = machine_pid;
		auxtrace_error->vcpu = vcpu;
		size = sizeof(*auxtrace_error);
	} else {
		size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
		       strlen(auxtrace_error->msg) + 1;
	}
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg, u64 timestamp)
{
	auxtrace_synth_guest_error(auxtrace_error, type, code, cpu, pid, tid,
				   ip, msg, timestamp, 0, -1);
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 const struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

static void unleader_evsel(struct evlist *evlist, struct evsel *leader)
{
	struct evsel *new_leader = NULL;
	struct evsel *evsel;

	/* Find new leader for the group */
	evlist__for_each_entry(evlist, evsel) {
		if (!evsel__has_leader(evsel, leader) || evsel == leader)
			continue;
		if (!new_leader)
			new_leader = evsel;
		evsel__set_leader(evsel, new_leader);
	}

	/* Update group information */
	if (new_leader) {
		zfree(&new_leader->group_name);
		new_leader->group_name = leader->group_name;
		leader->group_name = NULL;

		new_leader->core.nr_members = leader->core.nr_members - 1;
		leader->core.nr_members = 1;
	}
}

static void unleader_auxtrace(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (auxtrace__evsel_is_auxtrace(session, evsel) &&
		    evsel__is_group_leader(evsel)) {
			unleader_evsel(session->evlist, evsel);
		}
	}
}

int perf_event__process_auxtrace_info(const struct perf_tool *tool __maybe_unused,
				      struct perf_session *session,
				      union perf_event *event)
{
	enum auxtrace_type type = event->auxtrace_info.type;
	int err;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		err = intel_pt_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_INTEL_BTS:
		err = intel_bts_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_ARM_SPE:
		err = arm_spe_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_CS_ETM:
		err = cs_etm__process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_S390_CPUMSF:
		err = s390_cpumsf_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_HISI_PTT:
		err = hisi_ptt_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_VPA_DTL:
		err = powerpc_vpadtl_process_auxtrace_info(event, session);
		break;
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}

	if (err)
		return err;

	unleader_auxtrace(session);

	return 0;
}

s64 perf_event__process_auxtrace(const struct perf_tool *tool __maybe_unused,
				 struct perf_session *session,
				 union perf_event *event)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, session->tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024
void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
				    bool no_sample)
{
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->ptwrites = true;
	synth_opts->pwr_events = true;
	synth_opts->other_events = true;
	synth_opts->intr_events = true;
	synth_opts->errors = true;
	synth_opts->flc = true;
	synth_opts->llc = true;
	synth_opts->tlb = true;
	synth_opts->mem = true;
	synth_opts->remote_access = true;

	if (no_sample) {
		synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
		synth_opts->period = 1;
		synth_opts->calls = true;
	} else {
		synth_opts->instructions = true;
		synth_opts->cycles = true;
		synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
	synth_opts->initial_skip = 0;
}

static int get_flag(const char **ptr, unsigned int *flags)
{
	while (1) {
		char c = **ptr;

		if (c >= 'a' && c <= 'z') {
			*flags |= 1 << (c - 'a');
			++*ptr;
			return 0;
		} else if (c == ' ') {
			++*ptr;
			continue;
		} else {
			return -1;
		}
	}
}

static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags)
{
	while (1) {
		switch (**ptr) {
		case '+':
			++*ptr;
			if (get_flag(ptr, plus_flags))
				return -1;
			break;
		case '-':
			++*ptr;
			if (get_flag(ptr, minus_flags))
				return -1;
			break;
		case ' ':
			++*ptr;
			break;
		default:
			return 0;
		}
	}
}

#define ITRACE_DFLT_LOG_ON_ERROR_SZ 16384

static unsigned int itrace_log_on_error_size(void)
{
	unsigned int sz = 0;

	perf_config_scan("itrace.debug-log-buffer-size", "%u", &sz);
	return sz ?: ITRACE_DFLT_LOG_ON_ERROR_SZ;
}
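/*
 * Example --itrace option strings accepted by the parser below (sketch,
 * derived from the parsing code itself):
 *
 *	"i100ns"	synthesize an instructions sample every 100 ns
 *	"y10us"		synthesize a cycles sample every 10 us
 *	"g16"		synthesize callchains, 16 entries deep
 *	"bel"		branches, errors and last-branch entries
 */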
/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here. That documentation was introduced after
 * this changeset, when support for these options was added to 'perf script'.
 */
int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts,
			       const char *str, int unset)
{
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;
	bool iy = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts,
					       synth_opts->default_no_sample);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
		case 'y':
			iy = true;
			if (p[-1] == 'y')
				synth_opts->cycles = true;
			else
				synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'o':
			synth_opts->other_events = true;
			break;
		case 'I':
			synth_opts->intr_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			if (get_flags(&p, &synth_opts->error_plus_flags,
				      &synth_opts->error_minus_flags))
				goto out_err;
			break;
		case 'd':
			synth_opts->log = true;
			if (get_flags(&p, &synth_opts->log_plus_flags,
				      &synth_opts->log_minus_flags))
				goto out_err;
			if (synth_opts->log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR)
				synth_opts->log_on_error_size = itrace_log_on_error_size();
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'G':
		case 'g':
			if (p[-1] == 'G')
				synth_opts->add_callchain = true;
			else
				synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'L':
		case 'l':
			if (p[-1] == 'L')
				synth_opts->add_last_branch = true;
			else
				synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case 'f':
			synth_opts->flc = true;
			break;
		case 'm':
			synth_opts->llc = true;
			break;
		case 't':
			synth_opts->tlb = true;
			break;
		case 'a':
			synth_opts->remote_access = true;
			break;
		case 'M':
			synth_opts->mem = true;
			break;
		case 'q':
			synth_opts->quick += 1;
			break;
		case 'A':
			synth_opts->approx_ipc = true;
			break;
		case 'Z':
			synth_opts->timeless_decoding = true;
			break;
		case 'T':
			synth_opts->use_timestamp = true;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (iy) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset)
{
	return itrace_do_parse_synth_opts(opt->value, str, unset);
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;
	unsigned long long nsecs = e->time;
	const char *msg = e->msg;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);

	if (e->fmt && nsecs) {
		unsigned long secs = nsecs / NSEC_PER_SEC;

		nsecs -= secs * NSEC_PER_SEC;
		ret += fprintf(fp, " time %lu.%09llu", secs, nsecs);
	} else {
		ret += fprintf(fp, " time 0");
	}

	if (!e->fmt)
		msg = (const char *)&e->time;

	if (e->fmt >= 2 && e->machine_pid)
		ret += fprintf(fp, " machine_pid %d vcpu %d", e->machine_pid, e->vcpu);

	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRI_lx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct perf_record_auxtrace_error *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(const struct perf_tool *tool __maybe_unused,
				       struct perf_session *session,
				       union perf_event *event)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

/*
 * In compat mode, the kernel runs in 64-bit mode while the perf tool runs in
 * 32-bit mode, and a 32-bit perf tool cannot access a 64-bit value atomically.
 * That can lead to the issue illustrated by the sequence below on multiple
 * CPUs: when the perf tool's load or store of a 64-bit value is divided, on
 * some architectures, into two instructions, one accessing the low 32 bits
 * and the other the high 32 bits, the kernel gets a chance to access the
 * 64-bit value between the two user operations, leading to unexpected load
 * values.
 *
 *   kernel (64-bit)                        user (32-bit)
 *
 *   if (LOAD ->aux_tail) {  --,            LOAD ->aux_head_lo
 *       STORE $aux_data       |  ,--->
 *       FLUSH $aux_data       |  |         LOAD ->aux_head_hi
 *       STORE ->aux_head    --|--`         smp_rmb()
 *   }                         |            LOAD $data
 *                             |            smp_mb()
 *                             |            STORE ->aux_tail_lo
 *                             `----------->
 *                                          STORE ->aux_tail_hi
 *
 * For this reason, the perf tool cannot work correctly once the AUX head or
 * tail grows beyond 4GB (more than 32 bits); and simply limiting the AUX ring
 * buffer to less than 4GB does not help, because the pointers increase
 * monotonically whatever the buffer size, so eventually the head and tail
 * exceed 4GB and carry into the high 32 bits.
 *
 * To mitigate the issue and improve the user experience, allow the perf tool
 * to work under certain conditions and bail out with an error if an overflow
 * is detected that cannot be handled.
 *
 * To read the AUX head, the value is read three times, and the high 4 bytes
 * of the first and last reads are compared; if the kernel did not change the
 * high 4 bytes during the read sequence, it is safe to use the second value.
 *
 * When compat_auxtrace_mmap__write_tail() detects a carry into the high
 * 32 bits, two store operations would be needed in user space, so atomicity
 * of the 64-bit write cannot be guaranteed; return '-1' in that case to tell
 * the caller an overflow error has happened.
 */
u64 __weak compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 first, second, last;
	u64 mask = (u64)(UINT32_MAX) << 32;

	do {
		first = READ_ONCE(pc->aux_head);
		/* Ensure all reads are done after we read the head */
		smp_rmb();
		second = READ_ONCE(pc->aux_head);
		/* Ensure all reads are done after we read the head */
		smp_rmb();
		last = READ_ONCE(pc->aux_head);
	} while ((first & mask) != (last & mask));

	return second;
}

int __weak compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
{
	struct perf_event_mmap_page *pc = mm->userpg;
	u64 mask = (u64)(UINT32_MAX) << 32;

	if (tail & mask)
		return -1;

	/* Ensure all reads are done before we write the tail out */
	smp_mb();
	WRITE_ONCE(pc->aux_tail, tail);
	return 0;
}

static int __auxtrace_mmap__read(struct mmap *map,
				 struct auxtrace_record *itr, struct perf_env *env,
				 const struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	struct auxtrace_mmap *mm = &map->auxtrace_mmap;
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;
	int kernel_is_64_bit = perf_env__kernel_is_64_bit(env);

	head = auxtrace_mmap__read_head(mm, kernel_is_64_bit);

	if (snapshot &&
	    auxtrace_record__find_snapshot(itr, mm->idx, mm, data, &head, &old))
		return -1;

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
	if (padding)
		padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, map, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		int err;

		err = auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit);
		if (err < 0)
			return err;

		if (itr->read_finish) {
			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr,
			struct perf_env *env, const struct perf_tool *tool,
			process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(map, itr, env, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct mmap *map,
				 struct auxtrace_record *itr, struct perf_env *env,
				 const struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(map, itr, env, tool, fn, true, snapshot_size);
}

/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	zfree(&c->hashtable);
	free(c);
}
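/*
 * Usage sketch for the cache API below (hypothetical caller; the cached
 * type is assumed to embed a struct auxtrace_cache_entry as its first
 * member, as the decoders that use this cache do):
 *
 *	struct my_entry *e = auxtrace_cache__alloc_entry(c);
 *
 *	e->payload = ...;			// illustrative member
 *	auxtrace_cache__add(c, key, &e->entry);
 *	...
 *	e = auxtrace_cache__lookup(c, key);
 */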
void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c,
						       u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;
	struct hlist_node *n;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry_safe(entry, n, hlist, hash) {
		if (entry->key == key) {
			hlist_del(&entry->hash);
			return entry;
		}
	}

	return NULL;
}

void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key);

	auxtrace_cache__free_entry(c, entry);
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}

static void addr_filter__free_str(struct addr_filter *filt)
{
	zfree(&filt->str);
	filt->action = NULL;
	filt->sym_from = NULL;
	filt->sym_to = NULL;
	filt->filename = NULL;
}

static struct addr_filter *addr_filter__new(void)
{
	struct addr_filter *filt = zalloc(sizeof(*filt));

	if (filt)
		INIT_LIST_HEAD(&filt->list);

	return filt;
}

static void addr_filter__free(struct addr_filter *filt)
{
	if (filt)
		addr_filter__free_str(filt);
	free(filt);
}

static void addr_filters__add(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_add_tail(&filt->list, &filts->head);
	filts->cnt += 1;
}

static void addr_filters__del(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_del_init(&filt->list);
	filts->cnt -= 1;
}

void addr_filters__init(struct addr_filters *filts)
{
	INIT_LIST_HEAD(&filts->head);
	filts->cnt = 0;
}

void addr_filters__exit(struct addr_filters *filts)
{
	struct addr_filter *filt, *n;

	list_for_each_entry_safe(filt, n, &filts->head, list) {
		addr_filters__del(filts, filt);
		addr_filter__free(filt);
	}
}

static int parse_num_or_str(char **inp, u64 *num, const char **str,
			    const char *str_delim)
{
	*inp += strspn(*inp, " ");

	if (isdigit(**inp)) {
		char *endptr;

		if (!num)
			return -EINVAL;
		errno = 0;
		*num = strtoull(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp)
			return -EINVAL;
		*inp = endptr;
	} else {
		size_t n;

		if (!str)
			return -EINVAL;
		*inp += strspn(*inp, " ");
		*str = *inp;
		n = strcspn(*inp, str_delim);
		if (!n)
			return -EINVAL;
		*inp += n;
		if (**inp) {
			**inp = '\0';
			*inp += 1;
		}
	}
	return 0;
}
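/*
 * Example bare filter strings accepted by the parsing code below (sketch,
 * matching the grammar implemented here:
 * "<action> <addr|sym>[#idx] [/ <size|sym>] [@<filename>]"):
 *
 *	"filter sys_write"		trace one kernel function
 *	"filter 0x1000/0x200@/bin/ls"	trace an address range in a file
 *	"stop func#2"			act at the 2nd symbol named 'func'
 */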

static int parse_action(struct addr_filter *filt)
{
	if (!strcmp(filt->action, "filter")) {
		filt->start = true;
		filt->range = true;
	} else if (!strcmp(filt->action, "start")) {
		filt->start = true;
	} else if (!strcmp(filt->action, "stop")) {
		filt->start = false;
	} else if (!strcmp(filt->action, "tracestop")) {
		filt->start = false;
		filt->range = true;
		filt->action += 5; /* Change 'tracestop' to 'stop' */
	} else {
		return -EINVAL;
	}
	return 0;
}

static int parse_sym_idx(char **inp, int *idx)
{
	*idx = -1;

	*inp += strspn(*inp, " ");

	if (**inp != '#')
		return 0;

	*inp += 1;

	if (**inp == 'g' || **inp == 'G') {
		*inp += 1;
		*idx = 0;
	} else {
		unsigned long num;
		char *endptr;

		errno = 0;
		num = strtoul(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp || num > INT_MAX)
			return -EINVAL;
		*inp = endptr;
		*idx = num;
	}

	return 0;
}

static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
{
	int err = parse_num_or_str(inp, num, str, " ");

	if (!err && *str)
		err = parse_sym_idx(inp, idx);

	return err;
}

static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}

int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter)
{
	struct addr_filter *filt;
	const char *fstr = filter;
	int err;

	while (*fstr) {
		filt = addr_filter__new();
		if (!filt) {
			addr_filters__exit(filts);
			return -ENOMEM;
		}
		err = parse_one_filter(filt, &fstr);
		if (err) {
			addr_filter__free(filt);
			addr_filters__exit(filts);
			return err;
		}
		addr_filters__add(filts, filt);
	}

	return 0;
}

struct sym_args {
	const char *name;
	u64 start;
	u64 size;
	int idx;
	int cnt;
	bool started;
	bool global;
	bool selected;
	bool duplicate;
	bool near;
};

static bool kern_sym_name_match(const char *kname, const char *name)
{
	size_t n = strlen(name);

	return !strcmp(kname, name) ||
	       (!strncmp(kname, name, n) && kname[n] == '\t');
}
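
/*
 * kallsyms lines can carry module information after a tab in the symbol
 * name field (e.g. "foo\t[bar]"), so kern_sym_name_match() above accepts
 * either an exact match or a match up to a '\t' suffix.
 */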

static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/* A function with the same name, and global or the n'th found or any */
	return kallsyms__is_function(type) &&
	       kern_sym_name_match(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}

static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1;
		} else if (kern_sym_match(args, name, type)) {
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}

static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (kern_sym_match(args, name, type)) {
		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
		       ++args->cnt, start, type, name);
		args->near = true;
	} else if (args->near) {
		args->near = false;
		pr_err("\t\twhich is near\t\t%s\n", name);
	}

	return 0;
}

static int sym_not_found_error(const char *sym_name, int idx)
{
	if (idx > 0) {
		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
		       idx, sym_name);
	} else if (!idx) {
		pr_err("Global symbol '%s' not found.\n", sym_name);
	} else {
		pr_err("Symbol '%s' not found.\n", sym_name);
	}
	pr_err("Note that symbols must be functions.\n");

	return -EINVAL;
}
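
/*
 * Example of the idx convention used below: for a filter like
 * "filter foo #2", idx == 2 selects the 2nd function named "foo"; "#0",
 * "#g" or "#G" give idx == 0, which requires a global (uppercase type)
 * symbol; no suffix gives idx == -1, which accepts any "foo" but errors
 * out if the name turns out to be ambiguous.
 */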

static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}

static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;
	u64 size;

	if (!kallsyms__is_function(type))
		return 0;

	if (!args->started) {
		args->started = true;
		args->start = start;
	}
	/* Don't know exactly where the kernel ends, so we add a page */
	size = round_up(start, page_size) + page_size - args->start;
	if (size > args->size)
		args->size = size;

	return 0;
}

static int addr_filter__entire_kernel(struct addr_filter *filt)
{
	struct sym_args args = { .started = false };
	int err;

	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
	if (err < 0 || !args.started) {
		pr_err("Failed to parse /proc/kallsyms\n");
		/* No function symbols seen is also an error */
		return err < 0 ? err : -EINVAL;
	}

	filt->addr = args.start;
	filt->size = args.size;

	return 0;
}

static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
	if (start + size >= filt->addr)
		return 0;

	if (filt->sym_from) {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
		       filt->sym_to, start, filt->sym_from, filt->addr);
	} else {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
		       filt->sym_to, start, filt->addr);
	}

	return -EINVAL;
}
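
/*
 * Worked example for the range arithmetic in
 * addr_filter__resolve_kernel_syms() below (illustrative addresses): if
 * sym_from starts at 0xffffffff81000000 and sym_to starts at
 * 0xffffffff81000200 with size 0x80, the result is
 * addr = 0xffffffff81000000 and size = 0x200 + 0x80 = 0x280, i.e. the
 * filter spans from the start of sym_from to the end of sym_to.
 */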

static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms does not imply a particular size */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}

static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	if (map__load(map) < 0)
		pr_err("File '%s' not found or has no symbols.\n", name);

	dso = dso__get(map__dso(map));

	map__put(map);

	return dso;
}

static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/* Same name, and global or the n'th found or any */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}

static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso);
	while (sym) {
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}

static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso);
	while (sym) {
		if (*start) {
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				if (*size)
					return 0;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}

static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
	if (dso__data_file_size(dso, NULL)) {
		pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
		       filt->filename);
		return -EINVAL;
	}

	filt->addr = 0;
	filt->size = dso__data(dso)->file_size;

	return 0;
}
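
/*
 * Example (illustrative): a filter such as "filter * @ /usr/bin/ls" reaches
 * addr_filter__entire_dso() above and becomes addr 0 with size equal to the
 * object's file size; for file-based filters such offsets are resolved
 * against wherever the object ends up mapped.
 */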

static int addr_filter__resolve_syms(struct addr_filter *filt)
{
	u64 start, size;
	struct dso *dso;
	int err = 0;

	if (!filt->sym_from && !filt->sym_to)
		return 0;

	if (!filt->filename)
		return addr_filter__resolve_kernel_syms(filt);

	dso = load_dso(filt->filename);
	if (!dso) {
		pr_err("Failed to load symbols from: %s\n", filt->filename);
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
		err = addr_filter__entire_dso(filt, dso);
		goto put_dso;
	}

	if (filt->sym_from) {
		err = find_dso_sym(dso, filt->sym_from, &start, &size,
				   filt->sym_from_idx);
		if (err)
			goto put_dso;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to)
			filt->size = size;
	}

	if (filt->sym_to) {
		err = find_dso_sym(dso, filt->sym_to, &start, &size,
				   filt->sym_to_idx);
		if (err)
			goto put_dso;

		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso; /* Don't leak the dso reference on error */

		filt->size = start + size - filt->addr;
	}

put_dso:
	dso__put(dso);

	return err;
}

static char *addr_filter__to_str(struct addr_filter *filt)
{
	char filename_buf[PATH_MAX];
	const char *at = "";
	const char *fn = "";
	char *filter;
	int err;

	if (filt->filename) {
		at = "@";
		fn = realpath(filt->filename, filename_buf);
		if (!fn)
			return NULL;
	}

	if (filt->range) {
		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
			       filt->action, filt->addr, filt->size, at, fn);
	} else {
		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
			       filt->action, filt->addr, at, fn);
	}

	return err < 0 ? NULL : filter;
}

static int parse_addr_filter(struct evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		if (evsel__append_addr_filter(evsel, new_filter)) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}

static int evsel__nr_addr_filter(struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (!pmu)
		return 0;

	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);

	return nr_addr_filters;
}

int auxtrace_parse_filters(struct evlist *evlist)
{
	struct evsel *evsel;
	char *filter;
	int err, max_nr;

	evlist__for_each_entry(evlist, evsel) {
		filter = evsel->filter;
		max_nr = evsel__nr_addr_filter(evsel);
		if (!filter || !max_nr)
			continue;
		evsel->filter = NULL;
		err = parse_addr_filter(evsel, filter, max_nr);
		free(filter);
		if (err)
			return err;
		pr_debug("Address filter: %s\n", evsel->filter);
	}

	return 0;
}
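
/*
 * End-to-end example (illustrative values): given a user filter
 * "filter main @ /bin/foo", auxtrace_parse_filters() above resolves "main"
 * in /bin/foo's symbols and re-emits the filter via addr_filter__to_str()
 * as something like "filter 0x4010/0x1f2@/bin/foo", which is what
 * evsel__append_addr_filter() hands to the kernel.
 */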

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, const struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->process_event(session, event, sample, tool);
}

void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample)
{
	if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
	    auxtrace__dont_decode(session))
		return;

	session->auxtrace->dump_auxtrace_sample(session, sample);
}

int auxtrace__flush_events(struct perf_session *session, const struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->flush_events(session, tool);
}

void auxtrace__free_events(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	session->auxtrace->free_events(session);
}

void auxtrace__free(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	session->auxtrace->free(session);
}

bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel)
{
	if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
		return false;

	return session->auxtrace->evsel_is_auxtrace(session, evsel);
}