// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/zalloc.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
#include <bpf/libbpf.h>
#include <perf/cpumap.h>

#include "dso.h"
#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "memswap.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
#include "time-utils.h"
#include "units.h"
#include "util.h" // page_size, perf_exe()
#include "cputopo.h"
#include "bpf-event.h"

#include <linux/ctype.h>
#include <internal/lib.h>

/*
 * magic2 = "PERFILE2"
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}
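/*
 * Example usage (a minimal sketch): a feat_fd set up with only a file
 * descriptor writes straight to the file via __do_write_fd(), while one
 * set up with a buffer (as in pipe mode) grows and fills that buffer
 * via __do_write_buf():
 *
 *	u32 val = 1;
 *	struct feat_fd ff = { .fd = fd };	// file-backed writer
 *	int err = do_write(&ff, &val, sizeof(val));
 */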
/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}

static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}

static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}

static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);
	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);
	return 0;
}

static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}
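/*
 * On-disk string layout example: do_write_string(ff, "perf") emits a
 * u32 length (strlen + 1 rounded up to NAME_ALIGN, i.e. 64), followed
 * by the string and zero padding up to that length, so do_read_string()
 * above can read len bytes and rely on the embedded NUL terminator.
 */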
/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_alloc(size);
	if (!set)
		return -ENOMEM;

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			free(set);
			return ret;
		}
	}

	*pset  = set;
	*psize = size;
	return 0;
}

static int write_tracing_data(struct feat_fd *ff,
			      struct evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->core.entries);
}

static int write_build_id(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}

static int write_version(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}

static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);
			*p = ' ';
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
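/*
 * For example, an x86 /proc/cpuinfo line such as
 *
 *	model name	: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz
 *
 * is reduced by __write_cpudesc() to the text after ": ", with runs of
 * whitespace in the branding string squashed to single spaces.
 */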
static int write_cpudesc(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
#if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
#define CPUINFO_PROC	{ "cpu", }
#elif defined(__s390__)
#define CPUINFO_PROC	{ "vendor_id", }
#elif defined(__sh__)
#define CPUINFO_PROC	{ "cpu type", }
#elif defined(__alpha__) || defined(__mips__)
#define CPUINFO_PROC	{ "cpu model", }
#elif defined(__arm__)
#define CPUINFO_PROC	{ "model name", "Processor", }
#elif defined(__arc__)
#define CPUINFO_PROC	{ "Processor", }
#elif defined(__xtensa__)
#define CPUINFO_PROC	{ "core ID", }
#else
#define CPUINFO_PROC	{ "model name", }
#endif
	const char *cpuinfo_procs[] = CPUINFO_PROC;
#undef CPUINFO_PROC
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}


static int write_nrcpus(struct feat_fd *ff,
			struct evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(ff, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(ff, &nra, sizeof(nra));
}

static int write_event_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	struct evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->core.nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->core.attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->core.attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write the number of unique ids for this event;
		 * there is one id per instance of an event.
		 *
		 * copy into nri to be independent of the
		 * type of ids.
		 */
		nri = evsel->ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
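/*
 * Resulting EVENT_DESC layout (sketch): u32 event count, u32 attr size,
 * then per event the raw perf_event_attr bytes, a u32 id count, the
 * event name string and the u64 id values; read_event_desc() below
 * parses exactly this sequence.
 */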
static int write_cmdline(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	char pbuf[MAXPATHLEN], *buf;
	int i, ret, n;

	/* actual path to perf binary */
	buf = perf_exe(pbuf, MAXPATHLEN);

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}


static int write_cpu_topology(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct cpu_topology *tp;
	u32 i;
	int ret, j;

	tp = cpu_topology__new();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}

	if (!tp->die_sib)
		goto done;

	ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->die_sib; i++) {
		ret = do_write_string(ff, tp->die_siblings[i]);
		if (ret < 0)
			goto done;
	}

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].die_id,
			       sizeof(perf_env.cpu[j].die_id));
		if (ret < 0)
			return ret;
	}

done:
	cpu_topology__delete(tp);
	return ret;
}



static int write_total_mem(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_numa_topology(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct numa_topology *tp;
	int ret = -1;
	u32 i;

	tp = numa_topology__new();
	if (!tp)
		return -ENOMEM;

	ret = do_write(ff, &tp->nr, sizeof(u32));
	if (ret < 0)
		goto err;

	for (i = 0; i < tp->nr; i++) {
		struct numa_topology_node *n = &tp->nodes[i];

		ret = do_write(ff, &n->node, sizeof(u32));
		if (ret < 0)
			goto err;

		ret = do_write(ff, &n->mem_total, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write(ff, &n->mem_free, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write_string(ff, n->cpus);
		if (ret < 0)
			goto err;
	}

	ret = 0;

err:
	numa_topology__delete(tp);
	return ret;
}
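/*
 * NUMA_TOPOLOGY layout (sketch): a u32 node count, then per node a u32
 * node id, u64 total and free memory in kB, and the node's cpu list as
 * a string, e.g. "0-7,16-23".
 */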
734 */ 735 while ((pmu = perf_pmu__scan(pmu))) { 736 if (!pmu->name) 737 continue; 738 pmu_num++; 739 } 740 741 ret = do_write(ff, &pmu_num, sizeof(pmu_num)); 742 if (ret < 0) 743 return ret; 744 745 while ((pmu = perf_pmu__scan(pmu))) { 746 if (!pmu->name) 747 continue; 748 749 ret = do_write(ff, &pmu->type, sizeof(pmu->type)); 750 if (ret < 0) 751 return ret; 752 753 ret = do_write_string(ff, pmu->name); 754 if (ret < 0) 755 return ret; 756 } 757 758 return 0; 759 } 760 761 /* 762 * File format: 763 * 764 * struct group_descs { 765 * u32 nr_groups; 766 * struct group_desc { 767 * char name[]; 768 * u32 leader_idx; 769 * u32 nr_members; 770 * }[nr_groups]; 771 * }; 772 */ 773 static int write_group_desc(struct feat_fd *ff, 774 struct evlist *evlist) 775 { 776 u32 nr_groups = evlist->nr_groups; 777 struct evsel *evsel; 778 int ret; 779 780 ret = do_write(ff, &nr_groups, sizeof(nr_groups)); 781 if (ret < 0) 782 return ret; 783 784 evlist__for_each_entry(evlist, evsel) { 785 if (perf_evsel__is_group_leader(evsel) && 786 evsel->core.nr_members > 1) { 787 const char *name = evsel->group_name ?: "{anon_group}"; 788 u32 leader_idx = evsel->idx; 789 u32 nr_members = evsel->core.nr_members; 790 791 ret = do_write_string(ff, name); 792 if (ret < 0) 793 return ret; 794 795 ret = do_write(ff, &leader_idx, sizeof(leader_idx)); 796 if (ret < 0) 797 return ret; 798 799 ret = do_write(ff, &nr_members, sizeof(nr_members)); 800 if (ret < 0) 801 return ret; 802 } 803 } 804 return 0; 805 } 806 807 /* 808 * Return the CPU id as a raw string. 809 * 810 * Each architecture should provide a more precise id string that 811 * can be use to match the architecture's "mapfile". 812 */ 813 char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused) 814 { 815 return NULL; 816 } 817 818 /* Return zero when the cpuid from the mapfile.csv matches the 819 * cpuid string generated on this platform. 820 * Otherwise return non-zero. 821 */ 822 int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) 823 { 824 regex_t re; 825 regmatch_t pmatch[1]; 826 int match; 827 828 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) { 829 /* Warn unable to generate match particular string. */ 830 pr_info("Invalid regular expression %s\n", mapcpuid); 831 return 1; 832 } 833 834 match = !regexec(&re, cpuid, 1, pmatch, 0); 835 regfree(&re); 836 if (match) { 837 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so); 838 839 /* Verify the entire string matched. 
/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
 */
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
	return NULL;
}

/* Return zero when the cpuid from the mapfile.csv matches the
 * cpuid string generated on this platform.
 * Otherwise return non-zero.
 */
int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
		/* Warn that the pattern could not be compiled for matching. */
		pr_info("Invalid regular expression %s\n", mapcpuid);
		return 1;
	}

	match = !regexec(&re, cpuid, 1, pmatch, 0);
	regfree(&re);
	if (match) {
		size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);

		/* Verify the entire string matched. */
		if (match_len == strlen(cpuid))
			return 0;
	}
	return 1;
}
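/*
 * Example (illustrative pattern): a mapfile.csv cpuid entry such as
 * "GenuineIntel-6-[45]E" is compiled as an extended regex and must
 * match the platform cpuid string (e.g. "GenuineIntel-6-4E") in its
 * entirety for strcmp_cpuid_str() to return 0.
 */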
946 */ 947 bpf_program__bpil_offs_to_addr(node->info_linear); 948 if (ret < 0) 949 goto out; 950 } 951 out: 952 up_read(&env->bpf_progs.lock); 953 return ret; 954 } 955 #else // HAVE_LIBBPF_SUPPORT 956 static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused, 957 struct evlist *evlist __maybe_unused) 958 { 959 return 0; 960 } 961 #endif // HAVE_LIBBPF_SUPPORT 962 963 static int write_bpf_btf(struct feat_fd *ff, 964 struct evlist *evlist __maybe_unused) 965 { 966 struct perf_env *env = &ff->ph->env; 967 struct rb_root *root; 968 struct rb_node *next; 969 int ret; 970 971 down_read(&env->bpf_progs.lock); 972 973 ret = do_write(ff, &env->bpf_progs.btfs_cnt, 974 sizeof(env->bpf_progs.btfs_cnt)); 975 976 if (ret < 0) 977 goto out; 978 979 root = &env->bpf_progs.btfs; 980 next = rb_first(root); 981 while (next) { 982 struct btf_node *node; 983 984 node = rb_entry(next, struct btf_node, rb_node); 985 next = rb_next(&node->rb_node); 986 ret = do_write(ff, &node->id, 987 sizeof(u32) * 2 + node->data_size); 988 if (ret < 0) 989 goto out; 990 } 991 out: 992 up_read(&env->bpf_progs.lock); 993 return ret; 994 } 995 996 static int cpu_cache_level__sort(const void *a, const void *b) 997 { 998 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a; 999 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b; 1000 1001 return cache_a->level - cache_b->level; 1002 } 1003 1004 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b) 1005 { 1006 if (a->level != b->level) 1007 return false; 1008 1009 if (a->line_size != b->line_size) 1010 return false; 1011 1012 if (a->sets != b->sets) 1013 return false; 1014 1015 if (a->ways != b->ways) 1016 return false; 1017 1018 if (strcmp(a->type, b->type)) 1019 return false; 1020 1021 if (strcmp(a->size, b->size)) 1022 return false; 1023 1024 if (strcmp(a->map, b->map)) 1025 return false; 1026 1027 return true; 1028 } 1029 1030 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level) 1031 { 1032 char path[PATH_MAX], file[PATH_MAX]; 1033 struct stat st; 1034 size_t len; 1035 1036 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level); 1037 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path); 1038 1039 if (stat(file, &st)) 1040 return 1; 1041 1042 scnprintf(file, PATH_MAX, "%s/level", path); 1043 if (sysfs__read_int(file, (int *) &cache->level)) 1044 return -1; 1045 1046 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path); 1047 if (sysfs__read_int(file, (int *) &cache->line_size)) 1048 return -1; 1049 1050 scnprintf(file, PATH_MAX, "%s/number_of_sets", path); 1051 if (sysfs__read_int(file, (int *) &cache->sets)) 1052 return -1; 1053 1054 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path); 1055 if (sysfs__read_int(file, (int *) &cache->ways)) 1056 return -1; 1057 1058 scnprintf(file, PATH_MAX, "%s/type", path); 1059 if (sysfs__read_str(file, &cache->type, &len)) 1060 return -1; 1061 1062 cache->type[len] = 0; 1063 cache->type = strim(cache->type); 1064 1065 scnprintf(file, PATH_MAX, "%s/size", path); 1066 if (sysfs__read_str(file, &cache->size, &len)) { 1067 zfree(&cache->type); 1068 return -1; 1069 } 1070 1071 cache->size[len] = 0; 1072 cache->size = strim(cache->size); 1073 1074 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path); 1075 if (sysfs__read_str(file, &cache->map, &len)) { 1076 zfree(&cache->size); 1077 zfree(&cache->type); 1078 return -1; 1079 } 1080 1081 cache->map[len] = 0; 1082 cache->map = strim(cache->map); 1083 return 0; 1084 
static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}

static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = strim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		zfree(&cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = strim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		zfree(&cache->size);
		zfree(&cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = strim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
out:
	*cntp = cnt;
	return 0;
}

#define MAX_CACHE_LVL 4

static int write_cache(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL;
	struct cpu_cache_level caches[max_caches];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, max_caches, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)					\
			ret = do_write(ff, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}
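/*
 * CACHE section layout (sketch): u32 version (currently 1), u32 entry
 * count, then per entry four u32s (level, line_size, sets, ways)
 * followed by the type, size and map strings; process_cache() mirrors
 * this on the read side.
 */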
static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_sample_time(struct feat_fd *ff,
			     struct evlist *evlist)
{
	int ret;

	ret = do_write(ff, &evlist->first_sample_time,
		       sizeof(evlist->first_sample_time));
	if (ret < 0)
		return ret;

	return do_write(ff, &evlist->last_sample_time,
			sizeof(evlist->last_sample_time));
}


static int memory_node__read(struct memory_node *n, unsigned long idx)
{
	unsigned int phys, size = 0;
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;

#define for_each_memory(mem, dir)					\
	while ((ent = readdir(dir)))					\
		if (strcmp(ent->d_name, ".") &&				\
		    strcmp(ent->d_name, "..") &&			\
		    sscanf(ent->d_name, "memory%u", &mem) == 1)

	scnprintf(path, PATH_MAX,
		  "%s/devices/system/node/node%lu",
		  sysfs__mountpoint(), idx);

	dir = opendir(path);
	if (!dir) {
		pr_warning("failed: can't open memory sysfs data\n");
		return -1;
	}

	for_each_memory(phys, dir) {
		size = max(phys, size);
	}

	size++;

	n->set = bitmap_alloc(size);
	if (!n->set) {
		closedir(dir);
		return -ENOMEM;
	}

	n->node = idx;
	n->size = size;

	rewinddir(dir);

	for_each_memory(phys, dir) {
		set_bit(phys, n->set);
	}

	closedir(dir);
	return 0;
}

static int memory_node__sort(const void *a, const void *b)
{
	const struct memory_node *na = a;
	const struct memory_node *nb = b;

	return na->node - nb->node;
}

static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
{
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;
	u64 cnt = 0;
	int ret = 0;

	scnprintf(path, PATH_MAX, "%s/devices/system/node/",
		  sysfs__mountpoint());

	dir = opendir(path);
	if (!dir) {
		pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
			  __func__, path);
		return -1;
	}

	while (!ret && (ent = readdir(dir))) {
		unsigned int idx;
		int r;

		if (!strcmp(ent->d_name, ".") ||
		    !strcmp(ent->d_name, ".."))
			continue;

		r = sscanf(ent->d_name, "node%u", &idx);
		if (r != 1)
			continue;

		if (WARN_ONCE(cnt >= size,
			      "failed to write MEM_TOPOLOGY, way too many nodes\n"))
			return -1;

		ret = memory_node__read(&nodes[cnt++], idx);
	}

	*cntp = cnt;
	closedir(dir);

	if (!ret)
		qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);

	return ret;
}

#define MAX_MEMORY_NODES 2000

/*
 * The MEM_TOPOLOGY holds the physical memory map for every
 * node in the system. The format of data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store the map of physical indexes:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belong to the node
 */
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	static struct memory_node nodes[MAX_MEMORY_NODES];
	u64 bsize, version = 1, i, nr;
	int ret;

	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
	if (ret)
		return ret;

	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

		#define _W(v)						\
			ret = do_write(ff, &n->v, sizeof(n->v));	\
			if (ret < 0)					\
				goto out;

		_W(node)
		_W(size)

		#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
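/*
 * Example: a node whose sysfs directory contains memory0..memory3 is
 * stored as its node id, a bitmap size of 4 and a single u64 bitmap
 * word with bits 0-3 set, emitted via do_write_bitmap() above.
 */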
static int write_compressed(struct feat_fd *ff __maybe_unused,
			    struct evlist *evlist __maybe_unused)
{
	int ret;

	ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
	if (ret)
		return ret;

	return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}
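/*
 * COMPRESSED layout: the comp_ver, comp_type, comp_level, comp_ratio
 * and comp_mmap_len fields of perf_env, written back to back in that
 * order.
 */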
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}

static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}

static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}

static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}

static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}

static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}

static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
		if (!argv_i) {
			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
		} else {
			char *mem = argv_i;
			do {
				char *quote = strchr(argv_i, '\'');
				if (!quote)
					break;
				*quote++ = '\0';
				fprintf(fp, "%s\\\'", argv_i);
				argv_i = quote;
			} while (1);
			fprintf(fp, "%s ", argv_i);
			free(mem);
		}
	}
	fputc('\n', fp);
}

static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling sockets : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		nr = ph->env.nr_sibling_dies;
		str = ph->env.sibling_dies;

		for (i = 0; i < nr; i++) {
			fprintf(fp, "# sibling dies : %s\n", str);
			str += strlen(str) + 1;
		}
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Die ID %d, Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].die_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID, Die ID and Socket ID "
				    "information is not available\n");
	} else {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID and Socket ID "
				    "information is not available\n");
	}
}

static void print_clockid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
		ff->ph->env.clockid_res_ns * 1000);
}

static void print_dir_format(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}

static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);

		bpf_event__print_bpf_prog_info(&node->info_linear->info,
					       env, fp);
	}

	up_read(&env->bpf_progs.lock);
}

static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		fprintf(fp, "# btf info of id %u\n", node->id);
	}

	up_read(&env->bpf_progs.lock);
}

static void free_event_desc(struct evsel *events)
{
	struct evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->core.attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->id);
	}

	free(events);
}
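/*
 * read_event_desc() below copies min(on-file attr size, sizeof(attr))
 * bytes per event, so perf.data files written by a perf with a larger
 * or smaller perf_event_attr still parse; the full on-file record is
 * always consumed via the sz-sized read into buf.
 */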
1634 */ 1635 if (__do_read(ff, buf, sz)) 1636 goto error; 1637 1638 if (ff->ph->needs_swap) 1639 perf_event__attr_swap(buf); 1640 1641 memcpy(&evsel->core.attr, buf, msz); 1642 1643 if (do_read_u32(ff, &nr)) 1644 goto error; 1645 1646 if (ff->ph->needs_swap) 1647 evsel->needs_swap = true; 1648 1649 evsel->name = do_read_string(ff); 1650 if (!evsel->name) 1651 goto error; 1652 1653 if (!nr) 1654 continue; 1655 1656 id = calloc(nr, sizeof(*id)); 1657 if (!id) 1658 goto error; 1659 evsel->ids = nr; 1660 evsel->id = id; 1661 1662 for (j = 0 ; j < nr; j++) { 1663 if (do_read_u64(ff, id)) 1664 goto error; 1665 id++; 1666 } 1667 } 1668 out: 1669 free(buf); 1670 return events; 1671 error: 1672 free_event_desc(events); 1673 events = NULL; 1674 goto out; 1675 } 1676 1677 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val, 1678 void *priv __maybe_unused) 1679 { 1680 return fprintf(fp, ", %s = %s", name, val); 1681 } 1682 1683 static void print_event_desc(struct feat_fd *ff, FILE *fp) 1684 { 1685 struct evsel *evsel, *events; 1686 u32 j; 1687 u64 *id; 1688 1689 if (ff->events) 1690 events = ff->events; 1691 else 1692 events = read_event_desc(ff); 1693 1694 if (!events) { 1695 fprintf(fp, "# event desc: not available or unable to read\n"); 1696 return; 1697 } 1698 1699 for (evsel = events; evsel->core.attr.size; evsel++) { 1700 fprintf(fp, "# event : name = %s, ", evsel->name); 1701 1702 if (evsel->ids) { 1703 fprintf(fp, ", id = {"); 1704 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { 1705 if (j) 1706 fputc(',', fp); 1707 fprintf(fp, " %"PRIu64, *id); 1708 } 1709 fprintf(fp, " }"); 1710 } 1711 1712 perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL); 1713 1714 fputc('\n', fp); 1715 } 1716 1717 free_event_desc(events); 1718 ff->events = NULL; 1719 } 1720 1721 static void print_total_mem(struct feat_fd *ff, FILE *fp) 1722 { 1723 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem); 1724 } 1725 1726 static void print_numa_topology(struct feat_fd *ff, FILE *fp) 1727 { 1728 int i; 1729 struct numa_node *n; 1730 1731 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) { 1732 n = &ff->ph->env.numa_nodes[i]; 1733 1734 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," 1735 " free = %"PRIu64" kB\n", 1736 n->node, n->mem_total, n->mem_free); 1737 1738 fprintf(fp, "# node%u cpu list : ", n->node); 1739 cpu_map__fprintf(n->map, fp); 1740 } 1741 } 1742 1743 static void print_cpuid(struct feat_fd *ff, FILE *fp) 1744 { 1745 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid); 1746 } 1747 1748 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp) 1749 { 1750 fprintf(fp, "# contains samples with branch stack\n"); 1751 } 1752 1753 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp) 1754 { 1755 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n"); 1756 } 1757 1758 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp) 1759 { 1760 fprintf(fp, "# contains stat data\n"); 1761 } 1762 1763 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused) 1764 { 1765 int i; 1766 1767 fprintf(fp, "# CPU cache info:\n"); 1768 for (i = 0; i < ff->ph->env.caches_cnt; i++) { 1769 fprintf(fp, "# "); 1770 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]); 1771 } 1772 } 1773 1774 static void print_compressed(struct feat_fd *ff, FILE *fp) 1775 { 1776 fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n", 1777 ff->ph->env.comp_type == PERF_COMP_ZSTD ? 
"Zstd" : "Unknown", 1778 ff->ph->env.comp_level, ff->ph->env.comp_ratio); 1779 } 1780 1781 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp) 1782 { 1783 const char *delimiter = "# pmu mappings: "; 1784 char *str, *tmp; 1785 u32 pmu_num; 1786 u32 type; 1787 1788 pmu_num = ff->ph->env.nr_pmu_mappings; 1789 if (!pmu_num) { 1790 fprintf(fp, "# pmu mappings: not available\n"); 1791 return; 1792 } 1793 1794 str = ff->ph->env.pmu_mappings; 1795 1796 while (pmu_num) { 1797 type = strtoul(str, &tmp, 0); 1798 if (*tmp != ':') 1799 goto error; 1800 1801 str = tmp + 1; 1802 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); 1803 1804 delimiter = ", "; 1805 str += strlen(str) + 1; 1806 pmu_num--; 1807 } 1808 1809 fprintf(fp, "\n"); 1810 1811 if (!pmu_num) 1812 return; 1813 error: 1814 fprintf(fp, "# pmu mappings: unable to read\n"); 1815 } 1816 1817 static void print_group_desc(struct feat_fd *ff, FILE *fp) 1818 { 1819 struct perf_session *session; 1820 struct evsel *evsel; 1821 u32 nr = 0; 1822 1823 session = container_of(ff->ph, struct perf_session, header); 1824 1825 evlist__for_each_entry(session->evlist, evsel) { 1826 if (perf_evsel__is_group_leader(evsel) && 1827 evsel->core.nr_members > 1) { 1828 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", 1829 perf_evsel__name(evsel)); 1830 1831 nr = evsel->core.nr_members - 1; 1832 } else if (nr) { 1833 fprintf(fp, ",%s", perf_evsel__name(evsel)); 1834 1835 if (--nr == 0) 1836 fprintf(fp, "}\n"); 1837 } 1838 } 1839 } 1840 1841 static void print_sample_time(struct feat_fd *ff, FILE *fp) 1842 { 1843 struct perf_session *session; 1844 char time_buf[32]; 1845 double d; 1846 1847 session = container_of(ff->ph, struct perf_session, header); 1848 1849 timestamp__scnprintf_usec(session->evlist->first_sample_time, 1850 time_buf, sizeof(time_buf)); 1851 fprintf(fp, "# time of first sample : %s\n", time_buf); 1852 1853 timestamp__scnprintf_usec(session->evlist->last_sample_time, 1854 time_buf, sizeof(time_buf)); 1855 fprintf(fp, "# time of last sample : %s\n", time_buf); 1856 1857 d = (double)(session->evlist->last_sample_time - 1858 session->evlist->first_sample_time) / NSEC_PER_MSEC; 1859 1860 fprintf(fp, "# sample duration : %10.3f ms\n", d); 1861 } 1862 1863 static void memory_node__fprintf(struct memory_node *n, 1864 unsigned long long bsize, FILE *fp) 1865 { 1866 char buf_map[100], buf_size[50]; 1867 unsigned long long size; 1868 1869 size = bsize * bitmap_weight(n->set, n->size); 1870 unit_number__scnprintf(buf_size, 50, size); 1871 1872 bitmap_scnprintf(n->set, n->size, buf_map, 100); 1873 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map); 1874 } 1875 1876 static void print_mem_topology(struct feat_fd *ff, FILE *fp) 1877 { 1878 struct memory_node *nodes; 1879 int i, nr; 1880 1881 nodes = ff->ph->env.memory_nodes; 1882 nr = ff->ph->env.nr_memory_nodes; 1883 1884 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n", 1885 nr, ff->ph->env.memory_bsize); 1886 1887 for (i = 0; i < nr; i++) { 1888 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp); 1889 } 1890 } 1891 1892 static int __event_process_build_id(struct perf_record_header_build_id *bev, 1893 char *filename, 1894 struct perf_session *session) 1895 { 1896 int err = -1; 1897 struct machine *machine; 1898 u16 cpumode; 1899 struct dso *dso; 1900 enum dso_kernel_type dso_type; 1901 1902 machine = perf_session__findnew_machine(session, bev->pid); 1903 if (!machine) 1904 goto out; 1905 1906 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1907 
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value gives us a nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct perf_record_header_build_id that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
2031 */ 2032 if (memcmp(filename, "nel.kallsyms]", 13) == 0) { 2033 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) 2034 return -1; 2035 return perf_header__read_build_ids_abi_quirk(header, input, offset, size); 2036 } 2037 2038 __event_process_build_id(&bev, filename, session); 2039 2040 offset += bev.header.size; 2041 } 2042 err = 0; 2043 out: 2044 return err; 2045 } 2046 2047 /* Macro for features that simply need to read and store a string. */ 2048 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \ 2049 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \ 2050 {\ 2051 ff->ph->env.__feat_env = do_read_string(ff); \ 2052 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \ 2053 } 2054 2055 FEAT_PROCESS_STR_FUN(hostname, hostname); 2056 FEAT_PROCESS_STR_FUN(osrelease, os_release); 2057 FEAT_PROCESS_STR_FUN(version, version); 2058 FEAT_PROCESS_STR_FUN(arch, arch); 2059 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc); 2060 FEAT_PROCESS_STR_FUN(cpuid, cpuid); 2061 2062 static int process_tracing_data(struct feat_fd *ff, void *data) 2063 { 2064 ssize_t ret = trace_report(ff->fd, data, false); 2065 2066 return ret < 0 ? -1 : 0; 2067 } 2068 2069 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused) 2070 { 2071 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size)) 2072 pr_debug("Failed to read buildids, continuing...\n"); 2073 return 0; 2074 } 2075 2076 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused) 2077 { 2078 int ret; 2079 u32 nr_cpus_avail, nr_cpus_online; 2080 2081 ret = do_read_u32(ff, &nr_cpus_avail); 2082 if (ret) 2083 return ret; 2084 2085 ret = do_read_u32(ff, &nr_cpus_online); 2086 if (ret) 2087 return ret; 2088 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail; 2089 ff->ph->env.nr_cpus_online = (int)nr_cpus_online; 2090 return 0; 2091 } 2092 2093 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused) 2094 { 2095 u64 total_mem; 2096 int ret; 2097 2098 ret = do_read_u64(ff, &total_mem); 2099 if (ret) 2100 return -1; 2101 ff->ph->env.total_mem = (unsigned long long)total_mem; 2102 return 0; 2103 } 2104 2105 static struct evsel * 2106 perf_evlist__find_by_index(struct evlist *evlist, int idx) 2107 { 2108 struct evsel *evsel; 2109 2110 evlist__for_each_entry(evlist, evsel) { 2111 if (evsel->idx == idx) 2112 return evsel; 2113 } 2114 2115 return NULL; 2116 } 2117 2118 static void 2119 perf_evlist__set_event_name(struct evlist *evlist, 2120 struct evsel *event) 2121 { 2122 struct evsel *evsel; 2123 2124 if (!event->name) 2125 return; 2126 2127 evsel = perf_evlist__find_by_index(evlist, event->idx); 2128 if (!evsel) 2129 return; 2130 2131 if (evsel->name) 2132 return; 2133 2134 evsel->name = strdup(event->name); 2135 } 2136 2137 static int 2138 process_event_desc(struct feat_fd *ff, void *data __maybe_unused) 2139 { 2140 struct perf_session *session; 2141 struct evsel *evsel, *events = read_event_desc(ff); 2142 2143 if (!events) 2144 return 0; 2145 2146 session = container_of(ff->ph, struct perf_session, header); 2147 2148 if (session->data->is_pipe) { 2149 /* Save events for reading later by print_event_desc, 2150 * since they can't be read again in pipe mode. 
static int process_tracing_data(struct feat_fd *ff, void *data)
{
	ssize_t ret = trace_report(ff->fd, data, false);

	return ret < 0 ? -1 : 0;
}

static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
		pr_debug("Failed to read buildids, continuing...\n");
	return 0;
}

static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret;
	u32 nr_cpus_avail, nr_cpus_online;

	ret = do_read_u32(ff, &nr_cpus_avail);
	if (ret)
		return ret;

	ret = do_read_u32(ff, &nr_cpus_online);
	if (ret)
		return ret;
	ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
	ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
	return 0;
}

static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
{
	u64 total_mem;
	int ret;

	ret = do_read_u64(ff, &total_mem);
	if (ret)
		return -1;
	ff->ph->env.total_mem = (unsigned long long)total_mem;
	return 0;
}

static struct evsel *
perf_evlist__find_by_index(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}

	return NULL;
}

static void
perf_evlist__set_event_name(struct evlist *evlist,
			    struct evsel *event)
{
	struct evsel *evsel;

	if (!event->name)
		return;

	evsel = perf_evlist__find_by_index(evlist, event->idx);
	if (!evsel)
		return;

	if (evsel->name)
		return;

	evsel->name = strdup(event->name);
}

static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	struct evsel *evsel, *events = read_event_desc(ff);

	if (!events)
		return 0;

	session = container_of(ff->ph, struct perf_session, header);

	if (session->data->is_pipe) {
		/* Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode. */
		ff->events = events;
	}

	for (evsel = events; evsel->core.attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	if (!session->data->is_pipe)
		free_event_desc(events);

	return 0;
}

static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ff->ph->env.nr_cmdline = nr;

	cmdline = zalloc(ff->size + nr + 1);
	if (!cmdline)
		return -1;

	argv = zalloc(sizeof(char *) * (nr + 1));
	if (!argv)
		goto error;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		argv[i] = cmdline + len;
		memcpy(argv[i], str, strlen(str) + 1);
		len += strlen(str) + 1;
		free(str);
	}
	ff->ph->env.cmdline = cmdline;
	ff->ph->env.cmdline_argv = (const char **) argv;
	return 0;

error:
	free(argv);
	free(cmdline);
	return -1;
}
static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	u32 nr, i;
	char *str;
	struct strbuf sb;
	int cpu_nr = ff->ph->env.nr_cpus_avail;
	u64 size = 0;
	struct perf_header *ph = ff->ph;
	bool do_core_id_test = true;

	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
	if (!ph->env.cpu)
		return -1;

	if (do_read_u32(ff, &nr))
		goto free_cpu;

	ph->env.nr_sibling_cores = nr;
	size += sizeof(u32);
	if (strbuf_init(&sb, 128) < 0)
		goto free_cpu;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_cores = strbuf_detach(&sb, NULL);

	if (do_read_u32(ff, &nr))
		return -1;

	ph->env.nr_sibling_threads = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_threads = strbuf_detach(&sb, NULL);

	/*
	 * The header may be from old perf,
	 * which doesn't include core id and socket id information.
	 */
	if (ff->size <= size) {
		zfree(&ph->env.cpu);
		return 0;
	}

	/* On s390 the socket_id number is not related to the number of cpus.
	 * The socket_id number might be higher than the number of cpus.
	 * This depends on the configuration.
	 * AArch64 is the same.
	 */
	if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
			  || !strncmp(ph->env.arch, "aarch64", 7)))
		do_core_id_test = false;

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].core_id = nr;
		size += sizeof(u32);

		if (do_read_u32(ff, &nr))
			goto free_cpu;

		if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
			pr_debug("socket_id number is too big. "
				 "You may need to upgrade the perf tool.\n");
			goto free_cpu;
		}

		ph->env.cpu[i].socket_id = nr;
		size += sizeof(u32);
	}

	/*
	 * The header may be from old perf,
	 * which doesn't include die information.
	 */
	if (ff->size <= size)
		return 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ph->env.nr_sibling_dies = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_dies = strbuf_detach(&sb, NULL);

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].die_id = nr;
	}

	return 0;

error:
	strbuf_release(&sb);
free_cpu:
	zfree(&ph->env.cpu);
	return -1;
}

static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	struct numa_node *nodes, *n;
	u32 nr, i;
	char *str;

	/* nr nodes */
	if (do_read_u32(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		n = &nodes[i];

		/* node number */
		if (do_read_u32(ff, &n->node))
			goto error;

		if (do_read_u64(ff, &n->mem_total))
			goto error;

		if (do_read_u64(ff, &n->mem_free))
			goto error;

		str = do_read_string(ff);
		if (!str)
			goto error;

		n->map = perf_cpu_map__new(str);
		if (!n->map)
			goto error;

		free(str);
	}
	ff->ph->env.nr_numa_nodes = nr;
	ff->ph->env.numa_nodes = nodes;
	return 0;

error:
	free(nodes);
	return -1;
}

static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
{
	char *name;
	u32 pmu_num;
	u32 type;
	struct strbuf sb;

	if (do_read_u32(ff, &pmu_num))
		return -1;

	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return 0;
	}

	ff->ph->env.nr_pmu_mappings = pmu_num;
	if (strbuf_init(&sb, 128) < 0)
		return -1;

	while (pmu_num) {
		if (do_read_u32(ff, &type))
			goto error;

		name = do_read_string(ff);
		if (!name)
			goto error;

		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
			goto error;
		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;

		if (!strcmp(name, "msr"))
			ff->ph->env.msr_pmu_type = type;

		free(name);
		pmu_num--;
	}
	ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
	return 0;

error:
	strbuf_release(&sb);
	return -1;
}
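/*
 * The detached string above holds NUL-separated "type:name" pairs,
 * e.g. "4:cpu" then "5:breakpoint" (type numbers vary by system), and
 * is what print_pmu_mappings() walks when dumping the header.
 */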
static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret = -1;
	u32 i, nr, nr_groups;
	struct perf_session *session;
	struct evsel *evsel, *leader = NULL;
	struct group_desc {
		char *name;
		u32 leader_idx;
		u32 nr_members;
	} *desc;

	if (do_read_u32(ff, &nr_groups))
		return -1;

	ff->ph->env.nr_groups = nr_groups;
	if (!nr_groups) {
		pr_debug("group desc not available\n");
		return 0;
	}

	desc = calloc(nr_groups, sizeof(*desc));
	if (!desc)
		return -1;

	for (i = 0; i < nr_groups; i++) {
		desc[i].name = do_read_string(ff);
		if (!desc[i].name)
			goto out_free;

		if (do_read_u32(ff, &desc[i].leader_idx))
			goto out_free;

		if (do_read_u32(ff, &desc[i].nr_members))
			goto out_free;
	}

	/*
	 * Rebuild group relationship based on the group_desc
	 */
	session = container_of(ff->ph, struct perf_session, header);
	session->evlist->nr_groups = nr_groups;

	i = nr = 0;
	evlist__for_each_entry(session->evlist, evsel) {
		if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) {
			evsel->leader = evsel;
			/* {anon_group} is a dummy name */
			if (strcmp(desc[i].name, "{anon_group}")) {
				evsel->group_name = desc[i].name;
				desc[i].name = NULL;
			}
			evsel->core.nr_members = desc[i].nr_members;

			if (nr > 0) {
				pr_debug("invalid group desc\n");
				goto out_free;
			}

			leader = evsel;
			nr = evsel->core.nr_members - 1;
			i++;
		} else if (nr) {
			/* This is a group member */
			evsel->leader = leader;

			nr--;
		}
	}

	if (i != nr_groups || nr != 0) {
		pr_debug("invalid group desc\n");
		goto out_free;
	}

	ret = 0;
out_free:
	for (i = 0; i < nr_groups; i++)
		zfree(&desc[i].name);
	free(desc);

	return ret;
}

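/*
 * Illustrative walk-through: for an evlist recorded as
 * "{cycles,instructions}" the file carries one descriptor, roughly
 *
 *	{ .name = "{anon_group}", .leader_idx = 0, .nr_members = 2 }
 *
 * so the loop above points evsel 0's leader at itself and evsel 1's
 * leader at evsel 0, restoring the group.
 */
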
static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__process(ff->fd, ff->size, session,
				      ff->ph->needs_swap);
	if (err < 0)
		pr_err("Failed to process auxtrace index\n");
	return err;
}

static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
{
	struct cpu_cache_level *caches;
	u32 cnt, i, version;

	if (do_read_u32(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u32(ff, &cnt))
		return -1;

	caches = zalloc(sizeof(*caches) * cnt);
	if (!caches)
		return -1;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level c;

#define _R(v)					\
		if (do_read_u32(ff, &c.v))	\
			goto out_free_caches;	\

		_R(level)
		_R(line_size)
		_R(sets)
		_R(ways)
#undef _R

#define _R(v)					\
		c.v = do_read_string(ff);	\
		if (!c.v)			\
			goto out_free_caches;

		_R(type)
		_R(size)
		_R(map)
#undef _R

		caches[i] = c;
	}

	ff->ph->env.caches = caches;
	ff->ph->env.caches_cnt = cnt;
	return 0;
out_free_caches:
	for (i = 0; i < cnt; i++) {
		free(caches[i].type);
		free(caches[i].size);
		free(caches[i].map);
	}
	free(caches);
	return -1;
}

static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	u64 first_sample_time, last_sample_time;
	int ret;

	session = container_of(ff->ph, struct perf_session, header);

	ret = do_read_u64(ff, &first_sample_time);
	if (ret)
		return -1;

	ret = do_read_u64(ff, &last_sample_time);
	if (ret)
		return -1;

	session->evlist->first_sample_time = first_sample_time;
	session->evlist->last_sample_time = last_sample_time;
	return 0;
}

static int process_mem_topology(struct feat_fd *ff,
				void *data __maybe_unused)
{
	struct memory_node *nodes;
	u64 version, i, nr, bsize;
	int ret = -1;

	if (do_read_u64(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u64(ff, &bsize))
		return -1;

	if (do_read_u64(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -1;

	for (i = 0; i < nr; i++) {
		struct memory_node n;

#define _R(v)					\
		if (do_read_u64(ff, &n.v))	\
			goto out;		\

		_R(node)
		_R(size)

#undef _R

		if (do_read_bitmap(ff, &n.set, &n.size))
			goto out;

		nodes[i] = n;
	}

	ff->ph->env.memory_bsize = bsize;
	ff->ph->env.memory_nodes = nodes;
	ff->ph->env.nr_memory_nodes = nr;
	ret = 0;

out:
	if (ret)
		free(nodes);
	return ret;
}

static int process_clockid(struct feat_fd *ff,
			   void *data __maybe_unused)
{
	if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
		return -1;

	return 0;
}

static int process_dir_format(struct feat_fd *ff,
			      void *_data __maybe_unused)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	if (WARN_ON(!perf_data__is_dir(data)))
		return -1;

	return do_read_u64(ff, &data->dir.version);
}

#ifdef HAVE_LIBBPF_SUPPORT
static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = &ff->ph->env;
	u32 count, i;
	int err = -1;

	if (ff->ph->needs_swap) {
		pr_warning("interpreting bpf_prog_info from a host with different endianness is not yet supported\n");
		return 0;
	}

	if (do_read_u32(ff, &count))
		return -1;

	down_write(&env->bpf_progs.lock);

	for (i = 0; i < count; ++i) {
		u32 info_len, data_len;

		info_linear = NULL;
		info_node = NULL;
		if (do_read_u32(ff, &info_len))
			goto out;
		if (do_read_u32(ff, &data_len))
			goto out;

		if (info_len > sizeof(struct bpf_prog_info)) {
			pr_warning("detected invalid bpf_prog_info\n");
			goto out;
		}

		info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
				     data_len);
		if (!info_linear)
			goto out;
		info_linear->info_len = sizeof(struct bpf_prog_info);
		info_linear->data_len = data_len;
		if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
			goto out;
		if (__do_read(ff, &info_linear->info, info_len))
			goto out;
		if (info_len < sizeof(struct bpf_prog_info))
			memset(((void *)(&info_linear->info)) + info_len, 0,
			       sizeof(struct bpf_prog_info) - info_len);

		if (__do_read(ff, info_linear->data, data_len))
			goto out;

		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node)
			goto out;

		/* after reading from file, translate offset to address */
		bpf_program__bpil_offs_to_addr(info_linear);
		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
	}

	up_write(&env->bpf_progs.lock);
	return 0;
out:
	free(info_linear);
	free(info_node);
	up_write(&env->bpf_progs.lock);
	return err;
}
#else // HAVE_LIBBPF_SUPPORT
static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT

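/*
 * Sketch of one BPF_PROG_INFO record as parsed above:
 *
 *	u32 info_len		length of the bpf_prog_info portion
 *	u32 data_len		length of the trailing arrays
 *	u64 arrays		bitmask of arrays present in data
 *	u8  info[info_len]	struct bpf_prog_info (possibly truncated)
 *	u8  data[data_len]	the arrays themselves
 *
 * On disk the array references inside info are stored as offsets;
 * bpf_program__bpil_offs_to_addr() rewrites them into pointers.
 */
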
static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct btf_node *node = NULL;
	u32 count, i;
	int err = -1;

	if (ff->ph->needs_swap) {
		pr_warning("interpreting btf from a host with different endianness is not yet supported\n");
		return 0;
	}

	if (do_read_u32(ff, &count))
		return -1;

	down_write(&env->bpf_progs.lock);

	for (i = 0; i < count; ++i) {
		u32 id, data_size;

		if (do_read_u32(ff, &id))
			goto out;
		if (do_read_u32(ff, &data_size))
			goto out;

		node = malloc(sizeof(struct btf_node) + data_size);
		if (!node)
			goto out;

		node->id = id;
		node->data_size = data_size;

		if (__do_read(ff, node->data, data_size))
			goto out;

		perf_env__insert_btf(env, node);
		node = NULL;
	}

	err = 0;
out:
	up_write(&env->bpf_progs.lock);
	free(node);
	return err;
}

static int process_compressed(struct feat_fd *ff,
			      void *data __maybe_unused)
{
	if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_type)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_level)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
		return -1;

	return 0;
}

#define FEAT_OPR(n, func, __full_only)		\
	[HEADER_##n] = {			\
		.name	    = __stringify(n),	\
		.write	    = write_##func,	\
		.print	    = print_##func,	\
		.full_only  = __full_only,	\
		.process    = process_##func,	\
		.synthesize = true		\
	}

#define FEAT_OPN(n, func, __full_only)		\
	[HEADER_##n] = {			\
		.name	    = __stringify(n),	\
		.write	    = write_##func,	\
		.print	    = print_##func,	\
		.full_only  = __full_only,	\
		.process    = process_##func	\
	}

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

#define process_branch_stack	NULL
#define process_stat		NULL

// feat_ops is non-static because util/synthetic-events.c also uses it.
const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY, numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPR(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
	FEAT_OPR(CLOCKID,	clockid,	false),
	FEAT_OPN(DIR_FORMAT,	dir_format,	false),
	FEAT_OPR(BPF_PROG_INFO, bpf_prog_info,	false),
	FEAT_OPR(BPF_BTF,	bpf_btf,	false),
	FEAT_OPR(COMPRESSED,	compressed,	false),
};

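/*
 * For reference, a FEAT_OPR() entry such as
 * FEAT_OPR(HOSTNAME, hostname, false) expands to:
 *
 *	[HEADER_HOSTNAME] = {
 *		.name	    = "HOSTNAME",
 *		.write	    = write_hostname,
 *		.print	    = print_hostname,
 *		.full_only  = false,
 *		.process    = process_hostname,
 *		.synthesize = true
 *	}
 *
 * FEAT_OPN() is identical except that it leaves .synthesize unset.
 */
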
struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};

static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;
	struct feat_fd ff;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	ff = (struct feat_fd) {
		.fd = fd,
		.ph = ph,
	};

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(&ff, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}

int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data__fd(session->data);
	struct stat st;
	time_t stctime;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	stctime = st.st_ctime;
	fprintf(fp, "# captured on : %s", ctime(&stctime));

	fprintf(fp, "# header version : %u\n", header->version);
	fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
	fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size);
	fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset);

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	if (session->data->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}

static int do_write_feat(struct feat_fd *ff, int type,
			 struct perf_file_section **p,
			 struct evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(ff->ph, type)) {
		if (!feat_ops[type].write)
			return -1;

		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
			return -1;

		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);

		err = feat_ops[type].write(ff, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %s\n", feat_ops[type].name);

			/* undo anything written */
			lseek(ff->fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}

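/*
 * Rough layout of the feature area that perf_header__adds_write()
 * below produces, starting at header->feat_offset:
 *
 *	struct perf_file_section[nr_sections]	(the table, written last)
 *	feature payload				(one blob per set feature)
 *	...
 *
 * The payloads are written first so that each section entry can record
 * a payload's final offset and size; only then is the table written
 * back at the front of the area.
 */
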
static int perf_header__adds_write(struct perf_header *header,
				   struct evlist *evlist, int fd)
{
	int nr_sections;
	struct feat_fd ff;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	ff = (struct feat_fd){
		.fd = fd,
		.ph = header,
	};

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(&ff, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * We may write more than needed due to dropped features, but
	 * this is okay: the reader will skip the missing entries.
	 */
	err = do_write(&ff, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}

int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	struct feat_fd ff;
	int err;

	ff = (struct feat_fd){ .fd = fd };

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}

int perf_session__write_header(struct perf_session *session,
			       struct evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	struct perf_header *header = &session->header;
	struct evsel *evsel;
	struct feat_fd ff;
	u64 attr_offset;
	int err;

	ff = (struct feat_fd){ .fd = fd };
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(ff.fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->core.attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->core.nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}

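/*
 * Overall perf.data layout as written above (offsets are illustrative,
 * everything is located via the header fields):
 *
 *	0		struct perf_file_header
 *	...		sample ids, one u64 array per evsel
 *	attr_offset	struct perf_file_attr[nr_entries]
 *	data_offset	event data, data_size bytes
 *	feat_offset	feature sections (see perf_header__adds_write())
 */
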
static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}

int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}

static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};

/*
 * In the legacy file format the magic number does not encode the
 * endianness; hdr_sz was used for that instead. But since hdr_sz can
 * vary with the ABI revision, we have to try all known sizes in both
 * byte orders to detect the endianness.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}

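/*
 * Worked example, assuming the usual ABI sizes: for ABI0,
 * PERF_ATTR_SIZE_VER0 (64) plus sizeof(struct perf_file_section) (16)
 * gives a ref_size of 80. A same-endian file stores hdr_sz == 80; an
 * opposite-endian file stores bswap_64(80), which only matches after
 * swapping, so needs_swap gets set.
 */
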
#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * In the legacy pipe format there is an implicit assumption that the
 * endianness of the host recording the samples and of the host parsing
 * them is the same. This is not always the case, given that the pipe
 * output can be redirected into a file and analyzed on a different
 * machine, possibly with a different endianness and a different
 * perf_event ABI revision in the perf tool itself.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	u64 attr_size;
	int i;

	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != attr_pipe_abi_sizes[i])
				continue;

			ph->needs_swap = true;
		}
		pr_debug("Pipe ABI%d perf.data file detected\n", i);
		return 0;
	}
	return -1;
}

bool is_perf_magic(u64 magic)
{
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
		|| magic == __perf_magic2
		|| magic == __perf_magic2_sw)
		return true;

	return false;
}

static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}

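/*
 * Concretely: the bytes "PERFILE2" read as the u64 0x32454c4946524550
 * on a little-endian host (__perf_magic2) and as 0x50455246494c4532 on
 * a big-endian one (__perf_magic2_sw). Seeing the swapped constant
 * therefore identifies a file written with the opposite byte order.
 */
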
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try a 64-bit swap first (i.e., the
		 * file was created on a 64-bit host), and check if the
		 * hostname feature bit is set (this feature bit is forced on
		 * as of fbe96f2). If the bit is not set, undo the 64-bit swap
		 * and try a 32-bit swap. If the hostname bit is still not set
		 * (e.g., an older data file), punt and fall back to the
		 * original behavior -- clearing all feature bits and setting
		 * the build-id bit.
		 */
		mem_bswap_64(&header->adds_features,
			     BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				     BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				     BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset = header->data.offset;
	ph->data_size	= header->data.size;
	ph->feat_offset = header->data.offset + header->data.size;
	return 0;
}

static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	struct feat_fd fdd = {
		.fd	= fd,
		.ph	= ph,
		.size	= section->size,
		.offset	= section->offset,
	};

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(&fdd, data);
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	struct feat_fd ff = {
		.fd = STDOUT_FILENO,
		.ph = ph,
	};
	ssize_t ret;

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
		return -1;

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header,
					perf_data__fd(session->data),
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	return 0;
}

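/*
 * On-disk shape of one attribute entry, as consumed by read_attr()
 * below: a struct perf_event_attr, whose stored size depends on the
 * ABI revision of the recording perf, immediately followed by a
 * struct perf_file_section locating that event's sample ids.
 */
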
static int read_attr(int fd, struct perf_header *ph,
		     struct perf_file_attr *f_attr)
{
	struct perf_event_attr *attr = &f_attr->attr;
	size_t sz, left;
	size_t our_sz = sizeof(f_attr->attr);
	ssize_t ret;

	memset(f_attr, 0, sizeof(*f_attr));

	/* read minimal guaranteed structure */
	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
	if (ret <= 0) {
		pr_debug("cannot read %d bytes of header attr\n",
			 PERF_ATTR_SIZE_VER0);
		return -1;
	}

	/* on-file perf_event_attr size */
	sz = attr->size;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	if (sz == 0) {
		/* assume ABI0 */
		sz = PERF_ATTR_SIZE_VER0;
	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);
		return -1;
	}
	/* what we have not yet read and that we know about */
	left = sz - PERF_ATTR_SIZE_VER0;
	if (left) {
		void *ptr = attr;

		ptr += PERF_ATTR_SIZE_VER0;

		ret = readn(fd, ptr, left);
		if (ret <= 0)
			return -1;
	}
	/* read perf_file_section, ids are read in caller */
	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
}

static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel,
						struct tep_handle *pevent)
{
	struct tep_event *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	event = tep_find_event(pevent, evsel->core.attr.config);
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
		return -1;
	}

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}

static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist,
						  struct tep_handle *pevent)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__prepare_tracepoint_event(pos, pevent))
			return -1;
	}

	return 0;
}

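/*
 * For example, an evsel for the sched_switch tracepoint that arrives
 * without a name ends up as "sched:sched_switch" via the snprintf()
 * in perf_evsel__prepare_tracepoint_event().
 */
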
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	u64 f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information. Just warn the user and process as much as we can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	if (f_header.attr_size == 0) {
		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
		       "Was the 'perf record' command properly terminated?\n",
		       data->file.path);
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at evlist__delete().
		 */
		evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * nr_ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

int perf_event__process_feature(struct perf_session *session,
				union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct feat_fd ff = { .fd = 0 };
	struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid feature id %" PRI_lu64 " in pipe-mode\n", feat);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	ff.buf  = (void *)fe->data;
	ff.size = event->header.size - sizeof(*fe);
	ff.ph = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}

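/*
 * Note the difference from file mode: in pipe mode each feature arrives
 * as a PERF_RECORD_HEADER_FEATURE event, so perf_event__process_feature()
 * above parses the payload from the in-memory buffer (ff.buf) rather
 * than seeking around an on-disk feature section.
 */
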
size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
	struct perf_record_event_update *ev = &event->event_update;
	struct perf_record_event_update_scale *ev_scale;
	struct perf_record_event_update_cpus *ev_cpus;
	struct perf_cpu_map *map;
	size_t ret;

	ret = fprintf(fp, "\n... id: %" PRI_lu64 "\n", ev->id);

	switch (ev->type) {
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct perf_record_event_update_scale *)ev->data;
		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
		break;
	case PERF_EVENT_UPDATE__UNIT:
		ret += fprintf(fp, "... unit: %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		ret += fprintf(fp, "... name: %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
		ret += fprintf(fp, "... ");

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			ret += cpu_map__fprintf(map, fp);
		else
			ret += fprintf(fp, "failed to get cpus\n");
		break;
	default:
		ret += fprintf(fp, "... unknown type\n");
		break;
	}

	return ret;
}

int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct evsel *evsel;
	struct evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * n_ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
	}

	return 0;
}

int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct evlist **pevlist)
{
	struct perf_record_event_update *ev = &event->event_update;
	struct perf_record_event_update_scale *ev_scale;
	struct perf_record_event_update_cpus *ev_cpus;
	struct evlist *evlist;
	struct evsel *evsel;
	struct perf_cpu_map *map;

	if (!pevlist || *pevlist == NULL)
		return -EINVAL;

	evlist = *pevlist;

	evsel = perf_evlist__id2evsel(evlist, ev->id);
	if (evsel == NULL)
		return -EINVAL;

	switch (ev->type) {
	case PERF_EVENT_UPDATE__UNIT:
		evsel->unit = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		evsel->name = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct perf_record_event_update_scale *)ev->data;
		evsel->scale = ev_scale->scale;
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct perf_record_event_update_cpus *)ev->data;

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			evsel->core.own_cpus = map;
		else
			pr_err("failed to get event_update cpus\n");
		break;
	default:
		break;
	}

	return 0;
}

int perf_event__process_tracing_data(struct perf_session *session,
				     union perf_event *event)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file\n", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);

		if (retw != padding) {
			pr_err("%s: repiping tracing data padding\n", __func__);
			return -1;
		}
	}

	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch\n", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}

int perf_event__process_build_id(struct perf_session *session,
				 union perf_event *event)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}