// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "util.h"
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/stringify.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>

#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "memswap.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
#include "time-utils.h"
#include "units.h"

#include "sane_ctype.h"

/*
 * magic2 = "PERFILE2"
 * must be a numerical value so that the endianness
 * determines the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we also check for the legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

struct feat_fd {
	struct perf_header	*ph;
	int			fd;
	void			*buf;	/* Either buf != NULL or fd >= 0 */
	ssize_t			offset;
	size_t			size;
	struct perf_evsel	*events;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}
/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}

static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}

static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}

static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);
	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);
	return 0;
}

static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}
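/*
 * Illustrative sketch of the on-file string format above, assuming
 * NAME_ALIGN is 64: do_write_string(ff, "perf") emits a u32 length of
 * 64 followed by "perf\0" and 59 bytes of zero padding, and
 * do_read_string() simply mallocs those 64 bytes and relies on the
 * embedded NUL terminator.
 */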
/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_alloc(size);
	if (!set)
		return -ENOMEM;

	bitmap_zero(set, size);

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			free(set);
			return ret;
		}
	}

	*pset  = set;
	*psize = size;
	return 0;
}

static int write_tracing_data(struct feat_fd *ff,
			      struct perf_evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->entries);
}

static int write_build_id(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}

static int write_version(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}

static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}
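/*
 * Example of the squashing above (values illustrative): a /proc/cpuinfo
 * line such as "model name : Intel(R)  Core(TM)   i7 CPU\n" is trimmed
 * to the text after ": ", the newline is cut, and each run of
 * whitespace collapses to a single space, yielding
 * "Intel(R) Core(TM) i7 CPU".
 */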
static int write_cpudesc(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	const char *cpuinfo_procs[] = CPUINFO_PROC;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}


static int write_nrcpus(struct feat_fd *ff,
			struct perf_evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(ff, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(ff, &nra, sizeof(nra));
}

static int write_event_desc(struct feat_fd *ff,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique ids per event;
		 * there is one id per instance of an event.
		 *
		 * copy into nri to be independent of the
		 * type of ids.
		 */
		nri = evsel->ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cmdline(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	char buf[MAXPATHLEN];
	u32 n;
	int i, ret;

	/* actual path to perf binary */
	ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

struct cpu_topo {
	u32 cpu_nr;
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};
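/*
 * The sibling strings collected below are sysfs range lists such as
 * "0-3" or "0,4" (actual values vary by machine); build_cpu_topo()
 * deduplicates them so each distinct core/thread sibling set is stored
 * only once.
 */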
static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}

static void free_cpu_topo(struct cpu_topo *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0 ; i < tp->core_sib; i++)
		zfree(&tp->core_siblings[i]);

	for (i = 0 ; i < tp->thread_sib; i++)
		zfree(&tp->thread_siblings[i]);

	free(tp);
}

static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	tp->cpu_nr = nr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}

static int write_cpu_topology(struct feat_fd *ff,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_topo *tp;
	u32 i;
	int ret, j;

	tp = build_cpu_topology();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}
done:
	free_cpu_topo(tp);
	return ret;
}
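/*
 * Resulting CPU_TOPOLOGY on-disk layout (sketch):
 *
 *	u32 core_sib;			core sibling strings
 *	char core_siblings[][];		do_write_string() format
 *	u32 thread_sib;			thread sibling strings
 *	char thread_siblings[][];
 *	struct { u32 core_id; u32 socket_id; } [nr_cpus_avail];
 */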


static int write_total_mem(struct feat_fd *ff,
			   struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_topo_node(struct feat_fd *ff, int node)
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	FILE *fp;
	u64 mem_total = 0, mem_free = 0, mem;
	int ret = -1;

	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
			goto done;
		if (!strcmp(field, "MemTotal:"))
			mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			mem_free = mem;
	}

	fclose(fp);
	fp = NULL;

	ret = do_write(ff, &mem_total, sizeof(u64));
	if (ret)
		goto done;

	ret = do_write(ff, &mem_free, sizeof(u64));
	if (ret)
		goto done;

	ret = -1;
	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

	fp = fopen(str, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	ret = do_write_string(ff, buf);
done:
	free(buf);
	if (fp)
		fclose(fp);
	return ret;
}

static int write_numa_topology(struct feat_fd *ff,
			       struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(ff, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(ff, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return ret;
}
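/*
 * NUMA_TOPOLOGY layout written above (sketch):
 *
 *	u32 nr;				number of online nodes
 *	struct {
 *		u32  node;		node id
 *		u64  mem_total;		kB, from the node's meminfo
 *		u64  mem_free;
 *		char cpulist[];		do_write_string() format
 *	} [nr];
 */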
/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

static int write_pmu_mappings(struct feat_fd *ff,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

	/*
	 * Do a first pass to count number of pmu to avoid lseek so this
	 * works in pipe mode as well.
	 */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;

		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(struct feat_fd *ff,
			    struct perf_evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct perf_evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}

static int write_cpuid(struct feat_fd *ff,
		       struct perf_evlist *evlist __maybe_unused)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (!ret)
		goto write_it;

	return -1;
write_it:
	return do_write_string(ff, buffer);
}

static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_auxtrace(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}

static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}
static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = rtrim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		free(cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = rtrim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		free(cache->size);
		free(cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = rtrim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
out:
	*cntp = cnt;
	return 0;
}

#define MAX_CACHES 2000
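/*
 * CACHE section layout produced by write_cache() below (sketch):
 *
 *	u32 version;		currently 1
 *	u32 cnt;		number of unique cache levels
 *	struct {
 *		u32  level, line_size, sets, ways;
 *		char type[], size[], map[];	do_write_string() format
 *	} [cnt];
 */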
static int write_cache(struct feat_fd *ff,
		       struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

#define _W(v)						\
		ret = do_write(ff, &c->v, sizeof(u32));	\
		if (ret < 0)				\
			goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
#undef _W

#define _W(v)							\
		ret = do_write_string(ff, (const char *) c->v);	\
		if (ret < 0)					\
			goto out;

		_W(type)
		_W(size)
		_W(map)
#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_sample_time(struct feat_fd *ff,
			     struct perf_evlist *evlist)
{
	int ret;

	ret = do_write(ff, &evlist->first_sample_time,
		       sizeof(evlist->first_sample_time));
	if (ret < 0)
		return ret;

	return do_write(ff, &evlist->last_sample_time,
			sizeof(evlist->last_sample_time));
}


static int memory_node__read(struct memory_node *n, unsigned long idx)
{
	unsigned int phys, size = 0;
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;

#define for_each_memory(mem, dir)					\
	while ((ent = readdir(dir)))					\
		if (strcmp(ent->d_name, ".") &&				\
		    strcmp(ent->d_name, "..") &&			\
		    sscanf(ent->d_name, "memory%u", &mem) == 1)

	scnprintf(path, PATH_MAX,
		  "%s/devices/system/node/node%lu",
		  sysfs__mountpoint(), idx);

	dir = opendir(path);
	if (!dir) {
		pr_warning("failed: can't open memory sysfs data\n");
		return -1;
	}

	for_each_memory(phys, dir) {
		size = max(phys, size);
	}

	size++;

	n->set = bitmap_alloc(size);
	if (!n->set) {
		closedir(dir);
		return -ENOMEM;
	}

	bitmap_zero(n->set, size);
	n->node = idx;
	n->size = size;

	rewinddir(dir);

	for_each_memory(phys, dir) {
		set_bit(phys, n->set);
	}

	closedir(dir);
	return 0;
}

static int memory_node__sort(const void *a, const void *b)
{
	const struct memory_node *na = a;
	const struct memory_node *nb = b;

	return na->node - nb->node;
}

static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
{
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;
	u64 cnt = 0;
	int ret = 0;

	scnprintf(path, PATH_MAX, "%s/devices/system/node/",
		  sysfs__mountpoint());

	dir = opendir(path);
	if (!dir) {
		pr_warning("failed: can't open node sysfs data\n");
		return -1;
	}

	while (!ret && (ent = readdir(dir))) {
		unsigned int idx;
		int r;

		if (!strcmp(ent->d_name, ".") ||
		    !strcmp(ent->d_name, ".."))
			continue;

		r = sscanf(ent->d_name, "node%u", &idx);
		if (r != 1)
			continue;

		if (WARN_ONCE(cnt >= size,
			      "failed to write MEM_TOPOLOGY, way too many nodes\n"))
			return -1;

		ret = memory_node__read(&nodes[cnt++], idx);
	}

	*cntp = cnt;
	closedir(dir);

	if (!ret)
		qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);

	return ret;
}
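/*
 * Illustrative example for memory_node__read() above: a node directory
 * containing memory0, memory1 and memory5 yields size = 6 and a bitmap
 * with bits 0, 1 and 5 set; each bit stands for one memory block of
 * block_size_bytes.
 */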
#define MAX_MEMORY_NODES 2000

/*
 * The MEM_TOPOLOGY holds physical memory map for every
 * node in system. The format of data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store map of physical indexes for
 * each node:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belongs to node
 */
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	static struct memory_node nodes[MAX_MEMORY_NODES];
	u64 bsize, version = 1, i, nr;
	int ret;

	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
	if (ret)
		return ret;

	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

#define _W(v)						\
		ret = do_write(ff, &n->v, sizeof(n->v));	\
		if (ret < 0)				\
			goto out;

		_W(node)
		_W(size)

#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
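/*
 * Worked example (hypothetical machine): with block_size_bytes =
 * 0x8000000 (128M) and a single node owning blocks 0-31, the section
 * would read version = 1, bsize = 0x8000000, nr = 1, then node = 0,
 * size = 32 and one u64 bitmap word with the low 32 bits set, emitted
 * by do_write_bitmap().
 */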
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}

static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}

static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}

static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}

static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}

static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}

static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++)
		fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
	fputc('\n', fp);
}

static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores : %s\n", str);
		str += strlen(str) + 1;
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.cpu != NULL) {
		for (i = 0; i < cpu_nr; i++)
			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
	} else
		fprintf(fp, "# Core ID and Socket ID information is not available\n");
}

static void free_event_desc(struct perf_evsel *events)
{
	struct perf_evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->id);
	}

	free(events);
}

static struct perf_evsel *read_event_desc(struct feat_fd *ff)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	/* number of events */
	if (do_read_u32(ff, &nre))
		goto error;

	if (do_read_u32(ff, &sz))
		goto error;

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}
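/*
 * EVENT_DESC layout parsed above (sketch, mirroring write_event_desc()):
 *
 *	u32 nre;	number of events
 *	u32 sz;		sizeof(struct perf_event_attr) at record time
 *	struct {
 *		struct perf_event_attr attr;	sz bytes on file
 *		u32  nr;			number of unique ids
 *		char name[];			do_write_string() format
 *		u64  ids[nr];
 *	} [nre];
 */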
static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}

static void print_event_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_evsel *evsel, *events;
	u32 j;
	u64 *id;

	if (ff->events)
		events = ff->events;
	else
		events = read_event_desc(ff);

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->attr.size; evsel++) {
		fprintf(fp, "# event : name = %s", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
	ff->events = NULL;
}

static void print_total_mem(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
}

static void print_numa_topology(struct feat_fd *ff, FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
		n = &ff->ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}

static void print_cpuid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
}

static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}

static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
		fprintf(fp, "# ");
		cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
	}
}

static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ff->ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ff->ph->env.pmu_mappings;

	while (pmu_num) {
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		delimiter = ", ";
		str += strlen(str) + 1;
		pmu_num--;
	}

	fprintf(fp, "\n");

	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}

static void print_group_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_evsel *evsel;
	u32 nr = 0;

	session = container_of(ff->ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}

static void print_sample_time(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	char time_buf[32];
	double d;

	session = container_of(ff->ph, struct perf_session, header);

	timestamp__scnprintf_usec(session->evlist->first_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of first sample : %s\n", time_buf);

	timestamp__scnprintf_usec(session->evlist->last_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of last sample : %s\n", time_buf);

	d = (double)(session->evlist->last_sample_time -
		session->evlist->first_sample_time) / NSEC_PER_MSEC;

	fprintf(fp, "# sample duration : %10.3f ms\n", d);
}

static void memory_node__fprintf(struct memory_node *n,
				 unsigned long long bsize, FILE *fp)
{
	char buf_map[100], buf_size[50];
	unsigned long long size;

	size = bsize * bitmap_weight(n->set, n->size);
	unit_number__scnprintf(buf_size, 50, size);

	bitmap_scnprintf(n->set, n->size, buf_map, 100);
	fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
}
static void print_mem_topology(struct feat_fd *ff, FILE *fp)
{
	struct memory_node *nodes;
	int i, nr;

	nodes = ff->ph->env.memory_nodes;
	nr    = ff->ph->env.nr_memory_nodes;

	fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
		nr, ff->ph->env.memory_bsize);

	for (i = 0; i < nr; i++) {
		memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
	}
}

static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value gives us a nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

/* Macro for features that simply need to read and store a string. */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
	ff->ph->env.__feat_env = do_read_string(ff); \
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}

FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);
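/*
 * For instance, FEAT_PROCESS_STR_FUN(hostname, hostname) above expands
 * to:
 *
 *	static int process_hostname(struct feat_fd *ff, void *data)
 *	{
 *		ff->ph->env.hostname = do_read_string(ff);
 *		return ff->ph->env.hostname ? 0 : -ENOMEM;
 *	}
 */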
static int process_tracing_data(struct feat_fd *ff, void *data)
{
	ssize_t ret = trace_report(ff->fd, data, false);

	return ret < 0 ? -1 : 0;
}

static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
		pr_debug("Failed to read buildids, continuing...\n");
	return 0;
}

static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret;
	u32 nr_cpus_avail, nr_cpus_online;

	ret = do_read_u32(ff, &nr_cpus_avail);
	if (ret)
		return ret;

	ret = do_read_u32(ff, &nr_cpus_online);
	if (ret)
		return ret;
	ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
	ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
	return 0;
}

static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
{
	u64 total_mem;
	int ret;

	ret = do_read_u64(ff, &total_mem);
	if (ret)
		return -1;
	ff->ph->env.total_mem = (unsigned long long)total_mem;
	return 0;
}

static struct perf_evsel *
perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}

	return NULL;
}

static void
perf_evlist__set_event_name(struct perf_evlist *evlist,
			    struct perf_evsel *event)
{
	struct perf_evsel *evsel;

	if (!event->name)
		return;

	evsel = perf_evlist__find_by_index(evlist, event->idx);
	if (!evsel)
		return;

	if (evsel->name)
		return;

	evsel->name = strdup(event->name);
}
static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	struct perf_evsel *evsel, *events = read_event_desc(ff);

	if (!events)
		return 0;

	session = container_of(ff->ph, struct perf_session, header);

	if (session->data->is_pipe) {
		/* Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode. */
		ff->events = events;
	}

	for (evsel = events; evsel->attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	if (!session->data->is_pipe)
		free_event_desc(events);

	return 0;
}

static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ff->ph->env.nr_cmdline = nr;

	cmdline = zalloc(ff->size + nr + 1);
	if (!cmdline)
		return -1;

	argv = zalloc(sizeof(char *) * (nr + 1));
	if (!argv)
		goto error;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		argv[i] = cmdline + len;
		memcpy(argv[i], str, strlen(str) + 1);
		len += strlen(str) + 1;
		free(str);
	}
	ff->ph->env.cmdline = cmdline;
	ff->ph->env.cmdline_argv = (const char **) argv;
	return 0;

error:
	free(argv);
	free(cmdline);
	return -1;
}
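/*
 * Illustrative example: for nr = 3 and the strings "perf", "record",
 * "-a", the loop above packs cmdline as "perf\0record\0-a\0" and points
 * argv[0], argv[1] and argv[2] at the three embedded strings, leaving
 * argv[3] NULL as the terminator.
 */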
2180 "You may need to upgrade the perf tool.\n"); 2181 goto free_cpu; 2182 } 2183 2184 ph->env.cpu[i].socket_id = nr; 2185 } 2186 2187 return 0; 2188 2189 error: 2190 strbuf_release(&sb); 2191 free_cpu: 2192 zfree(&ph->env.cpu); 2193 return -1; 2194 } 2195 2196 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused) 2197 { 2198 struct numa_node *nodes, *n; 2199 u32 nr, i; 2200 char *str; 2201 2202 /* nr nodes */ 2203 if (do_read_u32(ff, &nr)) 2204 return -1; 2205 2206 nodes = zalloc(sizeof(*nodes) * nr); 2207 if (!nodes) 2208 return -ENOMEM; 2209 2210 for (i = 0; i < nr; i++) { 2211 n = &nodes[i]; 2212 2213 /* node number */ 2214 if (do_read_u32(ff, &n->node)) 2215 goto error; 2216 2217 if (do_read_u64(ff, &n->mem_total)) 2218 goto error; 2219 2220 if (do_read_u64(ff, &n->mem_free)) 2221 goto error; 2222 2223 str = do_read_string(ff); 2224 if (!str) 2225 goto error; 2226 2227 n->map = cpu_map__new(str); 2228 if (!n->map) 2229 goto error; 2230 2231 free(str); 2232 } 2233 ff->ph->env.nr_numa_nodes = nr; 2234 ff->ph->env.numa_nodes = nodes; 2235 return 0; 2236 2237 error: 2238 free(nodes); 2239 return -1; 2240 } 2241 2242 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused) 2243 { 2244 char *name; 2245 u32 pmu_num; 2246 u32 type; 2247 struct strbuf sb; 2248 2249 if (do_read_u32(ff, &pmu_num)) 2250 return -1; 2251 2252 if (!pmu_num) { 2253 pr_debug("pmu mappings not available\n"); 2254 return 0; 2255 } 2256 2257 ff->ph->env.nr_pmu_mappings = pmu_num; 2258 if (strbuf_init(&sb, 128) < 0) 2259 return -1; 2260 2261 while (pmu_num) { 2262 if (do_read_u32(ff, &type)) 2263 goto error; 2264 2265 name = do_read_string(ff); 2266 if (!name) 2267 goto error; 2268 2269 if (strbuf_addf(&sb, "%u:%s", type, name) < 0) 2270 goto error; 2271 /* include a NULL character at the end */ 2272 if (strbuf_add(&sb, "", 1) < 0) 2273 goto error; 2274 2275 if (!strcmp(name, "msr")) 2276 ff->ph->env.msr_pmu_type = type; 2277 2278 free(name); 2279 pmu_num--; 2280 } 2281 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL); 2282 return 0; 2283 2284 error: 2285 strbuf_release(&sb); 2286 return -1; 2287 } 2288 2289 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused) 2290 { 2291 size_t ret = -1; 2292 u32 i, nr, nr_groups; 2293 struct perf_session *session; 2294 struct perf_evsel *evsel, *leader = NULL; 2295 struct group_desc { 2296 char *name; 2297 u32 leader_idx; 2298 u32 nr_members; 2299 } *desc; 2300 2301 if (do_read_u32(ff, &nr_groups)) 2302 return -1; 2303 2304 ff->ph->env.nr_groups = nr_groups; 2305 if (!nr_groups) { 2306 pr_debug("group desc not available\n"); 2307 return 0; 2308 } 2309 2310 desc = calloc(nr_groups, sizeof(*desc)); 2311 if (!desc) 2312 return -1; 2313 2314 for (i = 0; i < nr_groups; i++) { 2315 desc[i].name = do_read_string(ff); 2316 if (!desc[i].name) 2317 goto out_free; 2318 2319 if (do_read_u32(ff, &desc[i].leader_idx)) 2320 goto out_free; 2321 2322 if (do_read_u32(ff, &desc[i].nr_members)) 2323 goto out_free; 2324 } 2325 2326 /* 2327 * Rebuild group relationship based on the group_desc 2328 */ 2329 session = container_of(ff->ph, struct perf_session, header); 2330 session->evlist->nr_groups = nr_groups; 2331 2332 i = nr = 0; 2333 evlist__for_each_entry(session->evlist, evsel) { 2334 if (evsel->idx == (int) desc[i].leader_idx) { 2335 evsel->leader = evsel; 2336 /* {anon_group} is a dummy name */ 2337 if (strcmp(desc[i].name, "{anon_group}")) { 2338 evsel->group_name = desc[i].name; 2339 desc[i].name = NULL; 2340 } 2341 
static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	size_t ret = -1;
	u32 i, nr, nr_groups;
	struct perf_session *session;
	struct perf_evsel *evsel, *leader = NULL;
	struct group_desc {
		char *name;
		u32 leader_idx;
		u32 nr_members;
	} *desc;

	if (do_read_u32(ff, &nr_groups))
		return -1;

	ff->ph->env.nr_groups = nr_groups;
	if (!nr_groups) {
		pr_debug("group desc not available\n");
		return 0;
	}

	desc = calloc(nr_groups, sizeof(*desc));
	if (!desc)
		return -1;

	for (i = 0; i < nr_groups; i++) {
		desc[i].name = do_read_string(ff);
		if (!desc[i].name)
			goto out_free;

		if (do_read_u32(ff, &desc[i].leader_idx))
			goto out_free;

		if (do_read_u32(ff, &desc[i].nr_members))
			goto out_free;
	}

	/*
	 * Rebuild group relationship based on the group_desc
	 */
	session = container_of(ff->ph, struct perf_session, header);
	session->evlist->nr_groups = nr_groups;

	i = nr = 0;
	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->idx == (int) desc[i].leader_idx) {
			evsel->leader = evsel;
			/* {anon_group} is a dummy name */
			if (strcmp(desc[i].name, "{anon_group}")) {
				evsel->group_name = desc[i].name;
				desc[i].name = NULL;
			}
			evsel->nr_members = desc[i].nr_members;

			if (i >= nr_groups || nr > 0) {
				pr_debug("invalid group desc\n");
				goto out_free;
			}

			leader = evsel;
			nr = evsel->nr_members - 1;
			i++;
		} else if (nr) {
			/* This is a group member */
			evsel->leader = leader;

			nr--;
		}
	}

	if (i != nr_groups || nr != 0) {
		pr_debug("invalid group desc\n");
		goto out_free;
	}

	ret = 0;
out_free:
	for (i = 0; i < nr_groups; i++)
		zfree(&desc[i].name);
	free(desc);

	return ret;
}

static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__process(ff->fd, ff->size, session,
				      ff->ph->needs_swap);
	if (err < 0)
		pr_err("Failed to process auxtrace index\n");
	return err;
}

static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
{
	struct cpu_cache_level *caches;
	u32 cnt, i, version;

	if (do_read_u32(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u32(ff, &cnt))
		return -1;

	caches = zalloc(sizeof(*caches) * cnt);
	if (!caches)
		return -1;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level c;

#define _R(v)						\
		if (do_read_u32(ff, &c.v))		\
			goto out_free_caches;		\

		_R(level)
		_R(line_size)
		_R(sets)
		_R(ways)
#undef _R

#define _R(v)						\
		c.v = do_read_string(ff);		\
		if (!c.v)				\
			goto out_free_caches;

		_R(type)
		_R(size)
		_R(map)
#undef _R

		caches[i] = c;
	}

	ff->ph->env.caches = caches;
	ff->ph->env.caches_cnt = cnt;
	return 0;
out_free_caches:
	free(caches);
	return -1;
}

static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	u64 first_sample_time, last_sample_time;
	int ret;

	session = container_of(ff->ph, struct perf_session, header);

	ret = do_read_u64(ff, &first_sample_time);
	if (ret)
		return -1;

	ret = do_read_u64(ff, &last_sample_time);
	if (ret)
		return -1;

	session->evlist->first_sample_time = first_sample_time;
	session->evlist->last_sample_time = last_sample_time;
	return 0;
}

static int process_mem_topology(struct feat_fd *ff,
				void *data __maybe_unused)
{
	struct memory_node *nodes;
	u64 version, i, nr, bsize;
	int ret = -1;

	if (do_read_u64(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u64(ff, &bsize))
		return -1;

	if (do_read_u64(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -1;

	for (i = 0; i < nr; i++) {
		struct memory_node n;

#define _R(v)						\
		if (do_read_u64(ff, &n.v))		\
			goto out;			\

		_R(node)
		_R(size)

#undef _R

		if (do_read_bitmap(ff, &n.set, &n.size))
			goto out;

		nodes[i] = n;
	}

	ff->ph->env.memory_bsize    = bsize;
	ff->ph->env.memory_nodes    = nodes;
	ff->ph->env.nr_memory_nodes = nr;
	ret = 0;

out:
	if (ret)
		free(nodes);
	return ret;
}
struct feature_ops {
	int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
	void (*print)(struct feat_fd *ff, FILE *fp);
	int (*process)(struct feat_fd *ff, void *data);
	const char *name;
	bool full_only;
	bool synthesize;
};

#define FEAT_OPR(n, func, __full_only)			\
	[HEADER_##n] = {				\
		.name	    = __stringify(n),		\
		.write	    = write_##func,		\
		.print	    = print_##func,		\
		.full_only  = __full_only,		\
		.process    = process_##func,		\
		.synthesize = true			\
	}

#define FEAT_OPN(n, func, __full_only)			\
	[HEADER_##n] = {				\
		.name	    = __stringify(n),		\
		.write	    = write_##func,		\
		.print	    = print_##func,		\
		.full_only  = __full_only,		\
		.process    = process_##func		\
	}

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

#define process_branch_stack	NULL
#define process_stat		NULL


static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPN(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
};
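/*
 * Compatibility note (inferred from the read path below, not separate
 * documentation): each feature section is described by an (offset, size)
 * pair in the file, and readers ignore feature bits they do not know
 * about -- see the "unknown feature" checks in
 * perf_file_section__fprintf_info() and perf_file_section__process().
 * New features therefore only ever get appended to the HEADER_* enum,
 * which keeps old perf.data files readable by newer tools and vice versa.
 */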
fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size); 2631 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset); 2632 2633 perf_header__process_sections(header, fd, &hd, 2634 perf_file_section__fprintf_info); 2635 2636 if (session->data->is_pipe) 2637 return 0; 2638 2639 fprintf(fp, "# missing features: "); 2640 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) { 2641 if (bit) 2642 fprintf(fp, "%s ", feat_ops[bit].name); 2643 } 2644 2645 fprintf(fp, "\n"); 2646 return 0; 2647 } 2648 2649 static int do_write_feat(struct feat_fd *ff, int type, 2650 struct perf_file_section **p, 2651 struct perf_evlist *evlist) 2652 { 2653 int err; 2654 int ret = 0; 2655 2656 if (perf_header__has_feat(ff->ph, type)) { 2657 if (!feat_ops[type].write) 2658 return -1; 2659 2660 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 2661 return -1; 2662 2663 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR); 2664 2665 err = feat_ops[type].write(ff, evlist); 2666 if (err < 0) { 2667 pr_debug("failed to write feature %s\n", feat_ops[type].name); 2668 2669 /* undo anything written */ 2670 lseek(ff->fd, (*p)->offset, SEEK_SET); 2671 2672 return -1; 2673 } 2674 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset; 2675 (*p)++; 2676 } 2677 return ret; 2678 } 2679 2680 static int perf_header__adds_write(struct perf_header *header, 2681 struct perf_evlist *evlist, int fd) 2682 { 2683 int nr_sections; 2684 struct feat_fd ff; 2685 struct perf_file_section *feat_sec, *p; 2686 int sec_size; 2687 u64 sec_start; 2688 int feat; 2689 int err; 2690 2691 ff = (struct feat_fd){ 2692 .fd = fd, 2693 .ph = header, 2694 }; 2695 2696 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 2697 if (!nr_sections) 2698 return 0; 2699 2700 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec)); 2701 if (feat_sec == NULL) 2702 return -ENOMEM; 2703 2704 sec_size = sizeof(*feat_sec) * nr_sections; 2705 2706 sec_start = header->feat_offset; 2707 lseek(fd, sec_start + sec_size, SEEK_SET); 2708 2709 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 2710 if (do_write_feat(&ff, feat, &p, evlist)) 2711 perf_header__clear_feat(header, feat); 2712 } 2713 2714 lseek(fd, sec_start, SEEK_SET); 2715 /* 2716 * may write more than needed due to dropped feature, but 2717 * this is okay, reader will skip the mising entries 2718 */ 2719 err = do_write(&ff, feat_sec, sec_size); 2720 if (err < 0) 2721 pr_debug("failed to write feature section\n"); 2722 free(feat_sec); 2723 return err; 2724 } 2725 2726 int perf_header__write_pipe(int fd) 2727 { 2728 struct perf_pipe_file_header f_header; 2729 struct feat_fd ff; 2730 int err; 2731 2732 ff = (struct feat_fd){ .fd = fd }; 2733 2734 f_header = (struct perf_pipe_file_header){ 2735 .magic = PERF_MAGIC, 2736 .size = sizeof(f_header), 2737 }; 2738 2739 err = do_write(&ff, &f_header, sizeof(f_header)); 2740 if (err < 0) { 2741 pr_debug("failed to write perf pipe header\n"); 2742 return err; 2743 } 2744 2745 return 0; 2746 } 2747 2748 int perf_session__write_header(struct perf_session *session, 2749 struct perf_evlist *evlist, 2750 int fd, bool at_exit) 2751 { 2752 struct perf_file_header f_header; 2753 struct perf_file_attr f_attr; 2754 struct perf_header *header = &session->header; 2755 struct perf_evsel *evsel; 2756 struct feat_fd ff; 2757 u64 attr_offset; 2758 int err; 2759 2760 ff = (struct feat_fd){ .fd = fd}; 2761 lseek(fd, sizeof(f_header), SEEK_SET); 2762 2763 evlist__for_each_entry(session->evlist, evsel) { 2764 evsel->id_offset = lseek(fd, 
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *evsel;
	struct feat_fd ff;
	u64 attr_offset;
	int err;

	ff = (struct feat_fd){ .fd = fd };
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(ff.fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}

static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}

int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}
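/*
 * Worked example for the ABI-size based endianness probe below (assuming
 * PERF_ATTR_SIZE_VER0 == 64 and a 16-byte struct perf_file_section): a
 * same-endian ABI0 file stores attr_size == 64 + 16 == 80.  A cross-endian
 * reader instead sees the raw value 0x5000000000000000; bswap_64() of that
 * recovers 80, which matches ref_size, so needs_swap is set.
 */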
static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};

/*
 * In the legacy file format, the magic number is not used to encode
 * endianness; hdr_sz was used instead. But since hdr_sz can vary based
 * on ABI revisions, we need to try each known ABI size in both
 * endiannesses to detect the right one.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}

#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * In the legacy pipe format, there is an implicit assumption that endianness
 * between the host recording the samples and the host parsing the samples is
 * the same. This is not always the case given that the pipe output may be
 * redirected into a file and analyzed on a different machine with possibly a
 * different endianness and perf_event ABI revisions in the perf tool itself.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	u64 attr_size;
	int i;

	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			/*
			 * compare the byte-swapped size against the known
			 * ABI size, not against hdr_sz itself, so that
			 * opposite-endian pipe headers are detected
			 */
			attr_size = bswap_64(hdr_sz);
			if (attr_size != attr_pipe_abi_sizes[i])
				continue;

			ph->needs_swap = true;
		}
		pr_debug("Pipe ABI%d perf.data file detected\n", i);
		return 0;
	}
	return -1;
}

bool is_perf_magic(u64 magic)
{
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
		|| magic == __perf_magic2
		|| magic == __perf_magic2_sw)
		return true;

	return false;
}

static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}
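/*
 * Concrete example of the magic check above: the ASCII bytes "PERFILE2"
 * read as a little-endian u64 yield 0x32454c4946524550 (__perf_magic2);
 * the same bytes read on an opposite-endian host yield the byte-swapped
 * constant (__perf_magic2_sw), which is how needs_swap is detected
 * without any extra metadata.
 */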
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not set, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fall back to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			     BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				     BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				     BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset = header->data.offset;
	ph->data_size	= header->data.size;
	ph->feat_offset = header->data.offset + header->data.size;
	return 0;
}

static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	struct feat_fd fdd = {
		.fd	= fd,
		.ph	= ph,
		.size	= section->size,
		.offset	= section->offset,
	};

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(&fdd, data);
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	struct feat_fd ff = {
		.fd = STDOUT_FILENO,
		.ph = ph,
	};
	ssize_t ret;

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
		return -1;

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header,
					perf_data__fd(session->data),
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	return 0;
}
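/*
 * read_attr() below copes with files written by both older and newer perf
 * binaries: it always reads the PERF_ATTR_SIZE_VER0 prefix first, then
 * uses the on-file attr->size to read whatever tail it also understands.
 * Files using a larger, unknown attr size are rejected rather than
 * partially parsed.
 */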
static int read_attr(int fd, struct perf_header *ph,
		     struct perf_file_attr *f_attr)
{
	struct perf_event_attr *attr = &f_attr->attr;
	size_t sz, left;
	size_t our_sz = sizeof(f_attr->attr);
	ssize_t ret;

	memset(f_attr, 0, sizeof(*f_attr));

	/* read minimal guaranteed structure */
	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
	if (ret <= 0) {
		pr_debug("cannot read %d bytes of header attr\n",
			 PERF_ATTR_SIZE_VER0);
		return -1;
	}

	/* on file perf_event_attr size */
	sz = attr->size;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	if (sz == 0) {
		/* assume ABI0 */
		sz = PERF_ATTR_SIZE_VER0;
	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);
		return -1;
	}
	/* what we have not yet read and that we know about */
	left = sz - PERF_ATTR_SIZE_VER0;
	if (left) {
		void *ptr = attr;
		ptr += PERF_ATTR_SIZE_VER0;

		ret = readn(fd, ptr, left);
		if (ret <= 0)
			return -1;
	}
	/* read perf_file_section, ids are read in caller */
	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
}

static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
						struct pevent *pevent)
{
	struct event_format *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	event = pevent_find_event(pevent, evsel->attr.config);
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
		return -1;
	}

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}

static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
						  struct pevent *pevent)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__prepare_tracepoint_event(pos, pevent))
			return -1;
	}

	return 0;
}
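/*
 * Read-side counterpart of perf_session__write_header(): read the file
 * header, then every on-file attr together with its sample ids, and
 * finally walk the feature sections via perf_file_section__process().
 */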
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information. Just warn the user and process as much of it as we
	 * can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * nr_ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	symbol_conf.nr_events = nr_attrs;

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

int perf_event__synthesize_attr(struct perf_tool *tool,
				struct perf_event_attr *attr, u32 ids, u64 *id,
				perf_event__handler_t process)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = PERF_ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = (u16)size;

	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;

	free(ev);

	return err;
}
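/*
 * A PERF_RECORD_HEADER_ATTR record, as synthesized above, is simply:
 *
 *	struct perf_event_header  header;
 *	struct perf_event_attr    attr;		(u64-aligned)
 *	u64                       id[ids];
 *
 * header.size is a u16, so the cast above can truncate; the
 * "header.size == size" comparison is what turns an oversized record
 * into -E2BIG instead of emitting a corrupt event.
 */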
int perf_event__synthesize_features(struct perf_tool *tool,
				    struct perf_session *session,
				    struct perf_evlist *evlist,
				    perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct feat_fd ff;
	struct feature_event *fe;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header: %d\n", feat);
			continue;
		}

		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}
	free(ff.buf);
	return 0;
}

int perf_event__process_feature(struct perf_tool *tool,
				union perf_event *event,
				struct perf_session *session)
{
	struct feat_fd ff = { .fd = 0 };
	struct feature_event *fe = (struct feature_event *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	/* feat == HEADER_LAST_FEATURE would index past feat_ops[] */
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid feature id %" PRIu64 " in pipe-mode\n", feat);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	ff.buf  = (void *)fe->data;
	ff.size = event->header.size - sizeof(event->header);
	ff.ph = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}

static struct event_update_event *
event_update_event__new(size_t size, u64 type, u64 id)
{
	struct event_update_event *ev;

	size += sizeof(*ev);
	size  = PERF_ALIGN(size, sizeof(u64));

	ev = zalloc(size);
	if (ev) {
		ev->header.type = PERF_RECORD_EVENT_UPDATE;
		ev->header.size = (u16)size;
		ev->type = type;
		ev->id = id;
	}
	return ev;
}

int
perf_event__synthesize_event_update_unit(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	struct event_update_event *ev;
	size_t size = strlen(evsel->unit);
	int err;

	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strncpy(ev->data, evsel->unit, size);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_scale(struct perf_tool *tool,
					  struct perf_evsel *evsel,
					  perf_event__handler_t process)
{
	struct event_update_event *ev;
	struct event_update_event_scale *ev_data;
	int err;

	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	ev_data = (struct event_update_event_scale *)ev->data;
	ev_data->scale = evsel->scale;
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}
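/*
 * PERF_RECORD_EVENT_UPDATE carries evsel metadata that a plain attr record
 * cannot: after the (type, id) header the payload is either a string (UNIT,
 * NAME), an event_update_event_scale (SCALE) or a cpu_map_data (CPUS).  The
 * synthesize_event_update_*() helpers around here each emit exactly one
 * such record, keyed by evsel->id[0].
 */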
int
perf_event__synthesize_event_update_name(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	struct event_update_event *ev;
	size_t len = strlen(evsel->name);
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strncpy(ev->data, evsel->name, len);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	size_t size = sizeof(struct event_update_event);
	struct event_update_event *ev;
	int max, err;
	u16 type;

	if (!evsel->own_cpus)
		return 0;

	ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
	if (!ev)
		return -ENOMEM;

	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)size;
	ev->type = PERF_EVENT_UPDATE__CPUS;
	ev->id = evsel->id[0];

	cpu_map_data__synthesize((struct cpu_map_data *)ev->data,
				 evsel->own_cpus,
				 type, max);

	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}
unknown type\n"); 3604 break; 3605 } 3606 3607 return ret; 3608 } 3609 3610 int perf_event__synthesize_attrs(struct perf_tool *tool, 3611 struct perf_session *session, 3612 perf_event__handler_t process) 3613 { 3614 struct perf_evsel *evsel; 3615 int err = 0; 3616 3617 evlist__for_each_entry(session->evlist, evsel) { 3618 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids, 3619 evsel->id, process); 3620 if (err) { 3621 pr_debug("failed to create perf header attribute\n"); 3622 return err; 3623 } 3624 } 3625 3626 return err; 3627 } 3628 3629 static bool has_unit(struct perf_evsel *counter) 3630 { 3631 return counter->unit && *counter->unit; 3632 } 3633 3634 static bool has_scale(struct perf_evsel *counter) 3635 { 3636 return counter->scale != 1; 3637 } 3638 3639 int perf_event__synthesize_extra_attr(struct perf_tool *tool, 3640 struct perf_evlist *evsel_list, 3641 perf_event__handler_t process, 3642 bool is_pipe) 3643 { 3644 struct perf_evsel *counter; 3645 int err; 3646 3647 /* 3648 * Synthesize other events stuff not carried within 3649 * attr event - unit, scale, name 3650 */ 3651 evlist__for_each_entry(evsel_list, counter) { 3652 if (!counter->supported) 3653 continue; 3654 3655 /* 3656 * Synthesize unit and scale only if it's defined. 3657 */ 3658 if (has_unit(counter)) { 3659 err = perf_event__synthesize_event_update_unit(tool, counter, process); 3660 if (err < 0) { 3661 pr_err("Couldn't synthesize evsel unit.\n"); 3662 return err; 3663 } 3664 } 3665 3666 if (has_scale(counter)) { 3667 err = perf_event__synthesize_event_update_scale(tool, counter, process); 3668 if (err < 0) { 3669 pr_err("Couldn't synthesize evsel counter.\n"); 3670 return err; 3671 } 3672 } 3673 3674 if (counter->own_cpus) { 3675 err = perf_event__synthesize_event_update_cpus(tool, counter, process); 3676 if (err < 0) { 3677 pr_err("Couldn't synthesize evsel cpus.\n"); 3678 return err; 3679 } 3680 } 3681 3682 /* 3683 * Name is needed only for pipe output, 3684 * perf.data carries event names. 3685 */ 3686 if (is_pipe) { 3687 err = perf_event__synthesize_event_update_name(tool, counter, process); 3688 if (err < 0) { 3689 pr_err("Couldn't synthesize evsel name.\n"); 3690 return err; 3691 } 3692 } 3693 } 3694 return 0; 3695 } 3696 3697 int perf_event__process_attr(struct perf_tool *tool __maybe_unused, 3698 union perf_event *event, 3699 struct perf_evlist **pevlist) 3700 { 3701 u32 i, ids, n_ids; 3702 struct perf_evsel *evsel; 3703 struct perf_evlist *evlist = *pevlist; 3704 3705 if (evlist == NULL) { 3706 *pevlist = evlist = perf_evlist__new(); 3707 if (evlist == NULL) 3708 return -ENOMEM; 3709 } 3710 3711 evsel = perf_evsel__new(&event->attr.attr); 3712 if (evsel == NULL) 3713 return -ENOMEM; 3714 3715 perf_evlist__add(evlist, evsel); 3716 3717 ids = event->header.size; 3718 ids -= (void *)&event->attr.id - (void *)event; 3719 n_ids = ids / sizeof(u64); 3720 /* 3721 * We don't have the cpu and thread maps on the header, so 3722 * for allocating the perf_sample_id table we fake 1 cpu and 3723 * hattr->ids threads. 
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = perf_evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * n_ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
	}

	symbol_conf.nr_events = evlist->nr_entries;

	return 0;
}

int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_evlist **pevlist)
{
	struct event_update_event *ev = &event->event_update;
	struct event_update_event_scale *ev_scale;
	struct event_update_event_cpus *ev_cpus;
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	struct cpu_map *map;

	if (!pevlist || *pevlist == NULL)
		return -EINVAL;

	evlist = *pevlist;

	evsel = perf_evlist__id2evsel(evlist, ev->id);
	if (evsel == NULL)
		return -EINVAL;

	switch (ev->type) {
	case PERF_EVENT_UPDATE__UNIT:
		evsel->unit = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		evsel->name = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct event_update_event_scale *)ev->data;
		evsel->scale = ev_scale->scale;
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct event_update_event_cpus *)ev->data;

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			evsel->own_cpus = map;
		else
			pr_err("failed to get event_update cpus\n");
		break;
	default:
		break;
	}

	return 0;
}
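/*
 * On-pipe layout produced by perf_event__synthesize_tracing_data() below:
 * a tracing_data_event announcing an 8-byte-aligned payload size, followed
 * by the raw tracefs data itself (copied by tracing_data_put()), followed
 * by NUL padding up to that aligned size.
 * perf_event__process_tracing_data() consumes it in exactly this order.
 */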
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct perf_evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}

int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}

int perf_event__synthesize_build_id(struct perf_tool *tool,
				    struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(tool, &ev, NULL, machine);

	return err;
}

int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}