1 #include "util.h" 2 #include <sys/types.h> 3 #include <byteswap.h> 4 #include <unistd.h> 5 #include <stdio.h> 6 #include <stdlib.h> 7 #include <linux/list.h> 8 #include <linux/kernel.h> 9 #include <linux/bitops.h> 10 #include <sys/utsname.h> 11 12 #include "evlist.h" 13 #include "evsel.h" 14 #include "header.h" 15 #include "../perf.h" 16 #include "trace-event.h" 17 #include "session.h" 18 #include "symbol.h" 19 #include "debug.h" 20 #include "cpumap.h" 21 #include "pmu.h" 22 #include "vdso.h" 23 #include "strbuf.h" 24 #include "build-id.h" 25 #include "data.h" 26 27 static u32 header_argc; 28 static const char **header_argv; 29 30 /* 31 * magic2 = "PERFILE2" 32 * must be a numerical value to let the endianness 33 * determine the memory layout. That way we are able 34 * to detect endianness when reading the perf.data file 35 * back. 36 * 37 * we check for legacy (PERFFILE) format. 38 */ 39 static const char *__perf_magic1 = "PERFFILE"; 40 static const u64 __perf_magic2 = 0x32454c4946524550ULL; 41 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL; 42 43 #define PERF_MAGIC __perf_magic2 44 45 struct perf_file_attr { 46 struct perf_event_attr attr; 47 struct perf_file_section ids; 48 }; 49 50 void perf_header__set_feat(struct perf_header *header, int feat) 51 { 52 set_bit(feat, header->adds_features); 53 } 54 55 void perf_header__clear_feat(struct perf_header *header, int feat) 56 { 57 clear_bit(feat, header->adds_features); 58 } 59 60 bool perf_header__has_feat(const struct perf_header *header, int feat) 61 { 62 return test_bit(feat, header->adds_features); 63 } 64 65 static int do_write(int fd, const void *buf, size_t size) 66 { 67 while (size) { 68 int ret = write(fd, buf, size); 69 70 if (ret < 0) 71 return -errno; 72 73 size -= ret; 74 buf += ret; 75 } 76 77 return 0; 78 } 79 80 int write_padded(int fd, const void *bf, size_t count, size_t count_aligned) 81 { 82 static const char zero_buf[NAME_ALIGN]; 83 int err = do_write(fd, bf, count); 84 85 if (!err) 86 err = do_write(fd, zero_buf, count_aligned - count); 87 88 return err; 89 } 90 91 static int do_write_string(int fd, const char *str) 92 { 93 u32 len, olen; 94 int ret; 95 96 olen = strlen(str) + 1; 97 len = PERF_ALIGN(olen, NAME_ALIGN); 98 99 /* write len, incl. \0 */ 100 ret = do_write(fd, &len, sizeof(len)); 101 if (ret < 0) 102 return ret; 103 104 return write_padded(fd, str, olen, len); 105 } 106 107 static char *do_read_string(int fd, struct perf_header *ph) 108 { 109 ssize_t sz, ret; 110 u32 len; 111 char *buf; 112 113 sz = readn(fd, &len, sizeof(len)); 114 if (sz < (ssize_t)sizeof(len)) 115 return NULL; 116 117 if (ph->needs_swap) 118 len = bswap_32(len); 119 120 buf = malloc(len); 121 if (!buf) 122 return NULL; 123 124 ret = readn(fd, buf, len); 125 if (ret == (ssize_t)len) { 126 /* 127 * strings are padded by zeroes 128 * thus the actual strlen of buf 129 * may be less than len 130 */ 131 return buf; 132 } 133 134 free(buf); 135 return NULL; 136 } 137 138 int 139 perf_header__set_cmdline(int argc, const char **argv) 140 { 141 int i; 142 143 /* 144 * If header_argv has already been set, do not override it. 145 * This allows a command to set the cmdline, parse args and 146 * then call another builtin function that implements a 147 * command -- e.g, cmd_kvm calling cmd_record. 
148 */ 149 if (header_argv) 150 return 0; 151 152 header_argc = (u32)argc; 153 154 /* do not include NULL termination */ 155 header_argv = calloc(argc, sizeof(char *)); 156 if (!header_argv) 157 return -ENOMEM; 158 159 /* 160 * must copy argv contents because it gets moved 161 * around during option parsing 162 */ 163 for (i = 0; i < argc ; i++) 164 header_argv[i] = argv[i]; 165 166 return 0; 167 } 168 169 static int write_tracing_data(int fd, struct perf_header *h __maybe_unused, 170 struct perf_evlist *evlist) 171 { 172 return read_tracing_data(fd, &evlist->entries); 173 } 174 175 176 static int write_build_id(int fd, struct perf_header *h, 177 struct perf_evlist *evlist __maybe_unused) 178 { 179 struct perf_session *session; 180 int err; 181 182 session = container_of(h, struct perf_session, header); 183 184 if (!perf_session__read_build_ids(session, true)) 185 return -1; 186 187 err = perf_session__write_buildid_table(session, fd); 188 if (err < 0) { 189 pr_debug("failed to write buildid table\n"); 190 return err; 191 } 192 perf_session__cache_build_ids(session); 193 194 return 0; 195 } 196 197 static int write_hostname(int fd, struct perf_header *h __maybe_unused, 198 struct perf_evlist *evlist __maybe_unused) 199 { 200 struct utsname uts; 201 int ret; 202 203 ret = uname(&uts); 204 if (ret < 0) 205 return -1; 206 207 return do_write_string(fd, uts.nodename); 208 } 209 210 static int write_osrelease(int fd, struct perf_header *h __maybe_unused, 211 struct perf_evlist *evlist __maybe_unused) 212 { 213 struct utsname uts; 214 int ret; 215 216 ret = uname(&uts); 217 if (ret < 0) 218 return -1; 219 220 return do_write_string(fd, uts.release); 221 } 222 223 static int write_arch(int fd, struct perf_header *h __maybe_unused, 224 struct perf_evlist *evlist __maybe_unused) 225 { 226 struct utsname uts; 227 int ret; 228 229 ret = uname(&uts); 230 if (ret < 0) 231 return -1; 232 233 return do_write_string(fd, uts.machine); 234 } 235 236 static int write_version(int fd, struct perf_header *h __maybe_unused, 237 struct perf_evlist *evlist __maybe_unused) 238 { 239 return do_write_string(fd, perf_version_string); 240 } 241 242 static int __write_cpudesc(int fd, const char *cpuinfo_proc) 243 { 244 FILE *file; 245 char *buf = NULL; 246 char *s, *p; 247 const char *search = cpuinfo_proc; 248 size_t len = 0; 249 int ret = -1; 250 251 if (!search) 252 return -1; 253 254 file = fopen("/proc/cpuinfo", "r"); 255 if (!file) 256 return -1; 257 258 while (getline(&buf, &len, file) > 0) { 259 ret = strncmp(buf, search, strlen(search)); 260 if (!ret) 261 break; 262 } 263 264 if (ret) { 265 ret = -1; 266 goto done; 267 } 268 269 s = buf; 270 271 p = strchr(buf, ':'); 272 if (p && *(p+1) == ' ' && *(p+2)) 273 s = p + 2; 274 p = strchr(s, '\n'); 275 if (p) 276 *p = '\0'; 277 278 /* squash extra space characters (branding string) */ 279 p = s; 280 while (*p) { 281 if (isspace(*p)) { 282 char *r = p + 1; 283 char *q = r; 284 *p = ' '; 285 while (*q && isspace(*q)) 286 q++; 287 if (q != (p+1)) 288 while ((*r++ = *q++)); 289 } 290 p++; 291 } 292 ret = do_write_string(fd, s); 293 done: 294 free(buf); 295 fclose(file); 296 return ret; 297 } 298 299 static int write_cpudesc(int fd, struct perf_header *h __maybe_unused, 300 struct perf_evlist *evlist __maybe_unused) 301 { 302 #ifndef CPUINFO_PROC 303 #define CPUINFO_PROC {"model name", } 304 #endif 305 const char *cpuinfo_procs[] = CPUINFO_PROC; 306 unsigned int i; 307 308 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) { 309 int ret; 310 ret = __write_cpudesc(fd, 
cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}


static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
			struct perf_evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nr = sysconf(_SC_NPROCESSORS_CONF);
	if (nr < 0)
		return -1;

	nrc = (u32)(nr & UINT_MAX);

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(fd, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(fd, &nra, sizeof(nra));
}

static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(fd, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(fd, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each(evlist, evsel) {
		ret = do_write(fd, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique ids per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(fd, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(fd, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cmdline(int fd, struct perf_header *h __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	char buf[MAXPATHLEN];
	char proc[32];
	u32 i, n;
	int ret;

	/*
	 * actual path to perf binary
	 */
	sprintf(proc, "/proc/%d/exe", getpid());
	ret = readlink(proc, buf, sizeof(buf));
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = header_argc + 1;

	ret = do_write(fd, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(fd, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < header_argc; i++) {
		ret = do_write_string(fd, header_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

struct cpu_topo {
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};

static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
} 478 if (i == tp->core_sib) { 479 tp->core_siblings[i] = buf; 480 tp->core_sib++; 481 buf = NULL; 482 len = 0; 483 } 484 ret = 0; 485 486 try_threads: 487 sprintf(filename, THRD_SIB_FMT, cpu); 488 fp = fopen(filename, "r"); 489 if (!fp) 490 goto done; 491 492 if (getline(&buf, &len, fp) <= 0) 493 goto done; 494 495 p = strchr(buf, '\n'); 496 if (p) 497 *p = '\0'; 498 499 for (i = 0; i < tp->thread_sib; i++) { 500 if (!strcmp(buf, tp->thread_siblings[i])) 501 break; 502 } 503 if (i == tp->thread_sib) { 504 tp->thread_siblings[i] = buf; 505 tp->thread_sib++; 506 buf = NULL; 507 } 508 ret = 0; 509 done: 510 if(fp) 511 fclose(fp); 512 free(buf); 513 return ret; 514 } 515 516 static void free_cpu_topo(struct cpu_topo *tp) 517 { 518 u32 i; 519 520 if (!tp) 521 return; 522 523 for (i = 0 ; i < tp->core_sib; i++) 524 zfree(&tp->core_siblings[i]); 525 526 for (i = 0 ; i < tp->thread_sib; i++) 527 zfree(&tp->thread_siblings[i]); 528 529 free(tp); 530 } 531 532 static struct cpu_topo *build_cpu_topology(void) 533 { 534 struct cpu_topo *tp; 535 void *addr; 536 u32 nr, i; 537 size_t sz; 538 long ncpus; 539 int ret = -1; 540 541 ncpus = sysconf(_SC_NPROCESSORS_CONF); 542 if (ncpus < 0) 543 return NULL; 544 545 nr = (u32)(ncpus & UINT_MAX); 546 547 sz = nr * sizeof(char *); 548 549 addr = calloc(1, sizeof(*tp) + 2 * sz); 550 if (!addr) 551 return NULL; 552 553 tp = addr; 554 555 addr += sizeof(*tp); 556 tp->core_siblings = addr; 557 addr += sz; 558 tp->thread_siblings = addr; 559 560 for (i = 0; i < nr; i++) { 561 ret = build_cpu_topo(tp, i); 562 if (ret < 0) 563 break; 564 } 565 if (ret) { 566 free_cpu_topo(tp); 567 tp = NULL; 568 } 569 return tp; 570 } 571 572 static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused, 573 struct perf_evlist *evlist __maybe_unused) 574 { 575 struct cpu_topo *tp; 576 u32 i; 577 int ret; 578 579 tp = build_cpu_topology(); 580 if (!tp) 581 return -1; 582 583 ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib)); 584 if (ret < 0) 585 goto done; 586 587 for (i = 0; i < tp->core_sib; i++) { 588 ret = do_write_string(fd, tp->core_siblings[i]); 589 if (ret < 0) 590 goto done; 591 } 592 ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib)); 593 if (ret < 0) 594 goto done; 595 596 for (i = 0; i < tp->thread_sib; i++) { 597 ret = do_write_string(fd, tp->thread_siblings[i]); 598 if (ret < 0) 599 break; 600 } 601 done: 602 free_cpu_topo(tp); 603 return ret; 604 } 605 606 607 608 static int write_total_mem(int fd, struct perf_header *h __maybe_unused, 609 struct perf_evlist *evlist __maybe_unused) 610 { 611 char *buf = NULL; 612 FILE *fp; 613 size_t len = 0; 614 int ret = -1, n; 615 uint64_t mem; 616 617 fp = fopen("/proc/meminfo", "r"); 618 if (!fp) 619 return -1; 620 621 while (getline(&buf, &len, fp) > 0) { 622 ret = strncmp(buf, "MemTotal:", 9); 623 if (!ret) 624 break; 625 } 626 if (!ret) { 627 n = sscanf(buf, "%*s %"PRIu64, &mem); 628 if (n == 1) 629 ret = do_write(fd, &mem, sizeof(mem)); 630 } else 631 ret = -1; 632 free(buf); 633 fclose(fp); 634 return ret; 635 } 636 637 static int write_topo_node(int fd, int node) 638 { 639 char str[MAXPATHLEN]; 640 char field[32]; 641 char *buf = NULL, *p; 642 size_t len = 0; 643 FILE *fp; 644 u64 mem_total, mem_free, mem; 645 int ret = -1; 646 647 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node); 648 fp = fopen(str, "r"); 649 if (!fp) 650 return -1; 651 652 while (getline(&buf, &len, fp) > 0) { 653 /* skip over invalid lines */ 654 if (!strchr(buf, ':')) 655 continue; 656 if (sscanf(buf, "%*s %*d %31s 
%"PRIu64, field, &mem) != 2) 657 goto done; 658 if (!strcmp(field, "MemTotal:")) 659 mem_total = mem; 660 if (!strcmp(field, "MemFree:")) 661 mem_free = mem; 662 } 663 664 fclose(fp); 665 fp = NULL; 666 667 ret = do_write(fd, &mem_total, sizeof(u64)); 668 if (ret) 669 goto done; 670 671 ret = do_write(fd, &mem_free, sizeof(u64)); 672 if (ret) 673 goto done; 674 675 ret = -1; 676 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node); 677 678 fp = fopen(str, "r"); 679 if (!fp) 680 goto done; 681 682 if (getline(&buf, &len, fp) <= 0) 683 goto done; 684 685 p = strchr(buf, '\n'); 686 if (p) 687 *p = '\0'; 688 689 ret = do_write_string(fd, buf); 690 done: 691 free(buf); 692 if (fp) 693 fclose(fp); 694 return ret; 695 } 696 697 static int write_numa_topology(int fd, struct perf_header *h __maybe_unused, 698 struct perf_evlist *evlist __maybe_unused) 699 { 700 char *buf = NULL; 701 size_t len = 0; 702 FILE *fp; 703 struct cpu_map *node_map = NULL; 704 char *c; 705 u32 nr, i, j; 706 int ret = -1; 707 708 fp = fopen("/sys/devices/system/node/online", "r"); 709 if (!fp) 710 return -1; 711 712 if (getline(&buf, &len, fp) <= 0) 713 goto done; 714 715 c = strchr(buf, '\n'); 716 if (c) 717 *c = '\0'; 718 719 node_map = cpu_map__new(buf); 720 if (!node_map) 721 goto done; 722 723 nr = (u32)node_map->nr; 724 725 ret = do_write(fd, &nr, sizeof(nr)); 726 if (ret < 0) 727 goto done; 728 729 for (i = 0; i < nr; i++) { 730 j = (u32)node_map->map[i]; 731 ret = do_write(fd, &j, sizeof(j)); 732 if (ret < 0) 733 break; 734 735 ret = write_topo_node(fd, i); 736 if (ret < 0) 737 break; 738 } 739 done: 740 free(buf); 741 fclose(fp); 742 free(node_map); 743 return ret; 744 } 745 746 /* 747 * File format: 748 * 749 * struct pmu_mappings { 750 * u32 pmu_num; 751 * struct pmu_map { 752 * u32 type; 753 * char name[]; 754 * }[pmu_num]; 755 * }; 756 */ 757 758 static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused, 759 struct perf_evlist *evlist __maybe_unused) 760 { 761 struct perf_pmu *pmu = NULL; 762 off_t offset = lseek(fd, 0, SEEK_CUR); 763 __u32 pmu_num = 0; 764 int ret; 765 766 /* write real pmu_num later */ 767 ret = do_write(fd, &pmu_num, sizeof(pmu_num)); 768 if (ret < 0) 769 return ret; 770 771 while ((pmu = perf_pmu__scan(pmu))) { 772 if (!pmu->name) 773 continue; 774 pmu_num++; 775 776 ret = do_write(fd, &pmu->type, sizeof(pmu->type)); 777 if (ret < 0) 778 return ret; 779 780 ret = do_write_string(fd, pmu->name); 781 if (ret < 0) 782 return ret; 783 } 784 785 if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) { 786 /* discard all */ 787 lseek(fd, offset, SEEK_SET); 788 return -1; 789 } 790 791 return 0; 792 } 793 794 /* 795 * File format: 796 * 797 * struct group_descs { 798 * u32 nr_groups; 799 * struct group_desc { 800 * char name[]; 801 * u32 leader_idx; 802 * u32 nr_members; 803 * }[nr_groups]; 804 * }; 805 */ 806 static int write_group_desc(int fd, struct perf_header *h __maybe_unused, 807 struct perf_evlist *evlist) 808 { 809 u32 nr_groups = evlist->nr_groups; 810 struct perf_evsel *evsel; 811 int ret; 812 813 ret = do_write(fd, &nr_groups, sizeof(nr_groups)); 814 if (ret < 0) 815 return ret; 816 817 evlist__for_each(evlist, evsel) { 818 if (perf_evsel__is_group_leader(evsel) && 819 evsel->nr_members > 1) { 820 const char *name = evsel->group_name ?: "{anon_group}"; 821 u32 leader_idx = evsel->idx; 822 u32 nr_members = evsel->nr_members; 823 824 ret = do_write_string(fd, name); 825 if (ret < 0) 826 return ret; 827 828 ret = do_write(fd, &leader_idx, 
sizeof(leader_idx)); 829 if (ret < 0) 830 return ret; 831 832 ret = do_write(fd, &nr_members, sizeof(nr_members)); 833 if (ret < 0) 834 return ret; 835 } 836 } 837 return 0; 838 } 839 840 /* 841 * default get_cpuid(): nothing gets recorded 842 * actual implementation must be in arch/$(ARCH)/util/header.c 843 */ 844 int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused, 845 size_t sz __maybe_unused) 846 { 847 return -1; 848 } 849 850 static int write_cpuid(int fd, struct perf_header *h __maybe_unused, 851 struct perf_evlist *evlist __maybe_unused) 852 { 853 char buffer[64]; 854 int ret; 855 856 ret = get_cpuid(buffer, sizeof(buffer)); 857 if (!ret) 858 goto write_it; 859 860 return -1; 861 write_it: 862 return do_write_string(fd, buffer); 863 } 864 865 static int write_branch_stack(int fd __maybe_unused, 866 struct perf_header *h __maybe_unused, 867 struct perf_evlist *evlist __maybe_unused) 868 { 869 return 0; 870 } 871 872 static int write_auxtrace(int fd, struct perf_header *h, 873 struct perf_evlist *evlist __maybe_unused) 874 { 875 struct perf_session *session; 876 int err; 877 878 session = container_of(h, struct perf_session, header); 879 880 err = auxtrace_index__write(fd, &session->auxtrace_index); 881 if (err < 0) 882 pr_err("Failed to write auxtrace index\n"); 883 return err; 884 } 885 886 static void print_hostname(struct perf_header *ph, int fd __maybe_unused, 887 FILE *fp) 888 { 889 fprintf(fp, "# hostname : %s\n", ph->env.hostname); 890 } 891 892 static void print_osrelease(struct perf_header *ph, int fd __maybe_unused, 893 FILE *fp) 894 { 895 fprintf(fp, "# os release : %s\n", ph->env.os_release); 896 } 897 898 static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp) 899 { 900 fprintf(fp, "# arch : %s\n", ph->env.arch); 901 } 902 903 static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused, 904 FILE *fp) 905 { 906 fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc); 907 } 908 909 static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused, 910 FILE *fp) 911 { 912 fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online); 913 fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail); 914 } 915 916 static void print_version(struct perf_header *ph, int fd __maybe_unused, 917 FILE *fp) 918 { 919 fprintf(fp, "# perf version : %s\n", ph->env.version); 920 } 921 922 static void print_cmdline(struct perf_header *ph, int fd __maybe_unused, 923 FILE *fp) 924 { 925 int nr, i; 926 927 nr = ph->env.nr_cmdline; 928 929 fprintf(fp, "# cmdline : "); 930 931 for (i = 0; i < nr; i++) 932 fprintf(fp, "%s ", ph->env.cmdline_argv[i]); 933 fputc('\n', fp); 934 } 935 936 static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused, 937 FILE *fp) 938 { 939 int nr, i; 940 char *str; 941 942 nr = ph->env.nr_sibling_cores; 943 str = ph->env.sibling_cores; 944 945 for (i = 0; i < nr; i++) { 946 fprintf(fp, "# sibling cores : %s\n", str); 947 str += strlen(str) + 1; 948 } 949 950 nr = ph->env.nr_sibling_threads; 951 str = ph->env.sibling_threads; 952 953 for (i = 0; i < nr; i++) { 954 fprintf(fp, "# sibling threads : %s\n", str); 955 str += strlen(str) + 1; 956 } 957 } 958 959 static void free_event_desc(struct perf_evsel *events) 960 { 961 struct perf_evsel *evsel; 962 963 if (!events) 964 return; 965 966 for (evsel = events; evsel->attr.size; evsel++) { 967 zfree(&evsel->name); 968 zfree(&evsel->id); 969 } 970 971 free(events); 972 } 973 974 static struct perf_evsel * 975 read_event_desc(struct perf_header *ph, int fd) 976 
{ 977 struct perf_evsel *evsel, *events = NULL; 978 u64 *id; 979 void *buf = NULL; 980 u32 nre, sz, nr, i, j; 981 ssize_t ret; 982 size_t msz; 983 984 /* number of events */ 985 ret = readn(fd, &nre, sizeof(nre)); 986 if (ret != (ssize_t)sizeof(nre)) 987 goto error; 988 989 if (ph->needs_swap) 990 nre = bswap_32(nre); 991 992 ret = readn(fd, &sz, sizeof(sz)); 993 if (ret != (ssize_t)sizeof(sz)) 994 goto error; 995 996 if (ph->needs_swap) 997 sz = bswap_32(sz); 998 999 /* buffer to hold on file attr struct */ 1000 buf = malloc(sz); 1001 if (!buf) 1002 goto error; 1003 1004 /* the last event terminates with evsel->attr.size == 0: */ 1005 events = calloc(nre + 1, sizeof(*events)); 1006 if (!events) 1007 goto error; 1008 1009 msz = sizeof(evsel->attr); 1010 if (sz < msz) 1011 msz = sz; 1012 1013 for (i = 0, evsel = events; i < nre; evsel++, i++) { 1014 evsel->idx = i; 1015 1016 /* 1017 * must read entire on-file attr struct to 1018 * sync up with layout. 1019 */ 1020 ret = readn(fd, buf, sz); 1021 if (ret != (ssize_t)sz) 1022 goto error; 1023 1024 if (ph->needs_swap) 1025 perf_event__attr_swap(buf); 1026 1027 memcpy(&evsel->attr, buf, msz); 1028 1029 ret = readn(fd, &nr, sizeof(nr)); 1030 if (ret != (ssize_t)sizeof(nr)) 1031 goto error; 1032 1033 if (ph->needs_swap) { 1034 nr = bswap_32(nr); 1035 evsel->needs_swap = true; 1036 } 1037 1038 evsel->name = do_read_string(fd, ph); 1039 1040 if (!nr) 1041 continue; 1042 1043 id = calloc(nr, sizeof(*id)); 1044 if (!id) 1045 goto error; 1046 evsel->ids = nr; 1047 evsel->id = id; 1048 1049 for (j = 0 ; j < nr; j++) { 1050 ret = readn(fd, id, sizeof(*id)); 1051 if (ret != (ssize_t)sizeof(*id)) 1052 goto error; 1053 if (ph->needs_swap) 1054 *id = bswap_64(*id); 1055 id++; 1056 } 1057 } 1058 out: 1059 free(buf); 1060 return events; 1061 error: 1062 free_event_desc(events); 1063 events = NULL; 1064 goto out; 1065 } 1066 1067 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val, 1068 void *priv __attribute__((unused))) 1069 { 1070 return fprintf(fp, ", %s = %s", name, val); 1071 } 1072 1073 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) 1074 { 1075 struct perf_evsel *evsel, *events = read_event_desc(ph, fd); 1076 u32 j; 1077 u64 *id; 1078 1079 if (!events) { 1080 fprintf(fp, "# event desc: not available or unable to read\n"); 1081 return; 1082 } 1083 1084 for (evsel = events; evsel->attr.size; evsel++) { 1085 fprintf(fp, "# event : name = %s, ", evsel->name); 1086 1087 if (evsel->ids) { 1088 fprintf(fp, ", id = {"); 1089 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { 1090 if (j) 1091 fputc(',', fp); 1092 fprintf(fp, " %"PRIu64, *id); 1093 } 1094 fprintf(fp, " }"); 1095 } 1096 1097 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL); 1098 1099 fputc('\n', fp); 1100 } 1101 1102 free_event_desc(events); 1103 } 1104 1105 static void print_total_mem(struct perf_header *ph, int fd __maybe_unused, 1106 FILE *fp) 1107 { 1108 fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem); 1109 } 1110 1111 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused, 1112 FILE *fp) 1113 { 1114 u32 nr, c, i; 1115 char *str, *tmp; 1116 uint64_t mem_total, mem_free; 1117 1118 /* nr nodes */ 1119 nr = ph->env.nr_numa_nodes; 1120 str = ph->env.numa_nodes; 1121 1122 for (i = 0; i < nr; i++) { 1123 /* node number */ 1124 c = strtoul(str, &tmp, 0); 1125 if (*tmp != ':') 1126 goto error; 1127 1128 str = tmp + 1; 1129 mem_total = strtoull(str, &tmp, 0); 1130 if (*tmp != ':') 1131 goto 
error; 1132 1133 str = tmp + 1; 1134 mem_free = strtoull(str, &tmp, 0); 1135 if (*tmp != ':') 1136 goto error; 1137 1138 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," 1139 " free = %"PRIu64" kB\n", 1140 c, mem_total, mem_free); 1141 1142 str = tmp + 1; 1143 fprintf(fp, "# node%u cpu list : %s\n", c, str); 1144 1145 str += strlen(str) + 1; 1146 } 1147 return; 1148 error: 1149 fprintf(fp, "# numa topology : not available\n"); 1150 } 1151 1152 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp) 1153 { 1154 fprintf(fp, "# cpuid : %s\n", ph->env.cpuid); 1155 } 1156 1157 static void print_branch_stack(struct perf_header *ph __maybe_unused, 1158 int fd __maybe_unused, FILE *fp) 1159 { 1160 fprintf(fp, "# contains samples with branch stack\n"); 1161 } 1162 1163 static void print_auxtrace(struct perf_header *ph __maybe_unused, 1164 int fd __maybe_unused, FILE *fp) 1165 { 1166 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n"); 1167 } 1168 1169 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused, 1170 FILE *fp) 1171 { 1172 const char *delimiter = "# pmu mappings: "; 1173 char *str, *tmp; 1174 u32 pmu_num; 1175 u32 type; 1176 1177 pmu_num = ph->env.nr_pmu_mappings; 1178 if (!pmu_num) { 1179 fprintf(fp, "# pmu mappings: not available\n"); 1180 return; 1181 } 1182 1183 str = ph->env.pmu_mappings; 1184 1185 while (pmu_num) { 1186 type = strtoul(str, &tmp, 0); 1187 if (*tmp != ':') 1188 goto error; 1189 1190 str = tmp + 1; 1191 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); 1192 1193 delimiter = ", "; 1194 str += strlen(str) + 1; 1195 pmu_num--; 1196 } 1197 1198 fprintf(fp, "\n"); 1199 1200 if (!pmu_num) 1201 return; 1202 error: 1203 fprintf(fp, "# pmu mappings: unable to read\n"); 1204 } 1205 1206 static void print_group_desc(struct perf_header *ph, int fd __maybe_unused, 1207 FILE *fp) 1208 { 1209 struct perf_session *session; 1210 struct perf_evsel *evsel; 1211 u32 nr = 0; 1212 1213 session = container_of(ph, struct perf_session, header); 1214 1215 evlist__for_each(session->evlist, evsel) { 1216 if (perf_evsel__is_group_leader(evsel) && 1217 evsel->nr_members > 1) { 1218 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", 1219 perf_evsel__name(evsel)); 1220 1221 nr = evsel->nr_members - 1; 1222 } else if (nr) { 1223 fprintf(fp, ",%s", perf_evsel__name(evsel)); 1224 1225 if (--nr == 0) 1226 fprintf(fp, "}\n"); 1227 } 1228 } 1229 } 1230 1231 static int __event_process_build_id(struct build_id_event *bev, 1232 char *filename, 1233 struct perf_session *session) 1234 { 1235 int err = -1; 1236 struct machine *machine; 1237 u16 cpumode; 1238 struct dso *dso; 1239 enum dso_kernel_type dso_type; 1240 1241 machine = perf_session__findnew_machine(session, bev->pid); 1242 if (!machine) 1243 goto out; 1244 1245 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1246 1247 switch (cpumode) { 1248 case PERF_RECORD_MISC_KERNEL: 1249 dso_type = DSO_TYPE_KERNEL; 1250 break; 1251 case PERF_RECORD_MISC_GUEST_KERNEL: 1252 dso_type = DSO_TYPE_GUEST_KERNEL; 1253 break; 1254 case PERF_RECORD_MISC_USER: 1255 case PERF_RECORD_MISC_GUEST_USER: 1256 dso_type = DSO_TYPE_USER; 1257 break; 1258 default: 1259 goto out; 1260 } 1261 1262 dso = machine__findnew_dso(machine, filename); 1263 if (dso != NULL) { 1264 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; 1265 1266 dso__set_build_id(dso, &bev->build_id); 1267 1268 if (!is_kernel_module(filename, cpumode)) 1269 dso->kernel = dso_type; 1270 1271 build_id__sprintf(dso->build_id, 
sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}

static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header header;
		u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value gives us a nice hint.
		 */
		bev.pid = HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid = DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

static int process_tracing_data(struct perf_file_section *section __maybe_unused,
				struct perf_header *ph __maybe_unused,
				int fd, void *data)
{
	ssize_t ret = trace_report(fd, data, false);
	return ret < 0 ?
-1 : 0; 1384 } 1385 1386 static int process_build_id(struct perf_file_section *section, 1387 struct perf_header *ph, int fd, 1388 void *data __maybe_unused) 1389 { 1390 if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) 1391 pr_debug("Failed to read buildids, continuing...\n"); 1392 return 0; 1393 } 1394 1395 static int process_hostname(struct perf_file_section *section __maybe_unused, 1396 struct perf_header *ph, int fd, 1397 void *data __maybe_unused) 1398 { 1399 ph->env.hostname = do_read_string(fd, ph); 1400 return ph->env.hostname ? 0 : -ENOMEM; 1401 } 1402 1403 static int process_osrelease(struct perf_file_section *section __maybe_unused, 1404 struct perf_header *ph, int fd, 1405 void *data __maybe_unused) 1406 { 1407 ph->env.os_release = do_read_string(fd, ph); 1408 return ph->env.os_release ? 0 : -ENOMEM; 1409 } 1410 1411 static int process_version(struct perf_file_section *section __maybe_unused, 1412 struct perf_header *ph, int fd, 1413 void *data __maybe_unused) 1414 { 1415 ph->env.version = do_read_string(fd, ph); 1416 return ph->env.version ? 0 : -ENOMEM; 1417 } 1418 1419 static int process_arch(struct perf_file_section *section __maybe_unused, 1420 struct perf_header *ph, int fd, 1421 void *data __maybe_unused) 1422 { 1423 ph->env.arch = do_read_string(fd, ph); 1424 return ph->env.arch ? 0 : -ENOMEM; 1425 } 1426 1427 static int process_nrcpus(struct perf_file_section *section __maybe_unused, 1428 struct perf_header *ph, int fd, 1429 void *data __maybe_unused) 1430 { 1431 ssize_t ret; 1432 u32 nr; 1433 1434 ret = readn(fd, &nr, sizeof(nr)); 1435 if (ret != sizeof(nr)) 1436 return -1; 1437 1438 if (ph->needs_swap) 1439 nr = bswap_32(nr); 1440 1441 ph->env.nr_cpus_avail = nr; 1442 1443 ret = readn(fd, &nr, sizeof(nr)); 1444 if (ret != sizeof(nr)) 1445 return -1; 1446 1447 if (ph->needs_swap) 1448 nr = bswap_32(nr); 1449 1450 ph->env.nr_cpus_online = nr; 1451 return 0; 1452 } 1453 1454 static int process_cpudesc(struct perf_file_section *section __maybe_unused, 1455 struct perf_header *ph, int fd, 1456 void *data __maybe_unused) 1457 { 1458 ph->env.cpu_desc = do_read_string(fd, ph); 1459 return ph->env.cpu_desc ? 0 : -ENOMEM; 1460 } 1461 1462 static int process_cpuid(struct perf_file_section *section __maybe_unused, 1463 struct perf_header *ph, int fd, 1464 void *data __maybe_unused) 1465 { 1466 ph->env.cpuid = do_read_string(fd, ph); 1467 return ph->env.cpuid ? 
0 : -ENOMEM; 1468 } 1469 1470 static int process_total_mem(struct perf_file_section *section __maybe_unused, 1471 struct perf_header *ph, int fd, 1472 void *data __maybe_unused) 1473 { 1474 uint64_t mem; 1475 ssize_t ret; 1476 1477 ret = readn(fd, &mem, sizeof(mem)); 1478 if (ret != sizeof(mem)) 1479 return -1; 1480 1481 if (ph->needs_swap) 1482 mem = bswap_64(mem); 1483 1484 ph->env.total_mem = mem; 1485 return 0; 1486 } 1487 1488 static struct perf_evsel * 1489 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx) 1490 { 1491 struct perf_evsel *evsel; 1492 1493 evlist__for_each(evlist, evsel) { 1494 if (evsel->idx == idx) 1495 return evsel; 1496 } 1497 1498 return NULL; 1499 } 1500 1501 static void 1502 perf_evlist__set_event_name(struct perf_evlist *evlist, 1503 struct perf_evsel *event) 1504 { 1505 struct perf_evsel *evsel; 1506 1507 if (!event->name) 1508 return; 1509 1510 evsel = perf_evlist__find_by_index(evlist, event->idx); 1511 if (!evsel) 1512 return; 1513 1514 if (evsel->name) 1515 return; 1516 1517 evsel->name = strdup(event->name); 1518 } 1519 1520 static int 1521 process_event_desc(struct perf_file_section *section __maybe_unused, 1522 struct perf_header *header, int fd, 1523 void *data __maybe_unused) 1524 { 1525 struct perf_session *session; 1526 struct perf_evsel *evsel, *events = read_event_desc(header, fd); 1527 1528 if (!events) 1529 return 0; 1530 1531 session = container_of(header, struct perf_session, header); 1532 for (evsel = events; evsel->attr.size; evsel++) 1533 perf_evlist__set_event_name(session->evlist, evsel); 1534 1535 free_event_desc(events); 1536 1537 return 0; 1538 } 1539 1540 static int process_cmdline(struct perf_file_section *section, 1541 struct perf_header *ph, int fd, 1542 void *data __maybe_unused) 1543 { 1544 ssize_t ret; 1545 char *str, *cmdline = NULL, **argv = NULL; 1546 u32 nr, i, len = 0; 1547 1548 ret = readn(fd, &nr, sizeof(nr)); 1549 if (ret != sizeof(nr)) 1550 return -1; 1551 1552 if (ph->needs_swap) 1553 nr = bswap_32(nr); 1554 1555 ph->env.nr_cmdline = nr; 1556 1557 cmdline = zalloc(section->size + nr + 1); 1558 if (!cmdline) 1559 return -1; 1560 1561 argv = zalloc(sizeof(char *) * (nr + 1)); 1562 if (!argv) 1563 goto error; 1564 1565 for (i = 0; i < nr; i++) { 1566 str = do_read_string(fd, ph); 1567 if (!str) 1568 goto error; 1569 1570 argv[i] = cmdline + len; 1571 memcpy(argv[i], str, strlen(str) + 1); 1572 len += strlen(str) + 1; 1573 free(str); 1574 } 1575 ph->env.cmdline = cmdline; 1576 ph->env.cmdline_argv = (const char **) argv; 1577 return 0; 1578 1579 error: 1580 free(argv); 1581 free(cmdline); 1582 return -1; 1583 } 1584 1585 static int process_cpu_topology(struct perf_file_section *section __maybe_unused, 1586 struct perf_header *ph, int fd, 1587 void *data __maybe_unused) 1588 { 1589 ssize_t ret; 1590 u32 nr, i; 1591 char *str; 1592 struct strbuf sb; 1593 1594 ret = readn(fd, &nr, sizeof(nr)); 1595 if (ret != sizeof(nr)) 1596 return -1; 1597 1598 if (ph->needs_swap) 1599 nr = bswap_32(nr); 1600 1601 ph->env.nr_sibling_cores = nr; 1602 strbuf_init(&sb, 128); 1603 1604 for (i = 0; i < nr; i++) { 1605 str = do_read_string(fd, ph); 1606 if (!str) 1607 goto error; 1608 1609 /* include a NULL character at the end */ 1610 strbuf_add(&sb, str, strlen(str) + 1); 1611 free(str); 1612 } 1613 ph->env.sibling_cores = strbuf_detach(&sb, NULL); 1614 1615 ret = readn(fd, &nr, sizeof(nr)); 1616 if (ret != sizeof(nr)) 1617 return -1; 1618 1619 if (ph->needs_swap) 1620 nr = bswap_32(nr); 1621 1622 ph->env.nr_sibling_threads = nr; 
1623 1624 for (i = 0; i < nr; i++) { 1625 str = do_read_string(fd, ph); 1626 if (!str) 1627 goto error; 1628 1629 /* include a NULL character at the end */ 1630 strbuf_add(&sb, str, strlen(str) + 1); 1631 free(str); 1632 } 1633 ph->env.sibling_threads = strbuf_detach(&sb, NULL); 1634 return 0; 1635 1636 error: 1637 strbuf_release(&sb); 1638 return -1; 1639 } 1640 1641 static int process_numa_topology(struct perf_file_section *section __maybe_unused, 1642 struct perf_header *ph, int fd, 1643 void *data __maybe_unused) 1644 { 1645 ssize_t ret; 1646 u32 nr, node, i; 1647 char *str; 1648 uint64_t mem_total, mem_free; 1649 struct strbuf sb; 1650 1651 /* nr nodes */ 1652 ret = readn(fd, &nr, sizeof(nr)); 1653 if (ret != sizeof(nr)) 1654 goto error; 1655 1656 if (ph->needs_swap) 1657 nr = bswap_32(nr); 1658 1659 ph->env.nr_numa_nodes = nr; 1660 strbuf_init(&sb, 256); 1661 1662 for (i = 0; i < nr; i++) { 1663 /* node number */ 1664 ret = readn(fd, &node, sizeof(node)); 1665 if (ret != sizeof(node)) 1666 goto error; 1667 1668 ret = readn(fd, &mem_total, sizeof(u64)); 1669 if (ret != sizeof(u64)) 1670 goto error; 1671 1672 ret = readn(fd, &mem_free, sizeof(u64)); 1673 if (ret != sizeof(u64)) 1674 goto error; 1675 1676 if (ph->needs_swap) { 1677 node = bswap_32(node); 1678 mem_total = bswap_64(mem_total); 1679 mem_free = bswap_64(mem_free); 1680 } 1681 1682 strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":", 1683 node, mem_total, mem_free); 1684 1685 str = do_read_string(fd, ph); 1686 if (!str) 1687 goto error; 1688 1689 /* include a NULL character at the end */ 1690 strbuf_add(&sb, str, strlen(str) + 1); 1691 free(str); 1692 } 1693 ph->env.numa_nodes = strbuf_detach(&sb, NULL); 1694 return 0; 1695 1696 error: 1697 strbuf_release(&sb); 1698 return -1; 1699 } 1700 1701 static int process_pmu_mappings(struct perf_file_section *section __maybe_unused, 1702 struct perf_header *ph, int fd, 1703 void *data __maybe_unused) 1704 { 1705 ssize_t ret; 1706 char *name; 1707 u32 pmu_num; 1708 u32 type; 1709 struct strbuf sb; 1710 1711 ret = readn(fd, &pmu_num, sizeof(pmu_num)); 1712 if (ret != sizeof(pmu_num)) 1713 return -1; 1714 1715 if (ph->needs_swap) 1716 pmu_num = bswap_32(pmu_num); 1717 1718 if (!pmu_num) { 1719 pr_debug("pmu mappings not available\n"); 1720 return 0; 1721 } 1722 1723 ph->env.nr_pmu_mappings = pmu_num; 1724 strbuf_init(&sb, 128); 1725 1726 while (pmu_num) { 1727 if (readn(fd, &type, sizeof(type)) != sizeof(type)) 1728 goto error; 1729 if (ph->needs_swap) 1730 type = bswap_32(type); 1731 1732 name = do_read_string(fd, ph); 1733 if (!name) 1734 goto error; 1735 1736 strbuf_addf(&sb, "%u:%s", type, name); 1737 /* include a NULL character at the end */ 1738 strbuf_add(&sb, "", 1); 1739 1740 free(name); 1741 pmu_num--; 1742 } 1743 ph->env.pmu_mappings = strbuf_detach(&sb, NULL); 1744 return 0; 1745 1746 error: 1747 strbuf_release(&sb); 1748 return -1; 1749 } 1750 1751 static int process_group_desc(struct perf_file_section *section __maybe_unused, 1752 struct perf_header *ph, int fd, 1753 void *data __maybe_unused) 1754 { 1755 size_t ret = -1; 1756 u32 i, nr, nr_groups; 1757 struct perf_session *session; 1758 struct perf_evsel *evsel, *leader = NULL; 1759 struct group_desc { 1760 char *name; 1761 u32 leader_idx; 1762 u32 nr_members; 1763 } *desc; 1764 1765 if (readn(fd, &nr_groups, sizeof(nr_groups)) != sizeof(nr_groups)) 1766 return -1; 1767 1768 if (ph->needs_swap) 1769 nr_groups = bswap_32(nr_groups); 1770 1771 ph->env.nr_groups = nr_groups; 1772 if (!nr_groups) { 1773 pr_debug("group desc not 
available\n"); 1774 return 0; 1775 } 1776 1777 desc = calloc(nr_groups, sizeof(*desc)); 1778 if (!desc) 1779 return -1; 1780 1781 for (i = 0; i < nr_groups; i++) { 1782 desc[i].name = do_read_string(fd, ph); 1783 if (!desc[i].name) 1784 goto out_free; 1785 1786 if (readn(fd, &desc[i].leader_idx, sizeof(u32)) != sizeof(u32)) 1787 goto out_free; 1788 1789 if (readn(fd, &desc[i].nr_members, sizeof(u32)) != sizeof(u32)) 1790 goto out_free; 1791 1792 if (ph->needs_swap) { 1793 desc[i].leader_idx = bswap_32(desc[i].leader_idx); 1794 desc[i].nr_members = bswap_32(desc[i].nr_members); 1795 } 1796 } 1797 1798 /* 1799 * Rebuild group relationship based on the group_desc 1800 */ 1801 session = container_of(ph, struct perf_session, header); 1802 session->evlist->nr_groups = nr_groups; 1803 1804 i = nr = 0; 1805 evlist__for_each(session->evlist, evsel) { 1806 if (evsel->idx == (int) desc[i].leader_idx) { 1807 evsel->leader = evsel; 1808 /* {anon_group} is a dummy name */ 1809 if (strcmp(desc[i].name, "{anon_group}")) { 1810 evsel->group_name = desc[i].name; 1811 desc[i].name = NULL; 1812 } 1813 evsel->nr_members = desc[i].nr_members; 1814 1815 if (i >= nr_groups || nr > 0) { 1816 pr_debug("invalid group desc\n"); 1817 goto out_free; 1818 } 1819 1820 leader = evsel; 1821 nr = evsel->nr_members - 1; 1822 i++; 1823 } else if (nr) { 1824 /* This is a group member */ 1825 evsel->leader = leader; 1826 1827 nr--; 1828 } 1829 } 1830 1831 if (i != nr_groups || nr != 0) { 1832 pr_debug("invalid group desc\n"); 1833 goto out_free; 1834 } 1835 1836 ret = 0; 1837 out_free: 1838 for (i = 0; i < nr_groups; i++) 1839 zfree(&desc[i].name); 1840 free(desc); 1841 1842 return ret; 1843 } 1844 1845 static int process_auxtrace(struct perf_file_section *section, 1846 struct perf_header *ph, int fd, 1847 void *data __maybe_unused) 1848 { 1849 struct perf_session *session; 1850 int err; 1851 1852 session = container_of(ph, struct perf_session, header); 1853 1854 err = auxtrace_index__process(fd, section->size, session, 1855 ph->needs_swap); 1856 if (err < 0) 1857 pr_err("Failed to process auxtrace index\n"); 1858 return err; 1859 } 1860 1861 struct feature_ops { 1862 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); 1863 void (*print)(struct perf_header *h, int fd, FILE *fp); 1864 int (*process)(struct perf_file_section *section, 1865 struct perf_header *h, int fd, void *data); 1866 const char *name; 1867 bool full_only; 1868 }; 1869 1870 #define FEAT_OPA(n, func) \ 1871 [n] = { .name = #n, .write = write_##func, .print = print_##func } 1872 #define FEAT_OPP(n, func) \ 1873 [n] = { .name = #n, .write = write_##func, .print = print_##func, \ 1874 .process = process_##func } 1875 #define FEAT_OPF(n, func) \ 1876 [n] = { .name = #n, .write = write_##func, .print = print_##func, \ 1877 .process = process_##func, .full_only = true } 1878 1879 /* feature_ops not implemented: */ 1880 #define print_tracing_data NULL 1881 #define print_build_id NULL 1882 1883 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { 1884 FEAT_OPP(HEADER_TRACING_DATA, tracing_data), 1885 FEAT_OPP(HEADER_BUILD_ID, build_id), 1886 FEAT_OPP(HEADER_HOSTNAME, hostname), 1887 FEAT_OPP(HEADER_OSRELEASE, osrelease), 1888 FEAT_OPP(HEADER_VERSION, version), 1889 FEAT_OPP(HEADER_ARCH, arch), 1890 FEAT_OPP(HEADER_NRCPUS, nrcpus), 1891 FEAT_OPP(HEADER_CPUDESC, cpudesc), 1892 FEAT_OPP(HEADER_CPUID, cpuid), 1893 FEAT_OPP(HEADER_TOTAL_MEM, total_mem), 1894 FEAT_OPP(HEADER_EVENT_DESC, event_desc), 1895 FEAT_OPP(HEADER_CMDLINE, cmdline), 
	FEAT_OPF(HEADER_CPU_TOPOLOGY,	cpu_topology),
	FEAT_OPF(HEADER_NUMA_TOPOLOGY,	numa_topology),
	FEAT_OPA(HEADER_BRANCH_STACK,	branch_stack),
	FEAT_OPP(HEADER_PMU_MAPPINGS,	pmu_mappings),
	FEAT_OPP(HEADER_GROUP_DESC,	group_desc),
	FEAT_OPP(HEADER_AUXTRACE,	auxtrace),
};

struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};

static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(ph, fd, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}

int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data_file__fd(session->file);
	hd.fp = fp;
	hd.full = full;

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);
	return 0;
}

static int do_write_feat(int fd, struct perf_header *h, int type,
			 struct perf_file_section **p,
			 struct perf_evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(h, type)) {
		if (!feat_ops[type].write)
			return -1;

		(*p)->offset = lseek(fd, 0, SEEK_CUR);

		err = feat_ops[type].write(fd, h, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %d\n", type);

			/* undo anything written */
			lseek(fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}

static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(fd, header, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}

int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	int err;

	f_header = (struct
perf_pipe_file_header){ 2023 .magic = PERF_MAGIC, 2024 .size = sizeof(f_header), 2025 }; 2026 2027 err = do_write(fd, &f_header, sizeof(f_header)); 2028 if (err < 0) { 2029 pr_debug("failed to write perf pipe header\n"); 2030 return err; 2031 } 2032 2033 return 0; 2034 } 2035 2036 int perf_session__write_header(struct perf_session *session, 2037 struct perf_evlist *evlist, 2038 int fd, bool at_exit) 2039 { 2040 struct perf_file_header f_header; 2041 struct perf_file_attr f_attr; 2042 struct perf_header *header = &session->header; 2043 struct perf_evsel *evsel; 2044 u64 attr_offset; 2045 int err; 2046 2047 lseek(fd, sizeof(f_header), SEEK_SET); 2048 2049 evlist__for_each(session->evlist, evsel) { 2050 evsel->id_offset = lseek(fd, 0, SEEK_CUR); 2051 err = do_write(fd, evsel->id, evsel->ids * sizeof(u64)); 2052 if (err < 0) { 2053 pr_debug("failed to write perf header\n"); 2054 return err; 2055 } 2056 } 2057 2058 attr_offset = lseek(fd, 0, SEEK_CUR); 2059 2060 evlist__for_each(evlist, evsel) { 2061 f_attr = (struct perf_file_attr){ 2062 .attr = evsel->attr, 2063 .ids = { 2064 .offset = evsel->id_offset, 2065 .size = evsel->ids * sizeof(u64), 2066 } 2067 }; 2068 err = do_write(fd, &f_attr, sizeof(f_attr)); 2069 if (err < 0) { 2070 pr_debug("failed to write perf header attribute\n"); 2071 return err; 2072 } 2073 } 2074 2075 if (!header->data_offset) 2076 header->data_offset = lseek(fd, 0, SEEK_CUR); 2077 header->feat_offset = header->data_offset + header->data_size; 2078 2079 if (at_exit) { 2080 err = perf_header__adds_write(header, evlist, fd); 2081 if (err < 0) 2082 return err; 2083 } 2084 2085 f_header = (struct perf_file_header){ 2086 .magic = PERF_MAGIC, 2087 .size = sizeof(f_header), 2088 .attr_size = sizeof(f_attr), 2089 .attrs = { 2090 .offset = attr_offset, 2091 .size = evlist->nr_entries * sizeof(f_attr), 2092 }, 2093 .data = { 2094 .offset = header->data_offset, 2095 .size = header->data_size, 2096 }, 2097 /* event_types is ignored, store zeros */ 2098 }; 2099 2100 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); 2101 2102 lseek(fd, 0, SEEK_SET); 2103 err = do_write(fd, &f_header, sizeof(f_header)); 2104 if (err < 0) { 2105 pr_debug("failed to write perf header\n"); 2106 return err; 2107 } 2108 lseek(fd, header->data_offset + header->data_size, SEEK_SET); 2109 2110 return 0; 2111 } 2112 2113 static int perf_header__getbuffer64(struct perf_header *header, 2114 int fd, void *buf, size_t size) 2115 { 2116 if (readn(fd, buf, size) <= 0) 2117 return -1; 2118 2119 if (header->needs_swap) 2120 mem_bswap_64(buf, size); 2121 2122 return 0; 2123 } 2124 2125 int perf_header__process_sections(struct perf_header *header, int fd, 2126 void *data, 2127 int (*process)(struct perf_file_section *section, 2128 struct perf_header *ph, 2129 int feat, int fd, void *data)) 2130 { 2131 struct perf_file_section *feat_sec, *sec; 2132 int nr_sections; 2133 int sec_size; 2134 int feat; 2135 int err; 2136 2137 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 2138 if (!nr_sections) 2139 return 0; 2140 2141 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec)); 2142 if (!feat_sec) 2143 return -1; 2144 2145 sec_size = sizeof(*feat_sec) * nr_sections; 2146 2147 lseek(fd, header->feat_offset, SEEK_SET); 2148 2149 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); 2150 if (err < 0) 2151 goto out_free; 2152 2153 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) { 2154 err = process(sec++, header, feat, fd, data); 2155 if (err < 
0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}

static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};

/*
 * In the legacy file format, the magic number is not used to encode endianness.
 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based
 * on ABI revisions, we need to try all combinations for all endianness to
 * detect the endianness.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}

#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * In the legacy pipe format, there is an implicit assumption that the endianness
 * of the host recording the samples and the host parsing the samples is the
 * same. This is not always the case given that the pipe output may always be
 * redirected into a file and analyzed on a different machine with possibly a
 * different endianness and perf_event ABI revisions in the perf tool itself.
2216 */ 2217 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph) 2218 { 2219 u64 attr_size; 2220 int i; 2221 2222 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) { 2223 if (hdr_sz != attr_pipe_abi_sizes[i]) { 2224 attr_size = bswap_64(hdr_sz); 2225 if (attr_size != hdr_sz) 2226 continue; 2227 2228 ph->needs_swap = true; 2229 } 2230 pr_debug("Pipe ABI%d perf.data file detected\n", i); 2231 return 0; 2232 } 2233 return -1; 2234 } 2235 2236 bool is_perf_magic(u64 magic) 2237 { 2238 if (!memcmp(&magic, __perf_magic1, sizeof(magic)) 2239 || magic == __perf_magic2 2240 || magic == __perf_magic2_sw) 2241 return true; 2242 2243 return false; 2244 } 2245 2246 static int check_magic_endian(u64 magic, uint64_t hdr_sz, 2247 bool is_pipe, struct perf_header *ph) 2248 { 2249 int ret; 2250 2251 /* check for legacy format */ 2252 ret = memcmp(&magic, __perf_magic1, sizeof(magic)); 2253 if (ret == 0) { 2254 ph->version = PERF_HEADER_VERSION_1; 2255 pr_debug("legacy perf.data format\n"); 2256 if (is_pipe) 2257 return try_all_pipe_abis(hdr_sz, ph); 2258 2259 return try_all_file_abis(hdr_sz, ph); 2260 } 2261 /* 2262 * the new magic number serves two purposes: 2263 * - unique number to identify actual perf.data files 2264 * - encode endianness of file 2265 */ 2266 ph->version = PERF_HEADER_VERSION_2; 2267 2268 /* check magic number with one endianness */ 2269 if (magic == __perf_magic2) 2270 return 0; 2271 2272 /* check magic number with opposite endianness */ 2273 if (magic != __perf_magic2_sw) 2274 return -1; 2275 2276 ph->needs_swap = true; 2277 2278 return 0; 2279 } 2280 2281 int perf_file_header__read(struct perf_file_header *header, 2282 struct perf_header *ph, int fd) 2283 { 2284 ssize_t ret; 2285 2286 lseek(fd, 0, SEEK_SET); 2287 2288 ret = readn(fd, header, sizeof(*header)); 2289 if (ret <= 0) 2290 return -1; 2291 2292 if (check_magic_endian(header->magic, 2293 header->attr_size, false, ph) < 0) { 2294 pr_debug("magic/endian check failed\n"); 2295 return -1; 2296 } 2297 2298 if (ph->needs_swap) { 2299 mem_bswap_64(header, offsetof(struct perf_file_header, 2300 adds_features)); 2301 } 2302 2303 if (header->size != sizeof(*header)) { 2304 /* Support the previous format */ 2305 if (header->size == offsetof(typeof(*header), adds_features)) 2306 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 2307 else 2308 return -1; 2309 } else if (ph->needs_swap) { 2310 /* 2311 * feature bitmap is declared as an array of unsigned longs -- 2312 * not good since its size can differ between the host that 2313 * generated the data file and the host analyzing the file. 2314 * 2315 * We need to handle endianness, but we don't know the size of 2316 * the unsigned long where the file was generated. Take a best 2317 * guess at determining it: try 64-bit swap first (ie., file 2318 * created on a 64-bit host), and check if the hostname feature 2319 * bit is set (this feature bit is forced on as of fbe96f2). 2320 * If the bit is not, undo the 64-bit swap and try a 32-bit 2321 * swap. If the hostname bit is still not set (e.g., older data 2322 * file), punt and fallback to the original behavior -- 2323 * clearing all feature bits and setting buildid. 
2324 */ 2325 mem_bswap_64(&header->adds_features, 2326 BITS_TO_U64(HEADER_FEAT_BITS)); 2327 2328 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 2329 /* unswap as u64 */ 2330 mem_bswap_64(&header->adds_features, 2331 BITS_TO_U64(HEADER_FEAT_BITS)); 2332 2333 /* unswap as u32 */ 2334 mem_bswap_32(&header->adds_features, 2335 BITS_TO_U32(HEADER_FEAT_BITS)); 2336 } 2337 2338 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 2339 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 2340 set_bit(HEADER_BUILD_ID, header->adds_features); 2341 } 2342 } 2343 2344 memcpy(&ph->adds_features, &header->adds_features, 2345 sizeof(ph->adds_features)); 2346 2347 ph->data_offset = header->data.offset; 2348 ph->data_size = header->data.size; 2349 ph->feat_offset = header->data.offset + header->data.size; 2350 return 0; 2351 } 2352 2353 static int perf_file_section__process(struct perf_file_section *section, 2354 struct perf_header *ph, 2355 int feat, int fd, void *data) 2356 { 2357 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { 2358 pr_debug("Failed to lseek to %" PRIu64 " offset for feature " 2359 "%d, continuing...\n", section->offset, feat); 2360 return 0; 2361 } 2362 2363 if (feat >= HEADER_LAST_FEATURE) { 2364 pr_debug("unknown feature %d, continuing...\n", feat); 2365 return 0; 2366 } 2367 2368 if (!feat_ops[feat].process) 2369 return 0; 2370 2371 return feat_ops[feat].process(section, ph, fd, data); 2372 } 2373 2374 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, 2375 struct perf_header *ph, int fd, 2376 bool repipe) 2377 { 2378 ssize_t ret; 2379 2380 ret = readn(fd, header, sizeof(*header)); 2381 if (ret <= 0) 2382 return -1; 2383 2384 if (check_magic_endian(header->magic, header->size, true, ph) < 0) { 2385 pr_debug("endian/magic failed\n"); 2386 return -1; 2387 } 2388 2389 if (ph->needs_swap) 2390 header->size = bswap_64(header->size); 2391 2392 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0) 2393 return -1; 2394 2395 return 0; 2396 } 2397 2398 static int perf_header__read_pipe(struct perf_session *session) 2399 { 2400 struct perf_header *header = &session->header; 2401 struct perf_pipe_file_header f_header; 2402 2403 if (perf_file_header__read_pipe(&f_header, header, 2404 perf_data_file__fd(session->file), 2405 session->repipe) < 0) { 2406 pr_debug("incompatible file format\n"); 2407 return -EINVAL; 2408 } 2409 2410 return 0; 2411 } 2412 2413 static int read_attr(int fd, struct perf_header *ph, 2414 struct perf_file_attr *f_attr) 2415 { 2416 struct perf_event_attr *attr = &f_attr->attr; 2417 size_t sz, left; 2418 size_t our_sz = sizeof(f_attr->attr); 2419 ssize_t ret; 2420 2421 memset(f_attr, 0, sizeof(*f_attr)); 2422 2423 /* read minimal guaranteed structure */ 2424 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0); 2425 if (ret <= 0) { 2426 pr_debug("cannot read %d bytes of header attr\n", 2427 PERF_ATTR_SIZE_VER0); 2428 return -1; 2429 } 2430 2431 /* on file perf_event_attr size */ 2432 sz = attr->size; 2433 2434 if (ph->needs_swap) 2435 sz = bswap_32(sz); 2436 2437 if (sz == 0) { 2438 /* assume ABI0 */ 2439 sz = PERF_ATTR_SIZE_VER0; 2440 } else if (sz > our_sz) { 2441 pr_debug("file uses a more recent and unsupported ABI" 2442 " (%zu bytes extra)\n", sz - our_sz); 2443 return -1; 2444 } 2445 /* what we have not yet read and that we know about */ 2446 left = sz - PERF_ATTR_SIZE_VER0; 2447 if (left) { 2448 void *ptr = attr; 2449 ptr += PERF_ATTR_SIZE_VER0; 2450 2451 ret = readn(fd, ptr, left); 2452 } 2453 /* read 
	/* read perf_file_section, ids are read in caller */
	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
}

static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
						struct pevent *pevent)
{
	struct event_format *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	event = pevent_find_event(pevent, evsel->attr.config);
	if (event == NULL)
		return -1;

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}

static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
						  struct pevent *pevent)
{
	struct perf_evsel *pos;

	evlist__for_each(evlist, pos) {
		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__prepare_tracepoint_event(pos, pevent))
			return -1;
	}

	return 0;
}

int perf_session__read_header(struct perf_session *session)
{
	struct perf_data_file *file = session->file;
	struct perf_header *header = &session->header;
	struct perf_file_header f_header;
	struct perf_file_attr f_attr;
	u64 f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data_file__fd(file);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	if (perf_data_file__is_pipe(file))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information. Just warn the user and process as much of it as
	 * possible.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   file->path);
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
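		 *
		 * The loop below then registers each of the nr_ids ids as
		 * (cpu 0, thread j) via perf_evlist__id_add().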
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	symbol_conf.nr_events = nr_attrs;

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

int perf_event__synthesize_attr(struct perf_tool *tool,
				struct perf_event_attr *attr, u32 ids, u64 *id,
				perf_event__handler_t process)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = PERF_ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = (u16)size;

	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;

	free(ev);

	return err;
}

int perf_event__synthesize_attrs(struct perf_tool *tool,
				 struct perf_session *session,
				 perf_event__handler_t process)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each(session->evlist, evsel) {
		err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
						  evsel->id, process);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}

int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = perf_evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
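	 *
	 * n_ids is derived from the record size because the ids sit
	 * directly after the attr in the synthesized record, mirroring
	 * the layout built by perf_event__synthesize_attr() above.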
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
	}

	symbol_conf.nr_events = evlist->nr_entries;

	return 0;
}

int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct perf_evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since fd is a pipe, we cannot
	 * seek back to store the size of the data once we know
	 * it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in the temp file to the pipe.
	 */
	tracing_data_put(tdata);

	write_padded(fd, NULL, 0, padding);

	return aligned_size;
}

int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data_file__fd(session->file);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}

int perf_event__synthesize_build_id(struct perf_tool *tool,
				    struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
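	/*
	 * Only long_name_len bytes are copied below; the terminating NUL
	 * and the NAME_ALIGN padding accounted for in header.size are
	 * already zero thanks to the memset() above.
	 */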
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(tool, &ev, NULL, machine);

	return err;
}

int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}