// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "util.h"
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/stringify.h>
#include <sys/stat.h>
#include <sys/utsname.h>

#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "memswap.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"

#include "sane_ctype.h"

/*
 * magic2 = "PERFILE2"
 * must be a numerical value to let the endianness
 * determine the memory layout. That way we are able
 * to detect endianness when reading the perf.data file
 * back.
 *
 * we check for legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2    = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

struct feat_fd {
	struct perf_header	*ph;
	int			fd;
	void			*buf;	/* Either buf != NULL or fd >= 0 */
	ssize_t			offset;
	size_t			size;
	struct perf_evsel	*events;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}
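/*
 * Note (illustrative): do_write() transparently targets either the
 * file descriptor (file mode) or the growable in-memory buffer
 * (pipe/synthesize mode), depending on whether ff->buf is set.
 * Callers just chain writes, e.g.:
 *
 *	u32 n = ...;
 *	ret = do_write(ff, &n, sizeof(n));	// fixed-size field
 *	if (!ret)
 *		ret = do_write(ff, data, n);	// payload
 */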
/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}

static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}

static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}

static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);
	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);
	return 0;
}

static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}
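/*
 * On-disk string encoding (illustrative), as produced by
 * do_write_string() above and consumed by do_read_string():
 *
 *	u32  len;	// strlen(str) + 1, rounded up to NAME_ALIGN
 *	char str[len];	// NUL-terminated, zero padded
 */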
static int write_tracing_data(struct feat_fd *ff,
			      struct perf_evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->entries);
}

static int write_build_id(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct perf_evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}

static int write_version(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}

static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_cpudesc(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	const char *cpuinfo_procs[] = CPUINFO_PROC;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}

static int write_nrcpus(struct feat_fd *ff,
			struct perf_evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(ff, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(ff, &nra, sizeof(nra));
}
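/*
 * EVENT_DESC file format (illustrative), as written by
 * write_event_desc() below:
 *
 *	u32 nr_events;
 *	u32 attr_size;			// sizeof(struct perf_event_attr)
 *	struct {
 *		struct perf_event_attr attr;
 *		u32  nr_ids;
 *		char name[];		// do_write_string() encoded
 *		u64  ids[nr_ids];
 *	} [nr_events];
 */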
static int write_event_desc(struct feat_fd *ff,
			    struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write number of unique ids per event
		 * there is one id per instance of an event
		 *
		 * copy into an nri to be independent of the
		 * type of ids,
		 */
		nri = evsel->ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int write_cmdline(struct feat_fd *ff,
			 struct perf_evlist *evlist __maybe_unused)
{
	char buf[MAXPATHLEN];
	u32 n;
	int i, ret;

	/* actual path to perf binary */
	ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
	if (ret <= 0)
		return -1;

	/* readlink() does not add null termination */
	buf[ret] = '\0';

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}

#define CORE_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/core_siblings_list"
#define THRD_SIB_FMT \
	"/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list"

struct cpu_topo {
	u32 cpu_nr;
	u32 core_sib;
	u32 thread_sib;
	char **core_siblings;
	char **thread_siblings;
};

static int build_cpu_topo(struct cpu_topo *tp, int cpu)
{
	FILE *fp;
	char filename[MAXPATHLEN];
	char *buf = NULL, *p;
	size_t len = 0;
	ssize_t sret;
	u32 i = 0;
	int ret = -1;

	sprintf(filename, CORE_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto try_threads;

	sret = getline(&buf, &len, fp);
	fclose(fp);
	if (sret <= 0)
		goto try_threads;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->core_sib; i++) {
		if (!strcmp(buf, tp->core_siblings[i]))
			break;
	}
	if (i == tp->core_sib) {
		tp->core_siblings[i] = buf;
		tp->core_sib++;
		buf = NULL;
		len = 0;
	}
	ret = 0;

try_threads:
	sprintf(filename, THRD_SIB_FMT, cpu);
	fp = fopen(filename, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	for (i = 0; i < tp->thread_sib; i++) {
		if (!strcmp(buf, tp->thread_siblings[i]))
			break;
	}
	if (i == tp->thread_sib) {
		tp->thread_siblings[i] = buf;
		tp->thread_sib++;
		buf = NULL;
	}
	ret = 0;
done:
	if (fp)
		fclose(fp);
	free(buf);
	return ret;
}

static void free_cpu_topo(struct cpu_topo *tp)
{
	u32 i;

	if (!tp)
		return;

	for (i = 0 ; i < tp->core_sib; i++)
		zfree(&tp->core_siblings[i]);

	for (i = 0 ; i < tp->thread_sib; i++)
		zfree(&tp->thread_siblings[i]);

	free(tp);
}
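/*
 * CPU_TOPOLOGY file format (illustrative), as written by
 * write_cpu_topology() below:
 *
 *	u32  core_sib;			// nr of distinct core sibling lists
 *	char core_siblings[][];		// do_write_string() encoded
 *	u32  thread_sib;
 *	char thread_siblings[][];
 *	struct {
 *		u32 core_id;
 *		u32 socket_id;
 *	} [nr_cpus_avail];
 */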
static struct cpu_topo *build_cpu_topology(void)
{
	struct cpu_topo *tp = NULL;
	void *addr;
	u32 nr, i;
	size_t sz;
	long ncpus;
	int ret = -1;
	struct cpu_map *map;

	ncpus = cpu__max_present_cpu();

	/* build online CPU map */
	map = cpu_map__new(NULL);
	if (map == NULL) {
		pr_debug("failed to get system cpumap\n");
		return NULL;
	}

	nr = (u32)(ncpus & UINT_MAX);

	sz = nr * sizeof(char *);
	addr = calloc(1, sizeof(*tp) + 2 * sz);
	if (!addr)
		goto out_free;

	tp = addr;
	tp->cpu_nr = nr;
	addr += sizeof(*tp);
	tp->core_siblings = addr;
	addr += sz;
	tp->thread_siblings = addr;

	for (i = 0; i < nr; i++) {
		if (!cpu_map__has(map, i))
			continue;

		ret = build_cpu_topo(tp, i);
		if (ret < 0)
			break;
	}

out_free:
	cpu_map__put(map);
	if (ret) {
		free_cpu_topo(tp);
		tp = NULL;
	}
	return tp;
}

static int write_cpu_topology(struct feat_fd *ff,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_topo *tp;
	u32 i;
	int ret, j;

	tp = build_cpu_topology();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}
done:
	free_cpu_topo(tp);
	return ret;
}

static int write_total_mem(struct feat_fd *ff,
			   struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_topo_node(struct feat_fd *ff, int node)
{
	char str[MAXPATHLEN];
	char field[32];
	char *buf = NULL, *p;
	size_t len = 0;
	FILE *fp;
	u64 mem_total, mem_free, mem;
	int ret = -1;

	sprintf(str, "/sys/devices/system/node/node%d/meminfo", node);
	fp = fopen(str, "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		/* skip over invalid lines */
		if (!strchr(buf, ':'))
			continue;
		if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
			goto done;
		if (!strcmp(field, "MemTotal:"))
			mem_total = mem;
		if (!strcmp(field, "MemFree:"))
			mem_free = mem;
	}

	fclose(fp);
	fp = NULL;

	ret = do_write(ff, &mem_total, sizeof(u64));
	if (ret)
		goto done;

	ret = do_write(ff, &mem_free, sizeof(u64));
	if (ret)
		goto done;

	ret = -1;
	sprintf(str, "/sys/devices/system/node/node%d/cpulist", node);

	fp = fopen(str, "r");
	if (!fp)
		goto done;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	p = strchr(buf, '\n');
	if (p)
		*p = '\0';

	ret = do_write_string(ff, buf);
done:
	free(buf);
	if (fp)
		fclose(fp);
	return ret;
}
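/*
 * NUMA_TOPOLOGY file format (illustrative), as written by
 * write_numa_topology() below (per-node payload produced by
 * write_topo_node() above):
 *
 *	u32 nr_nodes;
 *	struct {
 *		u32  node;
 *		u64  mem_total;	// kB
 *		u64  mem_free;	// kB
 *		char cpulist[];	// do_write_string() encoded
 *	} [nr_nodes];
 */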
static int write_numa_topology(struct feat_fd *ff,
			       struct perf_evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *fp;
	struct cpu_map *node_map = NULL;
	char *c;
	u32 nr, i, j;
	int ret = -1;

	fp = fopen("/sys/devices/system/node/online", "r");
	if (!fp)
		return -1;

	if (getline(&buf, &len, fp) <= 0)
		goto done;

	c = strchr(buf, '\n');
	if (c)
		*c = '\0';

	node_map = cpu_map__new(buf);
	if (!node_map)
		goto done;

	nr = (u32)node_map->nr;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto done;

	for (i = 0; i < nr; i++) {
		j = (u32)node_map->map[i];
		ret = do_write(ff, &j, sizeof(j));
		if (ret < 0)
			break;

		ret = write_topo_node(ff, i);
		if (ret < 0)
			break;
	}
done:
	free(buf);
	fclose(fp);
	cpu_map__put(node_map);
	return ret;
}

/*
 * File format:
 *
 * struct pmu_mappings {
 *	u32	pmu_num;
 *	struct pmu_map {
 *		u32	type;
 *		char	name[];
 *	}[pmu_num];
 * };
 */

static int write_pmu_mappings(struct feat_fd *ff,
			      struct perf_evlist *evlist __maybe_unused)
{
	struct perf_pmu *pmu = NULL;
	u32 pmu_num = 0;
	int ret;

	/*
	 * Do a first pass to count the number of PMUs to avoid lseek,
	 * so this works in pipe mode as well.
	 */
	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;
		pmu_num++;
	}

	ret = do_write(ff, &pmu_num, sizeof(pmu_num));
	if (ret < 0)
		return ret;

	while ((pmu = perf_pmu__scan(pmu))) {
		if (!pmu->name)
			continue;

		ret = do_write(ff, &pmu->type, sizeof(pmu->type));
		if (ret < 0)
			return ret;

		ret = do_write_string(ff, pmu->name);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * File format:
 *
 * struct group_descs {
 *	u32	nr_groups;
 *	struct group_desc {
 *		char	name[];
 *		u32	leader_idx;
 *		u32	nr_members;
 *	}[nr_groups];
 * };
 */
static int write_group_desc(struct feat_fd *ff,
			    struct perf_evlist *evlist)
{
	u32 nr_groups = evlist->nr_groups;
	struct perf_evsel *evsel;
	int ret;

	ret = do_write(ff, &nr_groups, sizeof(nr_groups));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			const char *name = evsel->group_name ?: "{anon_group}";
			u32 leader_idx = evsel->idx;
			u32 nr_members = evsel->nr_members;

			ret = do_write_string(ff, name);
			if (ret < 0)
				return ret;

			ret = do_write(ff, &leader_idx, sizeof(leader_idx));
			if (ret < 0)
				return ret;

			ret = do_write(ff, &nr_members, sizeof(nr_members));
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}

/*
 * default get_cpuid(): nothing gets recorded
 * actual implementation must be in arch/$(SRCARCH)/util/header.c
 */
int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused)
{
	return -1;
}

static int write_cpuid(struct feat_fd *ff,
		       struct perf_evlist *evlist __maybe_unused)
{
	char buffer[64];
	int ret;

	ret = get_cpuid(buffer, sizeof(buffer));
	if (!ret)
		goto write_it;

	return -1;
write_it:
	return do_write_string(ff, buffer);
}

static int write_branch_stack(struct feat_fd *ff __maybe_unused,
			      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_auxtrace(struct feat_fd *ff,
			  struct perf_evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__write(ff->fd, &session->auxtrace_index);
	if (err < 0)
		pr_err("Failed to write auxtrace index\n");
	return err;
}
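/*
 * Note (illustrative): the CACHE feature below walks
 * /sys/devices/system/cpu/cpu*/cache/index*/ and collects one
 * cpu_cache_level entry per distinct cache, deduplicating caches that
 * are shared between CPUs (compared by level, geometry, type, size
 * and shared_cpu_list).
 */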
static int cpu_cache_level__sort(const void *a, const void *b)
{
	struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
	struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b;

	return cache_a->level - cache_b->level;
}

static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b)
{
	if (a->level != b->level)
		return false;

	if (a->line_size != b->line_size)
		return false;

	if (a->sets != b->sets)
		return false;

	if (a->ways != b->ways)
		return false;

	if (strcmp(a->type, b->type))
		return false;

	if (strcmp(a->size, b->size))
		return false;

	if (strcmp(a->map, b->map))
		return false;

	return true;
}

static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level)
{
	char path[PATH_MAX], file[PATH_MAX];
	struct stat st;
	size_t len;

	scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level);
	scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path);

	if (stat(file, &st))
		return 1;

	scnprintf(file, PATH_MAX, "%s/level", path);
	if (sysfs__read_int(file, (int *) &cache->level))
		return -1;

	scnprintf(file, PATH_MAX, "%s/coherency_line_size", path);
	if (sysfs__read_int(file, (int *) &cache->line_size))
		return -1;

	scnprintf(file, PATH_MAX, "%s/number_of_sets", path);
	if (sysfs__read_int(file, (int *) &cache->sets))
		return -1;

	scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path);
	if (sysfs__read_int(file, (int *) &cache->ways))
		return -1;

	scnprintf(file, PATH_MAX, "%s/type", path);
	if (sysfs__read_str(file, &cache->type, &len))
		return -1;

	cache->type[len] = 0;
	cache->type = rtrim(cache->type);

	scnprintf(file, PATH_MAX, "%s/size", path);
	if (sysfs__read_str(file, &cache->size, &len)) {
		free(cache->type);
		return -1;
	}

	cache->size[len] = 0;
	cache->size = rtrim(cache->size);

	scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path);
	if (sysfs__read_str(file, &cache->map, &len)) {
		free(cache->size);
		free(cache->type);
		return -1;
	}

	cache->map[len] = 0;
	cache->map = rtrim(cache->map);
	return 0;
}

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
out:
	*cntp = cnt;
	return 0;
}

#define MAX_CACHES 2000
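/*
 * CACHE file format (illustrative), as written by write_cache() below:
 *
 *	u32 version;	// currently 1
 *	u32 cnt;
 *	struct {
 *		u32  level, line_size, sets, ways;
 *		char type[], size[], map[];	// do_write_string() encoded
 *	} [cnt];
 */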
static int write_cache(struct feat_fd *ff,
		       struct perf_evlist *evlist __maybe_unused)
{
	struct cpu_cache_level caches[MAX_CACHES];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, MAX_CACHES, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)					\
			ret = do_write(ff, &c->v, sizeof(u32));	\
			if (ret < 0)				\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct perf_evlist *evlist __maybe_unused)
{
	return 0;
}

static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}

static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}

static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}

static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}

static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}

static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}

static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++)
		fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
	fputc('\n', fp);
}

static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling cores : %s\n", str);
		str += strlen(str) + 1;
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.cpu != NULL) {
		for (i = 0; i < cpu_nr; i++)
			fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i,
				ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id);
	} else
		fprintf(fp, "# Core ID and Socket ID information is not available\n");
}
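/*
 * Note (illustrative): read_event_desc() below copies only
 * min(on-file attr size, sizeof(struct perf_event_attr)) bytes per
 * event, so files written with an older or newer perf_event_attr
 * layout can still be parsed.
 */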
static void free_event_desc(struct perf_evsel *events)
{
	struct perf_evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->id);
	}

	free(events);
}

static struct perf_evsel *read_event_desc(struct feat_fd *ff)
{
	struct perf_evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	/* number of events */
	if (do_read_u32(ff, &nre))
		goto error;

	if (do_read_u32(ff, &sz))
		goto error;

	/* buffer to hold on file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->attr, buf, msz);

		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}

static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}

static void print_event_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_evsel *evsel, *events;
	u32 j;
	u64 *id;

	if (ff->events)
		events = ff->events;
	else
		events = read_event_desc(ff);

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
	ff->events = NULL;
}

static void print_total_mem(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
}

static void print_numa_topology(struct feat_fd *ff, FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
		n = &ff->ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}

static void print_cpuid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
}

static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}

static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
		fprintf(fp, "# ");
		cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
	}
}

static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ff->ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ff->ph->env.pmu_mappings;

	while (pmu_num) {
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		delimiter = ", ";
		str += strlen(str) + 1;
		pmu_num--;
	}

	fprintf(fp, "\n");

	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}

static void print_group_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_evsel *evsel;
	u32 nr = 0;

	session = container_of(ff->ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}
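/*
 * Note (illustrative): the BUILD_ID feature payload is a sequence of
 * struct build_id_event records, each followed by the dso file name.
 * __event_process_build_id() below registers each build id with the
 * right machine (host or guest) based on the header.misc cpumode.
 */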
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}
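/*
 * Note (illustrative): very old perf.data files carry build_id_event
 * records without the pid field that changeset a1645ce1 added.  The
 * readers below detect this by the tell-tale truncated
 * '[kernel.kallsyms]' file name and fall back to the old record
 * layout, guessing the pid from header.misc.
 */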
static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value gives us a nice hint.
		 */
		bev.pid	= HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid	= DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * Added a field to struct build_id_event that broke the file
		 * format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

/* Macro for features that simply need to read and store a string. */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
	ff->ph->env.__feat_env = do_read_string(ff); \
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}

FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);
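/*
 * Illustrative expansion: FEAT_PROCESS_STR_FUN(hostname, hostname)
 * above generates
 *
 *	static int process_hostname(struct feat_fd *ff, void *data)
 *	{
 *		ff->ph->env.hostname = do_read_string(ff);
 *		return ff->ph->env.hostname ? 0 : -ENOMEM;
 *	}
 */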
static int process_tracing_data(struct feat_fd *ff, void *data)
{
	ssize_t ret = trace_report(ff->fd, data, false);

	return ret < 0 ? -1 : 0;
}

static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
		pr_debug("Failed to read buildids, continuing...\n");
	return 0;
}

static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret;
	u32 nr_cpus_avail, nr_cpus_online;

	ret = do_read_u32(ff, &nr_cpus_avail);
	if (ret)
		return ret;

	ret = do_read_u32(ff, &nr_cpus_online);
	if (ret)
		return ret;
	ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
	ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
	return 0;
}

static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
{
	u64 total_mem;
	int ret;

	ret = do_read_u64(ff, &total_mem);
	if (ret)
		return -1;
	ff->ph->env.total_mem = (unsigned long long)total_mem;
	return 0;
}

static struct perf_evsel *
perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}

	return NULL;
}

static void
perf_evlist__set_event_name(struct perf_evlist *evlist,
			    struct perf_evsel *event)
{
	struct perf_evsel *evsel;

	if (!event->name)
		return;

	evsel = perf_evlist__find_by_index(evlist, event->idx);
	if (!evsel)
		return;

	if (evsel->name)
		return;

	evsel->name = strdup(event->name);
}

static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	struct perf_evsel *evsel, *events = read_event_desc(ff);

	if (!events)
		return 0;

	session = container_of(ff->ph, struct perf_session, header);

	if (session->data->is_pipe) {
		/* Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode. */
		ff->events = events;
	}

	for (evsel = events; evsel->attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	if (!session->data->is_pipe)
		free_event_desc(events);

	return 0;
}
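/*
 * CMDLINE file format (illustrative), as written by write_cmdline()
 * and parsed by process_cmdline() below:
 *
 *	u32  nr;		// argv entries, incl. the perf binary path
 *	char argv[nr][];	// do_write_string() encoded
 */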
static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ff->ph->env.nr_cmdline = nr;

	cmdline = zalloc(ff->size + nr + 1);
	if (!cmdline)
		return -1;

	argv = zalloc(sizeof(char *) * (nr + 1));
	if (!argv)
		goto error;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		argv[i] = cmdline + len;
		memcpy(argv[i], str, strlen(str) + 1);
		len += strlen(str) + 1;
		free(str);
	}
	ff->ph->env.cmdline = cmdline;
	ff->ph->env.cmdline_argv = (const char **) argv;
	return 0;

error:
	free(argv);
	free(cmdline);
	return -1;
}

static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	u32 nr, i;
	char *str;
	struct strbuf sb;
	int cpu_nr = ff->ph->env.nr_cpus_avail;
	u64 size = 0;
	struct perf_header *ph = ff->ph;

	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
	if (!ph->env.cpu)
		return -1;

	if (do_read_u32(ff, &nr))
		goto free_cpu;

	ph->env.nr_sibling_cores = nr;
	size += sizeof(u32);
	if (strbuf_init(&sb, 128) < 0)
		goto free_cpu;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_cores = strbuf_detach(&sb, NULL);

	if (do_read_u32(ff, &nr))
		return -1;

	ph->env.nr_sibling_threads = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_threads = strbuf_detach(&sb, NULL);

	/*
	 * The header may be from old perf,
	 * which doesn't include core id and socket id information.
	 */
	if (ff->size <= size) {
		zfree(&ph->env.cpu);
		return 0;
	}

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].core_id = nr;

		if (do_read_u32(ff, &nr))
			goto free_cpu;

		if (nr != (u32)-1 && nr > (u32)cpu_nr) {
			pr_debug("socket_id number is too big. "
				 "You may need to upgrade the perf tool.\n");
			goto free_cpu;
		}

		ph->env.cpu[i].socket_id = nr;
	}

	return 0;

error:
	strbuf_release(&sb);
free_cpu:
	zfree(&ph->env.cpu);
	return -1;
}

static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	struct numa_node *nodes, *n;
	u32 nr, i;
	char *str;

	/* nr nodes */
	if (do_read_u32(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		n = &nodes[i];

		/* node number */
		if (do_read_u32(ff, &n->node))
			goto error;

		if (do_read_u64(ff, &n->mem_total))
			goto error;

		if (do_read_u64(ff, &n->mem_free))
			goto error;

		str = do_read_string(ff);
		if (!str)
			goto error;

		n->map = cpu_map__new(str);
		if (!n->map)
			goto error;

		free(str);
	}
	ff->ph->env.nr_numa_nodes = nr;
	ff->ph->env.numa_nodes = nodes;
	return 0;

error:
	free(nodes);
	return -1;
}
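/*
 * Note (illustrative): process_pmu_mappings() below stores the
 * mappings in env.pmu_mappings as consecutive "type:name\0" entries
 * (e.g. "4:cpu\0"), which print_pmu_mappings() walks back, and also
 * remembers the type of the "msr" PMU in env.msr_pmu_type.
 */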
1890 "You may need to upgrade the perf tool.\n"); 1891 goto free_cpu; 1892 } 1893 1894 ph->env.cpu[i].socket_id = nr; 1895 } 1896 1897 return 0; 1898 1899 error: 1900 strbuf_release(&sb); 1901 free_cpu: 1902 zfree(&ph->env.cpu); 1903 return -1; 1904 } 1905 1906 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused) 1907 { 1908 struct numa_node *nodes, *n; 1909 u32 nr, i; 1910 char *str; 1911 1912 /* nr nodes */ 1913 if (do_read_u32(ff, &nr)) 1914 return -1; 1915 1916 nodes = zalloc(sizeof(*nodes) * nr); 1917 if (!nodes) 1918 return -ENOMEM; 1919 1920 for (i = 0; i < nr; i++) { 1921 n = &nodes[i]; 1922 1923 /* node number */ 1924 if (do_read_u32(ff, &n->node)) 1925 goto error; 1926 1927 if (do_read_u64(ff, &n->mem_total)) 1928 goto error; 1929 1930 if (do_read_u64(ff, &n->mem_free)) 1931 goto error; 1932 1933 str = do_read_string(ff); 1934 if (!str) 1935 goto error; 1936 1937 n->map = cpu_map__new(str); 1938 if (!n->map) 1939 goto error; 1940 1941 free(str); 1942 } 1943 ff->ph->env.nr_numa_nodes = nr; 1944 ff->ph->env.numa_nodes = nodes; 1945 return 0; 1946 1947 error: 1948 free(nodes); 1949 return -1; 1950 } 1951 1952 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused) 1953 { 1954 char *name; 1955 u32 pmu_num; 1956 u32 type; 1957 struct strbuf sb; 1958 1959 if (do_read_u32(ff, &pmu_num)) 1960 return -1; 1961 1962 if (!pmu_num) { 1963 pr_debug("pmu mappings not available\n"); 1964 return 0; 1965 } 1966 1967 ff->ph->env.nr_pmu_mappings = pmu_num; 1968 if (strbuf_init(&sb, 128) < 0) 1969 return -1; 1970 1971 while (pmu_num) { 1972 if (do_read_u32(ff, &type)) 1973 goto error; 1974 1975 name = do_read_string(ff); 1976 if (!name) 1977 goto error; 1978 1979 if (strbuf_addf(&sb, "%u:%s", type, name) < 0) 1980 goto error; 1981 /* include a NULL character at the end */ 1982 if (strbuf_add(&sb, "", 1) < 0) 1983 goto error; 1984 1985 if (!strcmp(name, "msr")) 1986 ff->ph->env.msr_pmu_type = type; 1987 1988 free(name); 1989 pmu_num--; 1990 } 1991 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL); 1992 return 0; 1993 1994 error: 1995 strbuf_release(&sb); 1996 return -1; 1997 } 1998 1999 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused) 2000 { 2001 size_t ret = -1; 2002 u32 i, nr, nr_groups; 2003 struct perf_session *session; 2004 struct perf_evsel *evsel, *leader = NULL; 2005 struct group_desc { 2006 char *name; 2007 u32 leader_idx; 2008 u32 nr_members; 2009 } *desc; 2010 2011 if (do_read_u32(ff, &nr_groups)) 2012 return -1; 2013 2014 ff->ph->env.nr_groups = nr_groups; 2015 if (!nr_groups) { 2016 pr_debug("group desc not available\n"); 2017 return 0; 2018 } 2019 2020 desc = calloc(nr_groups, sizeof(*desc)); 2021 if (!desc) 2022 return -1; 2023 2024 for (i = 0; i < nr_groups; i++) { 2025 desc[i].name = do_read_string(ff); 2026 if (!desc[i].name) 2027 goto out_free; 2028 2029 if (do_read_u32(ff, &desc[i].leader_idx)) 2030 goto out_free; 2031 2032 if (do_read_u32(ff, &desc[i].nr_members)) 2033 goto out_free; 2034 } 2035 2036 /* 2037 * Rebuild group relationship based on the group_desc 2038 */ 2039 session = container_of(ff->ph, struct perf_session, header); 2040 session->evlist->nr_groups = nr_groups; 2041 2042 i = nr = 0; 2043 evlist__for_each_entry(session->evlist, evsel) { 2044 if (evsel->idx == (int) desc[i].leader_idx) { 2045 evsel->leader = evsel; 2046 /* {anon_group} is a dummy name */ 2047 if (strcmp(desc[i].name, "{anon_group}")) { 2048 evsel->group_name = desc[i].name; 2049 desc[i].name = NULL; 2050 } 2051 
static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	size_t ret = -1;
	u32 i, nr, nr_groups;
	struct perf_session *session;
	struct perf_evsel *evsel, *leader = NULL;
	struct group_desc {
		char *name;
		u32 leader_idx;
		u32 nr_members;
	} *desc;

	if (do_read_u32(ff, &nr_groups))
		return -1;

	ff->ph->env.nr_groups = nr_groups;
	if (!nr_groups) {
		pr_debug("group desc not available\n");
		return 0;
	}

	desc = calloc(nr_groups, sizeof(*desc));
	if (!desc)
		return -1;

	for (i = 0; i < nr_groups; i++) {
		desc[i].name = do_read_string(ff);
		if (!desc[i].name)
			goto out_free;

		if (do_read_u32(ff, &desc[i].leader_idx))
			goto out_free;

		if (do_read_u32(ff, &desc[i].nr_members))
			goto out_free;
	}

	/*
	 * Rebuild group relationship based on the group_desc
	 */
	session = container_of(ff->ph, struct perf_session, header);
	session->evlist->nr_groups = nr_groups;

	i = nr = 0;
	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->idx == (int) desc[i].leader_idx) {
			evsel->leader = evsel;
			/* {anon_group} is a dummy name */
			if (strcmp(desc[i].name, "{anon_group}")) {
				evsel->group_name = desc[i].name;
				desc[i].name = NULL;
			}
			evsel->nr_members = desc[i].nr_members;

			if (i >= nr_groups || nr > 0) {
				pr_debug("invalid group desc\n");
				goto out_free;
			}

			leader = evsel;
			nr = evsel->nr_members - 1;
			i++;
		} else if (nr) {
			/* This is a group member */
			evsel->leader = leader;

			nr--;
		}
	}

	if (i != nr_groups || nr != 0) {
		pr_debug("invalid group desc\n");
		goto out_free;
	}

	ret = 0;
out_free:
	for (i = 0; i < nr_groups; i++)
		zfree(&desc[i].name);
	free(desc);

	return ret;
}

static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__process(ff->fd, ff->size, session,
				      ff->ph->needs_swap);
	if (err < 0)
		pr_err("Failed to process auxtrace index\n");
	return err;
}

static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
{
	struct cpu_cache_level *caches;
	u32 cnt, i, version;

	if (do_read_u32(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u32(ff, &cnt))
		return -1;

	caches = zalloc(sizeof(*caches) * cnt);
	if (!caches)
		return -1;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level c;

		#define _R(v)				\
			if (do_read_u32(ff, &c.v))	\
				goto out_free_caches;	\

		_R(level)
		_R(line_size)
		_R(sets)
		_R(ways)
		#undef _R

		#define _R(v)				\
			c.v = do_read_string(ff);	\
			if (!c.v)			\
				goto out_free_caches;

		_R(type)
		_R(size)
		_R(map)
		#undef _R

		caches[i] = c;
	}

	ff->ph->env.caches = caches;
	ff->ph->env.caches_cnt = cnt;
	return 0;
out_free_caches:
	free(caches);
	return -1;
}

struct feature_ops {
	int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
	void (*print)(struct feat_fd *ff, FILE *fp);
	int (*process)(struct feat_fd *ff, void *data);
	const char *name;
	bool full_only;
	bool synthesize;
};

#define FEAT_OPR(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func,			\
		.synthesize = true				\
	}

#define FEAT_OPN(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func			\
	}

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

#define process_branch_stack	NULL
#define process_stat		NULL
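/*
 * Note (illustrative): FEAT_OPR entries set .synthesize = true,
 * marking features that can also be emitted as synthesized records,
 * while FEAT_OPN features exist only as file sections.
 */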
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPN(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
};

struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};

static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;
	struct feat_fd ff;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	ff = (struct feat_fd) {
		.fd = fd,
		.ph = ph,
	};

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(&ff, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}

int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data__fd(session->data);
	struct stat st;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	if (session->data->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}

static int do_write_feat(struct feat_fd *ff, int type,
			 struct perf_file_section **p,
			 struct perf_evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(ff->ph, type)) {
		if (!feat_ops[type].write)
			return -1;

		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
			return -1;

		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);

		err = feat_ops[type].write(ff, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %s\n", feat_ops[type].name);

			/* undo anything written */
			lseek(ff->fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}
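/*
 * Feature section layout at header->feat_offset (illustrative):
 *
 *	struct perf_file_section sec[nr_sections];	// offset/size table
 *	<feature blobs>					// referenced by sec[]
 *
 * perf_header__adds_write() below first writes the blobs past the
 * table, then seeks back and fills in the table with the resulting
 * offsets and sizes.
 */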
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct feat_fd ff;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	ff = (struct feat_fd){
		.fd = fd,
		.ph = header,
	};

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(&ff, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(&ff, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}

int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	struct feat_fd ff;
	int err;

	ff = (struct feat_fd){ .fd = fd };

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}

int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr   f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *evsel;
	struct feat_fd ff;
	u64 attr_offset;
	int err;

	ff = (struct feat_fd){ .fd = fd };
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(ff.fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}

static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}
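/*
 * Resulting perf.data layout (illustrative), as produced by
 * perf_session__write_header() above:
 *
 *	struct perf_file_header		// at offset 0, (re)written last
 *	u64 sample ids[]		// per evsel
 *	struct perf_file_attr[nr_entries]
 *	<data section>			// at header->data_offset
 *	<feature sections>		// at header->feat_offset
 */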
int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}

static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};

/*
 * In the legacy file format, the magic number is not used to encode
 * endianness; hdr_sz carried that information instead. But given that
 * hdr_sz varies with the ABI revision, we have to try every combination
 * of ABI size and byte order to detect the endianness.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}
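/*
 * Worked example for try_all_file_abis() above (sketch): an ABI0 file
 * stores attr_size = PERF_ATTR_SIZE_VER0 + sizeof(struct perf_file_section)
 * = 64 + 16 = 0x50. Read on an opposite-endian host, the field appears as
 * bswap_64(0x50), which matches no reference size until swapped back --
 * that mismatch is what sets needs_swap.
 */
#if 0
	uint64_t seen = bswap_64(0x50);			/* cross-endian view of ABI0 size */
	assert(seen != 0x50 && bswap_64(seen) == 0x50);	/* -> needs_swap = true */
#endif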
#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * In the legacy pipe format, there is an implicit assumption that the
 * endianness of the host recording the samples and of the host parsing
 * them is the same. This is not always the case: the pipe output may well
 * be redirected into a file and analyzed on a different machine, with a
 * possibly different endianness and perf_event ABI revision in the perf
 * tool itself.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	u64 attr_size;
	int i;

	for (i = 0; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != attr_pipe_abi_sizes[i])
				continue;

			ph->needs_swap = true;
		}
		pr_debug("Pipe ABI%d perf.data file detected\n", i);
		return 0;
	}
	return -1;
}

bool is_perf_magic(u64 magic)
{
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
		|| magic == __perf_magic2
		|| magic == __perf_magic2_sw)
		return true;

	return false;
}

static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}
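/*
 * What check_magic_endian() above relies on (sketch): "PERFILE2" stored as
 * a u64 reads back byte-swapped on an opposite-endian host, so the two
 * constants at the top of this file cover both views of the same bytes.
 */
#if 0
	assert(bswap_64(0x32454c4946524550ULL /* __perf_magic2 */) ==
		       0x50455246494c4532ULL /* __perf_magic2_sw */);
#endif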
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try 64-bit swap first (ie., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not set, undo the 64-bit swap and try a
		 * 32-bit swap. If the hostname bit is still not set (e.g.,
		 * older data file), punt and fallback to the original
		 * behavior -- clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			     BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				     BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				     BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}

static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	struct feat_fd fdd = {
		.fd	= fd,
		.ph	= ph,
		.size	= section->size,
		.offset	= section->offset,
	};

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(&fdd, data);
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	struct feat_fd ff = {
		.fd = STDOUT_FILENO,
		.ph = ph,
	};
	ssize_t ret;

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
		return -1;

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header,
					perf_data__fd(session->data),
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	return 0;
}
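/*
 * The pipe-mode preamble validated above is just 16 bytes: a u64 magic
 * plus a u64 size. A hypothetical stand-alone consumer of
 * "perf record -o -" output could check it like this (sketch; error
 * handling elided):
 */
#if 0
	struct perf_pipe_file_header hdr;

	if (readn(STDIN_FILENO, &hdr, sizeof(hdr)) != sizeof(hdr))
		return -1;		/* truncated stream */
	if (hdr.magic != __perf_magic2 && hdr.magic != __perf_magic2_sw)
		return -1;		/* not a perf pipe stream */
#endif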
static int read_attr(int fd, struct perf_header *ph,
		     struct perf_file_attr *f_attr)
{
	struct perf_event_attr *attr = &f_attr->attr;
	size_t sz, left;
	size_t our_sz = sizeof(f_attr->attr);
	ssize_t ret;

	memset(f_attr, 0, sizeof(*f_attr));

	/* read minimal guaranteed structure */
	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
	if (ret <= 0) {
		pr_debug("cannot read %d bytes of header attr\n",
			 PERF_ATTR_SIZE_VER0);
		return -1;
	}

	/* on-file perf_event_attr size */
	sz = attr->size;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	if (sz == 0) {
		/* assume ABI0 */
		sz = PERF_ATTR_SIZE_VER0;
	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);
		return -1;
	}
	/* what we have not yet read and that we know about */
	left = sz - PERF_ATTR_SIZE_VER0;
	if (left) {
		void *ptr = attr;
		ptr += PERF_ATTR_SIZE_VER0;

		ret = readn(fd, ptr, left);
		if (ret <= 0)
			return -1;
	}
	/* read perf_file_section, ids are read in caller */
	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
}

static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel,
						struct pevent *pevent)
{
	struct event_format *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	event = pevent_find_event(pevent, evsel->attr.config);
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->attr.config);
		return -1;
	}

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}

static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
						  struct pevent *pevent)
{
	struct perf_evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__prepare_tracepoint_event(pos, pevent))
			return -1;
	}

	return 0;
}
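/*
 * For tracepoint evsels, attr.config is the tracefs event id, which the
 * lookup above turns back into a "system:name" pair (sketch, with an
 * illustrative id -- actual ids vary per kernel):
 */
#if 0
	/* e.g. id 287 might resolve to "sched:sched_switch" */
	struct event_format *event = pevent_find_event(pevent, 287);
#endif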
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = perf_evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information. Just warn the user and process as much as we can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	symbol_conf.nr_events = nr_attrs;

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

int perf_event__synthesize_attr(struct perf_tool *tool,
				struct perf_event_attr *attr, u32 ids, u64 *id,
				perf_event__handler_t process)
{
	union perf_event *ev;
	size_t size;
	int err;

	size  = sizeof(struct perf_event_attr);
	size  = PERF_ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = (u16)size;

	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;

	free(ev);

	return err;
}

int perf_event__synthesize_features(struct perf_tool *tool,
				    struct perf_session *session,
				    struct perf_evlist *evlist,
				    perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct feat_fd ff;
	struct feature_event *fe;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for feature %d\n", feat);
			continue;
		}

		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}
	free(ff.buf);
	return 0;
}
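/*
 * On-wire layout of the records synthesized above (sketch): each one is a
 * struct feature_event, i.e. a perf_event_header of type
 * PERF_RECORD_HEADER_FEATURE, the u64 feature id, then exactly the blob
 * that feat_ops[feat].write() would have placed in the file's feature
 * section:
 *
 *   +--------------------------+
 *   | struct perf_event_header |
 *   | u64 feat_id              |
 *   | char data[]              |  <- feature blob
 *   +--------------------------+
 *
 * perf_event__process_feature() below re-reads that blob through a
 * buffer-backed feat_fd.
 */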
int perf_event__process_feature(struct perf_tool *tool,
				union perf_event *event,
				struct perf_session *session)
{
	struct feat_fd ff = { .fd = 0 };
	struct feature_event *fe = (struct feature_event *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid feature id %" PRIu64 " in pipe-mode\n", feat);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	ff.buf  = (void *)fe->data;
	ff.size = event->header.size - sizeof(event->header);
	ff.ph = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}

static struct event_update_event *
event_update_event__new(size_t size, u64 type, u64 id)
{
	struct event_update_event *ev;

	size += sizeof(*ev);
	size  = PERF_ALIGN(size, sizeof(u64));

	ev = zalloc(size);
	if (ev) {
		ev->header.type = PERF_RECORD_EVENT_UPDATE;
		ev->header.size = (u16)size;
		ev->type = type;
		ev->id = id;
	}
	return ev;
}

int
perf_event__synthesize_event_update_unit(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	struct event_update_event *ev;
	size_t size = strlen(evsel->unit);
	int err;

	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strncpy(ev->data, evsel->unit, size);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_scale(struct perf_tool *tool,
					  struct perf_evsel *evsel,
					  perf_event__handler_t process)
{
	struct event_update_event *ev;
	struct event_update_event_scale *ev_data;
	int err;

	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	ev_data = (struct event_update_event_scale *)ev->data;
	ev_data->scale = evsel->scale;
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_name(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	struct event_update_event *ev;
	size_t len = strlen(evsel->name);
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strncpy(ev->data, evsel->name, len);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}
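/*
 * Sizing example for event_update_event__new() above (sketch): a unit
 * string "ns" needs 3 payload bytes; the total record size is then
 * rounded up so the u16 header.size stays u64-aligned:
 */
#if 0
	size_t sz = PERF_ALIGN(sizeof(struct event_update_event) + 3,
			       sizeof(u64));
#endif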
int
perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
					 struct perf_evsel *evsel,
					 perf_event__handler_t process)
{
	size_t size = sizeof(struct event_update_event);
	struct event_update_event *ev;
	int max, err;
	u16 type;

	if (!evsel->own_cpus)
		return 0;

	ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max);
	if (!ev)
		return -ENOMEM;

	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)size;
	ev->type = PERF_EVENT_UPDATE__CPUS;
	ev->id = evsel->id[0];

	cpu_map_data__synthesize((struct cpu_map_data *)ev->data,
				 evsel->own_cpus,
				 type, max);

	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp)
{
	struct event_update_event *ev = &event->event_update;
	struct event_update_event_scale *ev_scale;
	struct event_update_event_cpus *ev_cpus;
	struct cpu_map *map;
	size_t ret;

	ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id);

	switch (ev->type) {
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct event_update_event_scale *)ev->data;
		ret += fprintf(fp, "... scale: %f\n", ev_scale->scale);
		break;
	case PERF_EVENT_UPDATE__UNIT:
		ret += fprintf(fp, "... unit: %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		ret += fprintf(fp, "... name: %s\n", ev->data);
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct event_update_event_cpus *)ev->data;
		ret += fprintf(fp, "... ");

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			ret += cpu_map__fprintf(map, fp);
		else
			ret += fprintf(fp, "failed to get cpus\n");
		break;
	default:
		ret += fprintf(fp, "... unknown type\n");
		break;
	}

	return ret;
}

int perf_event__synthesize_attrs(struct perf_tool *tool,
				 struct perf_session *session,
				 perf_event__handler_t process)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each_entry(session->evlist, evsel) {
		err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
						  evsel->id, process);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}

static bool has_unit(struct perf_evsel *counter)
{
	return counter->unit && *counter->unit;
}

static bool has_scale(struct perf_evsel *counter)
{
	return counter->scale != 1;
}
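/*
 * Illustration of the gating above (made-up values): an RAPL-style evsel
 * with unit "Joules" and a fractional scale gets both a UNIT and a SCALE
 * update record, while a plain "cycles" evsel (unit "", scale 1) gets
 * neither.
 */
#if 0
	counter->unit  = "Joules";	/* has_unit()  -> true */
	counter->scale = 2.3e-10;	/* has_scale() -> true */
#endif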
int perf_event__synthesize_extra_attr(struct perf_tool *tool,
				      struct perf_evlist *evsel_list,
				      perf_event__handler_t process,
				      bool is_pipe)
{
	struct perf_evsel *counter;
	int err;

	/*
	 * Synthesize the other event details not carried within the
	 * attr event: unit, scale, name.
	 */
	evlist__for_each_entry(evsel_list, counter) {
		if (!counter->supported)
			continue;

		/*
		 * Synthesize unit and scale only if they are defined.
		 */
		if (has_unit(counter)) {
			err = perf_event__synthesize_event_update_unit(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel unit.\n");
				return err;
			}
		}

		if (has_scale(counter)) {
			err = perf_event__synthesize_event_update_scale(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel scale.\n");
				return err;
			}
		}

		if (counter->own_cpus) {
			err = perf_event__synthesize_event_update_cpus(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel cpus.\n");
				return err;
			}
		}

		/*
		 * Name is needed only for pipe output,
		 * perf.data carries event names.
		 */
		if (is_pipe) {
			err = perf_event__synthesize_event_update_name(tool, counter, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel name.\n");
				return err;
			}
		}
	}
	return 0;
}

int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = perf_evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++)
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);

	symbol_conf.nr_events = evlist->nr_entries;

	return 0;
}

int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_evlist **pevlist)
{
	struct event_update_event *ev = &event->event_update;
	struct event_update_event_scale *ev_scale;
	struct event_update_event_cpus *ev_cpus;
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	struct cpu_map *map;

	if (!pevlist || *pevlist == NULL)
		return -EINVAL;

	evlist = *pevlist;

	evsel = perf_evlist__id2evsel(evlist, ev->id);
	if (evsel == NULL)
		return -EINVAL;

	switch (ev->type) {
	case PERF_EVENT_UPDATE__UNIT:
		evsel->unit = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		evsel->name = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct event_update_event_scale *)ev->data;
		evsel->scale = ev_scale->scale;
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct event_update_event_cpus *)ev->data;

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			evsel->own_cpus = map;
		else
			pr_err("failed to get event_update cpus\n");
		break;
	default:
		break;
	}

	return 0;
}
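/*
 * How each update type's payload handled above is viewed (sketch): UNIT
 * and NAME carry a NUL-terminated string in ev->data, while SCALE and
 * CPUS carry small structs:
 */
#if 0
	const char *unit		    = ev->data;		  /* UNIT/NAME */
	struct event_update_event_scale *s  = (void *)ev->data;  /* SCALE     */
	struct event_update_event_cpus  *c  = (void *)ev->data;  /* CPUS      */
#endif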
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct perf_evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd is a pipe, we cannot
	 * seek back to store the size of the data once we know it.
	 * Instead we:
	 *
	 * - write the tracing data to a temp file
	 * - get/write the data size to the pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}

int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file\n", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding\n", __func__);
			return -1;
		}
	}

	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch\n", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}

int perf_event__synthesize_build_id(struct perf_tool *tool,
				    struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(tool, &ev, NULL, machine);

	return err;
}

int perf_event__process_build_id(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}
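/*
 * Size math for the build_id record synthesized above (sketch): the
 * filename is NUL-terminated and padded up to the next NAME_ALIGN
 * boundary, so for a 20-character long_name:
 *
 *   len  = PERF_ALIGN(20 + 1, NAME_ALIGN);	// 21 rounded up
 *   size = sizeof(ev.build_id) + len;		// goes into header.size
 */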