// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include "string2.h"
#include <sys/param.h>
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/zalloc.h>
#include <sys/stat.h>
#include <sys/utsname.h>
#include <linux/time64.h>
#include <dirent.h>
#include <bpf/libbpf.h>
#include <perf/cpumap.h>

#include "dso.h"
#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "memswap.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"
#include "pmu.h"
#include "vdso.h"
#include "strbuf.h"
#include "build-id.h"
#include "data.h"
#include <api/fs/fs.h>
#include "asm/bug.h"
#include "tool.h"
#include "time-utils.h"
#include "units.h"
#include "util.h" // page_size, perf_exe()
#include "cputopo.h"
#include "bpf-event.h"

#include <linux/ctype.h>
#include <internal/lib.h>

/*
 * magic2 = "PERFILE2"
 * It must be a numerical value so that the endianness determines the
 * memory layout. That way we are able to detect the endianness when
 * reading the perf.data file back.
 *
 * We also check for the legacy (PERFFILE) format.
 */
static const char *__perf_magic1 = "PERFFILE";
static const u64 __perf_magic2 = 0x32454c4946524550ULL;
static const u64 __perf_magic2_sw = 0x50455246494c4532ULL;

#define PERF_MAGIC	__perf_magic2

const char perf_version_string[] = PERF_VERSION;

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

struct feat_fd {
	struct perf_header	*ph;
	int			fd;
	void			*buf;	/* Either buf != NULL or fd >= 0 */
	ssize_t			offset;
	size_t			size;
	struct evsel		*events;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size)
{
	ssize_t ret = writen(ff->fd, buf, size);

	if (ret != (ssize_t)size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
{
	/* struct perf_event_header::size is u16 */
	const size_t max_size = 0xffff - sizeof(struct perf_event_header);
	size_t new_size = ff->size;
	void *addr;

	if (size + ff->offset > max_size)
		return -E2BIG;

	while (size > (new_size - ff->offset))
		new_size <<= 1;
	new_size = min(max_size, new_size);

	if (ff->size < new_size) {
		addr = realloc(ff->buf, new_size);
		if (!addr)
			return -ENOMEM;
		ff->buf = addr;
		ff->size = new_size;
	}

	memcpy(ff->buf + ff->offset, buf, size);
	ff->offset += size;

	return 0;
}
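
/*
 * A minimal usage sketch (illustrative only; write_answer() is a
 * made-up name, not part of this file): a feature writer never touches
 * ff->fd or ff->buf directly, it goes through do_write() below, which
 * dispatches to __do_write_buf() when buffering for pipe mode and to
 * __do_write_fd() otherwise:
 *
 *	static int write_answer(struct feat_fd *ff,
 *				struct evlist *evlist __maybe_unused)
 *	{
 *		u64 answer = 42;
 *
 *		return do_write(ff, &answer, sizeof(answer));
 *	}
 */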

/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
	if (!ff->buf)
		return __do_write_fd(ff, buf, size);
	return __do_write_buf(ff, buf, size);
}

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
	u64 *p = (u64 *) set;
	int i, ret;

	ret = do_write(ff, &size, sizeof(size));
	if (ret < 0)
		return ret;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_write(ff, p + i, sizeof(*p));
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
		 size_t count, size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(ff, bf, count);

	if (!err)
		err = do_write(ff, zero_buf, count_aligned - count);

	return err;
}

#define string_size(str)						\
	(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))

/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = PERF_ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(ff, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(ff, str, olen, len);
}

static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size)
{
	ssize_t ret = readn(ff->fd, addr, size);

	if (ret != size)
		return ret < 0 ? (int)ret : -1;
	return 0;
}

static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (size > (ssize_t)ff->size - ff->offset)
		return -1;

	memcpy(addr, ff->buf + ff->offset, size);
	ff->offset += size;

	return 0;
}

static int __do_read(struct feat_fd *ff, void *addr, ssize_t size)
{
	if (!ff->buf)
		return __do_read_fd(ff, addr, size);
	return __do_read_buf(ff, addr, size);
}

static int do_read_u32(struct feat_fd *ff, u32 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_32(*addr);
	return 0;
}

static int do_read_u64(struct feat_fd *ff, u64 *addr)
{
	int ret;

	ret = __do_read(ff, addr, sizeof(*addr));
	if (ret)
		return ret;

	if (ff->ph->needs_swap)
		*addr = bswap_64(*addr);
	return 0;
}

static char *do_read_string(struct feat_fd *ff)
{
	u32 len;
	char *buf;

	if (do_read_u32(ff, &len))
		return NULL;

	buf = malloc(len);
	if (!buf)
		return NULL;

	if (!__do_read(ff, buf, len)) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}
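
/*
 * For reference, the on-disk string encoding produced by
 * do_write_string() and consumed by do_read_string() above is:
 *
 *	u32  len;	// strlen(str) + 1, rounded up to NAME_ALIGN
 *	char str[len];	// NUL terminated, zero padded up to len
 *
 * e.g. with NAME_ALIGN == 64, "perf" is stored as len = 64 followed by
 * "perf\0" and 59 bytes of zero padding.
 */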

/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
	unsigned long *set;
	u64 size, *p;
	int i, ret;

	ret = do_read_u64(ff, &size);
	if (ret)
		return ret;

	set = bitmap_alloc(size);
	if (!set)
		return -ENOMEM;

	p = (u64 *) set;

	for (i = 0; (u64) i < BITS_TO_U64(size); i++) {
		ret = do_read_u64(ff, p + i);
		if (ret < 0) {
			free(set);
			return ret;
		}
	}

	*pset = set;
	*psize = size;
	return 0;
}

static int write_tracing_data(struct feat_fd *ff,
			      struct evlist *evlist)
{
	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	return read_tracing_data(ff->fd, &evlist->core.entries);
}

static int write_build_id(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
		return -1;

	err = perf_session__write_buildid_table(session, ff);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	perf_session__cache_build_ids(session);

	return 0;
}

static int write_hostname(struct feat_fd *ff,
			  struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.nodename);
}

static int write_osrelease(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.release);
}

static int write_arch(struct feat_fd *ff,
		      struct evlist *evlist __maybe_unused)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(ff, uts.machine);
}

static int write_version(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	return do_write_string(ff, perf_version_string);
}

static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc)
{
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = cpuinfo_proc;
	size_t len = 0;
	int ret = -1;

	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret) {
		ret = -1;
		goto done;
	}

	s = buf;

	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = skip_spaces(r);
			*p = ' ';
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(ff, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

static int write_cpudesc(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
#if defined(__powerpc__) || defined(__hppa__) || defined(__sparc__)
#define CPUINFO_PROC	{ "cpu", }
#elif defined(__s390__)
#define CPUINFO_PROC	{ "vendor_id", }
#elif defined(__sh__)
#define CPUINFO_PROC	{ "cpu type", }
#elif defined(__alpha__) || defined(__mips__)
#define CPUINFO_PROC	{ "cpu model", }
#elif defined(__arm__)
#define CPUINFO_PROC	{ "model name", "Processor", }
#elif defined(__arc__)
#define CPUINFO_PROC	{ "Processor", }
#elif defined(__xtensa__)
#define CPUINFO_PROC	{ "core ID", }
#else
#define CPUINFO_PROC	{ "model name", }
#endif
	const char *cpuinfo_procs[] = CPUINFO_PROC;
#undef CPUINFO_PROC
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
		int ret;
		ret = __write_cpudesc(ff, cpuinfo_procs[i]);
		if (ret >= 0)
			return ret;
	}
	return -1;
}


static int write_nrcpus(struct feat_fd *ff,
			struct evlist *evlist __maybe_unused)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nrc = cpu__max_present_cpu();

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(ff, &nrc, sizeof(nrc));
	if (ret < 0)
		return ret;

	return do_write(ff, &nra, sizeof(nra));
}

static int write_event_desc(struct feat_fd *ff,
			    struct evlist *evlist)
{
	struct evsel *evsel;
	u32 nre, nri, sz;
	int ret;

	nre = evlist->core.nr_entries;

	/*
	 * write number of events
	 */
	ret = do_write(ff, &nre, sizeof(nre));
	if (ret < 0)
		return ret;

	/*
	 * size of perf_event_attr struct
	 */
	sz = (u32)sizeof(evsel->core.attr);
	ret = do_write(ff, &sz, sizeof(sz));
	if (ret < 0)
		return ret;

	evlist__for_each_entry(evlist, evsel) {
		ret = do_write(ff, &evsel->core.attr, sz);
		if (ret < 0)
			return ret;
		/*
		 * write the number of unique ids per event;
		 * there is one id per instance of an event.
		 *
		 * copy into an nri to be independent of the
		 * type of ids.
		 */
		nri = evsel->ids;
		ret = do_write(ff, &nri, sizeof(nri));
		if (ret < 0)
			return ret;

		/*
		 * write event string as passed on cmdline
		 */
		ret = do_write_string(ff, perf_evsel__name(evsel));
		if (ret < 0)
			return ret;
		/*
		 * write unique ids for this event
		 */
		ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64));
		if (ret < 0)
			return ret;
	}
	return 0;
}
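
/*
 * Sketch of the EVENT_DESC on-disk layout produced above (sizes refer
 * to the writing perf's struct perf_event_attr):
 *
 *	u32 nre;			// number of events
 *	u32 sz;				// sizeof(struct perf_event_attr)
 *	struct {
 *		struct perf_event_attr attr;
 *		u32  nri;		// number of ids
 *		char name[];		// do_write_string() encoding
 *		u64  ids[nri];
 *	} events[nre];
 */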

static int write_cmdline(struct feat_fd *ff,
			 struct evlist *evlist __maybe_unused)
{
	char pbuf[MAXPATHLEN], *buf;
	int i, ret, n;

	/* actual path to perf binary */
	buf = perf_exe(pbuf, MAXPATHLEN);

	/* account for binary path */
	n = perf_env.nr_cmdline + 1;

	ret = do_write(ff, &n, sizeof(n));
	if (ret < 0)
		return ret;

	ret = do_write_string(ff, buf);
	if (ret < 0)
		return ret;

	for (i = 0 ; i < perf_env.nr_cmdline; i++) {
		ret = do_write_string(ff, perf_env.cmdline_argv[i]);
		if (ret < 0)
			return ret;
	}
	return 0;
}


static int write_cpu_topology(struct feat_fd *ff,
			      struct evlist *evlist __maybe_unused)
{
	struct cpu_topology *tp;
	u32 i;
	int ret, j;

	tp = cpu_topology__new();
	if (!tp)
		return -1;

	ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->core_sib; i++) {
		ret = do_write_string(ff, tp->core_siblings[i]);
		if (ret < 0)
			goto done;
	}
	ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->thread_sib; i++) {
		ret = do_write_string(ff, tp->thread_siblings[i]);
		if (ret < 0)
			break;
	}

	ret = perf_env__read_cpu_topology_map(&perf_env);
	if (ret < 0)
		goto done;

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].core_id,
			       sizeof(perf_env.cpu[j].core_id));
		if (ret < 0)
			return ret;
		ret = do_write(ff, &perf_env.cpu[j].socket_id,
			       sizeof(perf_env.cpu[j].socket_id));
		if (ret < 0)
			return ret;
	}

	if (!tp->die_sib)
		goto done;

	ret = do_write(ff, &tp->die_sib, sizeof(tp->die_sib));
	if (ret < 0)
		goto done;

	for (i = 0; i < tp->die_sib; i++) {
		ret = do_write_string(ff, tp->die_siblings[i]);
		if (ret < 0)
			goto done;
	}

	for (j = 0; j < perf_env.nr_cpus_avail; j++) {
		ret = do_write(ff, &perf_env.cpu[j].die_id,
			       sizeof(perf_env.cpu[j].die_id));
		if (ret < 0)
			return ret;
	}

done:
	cpu_topology__delete(tp);
	return ret;
}

static int write_total_mem(struct feat_fd *ff,
			   struct evlist *evlist __maybe_unused)
{
	char *buf = NULL;
	FILE *fp;
	size_t len = 0;
	int ret = -1, n;
	uint64_t mem;

	fp = fopen("/proc/meminfo", "r");
	if (!fp)
		return -1;

	while (getline(&buf, &len, fp) > 0) {
		ret = strncmp(buf, "MemTotal:", 9);
		if (!ret)
			break;
	}
	if (!ret) {
		n = sscanf(buf, "%*s %"PRIu64, &mem);
		if (n == 1)
			ret = do_write(ff, &mem, sizeof(mem));
	} else
		ret = -1;
	free(buf);
	fclose(fp);
	return ret;
}

static int write_numa_topology(struct feat_fd *ff,
			       struct evlist *evlist __maybe_unused)
{
	struct numa_topology *tp;
	int ret = -1;
	u32 i;

	tp = numa_topology__new();
	if (!tp)
		return -ENOMEM;

	ret = do_write(ff, &tp->nr, sizeof(u32));
	if (ret < 0)
		goto err;

	for (i = 0; i < tp->nr; i++) {
		struct numa_topology_node *n = &tp->nodes[i];

		ret = do_write(ff, &n->node, sizeof(u32));
		if (ret < 0)
			goto err;

		ret = do_write(ff, &n->mem_total, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write(ff, &n->mem_free, sizeof(u64));
		if (ret)
			goto err;

		ret = do_write_string(ff, n->cpus);
		if (ret < 0)
			goto err;
	}

	ret = 0;

err:
	numa_topology__delete(tp);
	return ret;
}
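
/*
 * For reference, the NUMA_TOPOLOGY feature written above has this
 * on-disk layout:
 *
 *	u32 nr;				// number of nodes
 *	struct {
 *		u32  node;		// node index
 *		u64  mem_total;		// kB
 *		u64  mem_free;		// kB
 *		char cpus[];		// do_write_string() encoding
 *	} nodes[nr];
 */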
743 */ 744 while ((pmu = perf_pmu__scan(pmu))) { 745 if (!pmu->name) 746 continue; 747 pmu_num++; 748 } 749 750 ret = do_write(ff, &pmu_num, sizeof(pmu_num)); 751 if (ret < 0) 752 return ret; 753 754 while ((pmu = perf_pmu__scan(pmu))) { 755 if (!pmu->name) 756 continue; 757 758 ret = do_write(ff, &pmu->type, sizeof(pmu->type)); 759 if (ret < 0) 760 return ret; 761 762 ret = do_write_string(ff, pmu->name); 763 if (ret < 0) 764 return ret; 765 } 766 767 return 0; 768 } 769 770 /* 771 * File format: 772 * 773 * struct group_descs { 774 * u32 nr_groups; 775 * struct group_desc { 776 * char name[]; 777 * u32 leader_idx; 778 * u32 nr_members; 779 * }[nr_groups]; 780 * }; 781 */ 782 static int write_group_desc(struct feat_fd *ff, 783 struct evlist *evlist) 784 { 785 u32 nr_groups = evlist->nr_groups; 786 struct evsel *evsel; 787 int ret; 788 789 ret = do_write(ff, &nr_groups, sizeof(nr_groups)); 790 if (ret < 0) 791 return ret; 792 793 evlist__for_each_entry(evlist, evsel) { 794 if (perf_evsel__is_group_leader(evsel) && 795 evsel->core.nr_members > 1) { 796 const char *name = evsel->group_name ?: "{anon_group}"; 797 u32 leader_idx = evsel->idx; 798 u32 nr_members = evsel->core.nr_members; 799 800 ret = do_write_string(ff, name); 801 if (ret < 0) 802 return ret; 803 804 ret = do_write(ff, &leader_idx, sizeof(leader_idx)); 805 if (ret < 0) 806 return ret; 807 808 ret = do_write(ff, &nr_members, sizeof(nr_members)); 809 if (ret < 0) 810 return ret; 811 } 812 } 813 return 0; 814 } 815 816 /* 817 * Return the CPU id as a raw string. 818 * 819 * Each architecture should provide a more precise id string that 820 * can be use to match the architecture's "mapfile". 821 */ 822 char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused) 823 { 824 return NULL; 825 } 826 827 /* Return zero when the cpuid from the mapfile.csv matches the 828 * cpuid string generated on this platform. 829 * Otherwise return non-zero. 830 */ 831 int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) 832 { 833 regex_t re; 834 regmatch_t pmatch[1]; 835 int match; 836 837 if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) { 838 /* Warn unable to generate match particular string. */ 839 pr_info("Invalid regular expression %s\n", mapcpuid); 840 return 1; 841 } 842 843 match = !regexec(&re, cpuid, 1, pmatch, 0); 844 regfree(&re); 845 if (match) { 846 size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so); 847 848 /* Verify the entire string matched. 

/*
 * Return the CPU id as a raw string.
 *
 * Each architecture should provide a more precise id string that
 * can be used to match the architecture's "mapfile".
 */
char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
{
	return NULL;
}

/* Return zero when the cpuid from the mapfile.csv matches the
 * cpuid string generated on this platform.
 * Otherwise return non-zero.
 */
int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
		/* Warn that the mapfile pattern is not a valid regex. */
		pr_info("Invalid regular expression %s\n", mapcpuid);
		return 1;
	}

	match = !regexec(&re, cpuid, 1, pmatch, 0);
	regfree(&re);
	if (match) {
		size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so);

		/* Verify the entire string matched. */
		if (match_len == strlen(cpuid))
			return 0;
	}
	return 1;
}
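
/*
 * Worked example (hypothetical values): with a mapfile.csv pattern of
 * "GenuineIntel-6-([78]E)", strcmp_cpuid_str() returns 0 for the cpuid
 * "GenuineIntel-6-8E", because the regex match covers the whole cpuid
 * string, but non-zero for "GenuineIntel-6-8E-extra", where the match
 * is only a prefix.
 */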
955 */ 956 bpf_program__bpil_offs_to_addr(node->info_linear); 957 if (ret < 0) 958 goto out; 959 } 960 out: 961 up_read(&env->bpf_progs.lock); 962 return ret; 963 } 964 #else // HAVE_LIBBPF_SUPPORT 965 static int write_bpf_prog_info(struct feat_fd *ff __maybe_unused, 966 struct evlist *evlist __maybe_unused) 967 { 968 return 0; 969 } 970 #endif // HAVE_LIBBPF_SUPPORT 971 972 static int write_bpf_btf(struct feat_fd *ff, 973 struct evlist *evlist __maybe_unused) 974 { 975 struct perf_env *env = &ff->ph->env; 976 struct rb_root *root; 977 struct rb_node *next; 978 int ret; 979 980 down_read(&env->bpf_progs.lock); 981 982 ret = do_write(ff, &env->bpf_progs.btfs_cnt, 983 sizeof(env->bpf_progs.btfs_cnt)); 984 985 if (ret < 0) 986 goto out; 987 988 root = &env->bpf_progs.btfs; 989 next = rb_first(root); 990 while (next) { 991 struct btf_node *node; 992 993 node = rb_entry(next, struct btf_node, rb_node); 994 next = rb_next(&node->rb_node); 995 ret = do_write(ff, &node->id, 996 sizeof(u32) * 2 + node->data_size); 997 if (ret < 0) 998 goto out; 999 } 1000 out: 1001 up_read(&env->bpf_progs.lock); 1002 return ret; 1003 } 1004 1005 static int cpu_cache_level__sort(const void *a, const void *b) 1006 { 1007 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a; 1008 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b; 1009 1010 return cache_a->level - cache_b->level; 1011 } 1012 1013 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b) 1014 { 1015 if (a->level != b->level) 1016 return false; 1017 1018 if (a->line_size != b->line_size) 1019 return false; 1020 1021 if (a->sets != b->sets) 1022 return false; 1023 1024 if (a->ways != b->ways) 1025 return false; 1026 1027 if (strcmp(a->type, b->type)) 1028 return false; 1029 1030 if (strcmp(a->size, b->size)) 1031 return false; 1032 1033 if (strcmp(a->map, b->map)) 1034 return false; 1035 1036 return true; 1037 } 1038 1039 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level) 1040 { 1041 char path[PATH_MAX], file[PATH_MAX]; 1042 struct stat st; 1043 size_t len; 1044 1045 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level); 1046 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path); 1047 1048 if (stat(file, &st)) 1049 return 1; 1050 1051 scnprintf(file, PATH_MAX, "%s/level", path); 1052 if (sysfs__read_int(file, (int *) &cache->level)) 1053 return -1; 1054 1055 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path); 1056 if (sysfs__read_int(file, (int *) &cache->line_size)) 1057 return -1; 1058 1059 scnprintf(file, PATH_MAX, "%s/number_of_sets", path); 1060 if (sysfs__read_int(file, (int *) &cache->sets)) 1061 return -1; 1062 1063 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path); 1064 if (sysfs__read_int(file, (int *) &cache->ways)) 1065 return -1; 1066 1067 scnprintf(file, PATH_MAX, "%s/type", path); 1068 if (sysfs__read_str(file, &cache->type, &len)) 1069 return -1; 1070 1071 cache->type[len] = 0; 1072 cache->type = strim(cache->type); 1073 1074 scnprintf(file, PATH_MAX, "%s/size", path); 1075 if (sysfs__read_str(file, &cache->size, &len)) { 1076 zfree(&cache->type); 1077 return -1; 1078 } 1079 1080 cache->size[len] = 0; 1081 cache->size = strim(cache->size); 1082 1083 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path); 1084 if (sysfs__read_str(file, &cache->map, &len)) { 1085 zfree(&cache->map); 1086 zfree(&cache->type); 1087 return -1; 1088 } 1089 1090 cache->map[len] = 0; 1091 cache->map = strim(cache->map); 1092 return 

static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c)
{
	fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map);
}

static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
{
	u32 i, cnt = 0;
	long ncpus;
	u32 nr, cpu;
	u16 level;

	ncpus = sysconf(_SC_NPROCESSORS_CONF);
	if (ncpus < 0)
		return -1;

	nr = (u32)(ncpus & UINT_MAX);

	for (cpu = 0; cpu < nr; cpu++) {
		for (level = 0; level < 10; level++) {
			struct cpu_cache_level c;
			int err;

			err = cpu_cache_level__read(&c, cpu, level);
			if (err < 0)
				return err;

			if (err == 1)
				break;

			for (i = 0; i < cnt; i++) {
				if (cpu_cache_level__cmp(&c, &caches[i]))
					break;
			}

			if (i == cnt)
				caches[cnt++] = c;
			else
				cpu_cache_level__free(&c);

			if (WARN_ONCE(cnt == size, "way too many cpu caches.."))
				goto out;
		}
	}
out:
	*cntp = cnt;
	return 0;
}

#define MAX_CACHE_LVL 4

static int write_cache(struct feat_fd *ff,
		       struct evlist *evlist __maybe_unused)
{
	u32 max_caches = cpu__max_cpu() * MAX_CACHE_LVL;
	struct cpu_cache_level caches[max_caches];
	u32 cnt = 0, i, version = 1;
	int ret;

	ret = build_caches(caches, max_caches, &cnt);
	if (ret)
		goto out;

	qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort);

	ret = do_write(ff, &version, sizeof(u32));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &cnt, sizeof(u32));
	if (ret < 0)
		goto out;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level *c = &caches[i];

		#define _W(v)						\
			ret = do_write(ff, &c->v, sizeof(u32));		\
			if (ret < 0)					\
				goto out;

		_W(level)
		_W(line_size)
		_W(sets)
		_W(ways)
		#undef _W

		#define _W(v)						\
			ret = do_write_string(ff, (const char *) c->v);	\
			if (ret < 0)					\
				goto out;

		_W(type)
		_W(size)
		_W(map)
		#undef _W
	}

out:
	for (i = 0; i < cnt; i++)
		cpu_cache_level__free(&caches[i]);
	return ret;
}

static int write_stat(struct feat_fd *ff __maybe_unused,
		      struct evlist *evlist __maybe_unused)
{
	return 0;
}

static int write_sample_time(struct feat_fd *ff,
			     struct evlist *evlist)
{
	int ret;

	ret = do_write(ff, &evlist->first_sample_time,
		       sizeof(evlist->first_sample_time));
	if (ret < 0)
		return ret;

	return do_write(ff, &evlist->last_sample_time,
			sizeof(evlist->last_sample_time));
}


static int memory_node__read(struct memory_node *n, unsigned long idx)
{
	unsigned int phys, size = 0;
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;

#define for_each_memory(mem, dir)					\
	while ((ent = readdir(dir)))					\
		if (strcmp(ent->d_name, ".") &&				\
		    strcmp(ent->d_name, "..") &&			\
		    sscanf(ent->d_name, "memory%u", &mem) == 1)

	scnprintf(path, PATH_MAX,
		  "%s/devices/system/node/node%lu",
		  sysfs__mountpoint(), idx);

	dir = opendir(path);
	if (!dir) {
		pr_warning("failed: can't open memory sysfs data\n");
		return -1;
	}

	for_each_memory(phys, dir) {
		size = max(phys, size);
	}

	size++;

	n->set = bitmap_alloc(size);
	if (!n->set) {
		closedir(dir);
		return -ENOMEM;
	}

	n->node = idx;
	n->size = size;

	rewinddir(dir);

	for_each_memory(phys, dir) {
		set_bit(phys, n->set);
	}

	closedir(dir);
	return 0;
}

static int memory_node__sort(const void *a, const void *b)
{
	const struct memory_node *na = a;
	const struct memory_node *nb = b;

	return na->node - nb->node;
}

static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
{
	char path[PATH_MAX];
	struct dirent *ent;
	DIR *dir;
	u64 cnt = 0;
	int ret = 0;

	scnprintf(path, PATH_MAX, "%s/devices/system/node/",
		  sysfs__mountpoint());

	dir = opendir(path);
	if (!dir) {
		pr_debug2("%s: couldn't read %s, does this arch have topology information?\n",
			  __func__, path);
		return -1;
	}

	while (!ret && (ent = readdir(dir))) {
		unsigned int idx;
		int r;

		if (!strcmp(ent->d_name, ".") ||
		    !strcmp(ent->d_name, ".."))
			continue;

		r = sscanf(ent->d_name, "node%u", &idx);
		if (r != 1)
			continue;

		if (WARN_ONCE(cnt >= size,
			      "failed to write MEM_TOPOLOGY, way too many nodes\n"))
			return -1;

		ret = memory_node__read(&nodes[cnt++], idx);
	}

	*cntp = cnt;
	closedir(dir);

	if (!ret)
		qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort);

	return ret;
}

#define MAX_MEMORY_NODES 2000

/*
 * The MEM_TOPOLOGY holds the physical memory map for every
 * node in the system. The format of data is as follows:
 *
 *  0 - version          | for future changes
 *  8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes
 * 16 - count            | number of nodes
 *
 * For each node we store the map of physical indexes:
 *
 * 32 - node id          | node index
 * 40 - size             | size of bitmap
 * 48 - bitmap           | bitmap of memory indexes that belong to the node
 */
static int write_mem_topology(struct feat_fd *ff __maybe_unused,
			      struct evlist *evlist __maybe_unused)
{
	static struct memory_node nodes[MAX_MEMORY_NODES];
	u64 bsize, version = 1, i, nr;
	int ret;

	ret = sysfs__read_xll("devices/system/memory/block_size_bytes",
			      (unsigned long long *) &bsize);
	if (ret)
		return ret;

	ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr);
	if (ret)
		return ret;

	ret = do_write(ff, &version, sizeof(version));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &bsize, sizeof(bsize));
	if (ret < 0)
		goto out;

	ret = do_write(ff, &nr, sizeof(nr));
	if (ret < 0)
		goto out;

	for (i = 0; i < nr; i++) {
		struct memory_node *n = &nodes[i];

		#define _W(v)						\
			ret = do_write(ff, &n->v, sizeof(n->v));	\
			if (ret < 0)					\
				goto out;

		_W(node)
		_W(size)

		#undef _W

		ret = do_write_bitmap(ff, n->set, n->size);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
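
/*
 * Sketch of the bitmap encoding used by do_write_bitmap() and
 * do_read_bitmap() for the node maps above: the bit count is written
 * as a u64, followed by the bitmap rounded up to whole u64 words. For
 * a hypothetical node owning memory blocks {0, 1, 5} with size = 6,
 * the stream is:
 *
 *	u64 size  = 6
 *	u64 words = { 0x23 }	// 0b100011: bits 0, 1 and 5 set
 */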

static int write_compressed(struct feat_fd *ff __maybe_unused,
			    struct evlist *evlist __maybe_unused)
{
	int ret;

	ret = do_write(ff, &(ff->ph->env.comp_ver), sizeof(ff->ph->env.comp_ver));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_type), sizeof(ff->ph->env.comp_type));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_level), sizeof(ff->ph->env.comp_level));
	if (ret)
		return ret;

	ret = do_write(ff, &(ff->ph->env.comp_ratio), sizeof(ff->ph->env.comp_ratio));
	if (ret)
		return ret;

	return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}

static void print_hostname(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
}

static void print_osrelease(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# os release : %s\n", ff->ph->env.os_release);
}

static void print_arch(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# arch : %s\n", ff->ph->env.arch);
}

static void print_cpudesc(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc);
}

static void print_nrcpus(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online);
	fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail);
}

static void print_version(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# perf version : %s\n", ff->ph->env.version);
}

static void print_cmdline(struct feat_fd *ff, FILE *fp)
{
	int nr, i;

	nr = ff->ph->env.nr_cmdline;

	fprintf(fp, "# cmdline : ");

	for (i = 0; i < nr; i++) {
		char *argv_i = strdup(ff->ph->env.cmdline_argv[i]);
		if (!argv_i) {
			fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]);
		} else {
			char *mem = argv_i;
			do {
				char *quote = strchr(argv_i, '\'');
				if (!quote)
					break;
				*quote++ = '\0';
				fprintf(fp, "%s\\\'", argv_i);
				argv_i = quote;
			} while (1);
			fprintf(fp, "%s ", argv_i);
			free(mem);
		}
	}
	fputc('\n', fp);
}

static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
{
	struct perf_header *ph = ff->ph;
	int cpu_nr = ph->env.nr_cpus_avail;
	int nr, i;
	char *str;

	nr = ph->env.nr_sibling_cores;
	str = ph->env.sibling_cores;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling sockets : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		nr = ph->env.nr_sibling_dies;
		str = ph->env.sibling_dies;

		for (i = 0; i < nr; i++) {
			fprintf(fp, "# sibling dies : %s\n", str);
			str += strlen(str) + 1;
		}
	}

	nr = ph->env.nr_sibling_threads;
	str = ph->env.sibling_threads;

	for (i = 0; i < nr; i++) {
		fprintf(fp, "# sibling threads : %s\n", str);
		str += strlen(str) + 1;
	}

	if (ph->env.nr_sibling_dies) {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Die ID %d, Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].die_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID, Die ID and Socket ID "
				    "information is not available\n");
	} else {
		if (ph->env.cpu != NULL) {
			for (i = 0; i < cpu_nr; i++)
				fprintf(fp, "# CPU %d: Core ID %d, "
					    "Socket ID %d\n",
					    i, ph->env.cpu[i].core_id,
					    ph->env.cpu[i].socket_id);
		} else
			fprintf(fp, "# Core ID and Socket ID "
				    "information is not available\n");
	}
}

static void print_clockid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
		ff->ph->env.clockid_res_ns * 1000);
}

static void print_dir_format(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	fprintf(fp, "# directory data version : %"PRIu64"\n", data->dir.version);
}

static void print_bpf_prog_info(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.infos;
	next = rb_first(root);

	while (next) {
		struct bpf_prog_info_node *node;

		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
		next = rb_next(&node->rb_node);

		bpf_event__print_bpf_prog_info(&node->info_linear->info,
					       env, fp);
	}

	up_read(&env->bpf_progs.lock);
}

static void print_bpf_btf(struct feat_fd *ff, FILE *fp)
{
	struct perf_env *env = &ff->ph->env;
	struct rb_root *root;
	struct rb_node *next;

	down_read(&env->bpf_progs.lock);

	root = &env->bpf_progs.btfs;
	next = rb_first(root);

	while (next) {
		struct btf_node *node;

		node = rb_entry(next, struct btf_node, rb_node);
		next = rb_next(&node->rb_node);
		fprintf(fp, "# btf info of id %u\n", node->id);
	}

	up_read(&env->bpf_progs.lock);
}
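
/*
 * For reference, the bpf_btf records printed above are stored by
 * write_bpf_btf() as:
 *
 *	u32 count;
 *	struct {
 *		u32 id;
 *		u32 data_size;
 *		u8  data[data_size];
 *	} btfs[count];
 *
 * (id and data_size are adjacent in struct btf_node, which is why the
 * writer can emit both plus the data with a single do_write() call.)
 */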

static void free_event_desc(struct evsel *events)
{
	struct evsel *evsel;

	if (!events)
		return;

	for (evsel = events; evsel->core.attr.size; evsel++) {
		zfree(&evsel->name);
		zfree(&evsel->id);
	}

	free(events);
}

static struct evsel *read_event_desc(struct feat_fd *ff)
{
	struct evsel *evsel, *events = NULL;
	u64 *id;
	void *buf = NULL;
	u32 nre, sz, nr, i, j;
	size_t msz;

	/* number of events */
	if (do_read_u32(ff, &nre))
		goto error;

	if (do_read_u32(ff, &sz))
		goto error;

	/* buffer to hold the on-file attr struct */
	buf = malloc(sz);
	if (!buf)
		goto error;

	/* the last event terminates with evsel->core.attr.size == 0: */
	events = calloc(nre + 1, sizeof(*events));
	if (!events)
		goto error;

	msz = sizeof(evsel->core.attr);
	if (sz < msz)
		msz = sz;

	for (i = 0, evsel = events; i < nre; evsel++, i++) {
		evsel->idx = i;

		/*
		 * must read entire on-file attr struct to
		 * sync up with layout.
		 */
		if (__do_read(ff, buf, sz))
			goto error;

		if (ff->ph->needs_swap)
			perf_event__attr_swap(buf);

		memcpy(&evsel->core.attr, buf, msz);

		if (do_read_u32(ff, &nr))
			goto error;

		if (ff->ph->needs_swap)
			evsel->needs_swap = true;

		evsel->name = do_read_string(ff);
		if (!evsel->name)
			goto error;

		if (!nr)
			continue;

		id = calloc(nr, sizeof(*id));
		if (!id)
			goto error;
		evsel->ids = nr;
		evsel->id = id;

		for (j = 0 ; j < nr; j++) {
			if (do_read_u64(ff, id))
				goto error;
			id++;
		}
	}
out:
	free(buf);
	return events;
error:
	free_event_desc(events);
	events = NULL;
	goto out;
}

static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val,
				void *priv __maybe_unused)
{
	return fprintf(fp, ", %s = %s", name, val);
}

static void print_event_desc(struct feat_fd *ff, FILE *fp)
{
	struct evsel *evsel, *events;
	u32 j;
	u64 *id;

	if (ff->events)
		events = ff->events;
	else
		events = read_event_desc(ff);

	if (!events) {
		fprintf(fp, "# event desc: not available or unable to read\n");
		return;
	}

	for (evsel = events; evsel->core.attr.size; evsel++) {
		fprintf(fp, "# event : name = %s, ", evsel->name);

		if (evsel->ids) {
			fprintf(fp, ", id = {");
			for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) {
				if (j)
					fputc(',', fp);
				fprintf(fp, " %"PRIu64, *id);
			}
			fprintf(fp, " }");
		}

		perf_event_attr__fprintf(fp, &evsel->core.attr, __desc_attr__fprintf, NULL);

		fputc('\n', fp);
	}

	free_event_desc(events);
	ff->events = NULL;
}

static void print_total_mem(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem);
}

static void print_numa_topology(struct feat_fd *ff, FILE *fp)
{
	int i;
	struct numa_node *n;

	for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) {
		n = &ff->ph->env.numa_nodes[i];

		fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB,"
			    " free = %"PRIu64" kB\n",
			n->node, n->mem_total, n->mem_free);

		fprintf(fp, "# node%u cpu list : ", n->node);
		cpu_map__fprintf(n->map, fp);
	}
}

static void print_cpuid(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid);
}

static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains samples with branch stack\n");
}

static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
}

static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp)
{
	fprintf(fp, "# contains stat data\n");
}

static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused)
{
	int i;

	fprintf(fp, "# CPU cache info:\n");
	for (i = 0; i < ff->ph->env.caches_cnt; i++) {
		fprintf(fp, "# ");
		cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]);
	}
}

static void print_compressed(struct feat_fd *ff, FILE *fp)
{
	fprintf(fp, "# compressed : %s, level = %d, ratio = %d\n",
		ff->ph->env.comp_type == PERF_COMP_ZSTD ? "Zstd" : "Unknown",
		ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}

static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
	const char *delimiter = "# pmu mappings: ";
	char *str, *tmp;
	u32 pmu_num;
	u32 type;

	pmu_num = ff->ph->env.nr_pmu_mappings;
	if (!pmu_num) {
		fprintf(fp, "# pmu mappings: not available\n");
		return;
	}

	str = ff->ph->env.pmu_mappings;

	while (pmu_num) {
		type = strtoul(str, &tmp, 0);
		if (*tmp != ':')
			goto error;

		str = tmp + 1;
		fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type);

		delimiter = ", ";
		str += strlen(str) + 1;
		pmu_num--;
	}

	fprintf(fp, "\n");

	if (!pmu_num)
		return;
error:
	fprintf(fp, "# pmu mappings: unable to read\n");
}

static void print_group_desc(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	struct evsel *evsel;
	u32 nr = 0;

	session = container_of(ff->ph, struct perf_session, header);

	evlist__for_each_entry(session->evlist, evsel) {
		if (perf_evsel__is_group_leader(evsel) &&
		    evsel->core.nr_members > 1) {
			fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
				perf_evsel__name(evsel));

			nr = evsel->core.nr_members - 1;
		} else if (nr) {
			fprintf(fp, ",%s", perf_evsel__name(evsel));

			if (--nr == 0)
				fprintf(fp, "}\n");
		}
	}
}

static void print_sample_time(struct feat_fd *ff, FILE *fp)
{
	struct perf_session *session;
	char time_buf[32];
	double d;

	session = container_of(ff->ph, struct perf_session, header);

	timestamp__scnprintf_usec(session->evlist->first_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of first sample : %s\n", time_buf);

	timestamp__scnprintf_usec(session->evlist->last_sample_time,
				  time_buf, sizeof(time_buf));
	fprintf(fp, "# time of last sample : %s\n", time_buf);

	d = (double)(session->evlist->last_sample_time -
		session->evlist->first_sample_time) / NSEC_PER_MSEC;

	fprintf(fp, "# sample duration : %10.3f ms\n", d);
}

static void memory_node__fprintf(struct memory_node *n,
				 unsigned long long bsize, FILE *fp)
{
	char buf_map[100], buf_size[50];
	unsigned long long size;

	size = bsize * bitmap_weight(n->set, n->size);
	unit_number__scnprintf(buf_size, 50, size);

	bitmap_scnprintf(n->set, n->size, buf_map, 100);
	fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map);
}

static void print_mem_topology(struct feat_fd *ff, FILE *fp)
{
	struct memory_node *nodes;
	int i, nr;

	nodes = ff->ph->env.memory_nodes;
	nr    = ff->ph->env.nr_memory_nodes;

	fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n",
		nr, ff->ph->env.memory_bsize);

	for (i = 0; i < nr; i++) {
		memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp);
	}
}
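
/*
 * Example output (hypothetical two-node machine with a 0x8000000 byte
 * block size, each node owning 128 blocks, i.e. 16G):
 *
 *	# memory nodes (nr 2, block size 0x8000000):
 *	#   0 [16G]: 0-127
 *	#   1 [16G]: 128-255
 */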

static int __event_process_build_id(struct perf_record_header_build_id *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct machine *machine;
	u16 cpumode;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		break;
	default:
		goto out;
	}

	dso = machine__findnew_dso(machine, filename);
	if (dso != NULL) {
		char sbuild_id[SBUILD_ID_SIZE];

		dso__set_build_id(dso, &bev->build_id);

		if (dso_type != DSO_TYPE_USER) {
			struct kmod_path m = { .name = NULL, };

			if (!kmod_path__parse_name(&m, filename) && m.kmod)
				dso__set_module_info(dso, &m, machine);
			else
				dso->kernel = dso_type;

			free(m.name);
		}

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
		dso__put(dso);
	}

	err = 0;
out:
	return err;
}

static int perf_header__read_build_ids_abi_quirk(struct perf_header *header,
						 int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct {
		struct perf_event_header   header;
		u8			   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
		char			   filename[0];
	} old_bev;
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev))
			return -1;

		if (header->needs_swap)
			perf_event_header__bswap(&old_bev.header);

		len = old_bev.header.size - sizeof(old_bev);
		if (readn(input, filename, len) != len)
			return -1;

		bev.header = old_bev.header;

		/*
		 * As the pid is the missing value, we need to fill
		 * it properly. The header.misc value gives us a nice hint.
		 */
		bev.pid = HOST_KERNEL_ID;
		if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER ||
		    bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL)
			bev.pid = DEFAULT_GUEST_KERNEL_ID;

		memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id));
		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}

	return 0;
}
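
/*
 * Sketch of the two on-disk build-id record layouts handled here:
 *
 *	// pre-a1645ce1 (no pid field):
 *	struct perf_event_header header;
 *	u8   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
 *	char filename[];
 *
 *	// current struct perf_record_header_build_id:
 *	struct perf_event_header header;
 *	pid_t pid;
 *	u8   build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))];
 *	char filename[];
 *
 * The quirk reader above copies an old record into the new layout and
 * synthesizes the missing pid from header.misc.
 */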

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct perf_record_header_build_id bev;
	char filename[PATH_MAX];
	u64 limit = offset + size, orig_offset = offset;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (readn(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (readn(input, filename, len) != len)
			goto out;
		/*
		 * The a1645ce1 changeset:
		 *
		 * "perf: 'perf kvm' tool for monitoring guest performance from host"
		 *
		 * added a field to struct perf_record_header_build_id that broke
		 * the file format.
		 *
		 * Since the kernel build-id is the first entry, process the
		 * table using the old format if the well known
		 * '[kernel.kallsyms]' string for the kernel build-id has the
		 * first 4 characters chopped off (where the pid_t sits).
		 */
		if (memcmp(filename, "nel.kallsyms]", 13) == 0) {
			if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1)
				return -1;
			return perf_header__read_build_ids_abi_quirk(header, input, offset, size);
		}

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

/* Macro for features that simply need to read and store a string. */
#define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \
static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \
{\
	ff->ph->env.__feat_env = do_read_string(ff); \
	return ff->ph->env.__feat_env ? 0 : -ENOMEM; \
}

FEAT_PROCESS_STR_FUN(hostname, hostname);
FEAT_PROCESS_STR_FUN(osrelease, os_release);
FEAT_PROCESS_STR_FUN(version, version);
FEAT_PROCESS_STR_FUN(arch, arch);
FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc);
FEAT_PROCESS_STR_FUN(cpuid, cpuid);
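
/*
 * For reference, FEAT_PROCESS_STR_FUN(hostname, hostname) above
 * expands to:
 *
 *	static int process_hostname(struct feat_fd *ff,
 *				    void *data __maybe_unused)
 *	{
 *		ff->ph->env.hostname = do_read_string(ff);
 *		return ff->ph->env.hostname ? 0 : -ENOMEM;
 *	}
 */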

static int process_tracing_data(struct feat_fd *ff, void *data)
{
	ssize_t ret = trace_report(ff->fd, data, false);

	return ret < 0 ? -1 : 0;
}

static int process_build_id(struct feat_fd *ff, void *data __maybe_unused)
{
	if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size))
		pr_debug("Failed to read buildids, continuing...\n");
	return 0;
}

static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret;
	u32 nr_cpus_avail, nr_cpus_online;

	ret = do_read_u32(ff, &nr_cpus_avail);
	if (ret)
		return ret;

	ret = do_read_u32(ff, &nr_cpus_online);
	if (ret)
		return ret;
	ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail;
	ff->ph->env.nr_cpus_online = (int)nr_cpus_online;
	return 0;
}

static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused)
{
	u64 total_mem;
	int ret;

	ret = do_read_u64(ff, &total_mem);
	if (ret)
		return -1;
	ff->ph->env.total_mem = (unsigned long long)total_mem;
	return 0;
}

static struct evsel *
perf_evlist__find_by_index(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}

	return NULL;
}

static void
perf_evlist__set_event_name(struct evlist *evlist,
			    struct evsel *event)
{
	struct evsel *evsel;

	if (!event->name)
		return;

	evsel = perf_evlist__find_by_index(evlist, event->idx);
	if (!evsel)
		return;

	if (evsel->name)
		return;

	evsel->name = strdup(event->name);
}

static int
process_event_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	struct evsel *evsel, *events = read_event_desc(ff);

	if (!events)
		return 0;

	session = container_of(ff->ph, struct perf_session, header);

	if (session->data->is_pipe) {
		/* Save events for reading later by print_event_desc,
		 * since they can't be read again in pipe mode.
		 */
		ff->events = events;
	}

	for (evsel = events; evsel->core.attr.size; evsel++)
		perf_evlist__set_event_name(session->evlist, evsel);

	if (!session->data->is_pipe)
		free_event_desc(events);

	return 0;
}

static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused)
{
	char *str, *cmdline = NULL, **argv = NULL;
	u32 nr, i, len = 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ff->ph->env.nr_cmdline = nr;

	cmdline = zalloc(ff->size + nr + 1);
	if (!cmdline)
		return -1;

	argv = zalloc(sizeof(char *) * (nr + 1));
	if (!argv)
		goto error;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		argv[i] = cmdline + len;
		memcpy(argv[i], str, strlen(str) + 1);
		len += strlen(str) + 1;
		free(str);
	}
	ff->ph->env.cmdline = cmdline;
	ff->ph->env.cmdline_argv = (const char **) argv;
	return 0;

error:
	free(argv);
	free(cmdline);
	return -1;
}
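
/*
 * Sketch of the resulting in-memory layout for a hypothetical
 * "perf record -a" session: all strings live back to back in one
 * allocation, with argv[] pointing into it:
 *
 *	env.cmdline      = "perf\0record\0-a\0"
 *	env.cmdline_argv = { cmdline + 0, cmdline + 5, cmdline + 12, NULL }
 */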

static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	u32 nr, i;
	char *str;
	struct strbuf sb;
	int cpu_nr = ff->ph->env.nr_cpus_avail;
	u64 size = 0;
	struct perf_header *ph = ff->ph;
	bool do_core_id_test = true;

	ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
	if (!ph->env.cpu)
		return -1;

	if (do_read_u32(ff, &nr))
		goto free_cpu;

	ph->env.nr_sibling_cores = nr;
	size += sizeof(u32);
	if (strbuf_init(&sb, 128) < 0)
		goto free_cpu;

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_cores = strbuf_detach(&sb, NULL);

	if (do_read_u32(ff, &nr))
		return -1;

	ph->env.nr_sibling_threads = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_threads = strbuf_detach(&sb, NULL);

	/*
	 * The header may be from old perf,
	 * which doesn't include core id and socket id information.
	 */
	if (ff->size <= size) {
		zfree(&ph->env.cpu);
		return 0;
	}

	/* On s390 the socket_id number is not related to the numbers of cpus.
	 * The socket_id number might be higher than the numbers of cpus.
	 * This depends on the configuration.
	 * AArch64 is the same.
	 */
	if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
			  || !strncmp(ph->env.arch, "aarch64", 7)))
		do_core_id_test = false;

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].core_id = nr;
		size += sizeof(u32);

		if (do_read_u32(ff, &nr))
			goto free_cpu;

		if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
			pr_debug("socket_id number is too big. "
				 "You may need to upgrade the perf tool.\n");
			goto free_cpu;
		}

		ph->env.cpu[i].socket_id = nr;
		size += sizeof(u32);
	}

	/*
	 * The header may be from old perf,
	 * which doesn't include die information.
	 */
	if (ff->size <= size)
		return 0;

	if (do_read_u32(ff, &nr))
		return -1;

	ph->env.nr_sibling_dies = nr;
	size += sizeof(u32);

	for (i = 0; i < nr; i++) {
		str = do_read_string(ff);
		if (!str)
			goto error;

		/* include a NULL character at the end */
		if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
			goto error;
		size += string_size(str);
		free(str);
	}
	ph->env.sibling_dies = strbuf_detach(&sb, NULL);

	for (i = 0; i < (u32)cpu_nr; i++) {
		if (do_read_u32(ff, &nr))
			goto free_cpu;

		ph->env.cpu[i].die_id = nr;
	}

	return 0;

error:
	strbuf_release(&sb);
free_cpu:
	zfree(&ph->env.cpu);
	return -1;
}

static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused)
{
	struct numa_node *nodes, *n;
	u32 nr, i;
	char *str;

	/* nr nodes */
	if (do_read_u32(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		n = &nodes[i];

		/* node number */
		if (do_read_u32(ff, &n->node))
			goto error;

		if (do_read_u64(ff, &n->mem_total))
			goto error;

		if (do_read_u64(ff, &n->mem_free))
			goto error;

		str = do_read_string(ff);
		if (!str)
			goto error;

		n->map = perf_cpu_map__new(str);
		if (!n->map)
			goto error;

		free(str);
	}
	ff->ph->env.nr_numa_nodes = nr;
	ff->ph->env.numa_nodes = nodes;
	return 0;

error:
	free(nodes);
	return -1;
}

static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused)
{
	char *name;
	u32 pmu_num;
	u32 type;
	struct strbuf sb;

	if (do_read_u32(ff, &pmu_num))
		return -1;

	if (!pmu_num) {
		pr_debug("pmu mappings not available\n");
		return 0;
	}

	ff->ph->env.nr_pmu_mappings = pmu_num;
	if (strbuf_init(&sb, 128) < 0)
		return -1;

	while (pmu_num) {
		if (do_read_u32(ff, &type))
			goto error;

		name = do_read_string(ff);
		if (!name)
			goto error;

		if (strbuf_addf(&sb, "%u:%s", type, name) < 0)
			goto error;
		/* include a NULL character at the end */
		if (strbuf_add(&sb, "", 1) < 0)
			goto error;

		if (!strcmp(name, "msr"))
			ff->ph->env.msr_pmu_type = type;

		free(name);
		pmu_num--;
	}
	ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL);
	return 0;

error:
	strbuf_release(&sb);
	return -1;
}
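
/*
 * For a hypothetical file with two PMUs, the detached buffer stored in
 * env.pmu_mappings above ends up as consecutive NUL-terminated
 * entries:
 *
 *	"4:cpu\0" "10:msr\0"
 *
 * which print_pmu_mappings() later walks with strtoul() and
 * strlen() + 1 hops.
 */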
static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused)
{
	int ret = -1;
	u32 i, nr, nr_groups;
	struct perf_session *session;
	struct evsel *evsel, *leader = NULL;
	struct group_desc {
		char *name;
		u32 leader_idx;
		u32 nr_members;
	} *desc;

	if (do_read_u32(ff, &nr_groups))
		return -1;

	ff->ph->env.nr_groups = nr_groups;
	if (!nr_groups) {
		pr_debug("group desc not available\n");
		return 0;
	}

	desc = calloc(nr_groups, sizeof(*desc));
	if (!desc)
		return -1;

	for (i = 0; i < nr_groups; i++) {
		desc[i].name = do_read_string(ff);
		if (!desc[i].name)
			goto out_free;

		if (do_read_u32(ff, &desc[i].leader_idx))
			goto out_free;

		if (do_read_u32(ff, &desc[i].nr_members))
			goto out_free;
	}

	/*
	 * Rebuild group relationship based on the group_desc
	 */
	session = container_of(ff->ph, struct perf_session, header);
	session->evlist->nr_groups = nr_groups;

	i = nr = 0;
	evlist__for_each_entry(session->evlist, evsel) {
		if (i < nr_groups && evsel->idx == (int) desc[i].leader_idx) {
			evsel->leader = evsel;
			/* {anon_group} is a dummy name */
			if (strcmp(desc[i].name, "{anon_group}")) {
				evsel->group_name = desc[i].name;
				desc[i].name = NULL;
			}
			evsel->core.nr_members = desc[i].nr_members;

			if (nr > 0) {
				pr_debug("invalid group desc\n");
				goto out_free;
			}

			leader = evsel;
			nr = evsel->core.nr_members - 1;
			i++;
		} else if (nr) {
			/* This is a group member */
			evsel->leader = leader;

			nr--;
		}
	}

	if (i != nr_groups || nr != 0) {
		pr_debug("invalid group desc\n");
		goto out_free;
	}

	ret = 0;
out_free:
	for (i = 0; i < nr_groups; i++)
		zfree(&desc[i].name);
	free(desc);

	return ret;
}

static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	int err;

	session = container_of(ff->ph, struct perf_session, header);

	err = auxtrace_index__process(ff->fd, ff->size, session,
				      ff->ph->needs_swap);
	if (err < 0)
		pr_err("Failed to process auxtrace index\n");
	return err;
}

static int process_cache(struct feat_fd *ff, void *data __maybe_unused)
{
	struct cpu_cache_level *caches;
	u32 cnt, i, version;

	if (do_read_u32(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u32(ff, &cnt))
		return -1;

	caches = zalloc(sizeof(*caches) * cnt);
	if (!caches)
		return -1;

	for (i = 0; i < cnt; i++) {
		struct cpu_cache_level c;

#define _R(v)					\
		if (do_read_u32(ff, &c.v))	\
			goto out_free_caches;	\

		_R(level)
		_R(line_size)
		_R(sets)
		_R(ways)
#undef _R

#define _R(v)					\
		c.v = do_read_string(ff);	\
		if (!c.v)			\
			goto out_free_caches;

		_R(type)
		_R(size)
		_R(map)
#undef _R

		caches[i] = c;
	}

	ff->ph->env.caches = caches;
	ff->ph->env.caches_cnt = cnt;
	return 0;
out_free_caches:
	free(caches);
	return -1;
}

static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_session *session;
	u64 first_sample_time, last_sample_time;
	int ret;

	session = container_of(ff->ph, struct perf_session, header);

	ret = do_read_u64(ff, &first_sample_time);
	if (ret)
		return -1;

	ret = do_read_u64(ff, &last_sample_time);
	if (ret)
		return -1;

	session->evlist->first_sample_time = first_sample_time;
	session->evlist->last_sample_time = last_sample_time;
	return 0;
}
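/*
 * Worked example (illustrative only, not extra code): suppose four
 * events e0..e3 were recorded as the groups {e0,e1,e2} and {e3}.
 * GROUP_DESC then carries two entries,
 *
 *   desc[0] = { .name = "{anon_group}", .leader_idx = 0, .nr_members = 3 }
 *   desc[1] = { .name = "{anon_group}", .leader_idx = 3, .nr_members = 1 }
 *
 * and the relink loop in process_group_desc() above makes e0 and e3
 * their own leaders while e1 and e2 point back at e0.
 */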
static int process_mem_topology(struct feat_fd *ff,
				void *data __maybe_unused)
{
	struct memory_node *nodes;
	u64 version, i, nr, bsize;
	int ret = -1;

	if (do_read_u64(ff, &version))
		return -1;

	if (version != 1)
		return -1;

	if (do_read_u64(ff, &bsize))
		return -1;

	if (do_read_u64(ff, &nr))
		return -1;

	nodes = zalloc(sizeof(*nodes) * nr);
	if (!nodes)
		return -1;

	for (i = 0; i < nr; i++) {
		struct memory_node n;

#define _R(v)					\
		if (do_read_u64(ff, &n.v))	\
			goto out;		\

		_R(node)
		_R(size)

#undef _R

		if (do_read_bitmap(ff, &n.set, &n.size))
			goto out;

		nodes[i] = n;
	}

	ff->ph->env.memory_bsize    = bsize;
	ff->ph->env.memory_nodes    = nodes;
	ff->ph->env.nr_memory_nodes = nr;
	ret = 0;

out:
	if (ret)
		free(nodes);
	return ret;
}

static int process_clockid(struct feat_fd *ff,
			   void *data __maybe_unused)
{
	if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
		return -1;

	return 0;
}

static int process_dir_format(struct feat_fd *ff,
			      void *_data __maybe_unused)
{
	struct perf_session *session;
	struct perf_data *data;

	session = container_of(ff->ph, struct perf_session, header);
	data = session->data;

	if (WARN_ON(!perf_data__is_dir(data)))
		return -1;

	return do_read_u64(ff, &data->dir.version);
}

#ifdef HAVE_LIBBPF_SUPPORT
static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct perf_env *env = &ff->ph->env;
	u32 count, i;
	int err = -1;

	if (ff->ph->needs_swap) {
		pr_warning("interpreting bpf_prog_info from systems with a different endianness is not yet supported\n");
		return 0;
	}

	if (do_read_u32(ff, &count))
		return -1;

	down_write(&env->bpf_progs.lock);

	for (i = 0; i < count; ++i) {
		u32 info_len, data_len;

		info_linear = NULL;
		info_node = NULL;
		if (do_read_u32(ff, &info_len))
			goto out;
		if (do_read_u32(ff, &data_len))
			goto out;

		if (info_len > sizeof(struct bpf_prog_info)) {
			pr_warning("detected invalid bpf_prog_info\n");
			goto out;
		}

		info_linear = malloc(sizeof(struct bpf_prog_info_linear) +
				     data_len);
		if (!info_linear)
			goto out;
		info_linear->info_len = sizeof(struct bpf_prog_info);
		info_linear->data_len = data_len;
		if (do_read_u64(ff, (u64 *)(&info_linear->arrays)))
			goto out;
		if (__do_read(ff, &info_linear->info, info_len))
			goto out;
		if (info_len < sizeof(struct bpf_prog_info))
			memset(((void *)(&info_linear->info)) + info_len, 0,
			       sizeof(struct bpf_prog_info) - info_len);

		if (__do_read(ff, info_linear->data, data_len))
			goto out;

		info_node = malloc(sizeof(struct bpf_prog_info_node));
		if (!info_node)
			goto out;

		/* after reading from file, translate offset to address */
		bpf_program__bpil_offs_to_addr(info_linear);
		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
	}

	up_write(&env->bpf_progs.lock);
	return 0;
out:
	free(info_linear);
	free(info_node);
	up_write(&env->bpf_progs.lock);
	return err;
}
#else // HAVE_LIBBPF_SUPPORT
static int process_bpf_prog_info(struct feat_fd *ff __maybe_unused, void *data __maybe_unused)
{
	return 0;
}
#endif // HAVE_LIBBPF_SUPPORT
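/*
 * On-file layout consumed by process_bpf_prog_info() above, sketched
 * from the reads it performs (pseudo-declaration, not real code):
 *
 *   u32 count;
 *   struct {
 *       u32 info_len;        // capped at sizeof(struct bpf_prog_info)
 *       u32 data_len;        // length of the variable arrays part
 *       u64 arrays;          // bitmap of valid bpf_prog_info arrays
 *       u8  info[info_len];  // possibly truncated bpf_prog_info
 *       u8  data[data_len];  // array payload, offsets relative to data
 *   } progs[count];
 */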
static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
{
	struct perf_env *env = &ff->ph->env;
	struct btf_node *node = NULL;
	u32 count, i;
	int err = -1;

	if (ff->ph->needs_swap) {
		pr_warning("interpreting btf from systems with a different endianness is not yet supported\n");
		return 0;
	}

	if (do_read_u32(ff, &count))
		return -1;

	down_write(&env->bpf_progs.lock);

	for (i = 0; i < count; ++i) {
		u32 id, data_size;

		if (do_read_u32(ff, &id))
			goto out;
		if (do_read_u32(ff, &data_size))
			goto out;

		node = malloc(sizeof(struct btf_node) + data_size);
		if (!node)
			goto out;

		node->id = id;
		node->data_size = data_size;

		if (__do_read(ff, node->data, data_size))
			goto out;

		perf_env__insert_btf(env, node);
		node = NULL;
	}

	err = 0;
out:
	up_write(&env->bpf_progs.lock);
	free(node);
	return err;
}

static int process_compressed(struct feat_fd *ff,
			      void *data __maybe_unused)
{
	if (do_read_u32(ff, &(ff->ph->env.comp_ver)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_type)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_level)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_ratio)))
		return -1;

	if (do_read_u32(ff, &(ff->ph->env.comp_mmap_len)))
		return -1;

	return 0;
}

struct feature_ops {
	int (*write)(struct feat_fd *ff, struct evlist *evlist);
	void (*print)(struct feat_fd *ff, FILE *fp);
	int (*process)(struct feat_fd *ff, void *data);
	const char *name;
	bool full_only;
	bool synthesize;
};

#define FEAT_OPR(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func,			\
		.synthesize = true				\
	}

#define FEAT_OPN(n, func, __full_only) \
	[HEADER_##n] = {					\
		.name	    = __stringify(n),			\
		.write	    = write_##func,			\
		.print	    = print_##func,			\
		.full_only  = __full_only,			\
		.process    = process_##func			\
	}

/* feature_ops not implemented: */
#define print_tracing_data	NULL
#define print_build_id		NULL

#define process_branch_stack	NULL
#define process_stat		NULL
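/*
 * For reference, FEAT_OPR(CLOCKID, clockid, false) in the table below
 * expands to the following designated initializer (preprocessor output
 * shown as a sketch):
 *
 *   [HEADER_CLOCKID] = {
 *       .name       = "CLOCKID",
 *       .write      = write_clockid,
 *       .print      = print_clockid,
 *       .full_only  = false,
 *       .process    = process_clockid,
 *       .synthesize = true
 *   }
 *
 * FEAT_OPN() is identical except that it leaves .synthesize unset, so
 * the feature is never emitted as a PERF_RECORD_HEADER_FEATURE event.
 */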
static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
	FEAT_OPN(TRACING_DATA,	tracing_data,	false),
	FEAT_OPN(BUILD_ID,	build_id,	false),
	FEAT_OPR(HOSTNAME,	hostname,	false),
	FEAT_OPR(OSRELEASE,	osrelease,	false),
	FEAT_OPR(VERSION,	version,	false),
	FEAT_OPR(ARCH,		arch,		false),
	FEAT_OPR(NRCPUS,	nrcpus,		false),
	FEAT_OPR(CPUDESC,	cpudesc,	false),
	FEAT_OPR(CPUID,		cpuid,		false),
	FEAT_OPR(TOTAL_MEM,	total_mem,	false),
	FEAT_OPR(EVENT_DESC,	event_desc,	false),
	FEAT_OPR(CMDLINE,	cmdline,	false),
	FEAT_OPR(CPU_TOPOLOGY,	cpu_topology,	true),
	FEAT_OPR(NUMA_TOPOLOGY,	numa_topology,	true),
	FEAT_OPN(BRANCH_STACK,	branch_stack,	false),
	FEAT_OPR(PMU_MAPPINGS,	pmu_mappings,	false),
	FEAT_OPR(GROUP_DESC,	group_desc,	false),
	FEAT_OPN(AUXTRACE,	auxtrace,	false),
	FEAT_OPN(STAT,		stat,		false),
	FEAT_OPN(CACHE,		cache,		true),
	FEAT_OPR(SAMPLE_TIME,	sample_time,	false),
	FEAT_OPR(MEM_TOPOLOGY,	mem_topology,	true),
	FEAT_OPR(CLOCKID,	clockid,	false),
	FEAT_OPN(DIR_FORMAT,	dir_format,	false),
	FEAT_OPR(BPF_PROG_INFO,	bpf_prog_info,	false),
	FEAT_OPR(BPF_BTF,	bpf_btf,	false),
	FEAT_OPR(COMPRESSED,	compressed,	false),
};

struct header_print_data {
	FILE *fp;
	bool full; /* extended list of headers */
};

static int perf_file_section__fprintf_info(struct perf_file_section *section,
					   struct perf_header *ph,
					   int feat, int fd, void *data)
{
	struct header_print_data *hd = data;
	struct feat_fd ff;

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}
	if (feat >= HEADER_LAST_FEATURE) {
		pr_warning("unknown feature %d\n", feat);
		return 0;
	}
	if (!feat_ops[feat].print)
		return 0;

	ff = (struct feat_fd) {
		.fd = fd,
		.ph = ph,
	};

	if (!feat_ops[feat].full_only || hd->full)
		feat_ops[feat].print(&ff, hd->fp);
	else
		fprintf(hd->fp, "# %s info available, use -I to display\n",
			feat_ops[feat].name);

	return 0;
}

int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
{
	struct header_print_data hd;
	struct perf_header *header = &session->header;
	int fd = perf_data__fd(session->data);
	struct stat st;
	time_t stctime;
	int ret, bit;

	hd.fp = fp;
	hd.full = full;

	ret = fstat(fd, &st);
	if (ret == -1)
		return -1;

	stctime = st.st_ctime;
	fprintf(fp, "# captured on    : %s", ctime(&stctime));

	fprintf(fp, "# header version : %u\n", header->version);
	fprintf(fp, "# data offset    : %" PRIu64 "\n", header->data_offset);
	fprintf(fp, "# data size      : %" PRIu64 "\n", header->data_size);
	fprintf(fp, "# feat offset    : %" PRIu64 "\n", header->feat_offset);

	perf_header__process_sections(header, fd, &hd,
				      perf_file_section__fprintf_info);

	if (session->data->is_pipe)
		return 0;

	fprintf(fp, "# missing features: ");
	for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) {
		if (bit)
			fprintf(fp, "%s ", feat_ops[bit].name);
	}

	fprintf(fp, "\n");
	return 0;
}

static int do_write_feat(struct feat_fd *ff, int type,
			 struct perf_file_section **p,
			 struct evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(ff->ph, type)) {
		if (!feat_ops[type].write)
			return -1;

		if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__))
			return -1;

		(*p)->offset = lseek(ff->fd, 0, SEEK_CUR);

		err = feat_ops[type].write(ff, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %s\n", feat_ops[type].name);

			/* undo anything written */
			lseek(ff->fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}
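/*
 * Feature area layout produced by perf_header__adds_write() below
 * (sketch): a table of perf_file_section descriptors is reserved
 * first, each enabled feature then writes its payload, and finally
 * the table is filled in with the recorded offsets and sizes:
 *
 *   feat_offset: sec[0], sec[1], ...   <- one entry per set feature bit
 *                payload of feature A  <- sec[0] points here
 *                payload of feature B  <- sec[1] points here
 */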
static int perf_header__adds_write(struct perf_header *header,
				   struct evlist *evlist, int fd)
{
	int nr_sections;
	struct feat_fd ff;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	ff = (struct feat_fd){
		.fd = fd,
		.ph = header,
	};

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(nr_sections, sizeof(*feat_sec));
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->feat_offset;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(&ff, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(&ff, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}

int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	struct feat_fd ff;
	int err;

	ff = (struct feat_fd){ .fd = fd };

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}

int perf_session__write_header(struct perf_session *session,
			       struct evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr	f_attr;
	struct perf_header *header = &session->header;
	struct evsel *evsel;
	struct feat_fd ff;
	u64 attr_offset;
	int err;

	ff = (struct feat_fd){ .fd = fd };
	lseek(fd, sizeof(f_header), SEEK_SET);

	evlist__for_each_entry(session->evlist, evsel) {
		evsel->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64));
		if (err < 0) {
			pr_debug("failed to write perf header\n");
			return err;
		}
	}

	attr_offset = lseek(ff.fd, 0, SEEK_CUR);

	evlist__for_each_entry(evlist, evsel) {
		f_attr = (struct perf_file_attr){
			.attr = evsel->core.attr,
			.ids  = {
				.offset = evsel->id_offset,
				.size   = evsel->ids * sizeof(u64),
			}
		};
		err = do_write(&ff, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	if (!header->data_offset)
		header->data_offset = lseek(fd, 0, SEEK_CUR);
	header->feat_offset = header->data_offset + header->data_size;

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = attr_offset,
			.size   = evlist->core.nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		/* event_types is ignored, store zeros */
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(&ff, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	return 0;
}
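/*
 * Resulting perf.data layout (sketch of what perf_session__write_header()
 * above lays down; the offsets are recorded in struct perf_file_header):
 *
 *   0:                       file header (magic "PERFILE2", offsets)
 *   sizeof(f_header):        per-event sample id arrays
 *   attrs.offset:            table of struct perf_file_attr, .ids -> arrays
 *   data.offset:             event stream
 *   data.offset + data.size: feature sections (see the sketch above)
 */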
static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}

int perf_header__process_sections(struct perf_header *header, int fd,
				  void *data,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd, void *data))
{
	struct perf_file_section *feat_sec, *sec;
	int nr_sections;
	int sec_size;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec));
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->feat_offset, SEEK_SET);

	err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
	if (err < 0)
		goto out_free;

	for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) {
		err = process(sec++, header, feat, fd, data);
		if (err < 0)
			goto out_free;
	}
	err = 0;
out_free:
	free(feat_sec);
	return err;
}

static const int attr_file_abi_sizes[] = {
	[0] = PERF_ATTR_SIZE_VER0,
	[1] = PERF_ATTR_SIZE_VER1,
	[2] = PERF_ATTR_SIZE_VER2,
	[3] = PERF_ATTR_SIZE_VER3,
	[4] = PERF_ATTR_SIZE_VER4,
	0,
};

/*
 * In the legacy file format, the magic number does not encode endianness;
 * the header size (hdr_sz) was used for that. But since hdr_sz also varies
 * with the ABI revision, we have to try every known ABI size in both byte
 * orders to detect the endianness.
 */
static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	uint64_t ref_size, attr_size;
	int i;

	for (i = 0 ; attr_file_abi_sizes[i]; i++) {
		ref_size = attr_file_abi_sizes[i]
			 + sizeof(struct perf_file_section);
		if (hdr_sz != ref_size) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != ref_size)
				continue;

			ph->needs_swap = true;
		}
		pr_debug("ABI%d perf.data file detected, need_swap=%d\n",
			 i,
			 ph->needs_swap);
		return 0;
	}
	/* could not determine endianness */
	return -1;
}
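/*
 * Minimal sketch (not used by perf) of the probe try_all_file_abis()
 * applies per candidate ABI size; the helper name is made up:
 */
static bool __maybe_unused example_hdr_sz_matches(u64 hdr_sz, u64 ref_size,
						  bool *needs_swap)
{
	if (hdr_sz == ref_size) {
		*needs_swap = false;	/* written with our byte order */
		return true;
	}
	if (bswap_64(hdr_sz) == ref_size) {
		*needs_swap = true;	/* written on an opposite-endian host */
		return true;
	}
	return false;			/* try the next ABI revision */
}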
#define PERF_PIPE_HDR_VER0	16

static const size_t attr_pipe_abi_sizes[] = {
	[0] = PERF_PIPE_HDR_VER0,
	0,
};

/*
 * In the legacy pipe format, there is an implicit assumption that the
 * endianness of the recording host and of the parsing host is the same.
 * That does not always hold: the pipe output may be redirected into a
 * file and analyzed on a machine with a different endianness and with a
 * perf tool built against different perf_event ABI revisions.
 */
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
	u64 attr_size;
	int i;

	for (i = 0 ; attr_pipe_abi_sizes[i]; i++) {
		if (hdr_sz != attr_pipe_abi_sizes[i]) {
			attr_size = bswap_64(hdr_sz);
			if (attr_size != attr_pipe_abi_sizes[i])
				continue;

			ph->needs_swap = true;
		}
		pr_debug("Pipe ABI%d perf.data file detected\n", i);
		return 0;
	}
	return -1;
}

bool is_perf_magic(u64 magic)
{
	if (!memcmp(&magic, __perf_magic1, sizeof(magic))
		|| magic == __perf_magic2
		|| magic == __perf_magic2_sw)
		return true;

	return false;
}

static int check_magic_endian(u64 magic, uint64_t hdr_sz,
			      bool is_pipe, struct perf_header *ph)
{
	int ret;

	/* check for legacy format */
	ret = memcmp(&magic, __perf_magic1, sizeof(magic));
	if (ret == 0) {
		ph->version = PERF_HEADER_VERSION_1;
		pr_debug("legacy perf.data format\n");
		if (is_pipe)
			return try_all_pipe_abis(hdr_sz, ph);

		return try_all_file_abis(hdr_sz, ph);
	}
	/*
	 * the new magic number serves two purposes:
	 * - unique number to identify actual perf.data files
	 * - encode endianness of file
	 */
	ph->version = PERF_HEADER_VERSION_2;

	/* check magic number with one endianness */
	if (magic == __perf_magic2)
		return 0;

	/* check magic number with opposite endianness */
	if (magic != __perf_magic2_sw)
		return -1;

	ph->needs_swap = true;

	return 0;
}
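/*
 * Illustrative sketch (not used by perf): the 8 bytes "PERFILE2" read
 * as a u64 compare equal to __perf_magic2 on a host with the writer's
 * byte order and to __perf_magic2_sw on the opposite one, which is
 * exactly what check_magic_endian() above relies on. The helper name
 * is made up:
 */
static bool __maybe_unused example_magic_needs_swap(u64 magic)
{
	return magic != __perf_magic2 && bswap_64(magic) == __perf_magic2;
}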
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	ssize_t ret;

	lseek(fd, 0, SEEK_SET);

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic,
			       header->attr_size, false, ph) < 0) {
		pr_debug("magic/endian check failed\n");
		return -1;
	}

	if (ph->needs_swap) {
		mem_bswap_64(header, offsetof(struct perf_file_header,
			     adds_features));
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	} else if (ph->needs_swap) {
		/*
		 * feature bitmap is declared as an array of unsigned longs --
		 * not good since its size can differ between the host that
		 * generated the data file and the host analyzing the file.
		 *
		 * We need to handle endianness, but we don't know the size of
		 * the unsigned long where the file was generated. Take a best
		 * guess at determining it: try a 64-bit swap first (i.e., file
		 * created on a 64-bit host), and check if the hostname feature
		 * bit is set (this feature bit is forced on as of fbe96f2).
		 * If the bit is not set, undo the 64-bit swap and try a 32-bit
		 * swap. If the hostname bit is still not set (e.g., older data
		 * file), punt and fall back to the original behavior --
		 * clearing all feature bits and setting buildid.
		 */
		mem_bswap_64(&header->adds_features,
			     BITS_TO_U64(HEADER_FEAT_BITS));

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			/* unswap as u64 */
			mem_bswap_64(&header->adds_features,
				     BITS_TO_U64(HEADER_FEAT_BITS));

			/* unswap as u32 */
			mem_bswap_32(&header->adds_features,
				     BITS_TO_U32(HEADER_FEAT_BITS));
		}

		if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
			set_bit(HEADER_BUILD_ID, header->adds_features);
		}
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));

	ph->data_offset  = header->data.offset;
	ph->data_size	 = header->data.size;
	ph->feat_offset  = header->data.offset + header->data.size;
	return 0;
}
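/*
 * Worked example for the bitmap heuristic above (illustrative): take a
 * data file written by a 32-bit big-endian perf with HEADER_HOSTNAME
 * (bit 1) set, read on a 64-bit little-endian host. On disk the first
 * bitmap bytes are 00 00 00 02 | 00 00 00 00. Swapping them as one u64
 * yields 0x0000000200000000, i.e. bit 33 -- the hostname test fails.
 * Undoing that and re-swapping as two u32s yields 0x0000000000000002,
 * bit 1 -- the test succeeds and the 32-bit guess wins.
 */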
static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd, void *data)
{
	struct feat_fd fdd = {
		.fd	= fd,
		.ph	= ph,
		.size	= section->size,
		.offset	= section->offset,
	};

	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	if (feat >= HEADER_LAST_FEATURE) {
		pr_debug("unknown feature %d, continuing...\n", feat);
		return 0;
	}

	if (!feat_ops[feat].process)
		return 0;

	return feat_ops[feat].process(&fdd, data);
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	struct feat_fd ff = {
		.fd = STDOUT_FILENO,
		.ph = ph,
	};
	ssize_t ret;

	ret = readn(fd, header, sizeof(*header));
	if (ret <= 0)
		return -1;

	if (check_magic_endian(header->magic, header->size, true, ph) < 0) {
		pr_debug("endian/magic failed\n");
		return -1;
	}

	if (ph->needs_swap)
		header->size = bswap_64(header->size);

	if (repipe && do_write(&ff, header, sizeof(*header)) < 0)
		return -1;

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header,
					perf_data__fd(session->data),
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	return 0;
}

static int read_attr(int fd, struct perf_header *ph,
		     struct perf_file_attr *f_attr)
{
	struct perf_event_attr *attr = &f_attr->attr;
	size_t sz, left;
	size_t our_sz = sizeof(f_attr->attr);
	ssize_t ret;

	memset(f_attr, 0, sizeof(*f_attr));

	/* read minimal guaranteed structure */
	ret = readn(fd, attr, PERF_ATTR_SIZE_VER0);
	if (ret <= 0) {
		pr_debug("cannot read %d bytes of header attr\n",
			 PERF_ATTR_SIZE_VER0);
		return -1;
	}

	/* on file perf_event_attr size */
	sz = attr->size;

	if (ph->needs_swap)
		sz = bswap_32(sz);

	if (sz == 0) {
		/* assume ABI0 */
		sz = PERF_ATTR_SIZE_VER0;
	} else if (sz > our_sz) {
		pr_debug("file uses a more recent and unsupported ABI"
			 " (%zu bytes extra)\n", sz - our_sz);
		return -1;
	}
	/* what we have not yet read and that we know about */
	left = sz - PERF_ATTR_SIZE_VER0;
	if (left) {
		void *ptr = attr;
		ptr += PERF_ATTR_SIZE_VER0;

		ret = readn(fd, ptr, left);
		if (ret <= 0)
			return -1;
	}
	/* read perf_file_section, ids are read in caller */
	ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids));

	return ret <= 0 ? -1 : 0;
}

static int perf_evsel__prepare_tracepoint_event(struct evsel *evsel,
						struct tep_handle *pevent)
{
	struct tep_event *event;
	char bf[128];

	/* already prepared */
	if (evsel->tp_format)
		return 0;

	if (pevent == NULL) {
		pr_debug("broken or missing trace data\n");
		return -1;
	}

	event = tep_find_event(pevent, evsel->core.attr.config);
	if (event == NULL) {
		pr_debug("cannot find event format for %d\n", (int)evsel->core.attr.config);
		return -1;
	}

	if (!evsel->name) {
		snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
		evsel->name = strdup(bf);
		if (evsel->name == NULL)
			return -1;
	}

	evsel->tp_format = event;
	return 0;
}

static int perf_evlist__prepare_tracepoint_events(struct evlist *evlist,
						  struct tep_handle *pevent)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    perf_evsel__prepare_tracepoint_event(pos, pevent))
			return -1;
	}

	return 0;
}
int perf_session__read_header(struct perf_session *session)
{
	struct perf_data *data = session->data;
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;
	int fd = perf_data__fd(data);

	session->evlist = evlist__new();
	if (session->evlist == NULL)
		return -ENOMEM;

	session->evlist->env = &header->env;
	session->machines.host.env = &header->env;
	if (perf_data__is_pipe(data))
		return perf_header__read_pipe(session);

	if (perf_file_header__read(&f_header, header, fd) < 0)
		return -EINVAL;

	/*
	 * Sanity check that perf.data was written cleanly; data size is
	 * initialized to 0 and updated only if the on_exit function is run.
	 * If data size is still 0 then the file contains only partial
	 * information. Just warn the user and process as much as we can.
	 */
	if (f_header.data.size == 0) {
		pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n"
			   "Was the 'perf record' command properly terminated?\n",
			   data->file.path);
	}

	if (f_header.attr_size == 0) {
		pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
		       "Was the 'perf record' command properly terminated?\n",
		       data->file.path);
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / f_header.attr_size;
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct evsel *evsel;
		off_t tmp;

		if (read_attr(fd, header, &f_attr) < 0)
			goto out_errno;

		if (header->needs_swap) {
			f_attr.ids.size   = bswap_64(f_attr.ids.size);
			f_attr.ids.offset = bswap_64(f_attr.ids.offset);
			perf_event__attr_swap(&f_attr.attr);
		}

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = evsel__new(&f_attr.attr);

		if (evsel == NULL)
			goto out_delete_evlist;

		evsel->needs_swap = header->needs_swap;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at evlist__delete().
		 */
		evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	perf_header__process_sections(header, fd, &session->tevent,
				      perf_file_section__process);

	if (perf_evlist__prepare_tracepoint_events(session->evlist,
						   session->tevent.pevent))
		goto out_delete_evlist;

	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

int perf_event__synthesize_attr(struct perf_tool *tool,
				struct perf_event_attr *attr, u32 ids, u64 *id,
				perf_event__handler_t process)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = PERF_ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = zalloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = (u16)size;

	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;

	free(ev);

	return err;
}
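/*
 * Note on the size check in perf_event__synthesize_attr() above:
 * perf_event_header.size is a u16, so an attr record is limited to
 * 64KiB. E.g. with a hypothetical 128-byte perf_event_attr, somewhere
 * past ~8100 ids (8 bytes each) the u16 would wrap; the comparison
 * catches that and returns -E2BIG instead of emitting a truncated
 * record.
 */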
int perf_event__synthesize_features(struct perf_tool *tool,
				    struct perf_session *session,
				    struct evlist *evlist,
				    perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct feat_fd ff;
	struct perf_record_header_feature *fe;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;
	ff.ph = &session->header;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header %d\n", feat);
			continue;
		}

		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id = HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}

int perf_event__process_feature(struct perf_session *session,
				union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct feat_fd ff = { .fd = 0 };
	struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event;
	int type = fe->header.type;
	u64 feat = fe->feat_id;

	if (type < 0 || type >= PERF_RECORD_HEADER_MAX) {
		pr_warning("invalid record type %d in pipe-mode\n", type);
		return 0;
	}
	if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
		pr_warning("invalid feature id %d in pipe-mode\n", (int)feat);
		return -1;
	}

	if (!feat_ops[feat].process)
		return 0;

	ff.buf  = (void *)fe->data;
	ff.size = event->header.size - sizeof(*fe);
	ff.ph = &session->header;

	if (feat_ops[feat].process(&ff, NULL))
		return -1;

	if (!feat_ops[feat].print || !tool->show_feat_hdr)
		return 0;

	if (!feat_ops[feat].full_only ||
	    tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) {
		feat_ops[feat].print(&ff, stdout);
	} else {
		fprintf(stdout, "# %s info available, use -I to display\n",
			feat_ops[feat].name);
	}

	return 0;
}

static struct perf_record_event_update *
event_update_event__new(size_t size, u64 type, u64 id)
{
	struct perf_record_event_update *ev;

	size += sizeof(*ev);
	size  = PERF_ALIGN(size, sizeof(u64));

	ev = zalloc(size);
	if (ev) {
		ev->header.type = PERF_RECORD_EVENT_UPDATE;
		ev->header.size = (u16)size;
		ev->type = type;
		ev->id = id;
	}
	return ev;
}

int
perf_event__synthesize_event_update_unit(struct perf_tool *tool,
					 struct evsel *evsel,
					 perf_event__handler_t process)
{
	struct perf_record_event_update *ev;
	size_t size = strlen(evsel->unit);
	int err;

	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->data, evsel->unit, size + 1);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}
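/*
 * PERF_RECORD_EVENT_UPDATE payloads (sketch, derived from the
 * synthesizers here and the parser in perf_event__process_event_update()
 * below): ev->type selects the interpretation of ev->data --
 *
 *   PERF_EVENT_UPDATE__UNIT / __NAME: NUL-terminated string
 *   PERF_EVENT_UPDATE__SCALE:         struct perf_record_event_update_scale
 *   PERF_EVENT_UPDATE__CPUS:          struct perf_record_event_update_cpus
 */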
int
perf_event__synthesize_event_update_scale(struct perf_tool *tool,
					  struct evsel *evsel,
					  perf_event__handler_t process)
{
	struct perf_record_event_update *ev;
	struct perf_record_event_update_scale *ev_data;
	int err;

	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	ev_data = (struct perf_record_event_update_scale *)ev->data;
	ev_data->scale = evsel->scale;
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_name(struct perf_tool *tool,
					 struct evsel *evsel,
					 perf_event__handler_t process)
{
	struct perf_record_event_update *ev;
	size_t len = strlen(evsel->name);
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->data, evsel->name, len + 1);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int
perf_event__synthesize_event_update_cpus(struct perf_tool *tool,
					 struct evsel *evsel,
					 perf_event__handler_t process)
{
	size_t size = sizeof(struct perf_record_event_update);
	struct perf_record_event_update *ev;
	int max, err;
	u16 type;

	if (!evsel->core.own_cpus)
		return 0;

	ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
	if (!ev)
		return -ENOMEM;

	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)size;
	ev->type	= PERF_EVENT_UPDATE__CPUS;
	ev->id		= evsel->id[0];

	cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
				 evsel->core.own_cpus, type, max);

	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}
unknown type\n"); 3941 break; 3942 } 3943 3944 return ret; 3945 } 3946 3947 int perf_event__synthesize_attrs(struct perf_tool *tool, 3948 struct evlist *evlist, 3949 perf_event__handler_t process) 3950 { 3951 struct evsel *evsel; 3952 int err = 0; 3953 3954 evlist__for_each_entry(evlist, evsel) { 3955 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->ids, 3956 evsel->id, process); 3957 if (err) { 3958 pr_debug("failed to create perf header attribute\n"); 3959 return err; 3960 } 3961 } 3962 3963 return err; 3964 } 3965 3966 static bool has_unit(struct evsel *counter) 3967 { 3968 return counter->unit && *counter->unit; 3969 } 3970 3971 static bool has_scale(struct evsel *counter) 3972 { 3973 return counter->scale != 1; 3974 } 3975 3976 int perf_event__synthesize_extra_attr(struct perf_tool *tool, 3977 struct evlist *evsel_list, 3978 perf_event__handler_t process, 3979 bool is_pipe) 3980 { 3981 struct evsel *counter; 3982 int err; 3983 3984 /* 3985 * Synthesize other events stuff not carried within 3986 * attr event - unit, scale, name 3987 */ 3988 evlist__for_each_entry(evsel_list, counter) { 3989 if (!counter->supported) 3990 continue; 3991 3992 /* 3993 * Synthesize unit and scale only if it's defined. 3994 */ 3995 if (has_unit(counter)) { 3996 err = perf_event__synthesize_event_update_unit(tool, counter, process); 3997 if (err < 0) { 3998 pr_err("Couldn't synthesize evsel unit.\n"); 3999 return err; 4000 } 4001 } 4002 4003 if (has_scale(counter)) { 4004 err = perf_event__synthesize_event_update_scale(tool, counter, process); 4005 if (err < 0) { 4006 pr_err("Couldn't synthesize evsel counter.\n"); 4007 return err; 4008 } 4009 } 4010 4011 if (counter->core.own_cpus) { 4012 err = perf_event__synthesize_event_update_cpus(tool, counter, process); 4013 if (err < 0) { 4014 pr_err("Couldn't synthesize evsel cpus.\n"); 4015 return err; 4016 } 4017 } 4018 4019 /* 4020 * Name is needed only for pipe output, 4021 * perf.data carries event names. 4022 */ 4023 if (is_pipe) { 4024 err = perf_event__synthesize_event_update_name(tool, counter, process); 4025 if (err < 0) { 4026 pr_err("Couldn't synthesize evsel name.\n"); 4027 return err; 4028 } 4029 } 4030 } 4031 return 0; 4032 } 4033 4034 int perf_event__process_attr(struct perf_tool *tool __maybe_unused, 4035 union perf_event *event, 4036 struct evlist **pevlist) 4037 { 4038 u32 i, ids, n_ids; 4039 struct evsel *evsel; 4040 struct evlist *evlist = *pevlist; 4041 4042 if (evlist == NULL) { 4043 *pevlist = evlist = evlist__new(); 4044 if (evlist == NULL) 4045 return -ENOMEM; 4046 } 4047 4048 evsel = evsel__new(&event->attr.attr); 4049 if (evsel == NULL) 4050 return -ENOMEM; 4051 4052 evlist__add(evlist, evsel); 4053 4054 ids = event->header.size; 4055 ids -= (void *)&event->attr.id - (void *)event; 4056 n_ids = ids / sizeof(u64); 4057 /* 4058 * We don't have the cpu and thread maps on the header, so 4059 * for allocating the perf_sample_id table we fake 1 cpu and 4060 * hattr->ids threads. 
int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct evlist **pevlist)
{
	u32 i, ids, n_ids;
	struct evsel *evsel;
	struct evlist *evlist = *pevlist;

	if (evlist == NULL) {
		*pevlist = evlist = evlist__new();
		if (evlist == NULL)
			return -ENOMEM;
	}

	evsel = evsel__new(&event->attr.attr);
	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]);
	}

	return 0;
}

int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct evlist **pevlist)
{
	struct perf_record_event_update *ev = &event->event_update;
	struct perf_record_event_update_scale *ev_scale;
	struct perf_record_event_update_cpus *ev_cpus;
	struct evlist *evlist;
	struct evsel *evsel;
	struct perf_cpu_map *map;

	if (!pevlist || *pevlist == NULL)
		return -EINVAL;

	evlist = *pevlist;

	evsel = perf_evlist__id2evsel(evlist, ev->id);
	if (evsel == NULL)
		return -EINVAL;

	switch (ev->type) {
	case PERF_EVENT_UPDATE__UNIT:
		evsel->unit = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__NAME:
		evsel->name = strdup(ev->data);
		break;
	case PERF_EVENT_UPDATE__SCALE:
		ev_scale = (struct perf_record_event_update_scale *)ev->data;
		evsel->scale = ev_scale->scale;
		break;
	case PERF_EVENT_UPDATE__CPUS:
		ev_cpus = (struct perf_record_event_update_cpus *)ev->data;

		map = cpu_map__new_data(&ev_cpus->cpus);
		if (map)
			evsel->core.own_cpus = map;
		else
			pr_err("failed to get event_update cpus\n");
		break;
	default:
		break;
	}

	return 0;
}
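/*
 * Pipe-mode flow of perf_event__synthesize_tracing_data() below
 * (sketch): the pipe cannot be rewound to patch in a size after the
 * fact, so the tracing data is first staged in a temp file by
 * tracing_data_get(), its (u64-aligned) size is announced in a
 * PERF_RECORD_HEADER_TRACING_DATA event, and tracing_data_put() then
 * streams the staged bytes into the pipe, followed by the alignment
 * padding.
 */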
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
					struct evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;
	int err __maybe_unused = 0;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->core.entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}

int perf_event__process_tracing_data(struct perf_session *session,
				     union perf_event *event)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	int fd = perf_data__fd(session->data);
	off_t offset = lseek(fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(fd, offset + sizeof(struct perf_record_header_tracing_data),
	      SEEK_SET);

	size_read = trace_report(fd, &session->tevent,
				 session->repipe);
	padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read;

	if (readn(fd, buf, padding) < 0) {
		pr_err("%s: reading input file", __func__);
		return -1;
	}
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding) {
			pr_err("%s: repiping tracing data padding", __func__);
			return -1;
		}
	}

	if (size_read + padding != size) {
		pr_err("%s: tracing data size mismatch", __func__);
		return -1;
	}

	perf_evlist__prepare_tracepoint_events(session->evlist,
					       session->tevent.pevent);

	return size_read + padding;
}

int perf_event__synthesize_build_id(struct perf_tool *tool,
				    struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(tool, &ev, NULL, machine);

	return err;
}

int perf_event__process_build_id(struct perf_session *session,
				 union perf_event *event)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}
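/*
 * HEADER_BUILD_ID record layout, sketched from
 * perf_event__synthesize_build_id() above: a perf_event_header carrying
 * misc and pid, the raw build id bytes, then the dso long name padded
 * with NULs up to NAME_ALIGN so header.size stays u64 aligned.
 */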