1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include "util.h" 5 #include "string2.h" 6 #include <sys/param.h> 7 #include <sys/types.h> 8 #include <byteswap.h> 9 #include <unistd.h> 10 #include <stdio.h> 11 #include <stdlib.h> 12 #include <linux/compiler.h> 13 #include <linux/list.h> 14 #include <linux/kernel.h> 15 #include <linux/bitops.h> 16 #include <linux/stringify.h> 17 #include <sys/stat.h> 18 #include <sys/utsname.h> 19 #include <linux/time64.h> 20 #include <dirent.h> 21 22 #include "evlist.h" 23 #include "evsel.h" 24 #include "header.h" 25 #include "memswap.h" 26 #include "../perf.h" 27 #include "trace-event.h" 28 #include "session.h" 29 #include "symbol.h" 30 #include "debug.h" 31 #include "cpumap.h" 32 #include "pmu.h" 33 #include "vdso.h" 34 #include "strbuf.h" 35 #include "build-id.h" 36 #include "data.h" 37 #include <api/fs/fs.h> 38 #include "asm/bug.h" 39 #include "tool.h" 40 #include "time-utils.h" 41 #include "units.h" 42 43 #include "sane_ctype.h" 44 45 /* 46 * magic2 = "PERFILE2" 47 * must be a numerical value to let the endianness 48 * determine the memory layout. That way we are able 49 * to detect endianness when reading the perf.data file 50 * back. 51 * 52 * we check for legacy (PERFFILE) format. 53 */ 54 static const char *__perf_magic1 = "PERFFILE"; 55 static const u64 __perf_magic2 = 0x32454c4946524550ULL; 56 static const u64 __perf_magic2_sw = 0x50455246494c4532ULL; 57 58 #define PERF_MAGIC __perf_magic2 59 60 const char perf_version_string[] = PERF_VERSION; 61 62 struct perf_file_attr { 63 struct perf_event_attr attr; 64 struct perf_file_section ids; 65 }; 66 67 struct feat_fd { 68 struct perf_header *ph; 69 int fd; 70 void *buf; /* Either buf != NULL or fd >= 0 */ 71 ssize_t offset; 72 size_t size; 73 struct perf_evsel *events; 74 }; 75 76 void perf_header__set_feat(struct perf_header *header, int feat) 77 { 78 set_bit(feat, header->adds_features); 79 } 80 81 void perf_header__clear_feat(struct perf_header *header, int feat) 82 { 83 clear_bit(feat, header->adds_features); 84 } 85 86 bool perf_header__has_feat(const struct perf_header *header, int feat) 87 { 88 return test_bit(feat, header->adds_features); 89 } 90 91 static int __do_write_fd(struct feat_fd *ff, const void *buf, size_t size) 92 { 93 ssize_t ret = writen(ff->fd, buf, size); 94 95 if (ret != (ssize_t)size) 96 return ret < 0 ? (int)ret : -1; 97 return 0; 98 } 99 100 static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size) 101 { 102 /* struct perf_event_header::size is u16 */ 103 const size_t max_size = 0xffff - sizeof(struct perf_event_header); 104 size_t new_size = ff->size; 105 void *addr; 106 107 if (size + ff->offset > max_size) 108 return -E2BIG; 109 110 while (size > (new_size - ff->offset)) 111 new_size <<= 1; 112 new_size = min(max_size, new_size); 113 114 if (ff->size < new_size) { 115 addr = realloc(ff->buf, new_size); 116 if (!addr) 117 return -ENOMEM; 118 ff->buf = addr; 119 ff->size = new_size; 120 } 121 122 memcpy(ff->buf + ff->offset, buf, size); 123 ff->offset += size; 124 125 return 0; 126 } 127 128 /* Return: 0 if succeded, -ERR if failed. */ 129 int do_write(struct feat_fd *ff, const void *buf, size_t size) 130 { 131 if (!ff->buf) 132 return __do_write_fd(ff, buf, size); 133 return __do_write_buf(ff, buf, size); 134 } 135 136 /* Return: 0 if succeded, -ERR if failed. */ 137 static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size) 138 { 139 u64 *p = (u64 *) set; 140 int i, ret; 141 142 ret = do_write(ff, &size, sizeof(size)); 143 if (ret < 0) 144 return ret; 145 146 for (i = 0; (u64) i < BITS_TO_U64(size); i++) { 147 ret = do_write(ff, p + i, sizeof(*p)); 148 if (ret < 0) 149 return ret; 150 } 151 152 return 0; 153 } 154 155 /* Return: 0 if succeded, -ERR if failed. */ 156 int write_padded(struct feat_fd *ff, const void *bf, 157 size_t count, size_t count_aligned) 158 { 159 static const char zero_buf[NAME_ALIGN]; 160 int err = do_write(ff, bf, count); 161 162 if (!err) 163 err = do_write(ff, zero_buf, count_aligned - count); 164 165 return err; 166 } 167 168 #define string_size(str) \ 169 (PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32)) 170 171 /* Return: 0 if succeded, -ERR if failed. */ 172 static int do_write_string(struct feat_fd *ff, const char *str) 173 { 174 u32 len, olen; 175 int ret; 176 177 olen = strlen(str) + 1; 178 len = PERF_ALIGN(olen, NAME_ALIGN); 179 180 /* write len, incl. \0 */ 181 ret = do_write(ff, &len, sizeof(len)); 182 if (ret < 0) 183 return ret; 184 185 return write_padded(ff, str, olen, len); 186 } 187 188 static int __do_read_fd(struct feat_fd *ff, void *addr, ssize_t size) 189 { 190 ssize_t ret = readn(ff->fd, addr, size); 191 192 if (ret != size) 193 return ret < 0 ? (int)ret : -1; 194 return 0; 195 } 196 197 static int __do_read_buf(struct feat_fd *ff, void *addr, ssize_t size) 198 { 199 if (size > (ssize_t)ff->size - ff->offset) 200 return -1; 201 202 memcpy(addr, ff->buf + ff->offset, size); 203 ff->offset += size; 204 205 return 0; 206 207 } 208 209 static int __do_read(struct feat_fd *ff, void *addr, ssize_t size) 210 { 211 if (!ff->buf) 212 return __do_read_fd(ff, addr, size); 213 return __do_read_buf(ff, addr, size); 214 } 215 216 static int do_read_u32(struct feat_fd *ff, u32 *addr) 217 { 218 int ret; 219 220 ret = __do_read(ff, addr, sizeof(*addr)); 221 if (ret) 222 return ret; 223 224 if (ff->ph->needs_swap) 225 *addr = bswap_32(*addr); 226 return 0; 227 } 228 229 static int do_read_u64(struct feat_fd *ff, u64 *addr) 230 { 231 int ret; 232 233 ret = __do_read(ff, addr, sizeof(*addr)); 234 if (ret) 235 return ret; 236 237 if (ff->ph->needs_swap) 238 *addr = bswap_64(*addr); 239 return 0; 240 } 241 242 static char *do_read_string(struct feat_fd *ff) 243 { 244 u32 len; 245 char *buf; 246 247 if (do_read_u32(ff, &len)) 248 return NULL; 249 250 buf = malloc(len); 251 if (!buf) 252 return NULL; 253 254 if (!__do_read(ff, buf, len)) { 255 /* 256 * strings are padded by zeroes 257 * thus the actual strlen of buf 258 * may be less than len 259 */ 260 return buf; 261 } 262 263 free(buf); 264 return NULL; 265 } 266 267 /* Return: 0 if succeded, -ERR if failed. */ 268 static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize) 269 { 270 unsigned long *set; 271 u64 size, *p; 272 int i, ret; 273 274 ret = do_read_u64(ff, &size); 275 if (ret) 276 return ret; 277 278 set = bitmap_alloc(size); 279 if (!set) 280 return -ENOMEM; 281 282 p = (u64 *) set; 283 284 for (i = 0; (u64) i < BITS_TO_U64(size); i++) { 285 ret = do_read_u64(ff, p + i); 286 if (ret < 0) { 287 free(set); 288 return ret; 289 } 290 } 291 292 *pset = set; 293 *psize = size; 294 return 0; 295 } 296 297 static int write_tracing_data(struct feat_fd *ff, 298 struct perf_evlist *evlist) 299 { 300 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 301 return -1; 302 303 return read_tracing_data(ff->fd, &evlist->entries); 304 } 305 306 static int write_build_id(struct feat_fd *ff, 307 struct perf_evlist *evlist __maybe_unused) 308 { 309 struct perf_session *session; 310 int err; 311 312 session = container_of(ff->ph, struct perf_session, header); 313 314 if (!perf_session__read_build_ids(session, true)) 315 return -1; 316 317 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 318 return -1; 319 320 err = perf_session__write_buildid_table(session, ff); 321 if (err < 0) { 322 pr_debug("failed to write buildid table\n"); 323 return err; 324 } 325 perf_session__cache_build_ids(session); 326 327 return 0; 328 } 329 330 static int write_hostname(struct feat_fd *ff, 331 struct perf_evlist *evlist __maybe_unused) 332 { 333 struct utsname uts; 334 int ret; 335 336 ret = uname(&uts); 337 if (ret < 0) 338 return -1; 339 340 return do_write_string(ff, uts.nodename); 341 } 342 343 static int write_osrelease(struct feat_fd *ff, 344 struct perf_evlist *evlist __maybe_unused) 345 { 346 struct utsname uts; 347 int ret; 348 349 ret = uname(&uts); 350 if (ret < 0) 351 return -1; 352 353 return do_write_string(ff, uts.release); 354 } 355 356 static int write_arch(struct feat_fd *ff, 357 struct perf_evlist *evlist __maybe_unused) 358 { 359 struct utsname uts; 360 int ret; 361 362 ret = uname(&uts); 363 if (ret < 0) 364 return -1; 365 366 return do_write_string(ff, uts.machine); 367 } 368 369 static int write_version(struct feat_fd *ff, 370 struct perf_evlist *evlist __maybe_unused) 371 { 372 return do_write_string(ff, perf_version_string); 373 } 374 375 static int __write_cpudesc(struct feat_fd *ff, const char *cpuinfo_proc) 376 { 377 FILE *file; 378 char *buf = NULL; 379 char *s, *p; 380 const char *search = cpuinfo_proc; 381 size_t len = 0; 382 int ret = -1; 383 384 if (!search) 385 return -1; 386 387 file = fopen("/proc/cpuinfo", "r"); 388 if (!file) 389 return -1; 390 391 while (getline(&buf, &len, file) > 0) { 392 ret = strncmp(buf, search, strlen(search)); 393 if (!ret) 394 break; 395 } 396 397 if (ret) { 398 ret = -1; 399 goto done; 400 } 401 402 s = buf; 403 404 p = strchr(buf, ':'); 405 if (p && *(p+1) == ' ' && *(p+2)) 406 s = p + 2; 407 p = strchr(s, '\n'); 408 if (p) 409 *p = '\0'; 410 411 /* squash extra space characters (branding string) */ 412 p = s; 413 while (*p) { 414 if (isspace(*p)) { 415 char *r = p + 1; 416 char *q = r; 417 *p = ' '; 418 while (*q && isspace(*q)) 419 q++; 420 if (q != (p+1)) 421 while ((*r++ = *q++)); 422 } 423 p++; 424 } 425 ret = do_write_string(ff, s); 426 done: 427 free(buf); 428 fclose(file); 429 return ret; 430 } 431 432 static int write_cpudesc(struct feat_fd *ff, 433 struct perf_evlist *evlist __maybe_unused) 434 { 435 const char *cpuinfo_procs[] = CPUINFO_PROC; 436 unsigned int i; 437 438 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) { 439 int ret; 440 ret = __write_cpudesc(ff, cpuinfo_procs[i]); 441 if (ret >= 0) 442 return ret; 443 } 444 return -1; 445 } 446 447 448 static int write_nrcpus(struct feat_fd *ff, 449 struct perf_evlist *evlist __maybe_unused) 450 { 451 long nr; 452 u32 nrc, nra; 453 int ret; 454 455 nrc = cpu__max_present_cpu(); 456 457 nr = sysconf(_SC_NPROCESSORS_ONLN); 458 if (nr < 0) 459 return -1; 460 461 nra = (u32)(nr & UINT_MAX); 462 463 ret = do_write(ff, &nrc, sizeof(nrc)); 464 if (ret < 0) 465 return ret; 466 467 return do_write(ff, &nra, sizeof(nra)); 468 } 469 470 static int write_event_desc(struct feat_fd *ff, 471 struct perf_evlist *evlist) 472 { 473 struct perf_evsel *evsel; 474 u32 nre, nri, sz; 475 int ret; 476 477 nre = evlist->nr_entries; 478 479 /* 480 * write number of events 481 */ 482 ret = do_write(ff, &nre, sizeof(nre)); 483 if (ret < 0) 484 return ret; 485 486 /* 487 * size of perf_event_attr struct 488 */ 489 sz = (u32)sizeof(evsel->attr); 490 ret = do_write(ff, &sz, sizeof(sz)); 491 if (ret < 0) 492 return ret; 493 494 evlist__for_each_entry(evlist, evsel) { 495 ret = do_write(ff, &evsel->attr, sz); 496 if (ret < 0) 497 return ret; 498 /* 499 * write number of unique id per event 500 * there is one id per instance of an event 501 * 502 * copy into an nri to be independent of the 503 * type of ids, 504 */ 505 nri = evsel->ids; 506 ret = do_write(ff, &nri, sizeof(nri)); 507 if (ret < 0) 508 return ret; 509 510 /* 511 * write event string as passed on cmdline 512 */ 513 ret = do_write_string(ff, perf_evsel__name(evsel)); 514 if (ret < 0) 515 return ret; 516 /* 517 * write unique ids for this event 518 */ 519 ret = do_write(ff, evsel->id, evsel->ids * sizeof(u64)); 520 if (ret < 0) 521 return ret; 522 } 523 return 0; 524 } 525 526 static int write_cmdline(struct feat_fd *ff, 527 struct perf_evlist *evlist __maybe_unused) 528 { 529 char buf[MAXPATHLEN]; 530 u32 n; 531 int i, ret; 532 533 /* actual path to perf binary */ 534 ret = readlink("/proc/self/exe", buf, sizeof(buf) - 1); 535 if (ret <= 0) 536 return -1; 537 538 /* readlink() does not add null termination */ 539 buf[ret] = '\0'; 540 541 /* account for binary path */ 542 n = perf_env.nr_cmdline + 1; 543 544 ret = do_write(ff, &n, sizeof(n)); 545 if (ret < 0) 546 return ret; 547 548 ret = do_write_string(ff, buf); 549 if (ret < 0) 550 return ret; 551 552 for (i = 0 ; i < perf_env.nr_cmdline; i++) { 553 ret = do_write_string(ff, perf_env.cmdline_argv[i]); 554 if (ret < 0) 555 return ret; 556 } 557 return 0; 558 } 559 560 #define CORE_SIB_FMT \ 561 "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list" 562 #define THRD_SIB_FMT \ 563 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list" 564 565 struct cpu_topo { 566 u32 cpu_nr; 567 u32 core_sib; 568 u32 thread_sib; 569 char **core_siblings; 570 char **thread_siblings; 571 }; 572 573 static int build_cpu_topo(struct cpu_topo *tp, int cpu) 574 { 575 FILE *fp; 576 char filename[MAXPATHLEN]; 577 char *buf = NULL, *p; 578 size_t len = 0; 579 ssize_t sret; 580 u32 i = 0; 581 int ret = -1; 582 583 sprintf(filename, CORE_SIB_FMT, cpu); 584 fp = fopen(filename, "r"); 585 if (!fp) 586 goto try_threads; 587 588 sret = getline(&buf, &len, fp); 589 fclose(fp); 590 if (sret <= 0) 591 goto try_threads; 592 593 p = strchr(buf, '\n'); 594 if (p) 595 *p = '\0'; 596 597 for (i = 0; i < tp->core_sib; i++) { 598 if (!strcmp(buf, tp->core_siblings[i])) 599 break; 600 } 601 if (i == tp->core_sib) { 602 tp->core_siblings[i] = buf; 603 tp->core_sib++; 604 buf = NULL; 605 len = 0; 606 } 607 ret = 0; 608 609 try_threads: 610 sprintf(filename, THRD_SIB_FMT, cpu); 611 fp = fopen(filename, "r"); 612 if (!fp) 613 goto done; 614 615 if (getline(&buf, &len, fp) <= 0) 616 goto done; 617 618 p = strchr(buf, '\n'); 619 if (p) 620 *p = '\0'; 621 622 for (i = 0; i < tp->thread_sib; i++) { 623 if (!strcmp(buf, tp->thread_siblings[i])) 624 break; 625 } 626 if (i == tp->thread_sib) { 627 tp->thread_siblings[i] = buf; 628 tp->thread_sib++; 629 buf = NULL; 630 } 631 ret = 0; 632 done: 633 if(fp) 634 fclose(fp); 635 free(buf); 636 return ret; 637 } 638 639 static void free_cpu_topo(struct cpu_topo *tp) 640 { 641 u32 i; 642 643 if (!tp) 644 return; 645 646 for (i = 0 ; i < tp->core_sib; i++) 647 zfree(&tp->core_siblings[i]); 648 649 for (i = 0 ; i < tp->thread_sib; i++) 650 zfree(&tp->thread_siblings[i]); 651 652 free(tp); 653 } 654 655 static struct cpu_topo *build_cpu_topology(void) 656 { 657 struct cpu_topo *tp = NULL; 658 void *addr; 659 u32 nr, i; 660 size_t sz; 661 long ncpus; 662 int ret = -1; 663 struct cpu_map *map; 664 665 ncpus = cpu__max_present_cpu(); 666 667 /* build online CPU map */ 668 map = cpu_map__new(NULL); 669 if (map == NULL) { 670 pr_debug("failed to get system cpumap\n"); 671 return NULL; 672 } 673 674 nr = (u32)(ncpus & UINT_MAX); 675 676 sz = nr * sizeof(char *); 677 addr = calloc(1, sizeof(*tp) + 2 * sz); 678 if (!addr) 679 goto out_free; 680 681 tp = addr; 682 tp->cpu_nr = nr; 683 addr += sizeof(*tp); 684 tp->core_siblings = addr; 685 addr += sz; 686 tp->thread_siblings = addr; 687 688 for (i = 0; i < nr; i++) { 689 if (!cpu_map__has(map, i)) 690 continue; 691 692 ret = build_cpu_topo(tp, i); 693 if (ret < 0) 694 break; 695 } 696 697 out_free: 698 cpu_map__put(map); 699 if (ret) { 700 free_cpu_topo(tp); 701 tp = NULL; 702 } 703 return tp; 704 } 705 706 static int write_cpu_topology(struct feat_fd *ff, 707 struct perf_evlist *evlist __maybe_unused) 708 { 709 struct cpu_topo *tp; 710 u32 i; 711 int ret, j; 712 713 tp = build_cpu_topology(); 714 if (!tp) 715 return -1; 716 717 ret = do_write(ff, &tp->core_sib, sizeof(tp->core_sib)); 718 if (ret < 0) 719 goto done; 720 721 for (i = 0; i < tp->core_sib; i++) { 722 ret = do_write_string(ff, tp->core_siblings[i]); 723 if (ret < 0) 724 goto done; 725 } 726 ret = do_write(ff, &tp->thread_sib, sizeof(tp->thread_sib)); 727 if (ret < 0) 728 goto done; 729 730 for (i = 0; i < tp->thread_sib; i++) { 731 ret = do_write_string(ff, tp->thread_siblings[i]); 732 if (ret < 0) 733 break; 734 } 735 736 ret = perf_env__read_cpu_topology_map(&perf_env); 737 if (ret < 0) 738 goto done; 739 740 for (j = 0; j < perf_env.nr_cpus_avail; j++) { 741 ret = do_write(ff, &perf_env.cpu[j].core_id, 742 sizeof(perf_env.cpu[j].core_id)); 743 if (ret < 0) 744 return ret; 745 ret = do_write(ff, &perf_env.cpu[j].socket_id, 746 sizeof(perf_env.cpu[j].socket_id)); 747 if (ret < 0) 748 return ret; 749 } 750 done: 751 free_cpu_topo(tp); 752 return ret; 753 } 754 755 756 757 static int write_total_mem(struct feat_fd *ff, 758 struct perf_evlist *evlist __maybe_unused) 759 { 760 char *buf = NULL; 761 FILE *fp; 762 size_t len = 0; 763 int ret = -1, n; 764 uint64_t mem; 765 766 fp = fopen("/proc/meminfo", "r"); 767 if (!fp) 768 return -1; 769 770 while (getline(&buf, &len, fp) > 0) { 771 ret = strncmp(buf, "MemTotal:", 9); 772 if (!ret) 773 break; 774 } 775 if (!ret) { 776 n = sscanf(buf, "%*s %"PRIu64, &mem); 777 if (n == 1) 778 ret = do_write(ff, &mem, sizeof(mem)); 779 } else 780 ret = -1; 781 free(buf); 782 fclose(fp); 783 return ret; 784 } 785 786 static int write_topo_node(struct feat_fd *ff, int node) 787 { 788 char str[MAXPATHLEN]; 789 char field[32]; 790 char *buf = NULL, *p; 791 size_t len = 0; 792 FILE *fp; 793 u64 mem_total, mem_free, mem; 794 int ret = -1; 795 796 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node); 797 fp = fopen(str, "r"); 798 if (!fp) 799 return -1; 800 801 while (getline(&buf, &len, fp) > 0) { 802 /* skip over invalid lines */ 803 if (!strchr(buf, ':')) 804 continue; 805 if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2) 806 goto done; 807 if (!strcmp(field, "MemTotal:")) 808 mem_total = mem; 809 if (!strcmp(field, "MemFree:")) 810 mem_free = mem; 811 } 812 813 fclose(fp); 814 fp = NULL; 815 816 ret = do_write(ff, &mem_total, sizeof(u64)); 817 if (ret) 818 goto done; 819 820 ret = do_write(ff, &mem_free, sizeof(u64)); 821 if (ret) 822 goto done; 823 824 ret = -1; 825 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node); 826 827 fp = fopen(str, "r"); 828 if (!fp) 829 goto done; 830 831 if (getline(&buf, &len, fp) <= 0) 832 goto done; 833 834 p = strchr(buf, '\n'); 835 if (p) 836 *p = '\0'; 837 838 ret = do_write_string(ff, buf); 839 done: 840 free(buf); 841 if (fp) 842 fclose(fp); 843 return ret; 844 } 845 846 static int write_numa_topology(struct feat_fd *ff, 847 struct perf_evlist *evlist __maybe_unused) 848 { 849 char *buf = NULL; 850 size_t len = 0; 851 FILE *fp; 852 struct cpu_map *node_map = NULL; 853 char *c; 854 u32 nr, i, j; 855 int ret = -1; 856 857 fp = fopen("/sys/devices/system/node/online", "r"); 858 if (!fp) 859 return -1; 860 861 if (getline(&buf, &len, fp) <= 0) 862 goto done; 863 864 c = strchr(buf, '\n'); 865 if (c) 866 *c = '\0'; 867 868 node_map = cpu_map__new(buf); 869 if (!node_map) 870 goto done; 871 872 nr = (u32)node_map->nr; 873 874 ret = do_write(ff, &nr, sizeof(nr)); 875 if (ret < 0) 876 goto done; 877 878 for (i = 0; i < nr; i++) { 879 j = (u32)node_map->map[i]; 880 ret = do_write(ff, &j, sizeof(j)); 881 if (ret < 0) 882 break; 883 884 ret = write_topo_node(ff, i); 885 if (ret < 0) 886 break; 887 } 888 done: 889 free(buf); 890 fclose(fp); 891 cpu_map__put(node_map); 892 return ret; 893 } 894 895 /* 896 * File format: 897 * 898 * struct pmu_mappings { 899 * u32 pmu_num; 900 * struct pmu_map { 901 * u32 type; 902 * char name[]; 903 * }[pmu_num]; 904 * }; 905 */ 906 907 static int write_pmu_mappings(struct feat_fd *ff, 908 struct perf_evlist *evlist __maybe_unused) 909 { 910 struct perf_pmu *pmu = NULL; 911 u32 pmu_num = 0; 912 int ret; 913 914 /* 915 * Do a first pass to count number of pmu to avoid lseek so this 916 * works in pipe mode as well. 917 */ 918 while ((pmu = perf_pmu__scan(pmu))) { 919 if (!pmu->name) 920 continue; 921 pmu_num++; 922 } 923 924 ret = do_write(ff, &pmu_num, sizeof(pmu_num)); 925 if (ret < 0) 926 return ret; 927 928 while ((pmu = perf_pmu__scan(pmu))) { 929 if (!pmu->name) 930 continue; 931 932 ret = do_write(ff, &pmu->type, sizeof(pmu->type)); 933 if (ret < 0) 934 return ret; 935 936 ret = do_write_string(ff, pmu->name); 937 if (ret < 0) 938 return ret; 939 } 940 941 return 0; 942 } 943 944 /* 945 * File format: 946 * 947 * struct group_descs { 948 * u32 nr_groups; 949 * struct group_desc { 950 * char name[]; 951 * u32 leader_idx; 952 * u32 nr_members; 953 * }[nr_groups]; 954 * }; 955 */ 956 static int write_group_desc(struct feat_fd *ff, 957 struct perf_evlist *evlist) 958 { 959 u32 nr_groups = evlist->nr_groups; 960 struct perf_evsel *evsel; 961 int ret; 962 963 ret = do_write(ff, &nr_groups, sizeof(nr_groups)); 964 if (ret < 0) 965 return ret; 966 967 evlist__for_each_entry(evlist, evsel) { 968 if (perf_evsel__is_group_leader(evsel) && 969 evsel->nr_members > 1) { 970 const char *name = evsel->group_name ?: "{anon_group}"; 971 u32 leader_idx = evsel->idx; 972 u32 nr_members = evsel->nr_members; 973 974 ret = do_write_string(ff, name); 975 if (ret < 0) 976 return ret; 977 978 ret = do_write(ff, &leader_idx, sizeof(leader_idx)); 979 if (ret < 0) 980 return ret; 981 982 ret = do_write(ff, &nr_members, sizeof(nr_members)); 983 if (ret < 0) 984 return ret; 985 } 986 } 987 return 0; 988 } 989 990 /* 991 * default get_cpuid(): nothing gets recorded 992 * actual implementation must be in arch/$(SRCARCH)/util/header.c 993 */ 994 int __weak get_cpuid(char *buffer __maybe_unused, size_t sz __maybe_unused) 995 { 996 return -1; 997 } 998 999 static int write_cpuid(struct feat_fd *ff, 1000 struct perf_evlist *evlist __maybe_unused) 1001 { 1002 char buffer[64]; 1003 int ret; 1004 1005 ret = get_cpuid(buffer, sizeof(buffer)); 1006 if (!ret) 1007 goto write_it; 1008 1009 return -1; 1010 write_it: 1011 return do_write_string(ff, buffer); 1012 } 1013 1014 static int write_branch_stack(struct feat_fd *ff __maybe_unused, 1015 struct perf_evlist *evlist __maybe_unused) 1016 { 1017 return 0; 1018 } 1019 1020 static int write_auxtrace(struct feat_fd *ff, 1021 struct perf_evlist *evlist __maybe_unused) 1022 { 1023 struct perf_session *session; 1024 int err; 1025 1026 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 1027 return -1; 1028 1029 session = container_of(ff->ph, struct perf_session, header); 1030 1031 err = auxtrace_index__write(ff->fd, &session->auxtrace_index); 1032 if (err < 0) 1033 pr_err("Failed to write auxtrace index\n"); 1034 return err; 1035 } 1036 1037 static int cpu_cache_level__sort(const void *a, const void *b) 1038 { 1039 struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a; 1040 struct cpu_cache_level *cache_b = (struct cpu_cache_level *)b; 1041 1042 return cache_a->level - cache_b->level; 1043 } 1044 1045 static bool cpu_cache_level__cmp(struct cpu_cache_level *a, struct cpu_cache_level *b) 1046 { 1047 if (a->level != b->level) 1048 return false; 1049 1050 if (a->line_size != b->line_size) 1051 return false; 1052 1053 if (a->sets != b->sets) 1054 return false; 1055 1056 if (a->ways != b->ways) 1057 return false; 1058 1059 if (strcmp(a->type, b->type)) 1060 return false; 1061 1062 if (strcmp(a->size, b->size)) 1063 return false; 1064 1065 if (strcmp(a->map, b->map)) 1066 return false; 1067 1068 return true; 1069 } 1070 1071 static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level) 1072 { 1073 char path[PATH_MAX], file[PATH_MAX]; 1074 struct stat st; 1075 size_t len; 1076 1077 scnprintf(path, PATH_MAX, "devices/system/cpu/cpu%d/cache/index%d/", cpu, level); 1078 scnprintf(file, PATH_MAX, "%s/%s", sysfs__mountpoint(), path); 1079 1080 if (stat(file, &st)) 1081 return 1; 1082 1083 scnprintf(file, PATH_MAX, "%s/level", path); 1084 if (sysfs__read_int(file, (int *) &cache->level)) 1085 return -1; 1086 1087 scnprintf(file, PATH_MAX, "%s/coherency_line_size", path); 1088 if (sysfs__read_int(file, (int *) &cache->line_size)) 1089 return -1; 1090 1091 scnprintf(file, PATH_MAX, "%s/number_of_sets", path); 1092 if (sysfs__read_int(file, (int *) &cache->sets)) 1093 return -1; 1094 1095 scnprintf(file, PATH_MAX, "%s/ways_of_associativity", path); 1096 if (sysfs__read_int(file, (int *) &cache->ways)) 1097 return -1; 1098 1099 scnprintf(file, PATH_MAX, "%s/type", path); 1100 if (sysfs__read_str(file, &cache->type, &len)) 1101 return -1; 1102 1103 cache->type[len] = 0; 1104 cache->type = rtrim(cache->type); 1105 1106 scnprintf(file, PATH_MAX, "%s/size", path); 1107 if (sysfs__read_str(file, &cache->size, &len)) { 1108 free(cache->type); 1109 return -1; 1110 } 1111 1112 cache->size[len] = 0; 1113 cache->size = rtrim(cache->size); 1114 1115 scnprintf(file, PATH_MAX, "%s/shared_cpu_list", path); 1116 if (sysfs__read_str(file, &cache->map, &len)) { 1117 free(cache->map); 1118 free(cache->type); 1119 return -1; 1120 } 1121 1122 cache->map[len] = 0; 1123 cache->map = rtrim(cache->map); 1124 return 0; 1125 } 1126 1127 static void cpu_cache_level__fprintf(FILE *out, struct cpu_cache_level *c) 1128 { 1129 fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map); 1130 } 1131 1132 static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp) 1133 { 1134 u32 i, cnt = 0; 1135 long ncpus; 1136 u32 nr, cpu; 1137 u16 level; 1138 1139 ncpus = sysconf(_SC_NPROCESSORS_CONF); 1140 if (ncpus < 0) 1141 return -1; 1142 1143 nr = (u32)(ncpus & UINT_MAX); 1144 1145 for (cpu = 0; cpu < nr; cpu++) { 1146 for (level = 0; level < 10; level++) { 1147 struct cpu_cache_level c; 1148 int err; 1149 1150 err = cpu_cache_level__read(&c, cpu, level); 1151 if (err < 0) 1152 return err; 1153 1154 if (err == 1) 1155 break; 1156 1157 for (i = 0; i < cnt; i++) { 1158 if (cpu_cache_level__cmp(&c, &caches[i])) 1159 break; 1160 } 1161 1162 if (i == cnt) 1163 caches[cnt++] = c; 1164 else 1165 cpu_cache_level__free(&c); 1166 1167 if (WARN_ONCE(cnt == size, "way too many cpu caches..")) 1168 goto out; 1169 } 1170 } 1171 out: 1172 *cntp = cnt; 1173 return 0; 1174 } 1175 1176 #define MAX_CACHES 2000 1177 1178 static int write_cache(struct feat_fd *ff, 1179 struct perf_evlist *evlist __maybe_unused) 1180 { 1181 struct cpu_cache_level caches[MAX_CACHES]; 1182 u32 cnt = 0, i, version = 1; 1183 int ret; 1184 1185 ret = build_caches(caches, MAX_CACHES, &cnt); 1186 if (ret) 1187 goto out; 1188 1189 qsort(&caches, cnt, sizeof(struct cpu_cache_level), cpu_cache_level__sort); 1190 1191 ret = do_write(ff, &version, sizeof(u32)); 1192 if (ret < 0) 1193 goto out; 1194 1195 ret = do_write(ff, &cnt, sizeof(u32)); 1196 if (ret < 0) 1197 goto out; 1198 1199 for (i = 0; i < cnt; i++) { 1200 struct cpu_cache_level *c = &caches[i]; 1201 1202 #define _W(v) \ 1203 ret = do_write(ff, &c->v, sizeof(u32)); \ 1204 if (ret < 0) \ 1205 goto out; 1206 1207 _W(level) 1208 _W(line_size) 1209 _W(sets) 1210 _W(ways) 1211 #undef _W 1212 1213 #define _W(v) \ 1214 ret = do_write_string(ff, (const char *) c->v); \ 1215 if (ret < 0) \ 1216 goto out; 1217 1218 _W(type) 1219 _W(size) 1220 _W(map) 1221 #undef _W 1222 } 1223 1224 out: 1225 for (i = 0; i < cnt; i++) 1226 cpu_cache_level__free(&caches[i]); 1227 return ret; 1228 } 1229 1230 static int write_stat(struct feat_fd *ff __maybe_unused, 1231 struct perf_evlist *evlist __maybe_unused) 1232 { 1233 return 0; 1234 } 1235 1236 static int write_sample_time(struct feat_fd *ff, 1237 struct perf_evlist *evlist) 1238 { 1239 int ret; 1240 1241 ret = do_write(ff, &evlist->first_sample_time, 1242 sizeof(evlist->first_sample_time)); 1243 if (ret < 0) 1244 return ret; 1245 1246 return do_write(ff, &evlist->last_sample_time, 1247 sizeof(evlist->last_sample_time)); 1248 } 1249 1250 1251 static int memory_node__read(struct memory_node *n, unsigned long idx) 1252 { 1253 unsigned int phys, size = 0; 1254 char path[PATH_MAX]; 1255 struct dirent *ent; 1256 DIR *dir; 1257 1258 #define for_each_memory(mem, dir) \ 1259 while ((ent = readdir(dir))) \ 1260 if (strcmp(ent->d_name, ".") && \ 1261 strcmp(ent->d_name, "..") && \ 1262 sscanf(ent->d_name, "memory%u", &mem) == 1) 1263 1264 scnprintf(path, PATH_MAX, 1265 "%s/devices/system/node/node%lu", 1266 sysfs__mountpoint(), idx); 1267 1268 dir = opendir(path); 1269 if (!dir) { 1270 pr_warning("failed: cant' open memory sysfs data\n"); 1271 return -1; 1272 } 1273 1274 for_each_memory(phys, dir) { 1275 size = max(phys, size); 1276 } 1277 1278 size++; 1279 1280 n->set = bitmap_alloc(size); 1281 if (!n->set) { 1282 closedir(dir); 1283 return -ENOMEM; 1284 } 1285 1286 n->node = idx; 1287 n->size = size; 1288 1289 rewinddir(dir); 1290 1291 for_each_memory(phys, dir) { 1292 set_bit(phys, n->set); 1293 } 1294 1295 closedir(dir); 1296 return 0; 1297 } 1298 1299 static int memory_node__sort(const void *a, const void *b) 1300 { 1301 const struct memory_node *na = a; 1302 const struct memory_node *nb = b; 1303 1304 return na->node - nb->node; 1305 } 1306 1307 static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp) 1308 { 1309 char path[PATH_MAX]; 1310 struct dirent *ent; 1311 DIR *dir; 1312 u64 cnt = 0; 1313 int ret = 0; 1314 1315 scnprintf(path, PATH_MAX, "%s/devices/system/node/", 1316 sysfs__mountpoint()); 1317 1318 dir = opendir(path); 1319 if (!dir) { 1320 pr_debug2("%s: could't read %s, does this arch have topology information?\n", 1321 __func__, path); 1322 return -1; 1323 } 1324 1325 while (!ret && (ent = readdir(dir))) { 1326 unsigned int idx; 1327 int r; 1328 1329 if (!strcmp(ent->d_name, ".") || 1330 !strcmp(ent->d_name, "..")) 1331 continue; 1332 1333 r = sscanf(ent->d_name, "node%u", &idx); 1334 if (r != 1) 1335 continue; 1336 1337 if (WARN_ONCE(cnt >= size, 1338 "failed to write MEM_TOPOLOGY, way too many nodes\n")) 1339 return -1; 1340 1341 ret = memory_node__read(&nodes[cnt++], idx); 1342 } 1343 1344 *cntp = cnt; 1345 closedir(dir); 1346 1347 if (!ret) 1348 qsort(nodes, cnt, sizeof(nodes[0]), memory_node__sort); 1349 1350 return ret; 1351 } 1352 1353 #define MAX_MEMORY_NODES 2000 1354 1355 /* 1356 * The MEM_TOPOLOGY holds physical memory map for every 1357 * node in system. The format of data is as follows: 1358 * 1359 * 0 - version | for future changes 1360 * 8 - block_size_bytes | /sys/devices/system/memory/block_size_bytes 1361 * 16 - count | number of nodes 1362 * 1363 * For each node we store map of physical indexes for 1364 * each node: 1365 * 1366 * 32 - node id | node index 1367 * 40 - size | size of bitmap 1368 * 48 - bitmap | bitmap of memory indexes that belongs to node 1369 */ 1370 static int write_mem_topology(struct feat_fd *ff __maybe_unused, 1371 struct perf_evlist *evlist __maybe_unused) 1372 { 1373 static struct memory_node nodes[MAX_MEMORY_NODES]; 1374 u64 bsize, version = 1, i, nr; 1375 int ret; 1376 1377 ret = sysfs__read_xll("devices/system/memory/block_size_bytes", 1378 (unsigned long long *) &bsize); 1379 if (ret) 1380 return ret; 1381 1382 ret = build_mem_topology(&nodes[0], MAX_MEMORY_NODES, &nr); 1383 if (ret) 1384 return ret; 1385 1386 ret = do_write(ff, &version, sizeof(version)); 1387 if (ret < 0) 1388 goto out; 1389 1390 ret = do_write(ff, &bsize, sizeof(bsize)); 1391 if (ret < 0) 1392 goto out; 1393 1394 ret = do_write(ff, &nr, sizeof(nr)); 1395 if (ret < 0) 1396 goto out; 1397 1398 for (i = 0; i < nr; i++) { 1399 struct memory_node *n = &nodes[i]; 1400 1401 #define _W(v) \ 1402 ret = do_write(ff, &n->v, sizeof(n->v)); \ 1403 if (ret < 0) \ 1404 goto out; 1405 1406 _W(node) 1407 _W(size) 1408 1409 #undef _W 1410 1411 ret = do_write_bitmap(ff, n->set, n->size); 1412 if (ret < 0) 1413 goto out; 1414 } 1415 1416 out: 1417 return ret; 1418 } 1419 1420 static void print_hostname(struct feat_fd *ff, FILE *fp) 1421 { 1422 fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname); 1423 } 1424 1425 static void print_osrelease(struct feat_fd *ff, FILE *fp) 1426 { 1427 fprintf(fp, "# os release : %s\n", ff->ph->env.os_release); 1428 } 1429 1430 static void print_arch(struct feat_fd *ff, FILE *fp) 1431 { 1432 fprintf(fp, "# arch : %s\n", ff->ph->env.arch); 1433 } 1434 1435 static void print_cpudesc(struct feat_fd *ff, FILE *fp) 1436 { 1437 fprintf(fp, "# cpudesc : %s\n", ff->ph->env.cpu_desc); 1438 } 1439 1440 static void print_nrcpus(struct feat_fd *ff, FILE *fp) 1441 { 1442 fprintf(fp, "# nrcpus online : %u\n", ff->ph->env.nr_cpus_online); 1443 fprintf(fp, "# nrcpus avail : %u\n", ff->ph->env.nr_cpus_avail); 1444 } 1445 1446 static void print_version(struct feat_fd *ff, FILE *fp) 1447 { 1448 fprintf(fp, "# perf version : %s\n", ff->ph->env.version); 1449 } 1450 1451 static void print_cmdline(struct feat_fd *ff, FILE *fp) 1452 { 1453 int nr, i; 1454 1455 nr = ff->ph->env.nr_cmdline; 1456 1457 fprintf(fp, "# cmdline : "); 1458 1459 for (i = 0; i < nr; i++) { 1460 char *argv_i = strdup(ff->ph->env.cmdline_argv[i]); 1461 if (!argv_i) { 1462 fprintf(fp, "%s ", ff->ph->env.cmdline_argv[i]); 1463 } else { 1464 char *mem = argv_i; 1465 do { 1466 char *quote = strchr(argv_i, '\''); 1467 if (!quote) 1468 break; 1469 *quote++ = '\0'; 1470 fprintf(fp, "%s\\\'", argv_i); 1471 argv_i = quote; 1472 } while (1); 1473 fprintf(fp, "%s ", argv_i); 1474 free(mem); 1475 } 1476 } 1477 fputc('\n', fp); 1478 } 1479 1480 static void print_cpu_topology(struct feat_fd *ff, FILE *fp) 1481 { 1482 struct perf_header *ph = ff->ph; 1483 int cpu_nr = ph->env.nr_cpus_avail; 1484 int nr, i; 1485 char *str; 1486 1487 nr = ph->env.nr_sibling_cores; 1488 str = ph->env.sibling_cores; 1489 1490 for (i = 0; i < nr; i++) { 1491 fprintf(fp, "# sibling cores : %s\n", str); 1492 str += strlen(str) + 1; 1493 } 1494 1495 nr = ph->env.nr_sibling_threads; 1496 str = ph->env.sibling_threads; 1497 1498 for (i = 0; i < nr; i++) { 1499 fprintf(fp, "# sibling threads : %s\n", str); 1500 str += strlen(str) + 1; 1501 } 1502 1503 if (ph->env.cpu != NULL) { 1504 for (i = 0; i < cpu_nr; i++) 1505 fprintf(fp, "# CPU %d: Core ID %d, Socket ID %d\n", i, 1506 ph->env.cpu[i].core_id, ph->env.cpu[i].socket_id); 1507 } else 1508 fprintf(fp, "# Core ID and Socket ID information is not available\n"); 1509 } 1510 1511 static void free_event_desc(struct perf_evsel *events) 1512 { 1513 struct perf_evsel *evsel; 1514 1515 if (!events) 1516 return; 1517 1518 for (evsel = events; evsel->attr.size; evsel++) { 1519 zfree(&evsel->name); 1520 zfree(&evsel->id); 1521 } 1522 1523 free(events); 1524 } 1525 1526 static struct perf_evsel *read_event_desc(struct feat_fd *ff) 1527 { 1528 struct perf_evsel *evsel, *events = NULL; 1529 u64 *id; 1530 void *buf = NULL; 1531 u32 nre, sz, nr, i, j; 1532 size_t msz; 1533 1534 /* number of events */ 1535 if (do_read_u32(ff, &nre)) 1536 goto error; 1537 1538 if (do_read_u32(ff, &sz)) 1539 goto error; 1540 1541 /* buffer to hold on file attr struct */ 1542 buf = malloc(sz); 1543 if (!buf) 1544 goto error; 1545 1546 /* the last event terminates with evsel->attr.size == 0: */ 1547 events = calloc(nre + 1, sizeof(*events)); 1548 if (!events) 1549 goto error; 1550 1551 msz = sizeof(evsel->attr); 1552 if (sz < msz) 1553 msz = sz; 1554 1555 for (i = 0, evsel = events; i < nre; evsel++, i++) { 1556 evsel->idx = i; 1557 1558 /* 1559 * must read entire on-file attr struct to 1560 * sync up with layout. 1561 */ 1562 if (__do_read(ff, buf, sz)) 1563 goto error; 1564 1565 if (ff->ph->needs_swap) 1566 perf_event__attr_swap(buf); 1567 1568 memcpy(&evsel->attr, buf, msz); 1569 1570 if (do_read_u32(ff, &nr)) 1571 goto error; 1572 1573 if (ff->ph->needs_swap) 1574 evsel->needs_swap = true; 1575 1576 evsel->name = do_read_string(ff); 1577 if (!evsel->name) 1578 goto error; 1579 1580 if (!nr) 1581 continue; 1582 1583 id = calloc(nr, sizeof(*id)); 1584 if (!id) 1585 goto error; 1586 evsel->ids = nr; 1587 evsel->id = id; 1588 1589 for (j = 0 ; j < nr; j++) { 1590 if (do_read_u64(ff, id)) 1591 goto error; 1592 id++; 1593 } 1594 } 1595 out: 1596 free(buf); 1597 return events; 1598 error: 1599 free_event_desc(events); 1600 events = NULL; 1601 goto out; 1602 } 1603 1604 static int __desc_attr__fprintf(FILE *fp, const char *name, const char *val, 1605 void *priv __maybe_unused) 1606 { 1607 return fprintf(fp, ", %s = %s", name, val); 1608 } 1609 1610 static void print_event_desc(struct feat_fd *ff, FILE *fp) 1611 { 1612 struct perf_evsel *evsel, *events; 1613 u32 j; 1614 u64 *id; 1615 1616 if (ff->events) 1617 events = ff->events; 1618 else 1619 events = read_event_desc(ff); 1620 1621 if (!events) { 1622 fprintf(fp, "# event desc: not available or unable to read\n"); 1623 return; 1624 } 1625 1626 for (evsel = events; evsel->attr.size; evsel++) { 1627 fprintf(fp, "# event : name = %s, ", evsel->name); 1628 1629 if (evsel->ids) { 1630 fprintf(fp, ", id = {"); 1631 for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { 1632 if (j) 1633 fputc(',', fp); 1634 fprintf(fp, " %"PRIu64, *id); 1635 } 1636 fprintf(fp, " }"); 1637 } 1638 1639 perf_event_attr__fprintf(fp, &evsel->attr, __desc_attr__fprintf, NULL); 1640 1641 fputc('\n', fp); 1642 } 1643 1644 free_event_desc(events); 1645 ff->events = NULL; 1646 } 1647 1648 static void print_total_mem(struct feat_fd *ff, FILE *fp) 1649 { 1650 fprintf(fp, "# total memory : %llu kB\n", ff->ph->env.total_mem); 1651 } 1652 1653 static void print_numa_topology(struct feat_fd *ff, FILE *fp) 1654 { 1655 int i; 1656 struct numa_node *n; 1657 1658 for (i = 0; i < ff->ph->env.nr_numa_nodes; i++) { 1659 n = &ff->ph->env.numa_nodes[i]; 1660 1661 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," 1662 " free = %"PRIu64" kB\n", 1663 n->node, n->mem_total, n->mem_free); 1664 1665 fprintf(fp, "# node%u cpu list : ", n->node); 1666 cpu_map__fprintf(n->map, fp); 1667 } 1668 } 1669 1670 static void print_cpuid(struct feat_fd *ff, FILE *fp) 1671 { 1672 fprintf(fp, "# cpuid : %s\n", ff->ph->env.cpuid); 1673 } 1674 1675 static void print_branch_stack(struct feat_fd *ff __maybe_unused, FILE *fp) 1676 { 1677 fprintf(fp, "# contains samples with branch stack\n"); 1678 } 1679 1680 static void print_auxtrace(struct feat_fd *ff __maybe_unused, FILE *fp) 1681 { 1682 fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n"); 1683 } 1684 1685 static void print_stat(struct feat_fd *ff __maybe_unused, FILE *fp) 1686 { 1687 fprintf(fp, "# contains stat data\n"); 1688 } 1689 1690 static void print_cache(struct feat_fd *ff, FILE *fp __maybe_unused) 1691 { 1692 int i; 1693 1694 fprintf(fp, "# CPU cache info:\n"); 1695 for (i = 0; i < ff->ph->env.caches_cnt; i++) { 1696 fprintf(fp, "# "); 1697 cpu_cache_level__fprintf(fp, &ff->ph->env.caches[i]); 1698 } 1699 } 1700 1701 static void print_pmu_mappings(struct feat_fd *ff, FILE *fp) 1702 { 1703 const char *delimiter = "# pmu mappings: "; 1704 char *str, *tmp; 1705 u32 pmu_num; 1706 u32 type; 1707 1708 pmu_num = ff->ph->env.nr_pmu_mappings; 1709 if (!pmu_num) { 1710 fprintf(fp, "# pmu mappings: not available\n"); 1711 return; 1712 } 1713 1714 str = ff->ph->env.pmu_mappings; 1715 1716 while (pmu_num) { 1717 type = strtoul(str, &tmp, 0); 1718 if (*tmp != ':') 1719 goto error; 1720 1721 str = tmp + 1; 1722 fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); 1723 1724 delimiter = ", "; 1725 str += strlen(str) + 1; 1726 pmu_num--; 1727 } 1728 1729 fprintf(fp, "\n"); 1730 1731 if (!pmu_num) 1732 return; 1733 error: 1734 fprintf(fp, "# pmu mappings: unable to read\n"); 1735 } 1736 1737 static void print_group_desc(struct feat_fd *ff, FILE *fp) 1738 { 1739 struct perf_session *session; 1740 struct perf_evsel *evsel; 1741 u32 nr = 0; 1742 1743 session = container_of(ff->ph, struct perf_session, header); 1744 1745 evlist__for_each_entry(session->evlist, evsel) { 1746 if (perf_evsel__is_group_leader(evsel) && 1747 evsel->nr_members > 1) { 1748 fprintf(fp, "# group: %s{%s", evsel->group_name ?: "", 1749 perf_evsel__name(evsel)); 1750 1751 nr = evsel->nr_members - 1; 1752 } else if (nr) { 1753 fprintf(fp, ",%s", perf_evsel__name(evsel)); 1754 1755 if (--nr == 0) 1756 fprintf(fp, "}\n"); 1757 } 1758 } 1759 } 1760 1761 static void print_sample_time(struct feat_fd *ff, FILE *fp) 1762 { 1763 struct perf_session *session; 1764 char time_buf[32]; 1765 double d; 1766 1767 session = container_of(ff->ph, struct perf_session, header); 1768 1769 timestamp__scnprintf_usec(session->evlist->first_sample_time, 1770 time_buf, sizeof(time_buf)); 1771 fprintf(fp, "# time of first sample : %s\n", time_buf); 1772 1773 timestamp__scnprintf_usec(session->evlist->last_sample_time, 1774 time_buf, sizeof(time_buf)); 1775 fprintf(fp, "# time of last sample : %s\n", time_buf); 1776 1777 d = (double)(session->evlist->last_sample_time - 1778 session->evlist->first_sample_time) / NSEC_PER_MSEC; 1779 1780 fprintf(fp, "# sample duration : %10.3f ms\n", d); 1781 } 1782 1783 static void memory_node__fprintf(struct memory_node *n, 1784 unsigned long long bsize, FILE *fp) 1785 { 1786 char buf_map[100], buf_size[50]; 1787 unsigned long long size; 1788 1789 size = bsize * bitmap_weight(n->set, n->size); 1790 unit_number__scnprintf(buf_size, 50, size); 1791 1792 bitmap_scnprintf(n->set, n->size, buf_map, 100); 1793 fprintf(fp, "# %3" PRIu64 " [%s]: %s\n", n->node, buf_size, buf_map); 1794 } 1795 1796 static void print_mem_topology(struct feat_fd *ff, FILE *fp) 1797 { 1798 struct memory_node *nodes; 1799 int i, nr; 1800 1801 nodes = ff->ph->env.memory_nodes; 1802 nr = ff->ph->env.nr_memory_nodes; 1803 1804 fprintf(fp, "# memory nodes (nr %d, block size 0x%llx):\n", 1805 nr, ff->ph->env.memory_bsize); 1806 1807 for (i = 0; i < nr; i++) { 1808 memory_node__fprintf(&nodes[i], ff->ph->env.memory_bsize, fp); 1809 } 1810 } 1811 1812 static int __event_process_build_id(struct build_id_event *bev, 1813 char *filename, 1814 struct perf_session *session) 1815 { 1816 int err = -1; 1817 struct machine *machine; 1818 u16 cpumode; 1819 struct dso *dso; 1820 enum dso_kernel_type dso_type; 1821 1822 machine = perf_session__findnew_machine(session, bev->pid); 1823 if (!machine) 1824 goto out; 1825 1826 cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1827 1828 switch (cpumode) { 1829 case PERF_RECORD_MISC_KERNEL: 1830 dso_type = DSO_TYPE_KERNEL; 1831 break; 1832 case PERF_RECORD_MISC_GUEST_KERNEL: 1833 dso_type = DSO_TYPE_GUEST_KERNEL; 1834 break; 1835 case PERF_RECORD_MISC_USER: 1836 case PERF_RECORD_MISC_GUEST_USER: 1837 dso_type = DSO_TYPE_USER; 1838 break; 1839 default: 1840 goto out; 1841 } 1842 1843 dso = machine__findnew_dso(machine, filename); 1844 if (dso != NULL) { 1845 char sbuild_id[SBUILD_ID_SIZE]; 1846 1847 dso__set_build_id(dso, &bev->build_id); 1848 1849 if (dso_type != DSO_TYPE_USER) { 1850 struct kmod_path m = { .name = NULL, }; 1851 1852 if (!kmod_path__parse_name(&m, filename) && m.kmod) 1853 dso__set_module_info(dso, &m, machine); 1854 else 1855 dso->kernel = dso_type; 1856 1857 free(m.name); 1858 } 1859 1860 build_id__sprintf(dso->build_id, sizeof(dso->build_id), 1861 sbuild_id); 1862 pr_debug("build id event received for %s: %s\n", 1863 dso->long_name, sbuild_id); 1864 dso__put(dso); 1865 } 1866 1867 err = 0; 1868 out: 1869 return err; 1870 } 1871 1872 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, 1873 int input, u64 offset, u64 size) 1874 { 1875 struct perf_session *session = container_of(header, struct perf_session, header); 1876 struct { 1877 struct perf_event_header header; 1878 u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; 1879 char filename[0]; 1880 } old_bev; 1881 struct build_id_event bev; 1882 char filename[PATH_MAX]; 1883 u64 limit = offset + size; 1884 1885 while (offset < limit) { 1886 ssize_t len; 1887 1888 if (readn(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) 1889 return -1; 1890 1891 if (header->needs_swap) 1892 perf_event_header__bswap(&old_bev.header); 1893 1894 len = old_bev.header.size - sizeof(old_bev); 1895 if (readn(input, filename, len) != len) 1896 return -1; 1897 1898 bev.header = old_bev.header; 1899 1900 /* 1901 * As the pid is the missing value, we need to fill 1902 * it properly. The header.misc value give us nice hint. 1903 */ 1904 bev.pid = HOST_KERNEL_ID; 1905 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || 1906 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) 1907 bev.pid = DEFAULT_GUEST_KERNEL_ID; 1908 1909 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); 1910 __event_process_build_id(&bev, filename, session); 1911 1912 offset += bev.header.size; 1913 } 1914 1915 return 0; 1916 } 1917 1918 static int perf_header__read_build_ids(struct perf_header *header, 1919 int input, u64 offset, u64 size) 1920 { 1921 struct perf_session *session = container_of(header, struct perf_session, header); 1922 struct build_id_event bev; 1923 char filename[PATH_MAX]; 1924 u64 limit = offset + size, orig_offset = offset; 1925 int err = -1; 1926 1927 while (offset < limit) { 1928 ssize_t len; 1929 1930 if (readn(input, &bev, sizeof(bev)) != sizeof(bev)) 1931 goto out; 1932 1933 if (header->needs_swap) 1934 perf_event_header__bswap(&bev.header); 1935 1936 len = bev.header.size - sizeof(bev); 1937 if (readn(input, filename, len) != len) 1938 goto out; 1939 /* 1940 * The a1645ce1 changeset: 1941 * 1942 * "perf: 'perf kvm' tool for monitoring guest performance from host" 1943 * 1944 * Added a field to struct build_id_event that broke the file 1945 * format. 1946 * 1947 * Since the kernel build-id is the first entry, process the 1948 * table using the old format if the well known 1949 * '[kernel.kallsyms]' string for the kernel build-id has the 1950 * first 4 characters chopped off (where the pid_t sits). 1951 */ 1952 if (memcmp(filename, "nel.kallsyms]", 13) == 0) { 1953 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) 1954 return -1; 1955 return perf_header__read_build_ids_abi_quirk(header, input, offset, size); 1956 } 1957 1958 __event_process_build_id(&bev, filename, session); 1959 1960 offset += bev.header.size; 1961 } 1962 err = 0; 1963 out: 1964 return err; 1965 } 1966 1967 /* Macro for features that simply need to read and store a string. */ 1968 #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \ 1969 static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \ 1970 {\ 1971 ff->ph->env.__feat_env = do_read_string(ff); \ 1972 return ff->ph->env.__feat_env ? 0 : -ENOMEM; \ 1973 } 1974 1975 FEAT_PROCESS_STR_FUN(hostname, hostname); 1976 FEAT_PROCESS_STR_FUN(osrelease, os_release); 1977 FEAT_PROCESS_STR_FUN(version, version); 1978 FEAT_PROCESS_STR_FUN(arch, arch); 1979 FEAT_PROCESS_STR_FUN(cpudesc, cpu_desc); 1980 FEAT_PROCESS_STR_FUN(cpuid, cpuid); 1981 1982 static int process_tracing_data(struct feat_fd *ff, void *data) 1983 { 1984 ssize_t ret = trace_report(ff->fd, data, false); 1985 1986 return ret < 0 ? -1 : 0; 1987 } 1988 1989 static int process_build_id(struct feat_fd *ff, void *data __maybe_unused) 1990 { 1991 if (perf_header__read_build_ids(ff->ph, ff->fd, ff->offset, ff->size)) 1992 pr_debug("Failed to read buildids, continuing...\n"); 1993 return 0; 1994 } 1995 1996 static int process_nrcpus(struct feat_fd *ff, void *data __maybe_unused) 1997 { 1998 int ret; 1999 u32 nr_cpus_avail, nr_cpus_online; 2000 2001 ret = do_read_u32(ff, &nr_cpus_avail); 2002 if (ret) 2003 return ret; 2004 2005 ret = do_read_u32(ff, &nr_cpus_online); 2006 if (ret) 2007 return ret; 2008 ff->ph->env.nr_cpus_avail = (int)nr_cpus_avail; 2009 ff->ph->env.nr_cpus_online = (int)nr_cpus_online; 2010 return 0; 2011 } 2012 2013 static int process_total_mem(struct feat_fd *ff, void *data __maybe_unused) 2014 { 2015 u64 total_mem; 2016 int ret; 2017 2018 ret = do_read_u64(ff, &total_mem); 2019 if (ret) 2020 return -1; 2021 ff->ph->env.total_mem = (unsigned long long)total_mem; 2022 return 0; 2023 } 2024 2025 static struct perf_evsel * 2026 perf_evlist__find_by_index(struct perf_evlist *evlist, int idx) 2027 { 2028 struct perf_evsel *evsel; 2029 2030 evlist__for_each_entry(evlist, evsel) { 2031 if (evsel->idx == idx) 2032 return evsel; 2033 } 2034 2035 return NULL; 2036 } 2037 2038 static void 2039 perf_evlist__set_event_name(struct perf_evlist *evlist, 2040 struct perf_evsel *event) 2041 { 2042 struct perf_evsel *evsel; 2043 2044 if (!event->name) 2045 return; 2046 2047 evsel = perf_evlist__find_by_index(evlist, event->idx); 2048 if (!evsel) 2049 return; 2050 2051 if (evsel->name) 2052 return; 2053 2054 evsel->name = strdup(event->name); 2055 } 2056 2057 static int 2058 process_event_desc(struct feat_fd *ff, void *data __maybe_unused) 2059 { 2060 struct perf_session *session; 2061 struct perf_evsel *evsel, *events = read_event_desc(ff); 2062 2063 if (!events) 2064 return 0; 2065 2066 session = container_of(ff->ph, struct perf_session, header); 2067 2068 if (session->data->is_pipe) { 2069 /* Save events for reading later by print_event_desc, 2070 * since they can't be read again in pipe mode. */ 2071 ff->events = events; 2072 } 2073 2074 for (evsel = events; evsel->attr.size; evsel++) 2075 perf_evlist__set_event_name(session->evlist, evsel); 2076 2077 if (!session->data->is_pipe) 2078 free_event_desc(events); 2079 2080 return 0; 2081 } 2082 2083 static int process_cmdline(struct feat_fd *ff, void *data __maybe_unused) 2084 { 2085 char *str, *cmdline = NULL, **argv = NULL; 2086 u32 nr, i, len = 0; 2087 2088 if (do_read_u32(ff, &nr)) 2089 return -1; 2090 2091 ff->ph->env.nr_cmdline = nr; 2092 2093 cmdline = zalloc(ff->size + nr + 1); 2094 if (!cmdline) 2095 return -1; 2096 2097 argv = zalloc(sizeof(char *) * (nr + 1)); 2098 if (!argv) 2099 goto error; 2100 2101 for (i = 0; i < nr; i++) { 2102 str = do_read_string(ff); 2103 if (!str) 2104 goto error; 2105 2106 argv[i] = cmdline + len; 2107 memcpy(argv[i], str, strlen(str) + 1); 2108 len += strlen(str) + 1; 2109 free(str); 2110 } 2111 ff->ph->env.cmdline = cmdline; 2112 ff->ph->env.cmdline_argv = (const char **) argv; 2113 return 0; 2114 2115 error: 2116 free(argv); 2117 free(cmdline); 2118 return -1; 2119 } 2120 2121 static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused) 2122 { 2123 u32 nr, i; 2124 char *str; 2125 struct strbuf sb; 2126 int cpu_nr = ff->ph->env.nr_cpus_avail; 2127 u64 size = 0; 2128 struct perf_header *ph = ff->ph; 2129 bool do_core_id_test = true; 2130 2131 ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu)); 2132 if (!ph->env.cpu) 2133 return -1; 2134 2135 if (do_read_u32(ff, &nr)) 2136 goto free_cpu; 2137 2138 ph->env.nr_sibling_cores = nr; 2139 size += sizeof(u32); 2140 if (strbuf_init(&sb, 128) < 0) 2141 goto free_cpu; 2142 2143 for (i = 0; i < nr; i++) { 2144 str = do_read_string(ff); 2145 if (!str) 2146 goto error; 2147 2148 /* include a NULL character at the end */ 2149 if (strbuf_add(&sb, str, strlen(str) + 1) < 0) 2150 goto error; 2151 size += string_size(str); 2152 free(str); 2153 } 2154 ph->env.sibling_cores = strbuf_detach(&sb, NULL); 2155 2156 if (do_read_u32(ff, &nr)) 2157 return -1; 2158 2159 ph->env.nr_sibling_threads = nr; 2160 size += sizeof(u32); 2161 2162 for (i = 0; i < nr; i++) { 2163 str = do_read_string(ff); 2164 if (!str) 2165 goto error; 2166 2167 /* include a NULL character at the end */ 2168 if (strbuf_add(&sb, str, strlen(str) + 1) < 0) 2169 goto error; 2170 size += string_size(str); 2171 free(str); 2172 } 2173 ph->env.sibling_threads = strbuf_detach(&sb, NULL); 2174 2175 /* 2176 * The header may be from old perf, 2177 * which doesn't include core id and socket id information. 2178 */ 2179 if (ff->size <= size) { 2180 zfree(&ph->env.cpu); 2181 return 0; 2182 } 2183 2184 /* On s390 the socket_id number is not related to the numbers of cpus. 2185 * The socket_id number might be higher than the numbers of cpus. 2186 * This depends on the configuration. 2187 */ 2188 if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4)) 2189 do_core_id_test = false; 2190 2191 for (i = 0; i < (u32)cpu_nr; i++) { 2192 if (do_read_u32(ff, &nr)) 2193 goto free_cpu; 2194 2195 ph->env.cpu[i].core_id = nr; 2196 2197 if (do_read_u32(ff, &nr)) 2198 goto free_cpu; 2199 2200 if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) { 2201 pr_debug("socket_id number is too big." 2202 "You may need to upgrade the perf tool.\n"); 2203 goto free_cpu; 2204 } 2205 2206 ph->env.cpu[i].socket_id = nr; 2207 } 2208 2209 return 0; 2210 2211 error: 2212 strbuf_release(&sb); 2213 free_cpu: 2214 zfree(&ph->env.cpu); 2215 return -1; 2216 } 2217 2218 static int process_numa_topology(struct feat_fd *ff, void *data __maybe_unused) 2219 { 2220 struct numa_node *nodes, *n; 2221 u32 nr, i; 2222 char *str; 2223 2224 /* nr nodes */ 2225 if (do_read_u32(ff, &nr)) 2226 return -1; 2227 2228 nodes = zalloc(sizeof(*nodes) * nr); 2229 if (!nodes) 2230 return -ENOMEM; 2231 2232 for (i = 0; i < nr; i++) { 2233 n = &nodes[i]; 2234 2235 /* node number */ 2236 if (do_read_u32(ff, &n->node)) 2237 goto error; 2238 2239 if (do_read_u64(ff, &n->mem_total)) 2240 goto error; 2241 2242 if (do_read_u64(ff, &n->mem_free)) 2243 goto error; 2244 2245 str = do_read_string(ff); 2246 if (!str) 2247 goto error; 2248 2249 n->map = cpu_map__new(str); 2250 if (!n->map) 2251 goto error; 2252 2253 free(str); 2254 } 2255 ff->ph->env.nr_numa_nodes = nr; 2256 ff->ph->env.numa_nodes = nodes; 2257 return 0; 2258 2259 error: 2260 free(nodes); 2261 return -1; 2262 } 2263 2264 static int process_pmu_mappings(struct feat_fd *ff, void *data __maybe_unused) 2265 { 2266 char *name; 2267 u32 pmu_num; 2268 u32 type; 2269 struct strbuf sb; 2270 2271 if (do_read_u32(ff, &pmu_num)) 2272 return -1; 2273 2274 if (!pmu_num) { 2275 pr_debug("pmu mappings not available\n"); 2276 return 0; 2277 } 2278 2279 ff->ph->env.nr_pmu_mappings = pmu_num; 2280 if (strbuf_init(&sb, 128) < 0) 2281 return -1; 2282 2283 while (pmu_num) { 2284 if (do_read_u32(ff, &type)) 2285 goto error; 2286 2287 name = do_read_string(ff); 2288 if (!name) 2289 goto error; 2290 2291 if (strbuf_addf(&sb, "%u:%s", type, name) < 0) 2292 goto error; 2293 /* include a NULL character at the end */ 2294 if (strbuf_add(&sb, "", 1) < 0) 2295 goto error; 2296 2297 if (!strcmp(name, "msr")) 2298 ff->ph->env.msr_pmu_type = type; 2299 2300 free(name); 2301 pmu_num--; 2302 } 2303 ff->ph->env.pmu_mappings = strbuf_detach(&sb, NULL); 2304 return 0; 2305 2306 error: 2307 strbuf_release(&sb); 2308 return -1; 2309 } 2310 2311 static int process_group_desc(struct feat_fd *ff, void *data __maybe_unused) 2312 { 2313 size_t ret = -1; 2314 u32 i, nr, nr_groups; 2315 struct perf_session *session; 2316 struct perf_evsel *evsel, *leader = NULL; 2317 struct group_desc { 2318 char *name; 2319 u32 leader_idx; 2320 u32 nr_members; 2321 } *desc; 2322 2323 if (do_read_u32(ff, &nr_groups)) 2324 return -1; 2325 2326 ff->ph->env.nr_groups = nr_groups; 2327 if (!nr_groups) { 2328 pr_debug("group desc not available\n"); 2329 return 0; 2330 } 2331 2332 desc = calloc(nr_groups, sizeof(*desc)); 2333 if (!desc) 2334 return -1; 2335 2336 for (i = 0; i < nr_groups; i++) { 2337 desc[i].name = do_read_string(ff); 2338 if (!desc[i].name) 2339 goto out_free; 2340 2341 if (do_read_u32(ff, &desc[i].leader_idx)) 2342 goto out_free; 2343 2344 if (do_read_u32(ff, &desc[i].nr_members)) 2345 goto out_free; 2346 } 2347 2348 /* 2349 * Rebuild group relationship based on the group_desc 2350 */ 2351 session = container_of(ff->ph, struct perf_session, header); 2352 session->evlist->nr_groups = nr_groups; 2353 2354 i = nr = 0; 2355 evlist__for_each_entry(session->evlist, evsel) { 2356 if (evsel->idx == (int) desc[i].leader_idx) { 2357 evsel->leader = evsel; 2358 /* {anon_group} is a dummy name */ 2359 if (strcmp(desc[i].name, "{anon_group}")) { 2360 evsel->group_name = desc[i].name; 2361 desc[i].name = NULL; 2362 } 2363 evsel->nr_members = desc[i].nr_members; 2364 2365 if (i >= nr_groups || nr > 0) { 2366 pr_debug("invalid group desc\n"); 2367 goto out_free; 2368 } 2369 2370 leader = evsel; 2371 nr = evsel->nr_members - 1; 2372 i++; 2373 } else if (nr) { 2374 /* This is a group member */ 2375 evsel->leader = leader; 2376 2377 nr--; 2378 } 2379 } 2380 2381 if (i != nr_groups || nr != 0) { 2382 pr_debug("invalid group desc\n"); 2383 goto out_free; 2384 } 2385 2386 ret = 0; 2387 out_free: 2388 for (i = 0; i < nr_groups; i++) 2389 zfree(&desc[i].name); 2390 free(desc); 2391 2392 return ret; 2393 } 2394 2395 static int process_auxtrace(struct feat_fd *ff, void *data __maybe_unused) 2396 { 2397 struct perf_session *session; 2398 int err; 2399 2400 session = container_of(ff->ph, struct perf_session, header); 2401 2402 err = auxtrace_index__process(ff->fd, ff->size, session, 2403 ff->ph->needs_swap); 2404 if (err < 0) 2405 pr_err("Failed to process auxtrace index\n"); 2406 return err; 2407 } 2408 2409 static int process_cache(struct feat_fd *ff, void *data __maybe_unused) 2410 { 2411 struct cpu_cache_level *caches; 2412 u32 cnt, i, version; 2413 2414 if (do_read_u32(ff, &version)) 2415 return -1; 2416 2417 if (version != 1) 2418 return -1; 2419 2420 if (do_read_u32(ff, &cnt)) 2421 return -1; 2422 2423 caches = zalloc(sizeof(*caches) * cnt); 2424 if (!caches) 2425 return -1; 2426 2427 for (i = 0; i < cnt; i++) { 2428 struct cpu_cache_level c; 2429 2430 #define _R(v) \ 2431 if (do_read_u32(ff, &c.v))\ 2432 goto out_free_caches; \ 2433 2434 _R(level) 2435 _R(line_size) 2436 _R(sets) 2437 _R(ways) 2438 #undef _R 2439 2440 #define _R(v) \ 2441 c.v = do_read_string(ff); \ 2442 if (!c.v) \ 2443 goto out_free_caches; 2444 2445 _R(type) 2446 _R(size) 2447 _R(map) 2448 #undef _R 2449 2450 caches[i] = c; 2451 } 2452 2453 ff->ph->env.caches = caches; 2454 ff->ph->env.caches_cnt = cnt; 2455 return 0; 2456 out_free_caches: 2457 free(caches); 2458 return -1; 2459 } 2460 2461 static int process_sample_time(struct feat_fd *ff, void *data __maybe_unused) 2462 { 2463 struct perf_session *session; 2464 u64 first_sample_time, last_sample_time; 2465 int ret; 2466 2467 session = container_of(ff->ph, struct perf_session, header); 2468 2469 ret = do_read_u64(ff, &first_sample_time); 2470 if (ret) 2471 return -1; 2472 2473 ret = do_read_u64(ff, &last_sample_time); 2474 if (ret) 2475 return -1; 2476 2477 session->evlist->first_sample_time = first_sample_time; 2478 session->evlist->last_sample_time = last_sample_time; 2479 return 0; 2480 } 2481 2482 static int process_mem_topology(struct feat_fd *ff, 2483 void *data __maybe_unused) 2484 { 2485 struct memory_node *nodes; 2486 u64 version, i, nr, bsize; 2487 int ret = -1; 2488 2489 if (do_read_u64(ff, &version)) 2490 return -1; 2491 2492 if (version != 1) 2493 return -1; 2494 2495 if (do_read_u64(ff, &bsize)) 2496 return -1; 2497 2498 if (do_read_u64(ff, &nr)) 2499 return -1; 2500 2501 nodes = zalloc(sizeof(*nodes) * nr); 2502 if (!nodes) 2503 return -1; 2504 2505 for (i = 0; i < nr; i++) { 2506 struct memory_node n; 2507 2508 #define _R(v) \ 2509 if (do_read_u64(ff, &n.v)) \ 2510 goto out; \ 2511 2512 _R(node) 2513 _R(size) 2514 2515 #undef _R 2516 2517 if (do_read_bitmap(ff, &n.set, &n.size)) 2518 goto out; 2519 2520 nodes[i] = n; 2521 } 2522 2523 ff->ph->env.memory_bsize = bsize; 2524 ff->ph->env.memory_nodes = nodes; 2525 ff->ph->env.nr_memory_nodes = nr; 2526 ret = 0; 2527 2528 out: 2529 if (ret) 2530 free(nodes); 2531 return ret; 2532 } 2533 2534 struct feature_ops { 2535 int (*write)(struct feat_fd *ff, struct perf_evlist *evlist); 2536 void (*print)(struct feat_fd *ff, FILE *fp); 2537 int (*process)(struct feat_fd *ff, void *data); 2538 const char *name; 2539 bool full_only; 2540 bool synthesize; 2541 }; 2542 2543 #define FEAT_OPR(n, func, __full_only) \ 2544 [HEADER_##n] = { \ 2545 .name = __stringify(n), \ 2546 .write = write_##func, \ 2547 .print = print_##func, \ 2548 .full_only = __full_only, \ 2549 .process = process_##func, \ 2550 .synthesize = true \ 2551 } 2552 2553 #define FEAT_OPN(n, func, __full_only) \ 2554 [HEADER_##n] = { \ 2555 .name = __stringify(n), \ 2556 .write = write_##func, \ 2557 .print = print_##func, \ 2558 .full_only = __full_only, \ 2559 .process = process_##func \ 2560 } 2561 2562 /* feature_ops not implemented: */ 2563 #define print_tracing_data NULL 2564 #define print_build_id NULL 2565 2566 #define process_branch_stack NULL 2567 #define process_stat NULL 2568 2569 2570 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { 2571 FEAT_OPN(TRACING_DATA, tracing_data, false), 2572 FEAT_OPN(BUILD_ID, build_id, false), 2573 FEAT_OPR(HOSTNAME, hostname, false), 2574 FEAT_OPR(OSRELEASE, osrelease, false), 2575 FEAT_OPR(VERSION, version, false), 2576 FEAT_OPR(ARCH, arch, false), 2577 FEAT_OPR(NRCPUS, nrcpus, false), 2578 FEAT_OPR(CPUDESC, cpudesc, false), 2579 FEAT_OPR(CPUID, cpuid, false), 2580 FEAT_OPR(TOTAL_MEM, total_mem, false), 2581 FEAT_OPR(EVENT_DESC, event_desc, false), 2582 FEAT_OPR(CMDLINE, cmdline, false), 2583 FEAT_OPR(CPU_TOPOLOGY, cpu_topology, true), 2584 FEAT_OPR(NUMA_TOPOLOGY, numa_topology, true), 2585 FEAT_OPN(BRANCH_STACK, branch_stack, false), 2586 FEAT_OPR(PMU_MAPPINGS, pmu_mappings, false), 2587 FEAT_OPR(GROUP_DESC, group_desc, false), 2588 FEAT_OPN(AUXTRACE, auxtrace, false), 2589 FEAT_OPN(STAT, stat, false), 2590 FEAT_OPN(CACHE, cache, true), 2591 FEAT_OPR(SAMPLE_TIME, sample_time, false), 2592 FEAT_OPR(MEM_TOPOLOGY, mem_topology, true), 2593 }; 2594 2595 struct header_print_data { 2596 FILE *fp; 2597 bool full; /* extended list of headers */ 2598 }; 2599 2600 static int perf_file_section__fprintf_info(struct perf_file_section *section, 2601 struct perf_header *ph, 2602 int feat, int fd, void *data) 2603 { 2604 struct header_print_data *hd = data; 2605 struct feat_fd ff; 2606 2607 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { 2608 pr_debug("Failed to lseek to %" PRIu64 " offset for feature " 2609 "%d, continuing...\n", section->offset, feat); 2610 return 0; 2611 } 2612 if (feat >= HEADER_LAST_FEATURE) { 2613 pr_warning("unknown feature %d\n", feat); 2614 return 0; 2615 } 2616 if (!feat_ops[feat].print) 2617 return 0; 2618 2619 ff = (struct feat_fd) { 2620 .fd = fd, 2621 .ph = ph, 2622 }; 2623 2624 if (!feat_ops[feat].full_only || hd->full) 2625 feat_ops[feat].print(&ff, hd->fp); 2626 else 2627 fprintf(hd->fp, "# %s info available, use -I to display\n", 2628 feat_ops[feat].name); 2629 2630 return 0; 2631 } 2632 2633 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full) 2634 { 2635 struct header_print_data hd; 2636 struct perf_header *header = &session->header; 2637 int fd = perf_data__fd(session->data); 2638 struct stat st; 2639 int ret, bit; 2640 2641 hd.fp = fp; 2642 hd.full = full; 2643 2644 ret = fstat(fd, &st); 2645 if (ret == -1) 2646 return -1; 2647 2648 fprintf(fp, "# captured on : %s", ctime(&st.st_ctime)); 2649 2650 fprintf(fp, "# header version : %u\n", header->version); 2651 fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset); 2652 fprintf(fp, "# data size : %" PRIu64 "\n", header->data_size); 2653 fprintf(fp, "# feat offset : %" PRIu64 "\n", header->feat_offset); 2654 2655 perf_header__process_sections(header, fd, &hd, 2656 perf_file_section__fprintf_info); 2657 2658 if (session->data->is_pipe) 2659 return 0; 2660 2661 fprintf(fp, "# missing features: "); 2662 for_each_clear_bit(bit, header->adds_features, HEADER_LAST_FEATURE) { 2663 if (bit) 2664 fprintf(fp, "%s ", feat_ops[bit].name); 2665 } 2666 2667 fprintf(fp, "\n"); 2668 return 0; 2669 } 2670 2671 static int do_write_feat(struct feat_fd *ff, int type, 2672 struct perf_file_section **p, 2673 struct perf_evlist *evlist) 2674 { 2675 int err; 2676 int ret = 0; 2677 2678 if (perf_header__has_feat(ff->ph, type)) { 2679 if (!feat_ops[type].write) 2680 return -1; 2681 2682 if (WARN(ff->buf, "Error: calling %s in pipe-mode.\n", __func__)) 2683 return -1; 2684 2685 (*p)->offset = lseek(ff->fd, 0, SEEK_CUR); 2686 2687 err = feat_ops[type].write(ff, evlist); 2688 if (err < 0) { 2689 pr_debug("failed to write feature %s\n", feat_ops[type].name); 2690 2691 /* undo anything written */ 2692 lseek(ff->fd, (*p)->offset, SEEK_SET); 2693 2694 return -1; 2695 } 2696 (*p)->size = lseek(ff->fd, 0, SEEK_CUR) - (*p)->offset; 2697 (*p)++; 2698 } 2699 return ret; 2700 } 2701 2702 static int perf_header__adds_write(struct perf_header *header, 2703 struct perf_evlist *evlist, int fd) 2704 { 2705 int nr_sections; 2706 struct feat_fd ff; 2707 struct perf_file_section *feat_sec, *p; 2708 int sec_size; 2709 u64 sec_start; 2710 int feat; 2711 int err; 2712 2713 ff = (struct feat_fd){ 2714 .fd = fd, 2715 .ph = header, 2716 }; 2717 2718 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 2719 if (!nr_sections) 2720 return 0; 2721 2722 feat_sec = p = calloc(nr_sections, sizeof(*feat_sec)); 2723 if (feat_sec == NULL) 2724 return -ENOMEM; 2725 2726 sec_size = sizeof(*feat_sec) * nr_sections; 2727 2728 sec_start = header->feat_offset; 2729 lseek(fd, sec_start + sec_size, SEEK_SET); 2730 2731 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 2732 if (do_write_feat(&ff, feat, &p, evlist)) 2733 perf_header__clear_feat(header, feat); 2734 } 2735 2736 lseek(fd, sec_start, SEEK_SET); 2737 /* 2738 * may write more than needed due to dropped feature, but 2739 * this is okay, reader will skip the mising entries 2740 */ 2741 err = do_write(&ff, feat_sec, sec_size); 2742 if (err < 0) 2743 pr_debug("failed to write feature section\n"); 2744 free(feat_sec); 2745 return err; 2746 } 2747 2748 int perf_header__write_pipe(int fd) 2749 { 2750 struct perf_pipe_file_header f_header; 2751 struct feat_fd ff; 2752 int err; 2753 2754 ff = (struct feat_fd){ .fd = fd }; 2755 2756 f_header = (struct perf_pipe_file_header){ 2757 .magic = PERF_MAGIC, 2758 .size = sizeof(f_header), 2759 }; 2760 2761 err = do_write(&ff, &f_header, sizeof(f_header)); 2762 if (err < 0) { 2763 pr_debug("failed to write perf pipe header\n"); 2764 return err; 2765 } 2766 2767 return 0; 2768 } 2769 2770 int perf_session__write_header(struct perf_session *session, 2771 struct perf_evlist *evlist, 2772 int fd, bool at_exit) 2773 { 2774 struct perf_file_header f_header; 2775 struct perf_file_attr f_attr; 2776 struct perf_header *header = &session->header; 2777 struct perf_evsel *evsel; 2778 struct feat_fd ff; 2779 u64 attr_offset; 2780 int err; 2781 2782 ff = (struct feat_fd){ .fd = fd}; 2783 lseek(fd, sizeof(f_header), SEEK_SET); 2784 2785 evlist__for_each_entry(session->evlist, evsel) { 2786 evsel->id_offset = lseek(fd, 0, SEEK_CUR); 2787 err = do_write(&ff, evsel->id, evsel->ids * sizeof(u64)); 2788 if (err < 0) { 2789 pr_debug("failed to write perf header\n"); 2790 return err; 2791 } 2792 } 2793 2794 attr_offset = lseek(ff.fd, 0, SEEK_CUR); 2795 2796 evlist__for_each_entry(evlist, evsel) { 2797 f_attr = (struct perf_file_attr){ 2798 .attr = evsel->attr, 2799 .ids = { 2800 .offset = evsel->id_offset, 2801 .size = evsel->ids * sizeof(u64), 2802 } 2803 }; 2804 err = do_write(&ff, &f_attr, sizeof(f_attr)); 2805 if (err < 0) { 2806 pr_debug("failed to write perf header attribute\n"); 2807 return err; 2808 } 2809 } 2810 2811 if (!header->data_offset) 2812 header->data_offset = lseek(fd, 0, SEEK_CUR); 2813 header->feat_offset = header->data_offset + header->data_size; 2814 2815 if (at_exit) { 2816 err = perf_header__adds_write(header, evlist, fd); 2817 if (err < 0) 2818 return err; 2819 } 2820 2821 f_header = (struct perf_file_header){ 2822 .magic = PERF_MAGIC, 2823 .size = sizeof(f_header), 2824 .attr_size = sizeof(f_attr), 2825 .attrs = { 2826 .offset = attr_offset, 2827 .size = evlist->nr_entries * sizeof(f_attr), 2828 }, 2829 .data = { 2830 .offset = header->data_offset, 2831 .size = header->data_size, 2832 }, 2833 /* event_types is ignored, store zeros */ 2834 }; 2835 2836 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); 2837 2838 lseek(fd, 0, SEEK_SET); 2839 err = do_write(&ff, &f_header, sizeof(f_header)); 2840 if (err < 0) { 2841 pr_debug("failed to write perf header\n"); 2842 return err; 2843 } 2844 lseek(fd, header->data_offset + header->data_size, SEEK_SET); 2845 2846 return 0; 2847 } 2848 2849 static int perf_header__getbuffer64(struct perf_header *header, 2850 int fd, void *buf, size_t size) 2851 { 2852 if (readn(fd, buf, size) <= 0) 2853 return -1; 2854 2855 if (header->needs_swap) 2856 mem_bswap_64(buf, size); 2857 2858 return 0; 2859 } 2860 2861 int perf_header__process_sections(struct perf_header *header, int fd, 2862 void *data, 2863 int (*process)(struct perf_file_section *section, 2864 struct perf_header *ph, 2865 int feat, int fd, void *data)) 2866 { 2867 struct perf_file_section *feat_sec, *sec; 2868 int nr_sections; 2869 int sec_size; 2870 int feat; 2871 int err; 2872 2873 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 2874 if (!nr_sections) 2875 return 0; 2876 2877 feat_sec = sec = calloc(nr_sections, sizeof(*feat_sec)); 2878 if (!feat_sec) 2879 return -1; 2880 2881 sec_size = sizeof(*feat_sec) * nr_sections; 2882 2883 lseek(fd, header->feat_offset, SEEK_SET); 2884 2885 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); 2886 if (err < 0) 2887 goto out_free; 2888 2889 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) { 2890 err = process(sec++, header, feat, fd, data); 2891 if (err < 0) 2892 goto out_free; 2893 } 2894 err = 0; 2895 out_free: 2896 free(feat_sec); 2897 return err; 2898 } 2899 2900 static const int attr_file_abi_sizes[] = { 2901 [0] = PERF_ATTR_SIZE_VER0, 2902 [1] = PERF_ATTR_SIZE_VER1, 2903 [2] = PERF_ATTR_SIZE_VER2, 2904 [3] = PERF_ATTR_SIZE_VER3, 2905 [4] = PERF_ATTR_SIZE_VER4, 2906 0, 2907 }; 2908 2909 /* 2910 * In the legacy file format, the magic number is not used to encode endianness. 2911 * hdr_sz was used to encode endianness. But given that hdr_sz can vary based 2912 * on ABI revisions, we need to try all combinations for all endianness to 2913 * detect the endianness. 2914 */ 2915 static int try_all_file_abis(uint64_t hdr_sz, struct perf_header *ph) 2916 { 2917 uint64_t ref_size, attr_size; 2918 int i; 2919 2920 for (i = 0 ; attr_file_abi_sizes[i]; i++) { 2921 ref_size = attr_file_abi_sizes[i] 2922 + sizeof(struct perf_file_section); 2923 if (hdr_sz != ref_size) { 2924 attr_size = bswap_64(hdr_sz); 2925 if (attr_size != ref_size) 2926 continue; 2927 2928 ph->needs_swap = true; 2929 } 2930 pr_debug("ABI%d perf.data file detected, need_swap=%d\n", 2931 i, 2932 ph->needs_swap); 2933 return 0; 2934 } 2935 /* could not determine endianness */ 2936 return -1; 2937 } 2938 2939 #define PERF_PIPE_HDR_VER0 16 2940 2941 static const size_t attr_pipe_abi_sizes[] = { 2942 [0] = PERF_PIPE_HDR_VER0, 2943 0, 2944 }; 2945 2946 /* 2947 * In the legacy pipe format, there is an implicit assumption that endiannesss 2948 * between host recording the samples, and host parsing the samples is the 2949 * same. This is not always the case given that the pipe output may always be 2950 * redirected into a file and analyzed on a different machine with possibly a 2951 * different endianness and perf_event ABI revsions in the perf tool itself. 2952 */ 2953 static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph) 2954 { 2955 u64 attr_size; 2956 int i; 2957 2958 for (i = 0 ; attr_pipe_abi_sizes[i]; i++) { 2959 if (hdr_sz != attr_pipe_abi_sizes[i]) { 2960 attr_size = bswap_64(hdr_sz); 2961 if (attr_size != hdr_sz) 2962 continue; 2963 2964 ph->needs_swap = true; 2965 } 2966 pr_debug("Pipe ABI%d perf.data file detected\n", i); 2967 return 0; 2968 } 2969 return -1; 2970 } 2971 2972 bool is_perf_magic(u64 magic) 2973 { 2974 if (!memcmp(&magic, __perf_magic1, sizeof(magic)) 2975 || magic == __perf_magic2 2976 || magic == __perf_magic2_sw) 2977 return true; 2978 2979 return false; 2980 } 2981 2982 static int check_magic_endian(u64 magic, uint64_t hdr_sz, 2983 bool is_pipe, struct perf_header *ph) 2984 { 2985 int ret; 2986 2987 /* check for legacy format */ 2988 ret = memcmp(&magic, __perf_magic1, sizeof(magic)); 2989 if (ret == 0) { 2990 ph->version = PERF_HEADER_VERSION_1; 2991 pr_debug("legacy perf.data format\n"); 2992 if (is_pipe) 2993 return try_all_pipe_abis(hdr_sz, ph); 2994 2995 return try_all_file_abis(hdr_sz, ph); 2996 } 2997 /* 2998 * the new magic number serves two purposes: 2999 * - unique number to identify actual perf.data files 3000 * - encode endianness of file 3001 */ 3002 ph->version = PERF_HEADER_VERSION_2; 3003 3004 /* check magic number with one endianness */ 3005 if (magic == __perf_magic2) 3006 return 0; 3007 3008 /* check magic number with opposite endianness */ 3009 if (magic != __perf_magic2_sw) 3010 return -1; 3011 3012 ph->needs_swap = true; 3013 3014 return 0; 3015 } 3016 3017 int perf_file_header__read(struct perf_file_header *header, 3018 struct perf_header *ph, int fd) 3019 { 3020 ssize_t ret; 3021 3022 lseek(fd, 0, SEEK_SET); 3023 3024 ret = readn(fd, header, sizeof(*header)); 3025 if (ret <= 0) 3026 return -1; 3027 3028 if (check_magic_endian(header->magic, 3029 header->attr_size, false, ph) < 0) { 3030 pr_debug("magic/endian check failed\n"); 3031 return -1; 3032 } 3033 3034 if (ph->needs_swap) { 3035 mem_bswap_64(header, offsetof(struct perf_file_header, 3036 adds_features)); 3037 } 3038 3039 if (header->size != sizeof(*header)) { 3040 /* Support the previous format */ 3041 if (header->size == offsetof(typeof(*header), adds_features)) 3042 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 3043 else 3044 return -1; 3045 } else if (ph->needs_swap) { 3046 /* 3047 * feature bitmap is declared as an array of unsigned longs -- 3048 * not good since its size can differ between the host that 3049 * generated the data file and the host analyzing the file. 3050 * 3051 * We need to handle endianness, but we don't know the size of 3052 * the unsigned long where the file was generated. Take a best 3053 * guess at determining it: try 64-bit swap first (ie., file 3054 * created on a 64-bit host), and check if the hostname feature 3055 * bit is set (this feature bit is forced on as of fbe96f2). 3056 * If the bit is not, undo the 64-bit swap and try a 32-bit 3057 * swap. If the hostname bit is still not set (e.g., older data 3058 * file), punt and fallback to the original behavior -- 3059 * clearing all feature bits and setting buildid. 3060 */ 3061 mem_bswap_64(&header->adds_features, 3062 BITS_TO_U64(HEADER_FEAT_BITS)); 3063 3064 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 3065 /* unswap as u64 */ 3066 mem_bswap_64(&header->adds_features, 3067 BITS_TO_U64(HEADER_FEAT_BITS)); 3068 3069 /* unswap as u32 */ 3070 mem_bswap_32(&header->adds_features, 3071 BITS_TO_U32(HEADER_FEAT_BITS)); 3072 } 3073 3074 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 3075 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 3076 set_bit(HEADER_BUILD_ID, header->adds_features); 3077 } 3078 } 3079 3080 memcpy(&ph->adds_features, &header->adds_features, 3081 sizeof(ph->adds_features)); 3082 3083 ph->data_offset = header->data.offset; 3084 ph->data_size = header->data.size; 3085 ph->feat_offset = header->data.offset + header->data.size; 3086 return 0; 3087 } 3088 3089 static int perf_file_section__process(struct perf_file_section *section, 3090 struct perf_header *ph, 3091 int feat, int fd, void *data) 3092 { 3093 struct feat_fd fdd = { 3094 .fd = fd, 3095 .ph = ph, 3096 .size = section->size, 3097 .offset = section->offset, 3098 }; 3099 3100 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { 3101 pr_debug("Failed to lseek to %" PRIu64 " offset for feature " 3102 "%d, continuing...\n", section->offset, feat); 3103 return 0; 3104 } 3105 3106 if (feat >= HEADER_LAST_FEATURE) { 3107 pr_debug("unknown feature %d, continuing...\n", feat); 3108 return 0; 3109 } 3110 3111 if (!feat_ops[feat].process) 3112 return 0; 3113 3114 return feat_ops[feat].process(&fdd, data); 3115 } 3116 3117 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, 3118 struct perf_header *ph, int fd, 3119 bool repipe) 3120 { 3121 struct feat_fd ff = { 3122 .fd = STDOUT_FILENO, 3123 .ph = ph, 3124 }; 3125 ssize_t ret; 3126 3127 ret = readn(fd, header, sizeof(*header)); 3128 if (ret <= 0) 3129 return -1; 3130 3131 if (check_magic_endian(header->magic, header->size, true, ph) < 0) { 3132 pr_debug("endian/magic failed\n"); 3133 return -1; 3134 } 3135 3136 if (ph->needs_swap) 3137 header->size = bswap_64(header->size); 3138 3139 if (repipe && do_write(&ff, header, sizeof(*header)) < 0) 3140 return -1; 3141 3142 return 0; 3143 } 3144 3145 static int perf_header__read_pipe(struct perf_session *session) 3146 { 3147 struct perf_header *header = &session->header; 3148 struct perf_pipe_file_header f_header; 3149 3150 if (perf_file_header__read_pipe(&f_header, header, 3151 perf_data__fd(session->data), 3152 session->repipe) < 0) { 3153 pr_debug("incompatible file format\n"); 3154 return -EINVAL; 3155 } 3156 3157 return 0; 3158 } 3159 3160 static int read_attr(int fd, struct perf_header *ph, 3161 struct perf_file_attr *f_attr) 3162 { 3163 struct perf_event_attr *attr = &f_attr->attr; 3164 size_t sz, left; 3165 size_t our_sz = sizeof(f_attr->attr); 3166 ssize_t ret; 3167 3168 memset(f_attr, 0, sizeof(*f_attr)); 3169 3170 /* read minimal guaranteed structure */ 3171 ret = readn(fd, attr, PERF_ATTR_SIZE_VER0); 3172 if (ret <= 0) { 3173 pr_debug("cannot read %d bytes of header attr\n", 3174 PERF_ATTR_SIZE_VER0); 3175 return -1; 3176 } 3177 3178 /* on file perf_event_attr size */ 3179 sz = attr->size; 3180 3181 if (ph->needs_swap) 3182 sz = bswap_32(sz); 3183 3184 if (sz == 0) { 3185 /* assume ABI0 */ 3186 sz = PERF_ATTR_SIZE_VER0; 3187 } else if (sz > our_sz) { 3188 pr_debug("file uses a more recent and unsupported ABI" 3189 " (%zu bytes extra)\n", sz - our_sz); 3190 return -1; 3191 } 3192 /* what we have not yet read and that we know about */ 3193 left = sz - PERF_ATTR_SIZE_VER0; 3194 if (left) { 3195 void *ptr = attr; 3196 ptr += PERF_ATTR_SIZE_VER0; 3197 3198 ret = readn(fd, ptr, left); 3199 } 3200 /* read perf_file_section, ids are read in caller */ 3201 ret = readn(fd, &f_attr->ids, sizeof(f_attr->ids)); 3202 3203 return ret <= 0 ? -1 : 0; 3204 } 3205 3206 static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, 3207 struct tep_handle *pevent) 3208 { 3209 struct event_format *event; 3210 char bf[128]; 3211 3212 /* already prepared */ 3213 if (evsel->tp_format) 3214 return 0; 3215 3216 if (pevent == NULL) { 3217 pr_debug("broken or missing trace data\n"); 3218 return -1; 3219 } 3220 3221 event = tep_find_event(pevent, evsel->attr.config); 3222 if (event == NULL) { 3223 pr_debug("cannot find event format for %d\n", (int)evsel->attr.config); 3224 return -1; 3225 } 3226 3227 if (!evsel->name) { 3228 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); 3229 evsel->name = strdup(bf); 3230 if (evsel->name == NULL) 3231 return -1; 3232 } 3233 3234 evsel->tp_format = event; 3235 return 0; 3236 } 3237 3238 static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, 3239 struct tep_handle *pevent) 3240 { 3241 struct perf_evsel *pos; 3242 3243 evlist__for_each_entry(evlist, pos) { 3244 if (pos->attr.type == PERF_TYPE_TRACEPOINT && 3245 perf_evsel__prepare_tracepoint_event(pos, pevent)) 3246 return -1; 3247 } 3248 3249 return 0; 3250 } 3251 3252 int perf_session__read_header(struct perf_session *session) 3253 { 3254 struct perf_data *data = session->data; 3255 struct perf_header *header = &session->header; 3256 struct perf_file_header f_header; 3257 struct perf_file_attr f_attr; 3258 u64 f_id; 3259 int nr_attrs, nr_ids, i, j; 3260 int fd = perf_data__fd(data); 3261 3262 session->evlist = perf_evlist__new(); 3263 if (session->evlist == NULL) 3264 return -ENOMEM; 3265 3266 session->evlist->env = &header->env; 3267 session->machines.host.env = &header->env; 3268 if (perf_data__is_pipe(data)) 3269 return perf_header__read_pipe(session); 3270 3271 if (perf_file_header__read(&f_header, header, fd) < 0) 3272 return -EINVAL; 3273 3274 /* 3275 * Sanity check that perf.data was written cleanly; data size is 3276 * initialized to 0 and updated only if the on_exit function is run. 3277 * If data size is still 0 then the file contains only partial 3278 * information. Just warn user and process it as much as it can. 3279 */ 3280 if (f_header.data.size == 0) { 3281 pr_warning("WARNING: The %s file's data size field is 0 which is unexpected.\n" 3282 "Was the 'perf record' command properly terminated?\n", 3283 data->file.path); 3284 } 3285 3286 nr_attrs = f_header.attrs.size / f_header.attr_size; 3287 lseek(fd, f_header.attrs.offset, SEEK_SET); 3288 3289 for (i = 0; i < nr_attrs; i++) { 3290 struct perf_evsel *evsel; 3291 off_t tmp; 3292 3293 if (read_attr(fd, header, &f_attr) < 0) 3294 goto out_errno; 3295 3296 if (header->needs_swap) { 3297 f_attr.ids.size = bswap_64(f_attr.ids.size); 3298 f_attr.ids.offset = bswap_64(f_attr.ids.offset); 3299 perf_event__attr_swap(&f_attr.attr); 3300 } 3301 3302 tmp = lseek(fd, 0, SEEK_CUR); 3303 evsel = perf_evsel__new(&f_attr.attr); 3304 3305 if (evsel == NULL) 3306 goto out_delete_evlist; 3307 3308 evsel->needs_swap = header->needs_swap; 3309 /* 3310 * Do it before so that if perf_evsel__alloc_id fails, this 3311 * entry gets purged too at perf_evlist__delete(). 3312 */ 3313 perf_evlist__add(session->evlist, evsel); 3314 3315 nr_ids = f_attr.ids.size / sizeof(u64); 3316 /* 3317 * We don't have the cpu and thread maps on the header, so 3318 * for allocating the perf_sample_id table we fake 1 cpu and 3319 * hattr->ids threads. 3320 */ 3321 if (perf_evsel__alloc_id(evsel, 1, nr_ids)) 3322 goto out_delete_evlist; 3323 3324 lseek(fd, f_attr.ids.offset, SEEK_SET); 3325 3326 for (j = 0; j < nr_ids; j++) { 3327 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) 3328 goto out_errno; 3329 3330 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); 3331 } 3332 3333 lseek(fd, tmp, SEEK_SET); 3334 } 3335 3336 perf_header__process_sections(header, fd, &session->tevent, 3337 perf_file_section__process); 3338 3339 if (perf_evlist__prepare_tracepoint_events(session->evlist, 3340 session->tevent.pevent)) 3341 goto out_delete_evlist; 3342 3343 return 0; 3344 out_errno: 3345 return -errno; 3346 3347 out_delete_evlist: 3348 perf_evlist__delete(session->evlist); 3349 session->evlist = NULL; 3350 return -ENOMEM; 3351 } 3352 3353 int perf_event__synthesize_attr(struct perf_tool *tool, 3354 struct perf_event_attr *attr, u32 ids, u64 *id, 3355 perf_event__handler_t process) 3356 { 3357 union perf_event *ev; 3358 size_t size; 3359 int err; 3360 3361 size = sizeof(struct perf_event_attr); 3362 size = PERF_ALIGN(size, sizeof(u64)); 3363 size += sizeof(struct perf_event_header); 3364 size += ids * sizeof(u64); 3365 3366 ev = malloc(size); 3367 3368 if (ev == NULL) 3369 return -ENOMEM; 3370 3371 ev->attr.attr = *attr; 3372 memcpy(ev->attr.id, id, ids * sizeof(u64)); 3373 3374 ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 3375 ev->attr.header.size = (u16)size; 3376 3377 if (ev->attr.header.size == size) 3378 err = process(tool, ev, NULL, NULL); 3379 else 3380 err = -E2BIG; 3381 3382 free(ev); 3383 3384 return err; 3385 } 3386 3387 int perf_event__synthesize_features(struct perf_tool *tool, 3388 struct perf_session *session, 3389 struct perf_evlist *evlist, 3390 perf_event__handler_t process) 3391 { 3392 struct perf_header *header = &session->header; 3393 struct feat_fd ff; 3394 struct feature_event *fe; 3395 size_t sz, sz_hdr; 3396 int feat, ret; 3397 3398 sz_hdr = sizeof(fe->header); 3399 sz = sizeof(union perf_event); 3400 /* get a nice alignment */ 3401 sz = PERF_ALIGN(sz, page_size); 3402 3403 memset(&ff, 0, sizeof(ff)); 3404 3405 ff.buf = malloc(sz); 3406 if (!ff.buf) 3407 return -ENOMEM; 3408 3409 ff.size = sz - sz_hdr; 3410 3411 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 3412 if (!feat_ops[feat].synthesize) { 3413 pr_debug("No record header feature for header :%d\n", feat); 3414 continue; 3415 } 3416 3417 ff.offset = sizeof(*fe); 3418 3419 ret = feat_ops[feat].write(&ff, evlist); 3420 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) { 3421 pr_debug("Error writing feature\n"); 3422 continue; 3423 } 3424 /* ff.buf may have changed due to realloc in do_write() */ 3425 fe = ff.buf; 3426 memset(fe, 0, sizeof(*fe)); 3427 3428 fe->feat_id = feat; 3429 fe->header.type = PERF_RECORD_HEADER_FEATURE; 3430 fe->header.size = ff.offset; 3431 3432 ret = process(tool, ff.buf, NULL, NULL); 3433 if (ret) { 3434 free(ff.buf); 3435 return ret; 3436 } 3437 } 3438 3439 /* Send HEADER_LAST_FEATURE mark. */ 3440 fe = ff.buf; 3441 fe->feat_id = HEADER_LAST_FEATURE; 3442 fe->header.type = PERF_RECORD_HEADER_FEATURE; 3443 fe->header.size = sizeof(*fe); 3444 3445 ret = process(tool, ff.buf, NULL, NULL); 3446 3447 free(ff.buf); 3448 return ret; 3449 } 3450 3451 int perf_event__process_feature(struct perf_tool *tool, 3452 union perf_event *event, 3453 struct perf_session *session __maybe_unused) 3454 { 3455 struct feat_fd ff = { .fd = 0 }; 3456 struct feature_event *fe = (struct feature_event *)event; 3457 int type = fe->header.type; 3458 u64 feat = fe->feat_id; 3459 3460 if (type < 0 || type >= PERF_RECORD_HEADER_MAX) { 3461 pr_warning("invalid record type %d in pipe-mode\n", type); 3462 return 0; 3463 } 3464 if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) { 3465 pr_warning("invalid record type %d in pipe-mode\n", type); 3466 return -1; 3467 } 3468 3469 if (!feat_ops[feat].process) 3470 return 0; 3471 3472 ff.buf = (void *)fe->data; 3473 ff.size = event->header.size - sizeof(event->header); 3474 ff.ph = &session->header; 3475 3476 if (feat_ops[feat].process(&ff, NULL)) 3477 return -1; 3478 3479 if (!feat_ops[feat].print || !tool->show_feat_hdr) 3480 return 0; 3481 3482 if (!feat_ops[feat].full_only || 3483 tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) { 3484 feat_ops[feat].print(&ff, stdout); 3485 } else { 3486 fprintf(stdout, "# %s info available, use -I to display\n", 3487 feat_ops[feat].name); 3488 } 3489 3490 return 0; 3491 } 3492 3493 static struct event_update_event * 3494 event_update_event__new(size_t size, u64 type, u64 id) 3495 { 3496 struct event_update_event *ev; 3497 3498 size += sizeof(*ev); 3499 size = PERF_ALIGN(size, sizeof(u64)); 3500 3501 ev = zalloc(size); 3502 if (ev) { 3503 ev->header.type = PERF_RECORD_EVENT_UPDATE; 3504 ev->header.size = (u16)size; 3505 ev->type = type; 3506 ev->id = id; 3507 } 3508 return ev; 3509 } 3510 3511 int 3512 perf_event__synthesize_event_update_unit(struct perf_tool *tool, 3513 struct perf_evsel *evsel, 3514 perf_event__handler_t process) 3515 { 3516 struct event_update_event *ev; 3517 size_t size = strlen(evsel->unit); 3518 int err; 3519 3520 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->id[0]); 3521 if (ev == NULL) 3522 return -ENOMEM; 3523 3524 strncpy(ev->data, evsel->unit, size); 3525 err = process(tool, (union perf_event *)ev, NULL, NULL); 3526 free(ev); 3527 return err; 3528 } 3529 3530 int 3531 perf_event__synthesize_event_update_scale(struct perf_tool *tool, 3532 struct perf_evsel *evsel, 3533 perf_event__handler_t process) 3534 { 3535 struct event_update_event *ev; 3536 struct event_update_event_scale *ev_data; 3537 int err; 3538 3539 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->id[0]); 3540 if (ev == NULL) 3541 return -ENOMEM; 3542 3543 ev_data = (struct event_update_event_scale *) ev->data; 3544 ev_data->scale = evsel->scale; 3545 err = process(tool, (union perf_event*) ev, NULL, NULL); 3546 free(ev); 3547 return err; 3548 } 3549 3550 int 3551 perf_event__synthesize_event_update_name(struct perf_tool *tool, 3552 struct perf_evsel *evsel, 3553 perf_event__handler_t process) 3554 { 3555 struct event_update_event *ev; 3556 size_t len = strlen(evsel->name); 3557 int err; 3558 3559 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->id[0]); 3560 if (ev == NULL) 3561 return -ENOMEM; 3562 3563 strncpy(ev->data, evsel->name, len); 3564 err = process(tool, (union perf_event*) ev, NULL, NULL); 3565 free(ev); 3566 return err; 3567 } 3568 3569 int 3570 perf_event__synthesize_event_update_cpus(struct perf_tool *tool, 3571 struct perf_evsel *evsel, 3572 perf_event__handler_t process) 3573 { 3574 size_t size = sizeof(struct event_update_event); 3575 struct event_update_event *ev; 3576 int max, err; 3577 u16 type; 3578 3579 if (!evsel->own_cpus) 3580 return 0; 3581 3582 ev = cpu_map_data__alloc(evsel->own_cpus, &size, &type, &max); 3583 if (!ev) 3584 return -ENOMEM; 3585 3586 ev->header.type = PERF_RECORD_EVENT_UPDATE; 3587 ev->header.size = (u16)size; 3588 ev->type = PERF_EVENT_UPDATE__CPUS; 3589 ev->id = evsel->id[0]; 3590 3591 cpu_map_data__synthesize((struct cpu_map_data *) ev->data, 3592 evsel->own_cpus, 3593 type, max); 3594 3595 err = process(tool, (union perf_event*) ev, NULL, NULL); 3596 free(ev); 3597 return err; 3598 } 3599 3600 size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) 3601 { 3602 struct event_update_event *ev = &event->event_update; 3603 struct event_update_event_scale *ev_scale; 3604 struct event_update_event_cpus *ev_cpus; 3605 struct cpu_map *map; 3606 size_t ret; 3607 3608 ret = fprintf(fp, "\n... id: %" PRIu64 "\n", ev->id); 3609 3610 switch (ev->type) { 3611 case PERF_EVENT_UPDATE__SCALE: 3612 ev_scale = (struct event_update_event_scale *) ev->data; 3613 ret += fprintf(fp, "... scale: %f\n", ev_scale->scale); 3614 break; 3615 case PERF_EVENT_UPDATE__UNIT: 3616 ret += fprintf(fp, "... unit: %s\n", ev->data); 3617 break; 3618 case PERF_EVENT_UPDATE__NAME: 3619 ret += fprintf(fp, "... name: %s\n", ev->data); 3620 break; 3621 case PERF_EVENT_UPDATE__CPUS: 3622 ev_cpus = (struct event_update_event_cpus *) ev->data; 3623 ret += fprintf(fp, "... "); 3624 3625 map = cpu_map__new_data(&ev_cpus->cpus); 3626 if (map) 3627 ret += cpu_map__fprintf(map, fp); 3628 else 3629 ret += fprintf(fp, "failed to get cpus\n"); 3630 break; 3631 default: 3632 ret += fprintf(fp, "... unknown type\n"); 3633 break; 3634 } 3635 3636 return ret; 3637 } 3638 3639 int perf_event__synthesize_attrs(struct perf_tool *tool, 3640 struct perf_session *session, 3641 perf_event__handler_t process) 3642 { 3643 struct perf_evsel *evsel; 3644 int err = 0; 3645 3646 evlist__for_each_entry(session->evlist, evsel) { 3647 err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids, 3648 evsel->id, process); 3649 if (err) { 3650 pr_debug("failed to create perf header attribute\n"); 3651 return err; 3652 } 3653 } 3654 3655 return err; 3656 } 3657 3658 static bool has_unit(struct perf_evsel *counter) 3659 { 3660 return counter->unit && *counter->unit; 3661 } 3662 3663 static bool has_scale(struct perf_evsel *counter) 3664 { 3665 return counter->scale != 1; 3666 } 3667 3668 int perf_event__synthesize_extra_attr(struct perf_tool *tool, 3669 struct perf_evlist *evsel_list, 3670 perf_event__handler_t process, 3671 bool is_pipe) 3672 { 3673 struct perf_evsel *counter; 3674 int err; 3675 3676 /* 3677 * Synthesize other events stuff not carried within 3678 * attr event - unit, scale, name 3679 */ 3680 evlist__for_each_entry(evsel_list, counter) { 3681 if (!counter->supported) 3682 continue; 3683 3684 /* 3685 * Synthesize unit and scale only if it's defined. 3686 */ 3687 if (has_unit(counter)) { 3688 err = perf_event__synthesize_event_update_unit(tool, counter, process); 3689 if (err < 0) { 3690 pr_err("Couldn't synthesize evsel unit.\n"); 3691 return err; 3692 } 3693 } 3694 3695 if (has_scale(counter)) { 3696 err = perf_event__synthesize_event_update_scale(tool, counter, process); 3697 if (err < 0) { 3698 pr_err("Couldn't synthesize evsel counter.\n"); 3699 return err; 3700 } 3701 } 3702 3703 if (counter->own_cpus) { 3704 err = perf_event__synthesize_event_update_cpus(tool, counter, process); 3705 if (err < 0) { 3706 pr_err("Couldn't synthesize evsel cpus.\n"); 3707 return err; 3708 } 3709 } 3710 3711 /* 3712 * Name is needed only for pipe output, 3713 * perf.data carries event names. 3714 */ 3715 if (is_pipe) { 3716 err = perf_event__synthesize_event_update_name(tool, counter, process); 3717 if (err < 0) { 3718 pr_err("Couldn't synthesize evsel name.\n"); 3719 return err; 3720 } 3721 } 3722 } 3723 return 0; 3724 } 3725 3726 int perf_event__process_attr(struct perf_tool *tool __maybe_unused, 3727 union perf_event *event, 3728 struct perf_evlist **pevlist) 3729 { 3730 u32 i, ids, n_ids; 3731 struct perf_evsel *evsel; 3732 struct perf_evlist *evlist = *pevlist; 3733 3734 if (evlist == NULL) { 3735 *pevlist = evlist = perf_evlist__new(); 3736 if (evlist == NULL) 3737 return -ENOMEM; 3738 } 3739 3740 evsel = perf_evsel__new(&event->attr.attr); 3741 if (evsel == NULL) 3742 return -ENOMEM; 3743 3744 perf_evlist__add(evlist, evsel); 3745 3746 ids = event->header.size; 3747 ids -= (void *)&event->attr.id - (void *)event; 3748 n_ids = ids / sizeof(u64); 3749 /* 3750 * We don't have the cpu and thread maps on the header, so 3751 * for allocating the perf_sample_id table we fake 1 cpu and 3752 * hattr->ids threads. 3753 */ 3754 if (perf_evsel__alloc_id(evsel, 1, n_ids)) 3755 return -ENOMEM; 3756 3757 for (i = 0; i < n_ids; i++) { 3758 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); 3759 } 3760 3761 return 0; 3762 } 3763 3764 int perf_event__process_event_update(struct perf_tool *tool __maybe_unused, 3765 union perf_event *event, 3766 struct perf_evlist **pevlist) 3767 { 3768 struct event_update_event *ev = &event->event_update; 3769 struct event_update_event_scale *ev_scale; 3770 struct event_update_event_cpus *ev_cpus; 3771 struct perf_evlist *evlist; 3772 struct perf_evsel *evsel; 3773 struct cpu_map *map; 3774 3775 if (!pevlist || *pevlist == NULL) 3776 return -EINVAL; 3777 3778 evlist = *pevlist; 3779 3780 evsel = perf_evlist__id2evsel(evlist, ev->id); 3781 if (evsel == NULL) 3782 return -EINVAL; 3783 3784 switch (ev->type) { 3785 case PERF_EVENT_UPDATE__UNIT: 3786 evsel->unit = strdup(ev->data); 3787 break; 3788 case PERF_EVENT_UPDATE__NAME: 3789 evsel->name = strdup(ev->data); 3790 break; 3791 case PERF_EVENT_UPDATE__SCALE: 3792 ev_scale = (struct event_update_event_scale *) ev->data; 3793 evsel->scale = ev_scale->scale; 3794 break; 3795 case PERF_EVENT_UPDATE__CPUS: 3796 ev_cpus = (struct event_update_event_cpus *) ev->data; 3797 3798 map = cpu_map__new_data(&ev_cpus->cpus); 3799 if (map) 3800 evsel->own_cpus = map; 3801 else 3802 pr_err("failed to get event_update cpus\n"); 3803 default: 3804 break; 3805 } 3806 3807 return 0; 3808 } 3809 3810 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, 3811 struct perf_evlist *evlist, 3812 perf_event__handler_t process) 3813 { 3814 union perf_event ev; 3815 struct tracing_data *tdata; 3816 ssize_t size = 0, aligned_size = 0, padding; 3817 struct feat_fd ff; 3818 int err __maybe_unused = 0; 3819 3820 /* 3821 * We are going to store the size of the data followed 3822 * by the data contents. Since the fd descriptor is a pipe, 3823 * we cannot seek back to store the size of the data once 3824 * we know it. Instead we: 3825 * 3826 * - write the tracing data to the temp file 3827 * - get/write the data size to pipe 3828 * - write the tracing data from the temp file 3829 * to the pipe 3830 */ 3831 tdata = tracing_data_get(&evlist->entries, fd, true); 3832 if (!tdata) 3833 return -1; 3834 3835 memset(&ev, 0, sizeof(ev)); 3836 3837 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 3838 size = tdata->size; 3839 aligned_size = PERF_ALIGN(size, sizeof(u64)); 3840 padding = aligned_size - size; 3841 ev.tracing_data.header.size = sizeof(ev.tracing_data); 3842 ev.tracing_data.size = aligned_size; 3843 3844 process(tool, &ev, NULL, NULL); 3845 3846 /* 3847 * The put function will copy all the tracing data 3848 * stored in temp file to the pipe. 3849 */ 3850 tracing_data_put(tdata); 3851 3852 ff = (struct feat_fd){ .fd = fd }; 3853 if (write_padded(&ff, NULL, 0, padding)) 3854 return -1; 3855 3856 return aligned_size; 3857 } 3858 3859 int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused, 3860 union perf_event *event, 3861 struct perf_session *session) 3862 { 3863 ssize_t size_read, padding, size = event->tracing_data.size; 3864 int fd = perf_data__fd(session->data); 3865 off_t offset = lseek(fd, 0, SEEK_CUR); 3866 char buf[BUFSIZ]; 3867 3868 /* setup for reading amidst mmap */ 3869 lseek(fd, offset + sizeof(struct tracing_data_event), 3870 SEEK_SET); 3871 3872 size_read = trace_report(fd, &session->tevent, 3873 session->repipe); 3874 padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; 3875 3876 if (readn(fd, buf, padding) < 0) { 3877 pr_err("%s: reading input file", __func__); 3878 return -1; 3879 } 3880 if (session->repipe) { 3881 int retw = write(STDOUT_FILENO, buf, padding); 3882 if (retw <= 0 || retw != padding) { 3883 pr_err("%s: repiping tracing data padding", __func__); 3884 return -1; 3885 } 3886 } 3887 3888 if (size_read + padding != size) { 3889 pr_err("%s: tracing data size mismatch", __func__); 3890 return -1; 3891 } 3892 3893 perf_evlist__prepare_tracepoint_events(session->evlist, 3894 session->tevent.pevent); 3895 3896 return size_read + padding; 3897 } 3898 3899 int perf_event__synthesize_build_id(struct perf_tool *tool, 3900 struct dso *pos, u16 misc, 3901 perf_event__handler_t process, 3902 struct machine *machine) 3903 { 3904 union perf_event ev; 3905 size_t len; 3906 int err = 0; 3907 3908 if (!pos->hit) 3909 return err; 3910 3911 memset(&ev, 0, sizeof(ev)); 3912 3913 len = pos->long_name_len + 1; 3914 len = PERF_ALIGN(len, NAME_ALIGN); 3915 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); 3916 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 3917 ev.build_id.header.misc = misc; 3918 ev.build_id.pid = machine->pid; 3919 ev.build_id.header.size = sizeof(ev.build_id) + len; 3920 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); 3921 3922 err = process(tool, &ev, NULL, machine); 3923 3924 return err; 3925 } 3926 3927 int perf_event__process_build_id(struct perf_tool *tool __maybe_unused, 3928 union perf_event *event, 3929 struct perf_session *session) 3930 { 3931 __event_process_build_id(&event->build_id, 3932 event->build_id.filename, 3933 session); 3934 return 0; 3935 } 3936