#define _FILE_OFFSET_BITS 64

#include "util.h"
#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <sys/utsname.h>

#include "evlist.h"
#include "evsel.h"
#include "header.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"
#include "cpumap.h"

/* set by the tool to suppress populating ~/.debug with build-ids */
static bool no_buildid_cache = false;

/* growable table of user-specified event types, written into the header */
static int event_count;
static struct perf_trace_event_type *events;

/* saved copy of the record command line (see perf_header__set_cmdline()) */
static u32 header_argc;
static const char **header_argv;

/*
 * Append one (id, name) entry to the global 'events' table.
 * Names longer than MAX_EVENT_NAME are truncated (with a warning).
 * Returns 0 or -ENOMEM.
 */
int perf_header__push_event(u64 id, const char *name)
{
	if (strlen(name) > MAX_EVENT_NAME)
		pr_warning("Event %s will be truncated\n", name);

	if (!events) {
		events = malloc(sizeof(struct perf_trace_event_type));
		if (events == NULL)
			return -ENOMEM;
	} else {
		struct perf_trace_event_type *nevents;

		/* grow by one; keep old table on realloc failure */
		nevents = realloc(events, (event_count + 1) * sizeof(*events));
		if (nevents == NULL)
			return -ENOMEM;
		events = nevents;
	}
	/* memset guarantees NUL termination despite strncpy's semantics */
	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
	events[event_count].event_id = id;
	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
	event_count++;
	return 0;
}

/* Linear lookup of an event name by id; NULL when not registered. */
char *perf_header__find_event(u64 id)
{
	int i;
	for (i = 0 ; i < event_count; i++) {
		if (events[i].event_id == id)
			return events[i].name;
	}
	return NULL;
}

static const char *__perf_magic = "PERFFILE";

/* 8-byte magic read straight out of the literal above */
#define PERF_MAGIC (*(u64 *)__perf_magic)

/* on-disk layout: one attr followed by the file section holding its ids */
struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

/* Mark feature 'feat' as present in the header feature bitmap. */
void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

/* Remove feature 'feat' from the header feature bitmap. */
void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

/* Test whether feature 'feat' is set in the header feature bitmap. */
bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

/*
 * Write 'size' bytes to fd, retrying on short writes.
 * Returns 0 on success, -errno on write failure.
 */
static int do_write(int fd, const void *buf, size_t size)
{
	while (size) {
		int ret = write(fd, buf, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		buf += ret;
	}

	return 0;
}

#define NAME_ALIGN 64

/* Write 'count' bytes, then zero-pad up to 'count_aligned'. */
static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	if (!err)
		err = do_write(fd, zero_buf, count_aligned - count);

	return err;
}

/*
 * Write a length-prefixed, NUL-terminated, NAME_ALIGN-padded string.
 * Counterpart of do_read_string().
 */
static int do_write_string(int fd, const char *str)
{
	u32 len, olen;
	int ret;

	olen = strlen(str) + 1;
	len = ALIGN(olen, NAME_ALIGN);

	/* write len, incl. \0 */
	ret = do_write(fd, &len, sizeof(len));
	if (ret < 0)
		return ret;

	return write_padded(fd, str, olen, len);
}

/*
 * Read a string written by do_write_string(): u32 length (byte-swapped
 * if the file endianness differs), then 'len' bytes.  Returns a
 * malloc'd buffer the caller must free, or NULL on error.
 */
static char *do_read_string(int fd, struct perf_header *ph)
{
	ssize_t sz, ret;
	u32 len;
	char *buf;

	sz = read(fd, &len, sizeof(len));
	if (sz < (ssize_t)sizeof(len))
		return NULL;

	if (ph->needs_swap)
		len = bswap_32(len);

	buf = malloc(len);
	if (!buf)
		return NULL;

	ret = read(fd, buf, len);
	if (ret == (ssize_t)len) {
		/*
		 * strings are padded by zeroes
		 * thus the actual strlen of buf
		 * may be less than len
		 */
		return buf;
	}

	free(buf);
	return NULL;
}

/*
 * Save the command line for later emission into the HEADER_CMDLINE
 * feature.  Only the pointer array is copied; the argv strings
 * themselves are borrowed from the caller.
 */
int
perf_header__set_cmdline(int argc, const char **argv)
{
	int i;

	header_argc = (u32)argc;

	/* do not include NULL termination */
	header_argv = calloc(argc, sizeof(char *));
	if (!header_argv)
		return -ENOMEM;

	/*
	 * must copy argv contents because it gets moved
	 * around during option parsing
	 */
	for (i = 0; i < argc ; i++)
		header_argv[i] = argv[i];

	return 0;
}

/* iterate a dso list, skipping entries without a build-id */
#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)	\
		if (!pos->has_build_id)		\
			continue;		\
		else

/*
 * Emit one build_id_event per hit dso on 'head': fixed header plus the
 * dso long name padded to NAME_ALIGN.  Returns 0 or a negative errno.
 */
static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
				u16 misc, int fd)
{
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		struct build_id_event b;
		size_t len;

		if (!pos->hit)
			continue;
		len = pos->long_name_len + 1;
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.pid = pid;
		b.header.misc = misc;
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));
		if (err < 0)
			return err;
		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
		if (err < 0)
			return err;
	}

	return 0;
}

/*
 * Write build-id tables for one machine's kernel and user dsos,
 * using guest misc flags when the machine is not the host.
 */
static int machine__write_buildid_table(struct machine *machine, int fd)
{
	int err;
	u16 kmisc = PERF_RECORD_MISC_KERNEL,
	    umisc = PERF_RECORD_MISC_USER;

	if (!machine__is_host(machine)) {
		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
		umisc = PERF_RECORD_MISC_GUEST_USER;
	}

	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
					  kmisc, fd);
	if (err == 0)
		err = __dsos__write_buildid_table(&machine->user_dsos,
						  machine->pid, umisc, fd);
	return err;
}

/* Write build-id tables for the host machine and then every guest. */
static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
	struct perf_session *session = container_of(header,
			struct perf_session, header);
	struct rb_node *nd;
	int err = machine__write_buildid_table(&session->host_machine, fd);

	if (err)
		return err;

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		err = machine__write_buildid_table(pos, fd);
		if (err)
			break;
	}
	return err;
}

int
build_id_cache__add_s(const char *sbuild_id, const char *debugdir, 264 const char *name, bool is_kallsyms) 265 { 266 const size_t size = PATH_MAX; 267 char *realname, *filename = zalloc(size), 268 *linkname = zalloc(size), *targetname; 269 int len, err = -1; 270 271 if (is_kallsyms) { 272 if (symbol_conf.kptr_restrict) { 273 pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); 274 return 0; 275 } 276 realname = (char *)name; 277 } else 278 realname = realpath(name, NULL); 279 280 if (realname == NULL || filename == NULL || linkname == NULL) 281 goto out_free; 282 283 len = snprintf(filename, size, "%s%s%s", 284 debugdir, is_kallsyms ? "/" : "", realname); 285 if (mkdir_p(filename, 0755)) 286 goto out_free; 287 288 snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id); 289 290 if (access(filename, F_OK)) { 291 if (is_kallsyms) { 292 if (copyfile("/proc/kallsyms", filename)) 293 goto out_free; 294 } else if (link(realname, filename) && copyfile(name, filename)) 295 goto out_free; 296 } 297 298 len = snprintf(linkname, size, "%s/.build-id/%.2s", 299 debugdir, sbuild_id); 300 301 if (access(linkname, X_OK) && mkdir_p(linkname, 0755)) 302 goto out_free; 303 304 snprintf(linkname + len, size - len, "/%s", sbuild_id + 2); 305 targetname = filename + strlen(debugdir) - 5; 306 memcpy(targetname, "../..", 5); 307 308 if (symlink(targetname, linkname) == 0) 309 err = 0; 310 out_free: 311 if (!is_kallsyms) 312 free(realname); 313 free(filename); 314 free(linkname); 315 return err; 316 } 317 318 static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, 319 const char *name, const char *debugdir, 320 bool is_kallsyms) 321 { 322 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; 323 324 build_id__sprintf(build_id, build_id_size, sbuild_id); 325 326 return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms); 327 } 328 329 int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) 330 { 331 const size_t size = PATH_MAX; 332 char 
	*filename = zalloc(size),
	*linkname = zalloc(size);
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	/* zalloc() zeroed filename, so the readlink() result is terminated */
	if (readlink(linkname, filename, size - 1) < 0)
		goto out_free;

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}

/* Cache one dso's build-id; kernel dsos without a '/' path are kallsyms. */
static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
				     dso->long_name, debugdir, is_kallsyms);
}

/* Cache every dso on 'head' that has a build-id; -1 if any one failed. */
static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
{
	struct dso *pos;
	int err = 0;

	dsos__for_each_with_build_id(pos, head)
		if (dso__cache_build_id(pos, debugdir))
			err = -1;

	return err;
}

/* Cache build-ids of one machine's kernel and user dsos. */
static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
	return ret;
}

/*
 * Populate the on-disk build-id cache (buildid_dir) for the host and
 * all guest machines in this session.  Returns 0 or -1.
 */
static int perf_session__cache_build_ids(struct perf_session *session)
{
	struct rb_node *nd;
	int ret;
	char debugdir[PATH_MAX];

	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
		return -1;

	ret = machine__cache_build_ids(&session->host_machine, debugdir);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__cache_build_ids(pos, debugdir);
	}
	return ret ? -1 : 0;
}

/* true if any dso on this machine yielded a build-id */
static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
	return ret;
}

/* true if any machine in the session yielded a build-id */
static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
	struct rb_node *nd;
	bool ret = machine__read_build_ids(&session->host_machine, with_hits);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__read_build_ids(pos, with_hits);
	}

	return ret;
}

/* HEADER_TRACE_INFO: delegate to the tracing-data dumper. */
static int write_trace_info(int fd, struct perf_header *h __used,
			    struct perf_evlist *evlist)
{
	return read_tracing_data(fd, &evlist->entries);
}


/*
 * HEADER_BUILD_ID: collect build-ids for dsos with hits, write the
 * table, and (unless disabled) populate the build-id cache.
 */
static int write_build_id(int fd, struct perf_header *h,
			  struct perf_evlist *evlist __used)
{
	struct perf_session *session;
	int err;

	session = container_of(h, struct perf_session, header);

	if (!perf_session__read_build_ids(session, true))
		return -1;

	err = dsos__write_buildid_table(h, fd);
	if (err < 0) {
		pr_debug("failed to write buildid table\n");
		return err;
	}
	if (!no_buildid_cache)
		perf_session__cache_build_ids(session);

	return 0;
}

/* HEADER_HOSTNAME: uname nodename as a padded string. */
static int write_hostname(int fd, struct perf_header *h __used,
			  struct perf_evlist *evlist __used)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.nodename);
}

/* HEADER_OSRELEASE: uname release as a padded string. */
static int write_osrelease(int fd, struct perf_header *h __used,
			   struct perf_evlist *evlist __used)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.release);
}

/* HEADER_ARCH: uname machine as a padded string. */
static int write_arch(int fd, struct perf_header *h __used,
		      struct perf_evlist *evlist __used)
{
	struct utsname uts;
	int ret;

	ret = uname(&uts);
	if (ret < 0)
		return -1;

	return do_write_string(fd, uts.machine);
}

/* HEADER_VERSION: the perf version string. */
static int write_version(int fd, struct perf_header *h __used,
			 struct perf_evlist *evlist __used)
{
	return do_write_string(fd, perf_version_string);
}

/*
 * HEADER_CPUDESC: first /proc/cpuinfo line matching the per-arch
 * CPUINFO_PROC key, with the key stripped and runs of whitespace in
 * the branding string squashed to single spaces.
 */
static int write_cpudesc(int fd, struct perf_header *h __used,
		       struct perf_evlist *evlist __used)
{
#ifndef CPUINFO_PROC
#define CPUINFO_PROC NULL
#endif
	FILE *file;
	char *buf = NULL;
	char *s, *p;
	const char *search = CPUINFO_PROC;
	size_t len = 0;
	int ret = -1;

	/* arch did not define a cpuinfo key: nothing to record */
	if (!search)
		return -1;

	file = fopen("/proc/cpuinfo", "r");
	if (!file)
		return -1;

	while (getline(&buf, &len, file) > 0) {
		ret = strncmp(buf, search, strlen(search));
		if (!ret)
			break;
	}

	if (ret)
		goto done;

	s = buf;

	/* skip past "key\t: " to the value */
	p = strchr(buf, ':');
	if (p && *(p+1) == ' ' && *(p+2))
		s = p + 2;
	p = strchr(s, '\n');
	if (p)
		*p = '\0';

	/* squash extra space characters (branding string) */
	p = s;
	while (*p) {
		if (isspace(*p)) {
			char *r = p + 1;
			char *q = r;
			*p = ' ';
			while (*q && isspace(*q))
				q++;
			if (q != (p+1))
				while ((*r++ = *q++));
		}
		p++;
	}
	ret = do_write_string(fd, s);
done:
	free(buf);
	fclose(file);
	return ret;
}

/*
 * HEADER_NRCPUS: two u32s — configured (available) CPUs first, then
 * online CPUs.  print_nrcpus() must read them in the same order.
 */
static int write_nrcpus(int fd, struct perf_header *h __used,
			struct perf_evlist *evlist __used)
{
	long nr;
	u32 nrc, nra;
	int ret;

	nr = sysconf(_SC_NPROCESSORS_CONF);
	if (nr < 0)
		return -1;

	nrc = (u32)(nr & UINT_MAX);

	nr = sysconf(_SC_NPROCESSORS_ONLN);
	if (nr < 0)
		return -1;

	nra = (u32)(nr & UINT_MAX);

	ret = do_write(fd, &nrc,
sizeof(nrc)); 587 if (ret < 0) 588 return ret; 589 590 return do_write(fd, &nra, sizeof(nra)); 591 } 592 593 static int write_event_desc(int fd, struct perf_header *h __used, 594 struct perf_evlist *evlist) 595 { 596 struct perf_evsel *attr; 597 u32 nre = 0, nri, sz; 598 int ret; 599 600 list_for_each_entry(attr, &evlist->entries, node) 601 nre++; 602 603 /* 604 * write number of events 605 */ 606 ret = do_write(fd, &nre, sizeof(nre)); 607 if (ret < 0) 608 return ret; 609 610 /* 611 * size of perf_event_attr struct 612 */ 613 sz = (u32)sizeof(attr->attr); 614 ret = do_write(fd, &sz, sizeof(sz)); 615 if (ret < 0) 616 return ret; 617 618 list_for_each_entry(attr, &evlist->entries, node) { 619 620 ret = do_write(fd, &attr->attr, sz); 621 if (ret < 0) 622 return ret; 623 /* 624 * write number of unique id per event 625 * there is one id per instance of an event 626 * 627 * copy into an nri to be independent of the 628 * type of ids, 629 */ 630 nri = attr->ids; 631 ret = do_write(fd, &nri, sizeof(nri)); 632 if (ret < 0) 633 return ret; 634 635 /* 636 * write event string as passed on cmdline 637 */ 638 ret = do_write_string(fd, event_name(attr)); 639 if (ret < 0) 640 return ret; 641 /* 642 * write unique ids for this event 643 */ 644 ret = do_write(fd, attr->id, attr->ids * sizeof(u64)); 645 if (ret < 0) 646 return ret; 647 } 648 return 0; 649 } 650 651 static int write_cmdline(int fd, struct perf_header *h __used, 652 struct perf_evlist *evlist __used) 653 { 654 char buf[MAXPATHLEN]; 655 char proc[32]; 656 u32 i, n; 657 int ret; 658 659 /* 660 * actual atual path to perf binary 661 */ 662 sprintf(proc, "/proc/%d/exe", getpid()); 663 ret = readlink(proc, buf, sizeof(buf)); 664 if (ret <= 0) 665 return -1; 666 667 /* readlink() does not add null termination */ 668 buf[ret] = '\0'; 669 670 /* account for binary path */ 671 n = header_argc + 1; 672 673 ret = do_write(fd, &n, sizeof(n)); 674 if (ret < 0) 675 return ret; 676 677 ret = do_write_string(fd, buf); 678 if (ret < 
0) 679 return ret; 680 681 for (i = 0 ; i < header_argc; i++) { 682 ret = do_write_string(fd, header_argv[i]); 683 if (ret < 0) 684 return ret; 685 } 686 return 0; 687 } 688 689 #define CORE_SIB_FMT \ 690 "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list" 691 #define THRD_SIB_FMT \ 692 "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list" 693 694 struct cpu_topo { 695 u32 core_sib; 696 u32 thread_sib; 697 char **core_siblings; 698 char **thread_siblings; 699 }; 700 701 static int build_cpu_topo(struct cpu_topo *tp, int cpu) 702 { 703 FILE *fp; 704 char filename[MAXPATHLEN]; 705 char *buf = NULL, *p; 706 size_t len = 0; 707 u32 i = 0; 708 int ret = -1; 709 710 sprintf(filename, CORE_SIB_FMT, cpu); 711 fp = fopen(filename, "r"); 712 if (!fp) 713 return -1; 714 715 if (getline(&buf, &len, fp) <= 0) 716 goto done; 717 718 fclose(fp); 719 720 p = strchr(buf, '\n'); 721 if (p) 722 *p = '\0'; 723 724 for (i = 0; i < tp->core_sib; i++) { 725 if (!strcmp(buf, tp->core_siblings[i])) 726 break; 727 } 728 if (i == tp->core_sib) { 729 tp->core_siblings[i] = buf; 730 tp->core_sib++; 731 buf = NULL; 732 len = 0; 733 } 734 735 sprintf(filename, THRD_SIB_FMT, cpu); 736 fp = fopen(filename, "r"); 737 if (!fp) 738 goto done; 739 740 if (getline(&buf, &len, fp) <= 0) 741 goto done; 742 743 p = strchr(buf, '\n'); 744 if (p) 745 *p = '\0'; 746 747 for (i = 0; i < tp->thread_sib; i++) { 748 if (!strcmp(buf, tp->thread_siblings[i])) 749 break; 750 } 751 if (i == tp->thread_sib) { 752 tp->thread_siblings[i] = buf; 753 tp->thread_sib++; 754 buf = NULL; 755 } 756 ret = 0; 757 done: 758 if(fp) 759 fclose(fp); 760 free(buf); 761 return ret; 762 } 763 764 static void free_cpu_topo(struct cpu_topo *tp) 765 { 766 u32 i; 767 768 if (!tp) 769 return; 770 771 for (i = 0 ; i < tp->core_sib; i++) 772 free(tp->core_siblings[i]); 773 774 for (i = 0 ; i < tp->thread_sib; i++) 775 free(tp->thread_siblings[i]); 776 777 free(tp); 778 } 779 780 static struct cpu_topo 
*build_cpu_topology(void) 781 { 782 struct cpu_topo *tp; 783 void *addr; 784 u32 nr, i; 785 size_t sz; 786 long ncpus; 787 int ret = -1; 788 789 ncpus = sysconf(_SC_NPROCESSORS_CONF); 790 if (ncpus < 0) 791 return NULL; 792 793 nr = (u32)(ncpus & UINT_MAX); 794 795 sz = nr * sizeof(char *); 796 797 addr = calloc(1, sizeof(*tp) + 2 * sz); 798 if (!addr) 799 return NULL; 800 801 tp = addr; 802 803 addr += sizeof(*tp); 804 tp->core_siblings = addr; 805 addr += sz; 806 tp->thread_siblings = addr; 807 808 for (i = 0; i < nr; i++) { 809 ret = build_cpu_topo(tp, i); 810 if (ret < 0) 811 break; 812 } 813 if (ret) { 814 free_cpu_topo(tp); 815 tp = NULL; 816 } 817 return tp; 818 } 819 820 static int write_cpu_topology(int fd, struct perf_header *h __used, 821 struct perf_evlist *evlist __used) 822 { 823 struct cpu_topo *tp; 824 u32 i; 825 int ret; 826 827 tp = build_cpu_topology(); 828 if (!tp) 829 return -1; 830 831 ret = do_write(fd, &tp->core_sib, sizeof(tp->core_sib)); 832 if (ret < 0) 833 goto done; 834 835 for (i = 0; i < tp->core_sib; i++) { 836 ret = do_write_string(fd, tp->core_siblings[i]); 837 if (ret < 0) 838 goto done; 839 } 840 ret = do_write(fd, &tp->thread_sib, sizeof(tp->thread_sib)); 841 if (ret < 0) 842 goto done; 843 844 for (i = 0; i < tp->thread_sib; i++) { 845 ret = do_write_string(fd, tp->thread_siblings[i]); 846 if (ret < 0) 847 break; 848 } 849 done: 850 free_cpu_topo(tp); 851 return ret; 852 } 853 854 855 856 static int write_total_mem(int fd, struct perf_header *h __used, 857 struct perf_evlist *evlist __used) 858 { 859 char *buf = NULL; 860 FILE *fp; 861 size_t len = 0; 862 int ret = -1, n; 863 uint64_t mem; 864 865 fp = fopen("/proc/meminfo", "r"); 866 if (!fp) 867 return -1; 868 869 while (getline(&buf, &len, fp) > 0) { 870 ret = strncmp(buf, "MemTotal:", 9); 871 if (!ret) 872 break; 873 } 874 if (!ret) { 875 n = sscanf(buf, "%*s %"PRIu64, &mem); 876 if (n == 1) 877 ret = do_write(fd, &mem, sizeof(mem)); 878 } 879 free(buf); 880 fclose(fp); 881 
return ret; 882 } 883 884 static int write_topo_node(int fd, int node) 885 { 886 char str[MAXPATHLEN]; 887 char field[32]; 888 char *buf = NULL, *p; 889 size_t len = 0; 890 FILE *fp; 891 u64 mem_total, mem_free, mem; 892 int ret = -1; 893 894 sprintf(str, "/sys/devices/system/node/node%d/meminfo", node); 895 fp = fopen(str, "r"); 896 if (!fp) 897 return -1; 898 899 while (getline(&buf, &len, fp) > 0) { 900 /* skip over invalid lines */ 901 if (!strchr(buf, ':')) 902 continue; 903 if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2) 904 goto done; 905 if (!strcmp(field, "MemTotal:")) 906 mem_total = mem; 907 if (!strcmp(field, "MemFree:")) 908 mem_free = mem; 909 } 910 911 fclose(fp); 912 913 ret = do_write(fd, &mem_total, sizeof(u64)); 914 if (ret) 915 goto done; 916 917 ret = do_write(fd, &mem_free, sizeof(u64)); 918 if (ret) 919 goto done; 920 921 ret = -1; 922 sprintf(str, "/sys/devices/system/node/node%d/cpulist", node); 923 924 fp = fopen(str, "r"); 925 if (!fp) 926 goto done; 927 928 if (getline(&buf, &len, fp) <= 0) 929 goto done; 930 931 p = strchr(buf, '\n'); 932 if (p) 933 *p = '\0'; 934 935 ret = do_write_string(fd, buf); 936 done: 937 free(buf); 938 fclose(fp); 939 return ret; 940 } 941 942 static int write_numa_topology(int fd, struct perf_header *h __used, 943 struct perf_evlist *evlist __used) 944 { 945 char *buf = NULL; 946 size_t len = 0; 947 FILE *fp; 948 struct cpu_map *node_map = NULL; 949 char *c; 950 u32 nr, i, j; 951 int ret = -1; 952 953 fp = fopen("/sys/devices/system/node/online", "r"); 954 if (!fp) 955 return -1; 956 957 if (getline(&buf, &len, fp) <= 0) 958 goto done; 959 960 c = strchr(buf, '\n'); 961 if (c) 962 *c = '\0'; 963 964 node_map = cpu_map__new(buf); 965 if (!node_map) 966 goto done; 967 968 nr = (u32)node_map->nr; 969 970 ret = do_write(fd, &nr, sizeof(nr)); 971 if (ret < 0) 972 goto done; 973 974 for (i = 0; i < nr; i++) { 975 j = (u32)node_map->map[i]; 976 ret = do_write(fd, &j, sizeof(j)); 977 if (ret < 0) 978 break; 
979 980 ret = write_topo_node(fd, i); 981 if (ret < 0) 982 break; 983 } 984 done: 985 free(buf); 986 fclose(fp); 987 free(node_map); 988 return ret; 989 } 990 991 /* 992 * default get_cpuid(): nothing gets recorded 993 * actual implementation must be in arch/$(ARCH)/util/header.c 994 */ 995 int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used) 996 { 997 return -1; 998 } 999 1000 static int write_cpuid(int fd, struct perf_header *h __used, 1001 struct perf_evlist *evlist __used) 1002 { 1003 char buffer[64]; 1004 int ret; 1005 1006 ret = get_cpuid(buffer, sizeof(buffer)); 1007 if (!ret) 1008 goto write_it; 1009 1010 return -1; 1011 write_it: 1012 return do_write_string(fd, buffer); 1013 } 1014 1015 static void print_hostname(struct perf_header *ph, int fd, FILE *fp) 1016 { 1017 char *str = do_read_string(fd, ph); 1018 fprintf(fp, "# hostname : %s\n", str); 1019 free(str); 1020 } 1021 1022 static void print_osrelease(struct perf_header *ph, int fd, FILE *fp) 1023 { 1024 char *str = do_read_string(fd, ph); 1025 fprintf(fp, "# os release : %s\n", str); 1026 free(str); 1027 } 1028 1029 static void print_arch(struct perf_header *ph, int fd, FILE *fp) 1030 { 1031 char *str = do_read_string(fd, ph); 1032 fprintf(fp, "# arch : %s\n", str); 1033 free(str); 1034 } 1035 1036 static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp) 1037 { 1038 char *str = do_read_string(fd, ph); 1039 fprintf(fp, "# cpudesc : %s\n", str); 1040 free(str); 1041 } 1042 1043 static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp) 1044 { 1045 ssize_t ret; 1046 u32 nr; 1047 1048 ret = read(fd, &nr, sizeof(nr)); 1049 if (ret != (ssize_t)sizeof(nr)) 1050 nr = -1; /* interpreted as error */ 1051 1052 if (ph->needs_swap) 1053 nr = bswap_32(nr); 1054 1055 fprintf(fp, "# nrcpus online : %u\n", nr); 1056 1057 ret = read(fd, &nr, sizeof(nr)); 1058 if (ret != (ssize_t)sizeof(nr)) 1059 nr = -1; /* interpreted as error */ 1060 1061 if (ph->needs_swap) 1062 nr = 
bswap_32(nr); 1063 1064 fprintf(fp, "# nrcpus avail : %u\n", nr); 1065 } 1066 1067 static void print_version(struct perf_header *ph, int fd, FILE *fp) 1068 { 1069 char *str = do_read_string(fd, ph); 1070 fprintf(fp, "# perf version : %s\n", str); 1071 free(str); 1072 } 1073 1074 static void print_cmdline(struct perf_header *ph, int fd, FILE *fp) 1075 { 1076 ssize_t ret; 1077 char *str; 1078 u32 nr, i; 1079 1080 ret = read(fd, &nr, sizeof(nr)); 1081 if (ret != (ssize_t)sizeof(nr)) 1082 return; 1083 1084 if (ph->needs_swap) 1085 nr = bswap_32(nr); 1086 1087 fprintf(fp, "# cmdline : "); 1088 1089 for (i = 0; i < nr; i++) { 1090 str = do_read_string(fd, ph); 1091 fprintf(fp, "%s ", str); 1092 free(str); 1093 } 1094 fputc('\n', fp); 1095 } 1096 1097 static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp) 1098 { 1099 ssize_t ret; 1100 u32 nr, i; 1101 char *str; 1102 1103 ret = read(fd, &nr, sizeof(nr)); 1104 if (ret != (ssize_t)sizeof(nr)) 1105 return; 1106 1107 if (ph->needs_swap) 1108 nr = bswap_32(nr); 1109 1110 for (i = 0; i < nr; i++) { 1111 str = do_read_string(fd, ph); 1112 fprintf(fp, "# sibling cores : %s\n", str); 1113 free(str); 1114 } 1115 1116 ret = read(fd, &nr, sizeof(nr)); 1117 if (ret != (ssize_t)sizeof(nr)) 1118 return; 1119 1120 if (ph->needs_swap) 1121 nr = bswap_32(nr); 1122 1123 for (i = 0; i < nr; i++) { 1124 str = do_read_string(fd, ph); 1125 fprintf(fp, "# sibling threads : %s\n", str); 1126 free(str); 1127 } 1128 } 1129 1130 static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) 1131 { 1132 struct perf_event_attr attr; 1133 uint64_t id; 1134 void *buf = NULL; 1135 char *str; 1136 u32 nre, sz, nr, i, j, msz; 1137 int ret; 1138 1139 /* number of events */ 1140 ret = read(fd, &nre, sizeof(nre)); 1141 if (ret != (ssize_t)sizeof(nre)) 1142 goto error; 1143 1144 if (ph->needs_swap) 1145 nre = bswap_32(nre); 1146 1147 ret = read(fd, &sz, sizeof(sz)); 1148 if (ret != (ssize_t)sizeof(sz)) 1149 goto error; 1150 1151 if 
(ph->needs_swap) 1152 sz = bswap_32(sz); 1153 1154 /* 1155 * ensure it is at least to our ABI rev 1156 */ 1157 if (sz < (u32)sizeof(attr)) 1158 goto error; 1159 1160 memset(&attr, 0, sizeof(attr)); 1161 1162 /* read entire region to sync up to next field */ 1163 buf = malloc(sz); 1164 if (!buf) 1165 goto error; 1166 1167 msz = sizeof(attr); 1168 if (sz < msz) 1169 msz = sz; 1170 1171 for (i = 0 ; i < nre; i++) { 1172 1173 ret = read(fd, buf, sz); 1174 if (ret != (ssize_t)sz) 1175 goto error; 1176 1177 if (ph->needs_swap) 1178 perf_event__attr_swap(buf); 1179 1180 memcpy(&attr, buf, msz); 1181 1182 ret = read(fd, &nr, sizeof(nr)); 1183 if (ret != (ssize_t)sizeof(nr)) 1184 goto error; 1185 1186 if (ph->needs_swap) 1187 nr = bswap_32(nr); 1188 1189 str = do_read_string(fd, ph); 1190 fprintf(fp, "# event : name = %s, ", str); 1191 free(str); 1192 1193 fprintf(fp, "type = %d, config = 0x%"PRIx64 1194 ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64, 1195 attr.type, 1196 (u64)attr.config, 1197 (u64)attr.config1, 1198 (u64)attr.config2); 1199 1200 fprintf(fp, ", excl_usr = %d, excl_kern = %d", 1201 attr.exclude_user, 1202 attr.exclude_kernel); 1203 1204 if (nr) 1205 fprintf(fp, ", id = {"); 1206 1207 for (j = 0 ; j < nr; j++) { 1208 ret = read(fd, &id, sizeof(id)); 1209 if (ret != (ssize_t)sizeof(id)) 1210 goto error; 1211 1212 if (ph->needs_swap) 1213 id = bswap_64(id); 1214 1215 if (j) 1216 fputc(',', fp); 1217 1218 fprintf(fp, " %"PRIu64, id); 1219 } 1220 if (nr && j == nr) 1221 fprintf(fp, " }"); 1222 fputc('\n', fp); 1223 } 1224 free(buf); 1225 return; 1226 error: 1227 fprintf(fp, "# event desc: not available or unable to read\n"); 1228 } 1229 1230 static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp) 1231 { 1232 uint64_t mem; 1233 ssize_t ret; 1234 1235 ret = read(fd, &mem, sizeof(mem)); 1236 if (ret != sizeof(mem)) 1237 goto error; 1238 1239 if (h->needs_swap) 1240 mem = bswap_64(mem); 1241 1242 fprintf(fp, "# total memory : %"PRIu64" kB\n", 
mem); 1243 return; 1244 error: 1245 fprintf(fp, "# total memory : unknown\n"); 1246 } 1247 1248 static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp) 1249 { 1250 ssize_t ret; 1251 u32 nr, c, i; 1252 char *str; 1253 uint64_t mem_total, mem_free; 1254 1255 /* nr nodes */ 1256 ret = read(fd, &nr, sizeof(nr)); 1257 if (ret != (ssize_t)sizeof(nr)) 1258 goto error; 1259 1260 if (h->needs_swap) 1261 nr = bswap_32(nr); 1262 1263 for (i = 0; i < nr; i++) { 1264 1265 /* node number */ 1266 ret = read(fd, &c, sizeof(c)); 1267 if (ret != (ssize_t)sizeof(c)) 1268 goto error; 1269 1270 if (h->needs_swap) 1271 c = bswap_32(c); 1272 1273 ret = read(fd, &mem_total, sizeof(u64)); 1274 if (ret != sizeof(u64)) 1275 goto error; 1276 1277 ret = read(fd, &mem_free, sizeof(u64)); 1278 if (ret != sizeof(u64)) 1279 goto error; 1280 1281 if (h->needs_swap) { 1282 mem_total = bswap_64(mem_total); 1283 mem_free = bswap_64(mem_free); 1284 } 1285 1286 fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," 1287 " free = %"PRIu64" kB\n", 1288 c, 1289 mem_total, 1290 mem_free); 1291 1292 str = do_read_string(fd, h); 1293 fprintf(fp, "# node%u cpu list : %s\n", c, str); 1294 free(str); 1295 } 1296 return; 1297 error: 1298 fprintf(fp, "# numa topology : not available\n"); 1299 } 1300 1301 static void print_cpuid(struct perf_header *ph, int fd, FILE *fp) 1302 { 1303 char *str = do_read_string(fd, ph); 1304 fprintf(fp, "# cpuid : %s\n", str); 1305 free(str); 1306 } 1307 1308 struct feature_ops { 1309 int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); 1310 void (*print)(struct perf_header *h, int fd, FILE *fp); 1311 const char *name; 1312 bool full_only; 1313 }; 1314 1315 #define FEAT_OPA(n, func) \ 1316 [n] = { .name = #n, .write = write_##func, .print = print_##func } 1317 #define FEAT_OPF(n, func) \ 1318 [n] = { .name = #n, .write = write_##func, .print = print_##func, .full_only = true } 1319 1320 /* feature_ops not implemented: */ 1321 #define 
print_trace_info NULL 1322 #define print_build_id NULL 1323 1324 static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { 1325 FEAT_OPA(HEADER_TRACE_INFO, trace_info), 1326 FEAT_OPA(HEADER_BUILD_ID, build_id), 1327 FEAT_OPA(HEADER_HOSTNAME, hostname), 1328 FEAT_OPA(HEADER_OSRELEASE, osrelease), 1329 FEAT_OPA(HEADER_VERSION, version), 1330 FEAT_OPA(HEADER_ARCH, arch), 1331 FEAT_OPA(HEADER_NRCPUS, nrcpus), 1332 FEAT_OPA(HEADER_CPUDESC, cpudesc), 1333 FEAT_OPA(HEADER_CPUID, cpuid), 1334 FEAT_OPA(HEADER_TOTAL_MEM, total_mem), 1335 FEAT_OPA(HEADER_EVENT_DESC, event_desc), 1336 FEAT_OPA(HEADER_CMDLINE, cmdline), 1337 FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology), 1338 FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), 1339 }; 1340 1341 struct header_print_data { 1342 FILE *fp; 1343 bool full; /* extended list of headers */ 1344 }; 1345 1346 static int perf_file_section__fprintf_info(struct perf_file_section *section, 1347 struct perf_header *ph, 1348 int feat, int fd, void *data) 1349 { 1350 struct header_print_data *hd = data; 1351 1352 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { 1353 pr_debug("Failed to lseek to %" PRIu64 " offset for feature " 1354 "%d, continuing...\n", section->offset, feat); 1355 return 0; 1356 } 1357 if (feat >= HEADER_LAST_FEATURE) { 1358 pr_warning("unknown feature %d\n", feat); 1359 return 0; 1360 } 1361 if (!feat_ops[feat].print) 1362 return 0; 1363 1364 if (!feat_ops[feat].full_only || hd->full) 1365 feat_ops[feat].print(ph, fd, hd->fp); 1366 else 1367 fprintf(hd->fp, "# %s info available, use -I to display\n", 1368 feat_ops[feat].name); 1369 1370 return 0; 1371 } 1372 1373 int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full) 1374 { 1375 struct header_print_data hd; 1376 struct perf_header *header = &session->header; 1377 int fd = session->fd; 1378 hd.fp = fp; 1379 hd.full = full; 1380 1381 perf_header__process_sections(header, fd, &hd, 1382 perf_file_section__fprintf_info); 1383 return 0; 1384 } 

/*
 * Write one feature's data at the current file offset, recording its
 * (offset, size) in *p and advancing *p on success.  On writer
 * failure the file position is rewound so the next feature overwrites
 * the partial data.  Returns 0, or -1 when the feature has no writer
 * or its writer failed.
 */
static int do_write_feat(int fd, struct perf_header *h, int type,
			 struct perf_file_section **p,
			 struct perf_evlist *evlist)
{
	int err;
	int ret = 0;

	if (perf_header__has_feat(h, type)) {
		if (!feat_ops[type].write)
			return -1;

		(*p)->offset = lseek(fd, 0, SEEK_CUR);

		err = feat_ops[type].write(fd, h, evlist);
		if (err < 0) {
			pr_debug("failed to write feature %d\n", type);

			/* undo anything written */
			lseek(fd, (*p)->offset, SEEK_SET);

			return -1;
		}
		(*p)->size = lseek(fd, 0, SEEK_CUR) - (*p)->offset;
		(*p)++;
	}
	return ret;
}

/*
 * Write all additional feature sections: the per-feature data goes
 * after the section table's reserved space, then the table of
 * (offset, size) entries is written at sec_start.  Features whose
 * writer fails are cleared from the header bitmap.
 */
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_file_section *feat_sec, *p;
	int sec_size;
	u64 sec_start;
	int feat;
	int err;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = p = calloc(sizeof(*feat_sec), nr_sections);
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	/* feature data starts right after the reserved section table */
	sec_start = header->data_offset + header->data_size;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (do_write_feat(fd, header, feat, &p, evlist))
			perf_header__clear_feat(header, feat);
	}

	lseek(fd, sec_start, SEEK_SET);
	/*
	 * may write more than needed due to dropped feature, but
	 * this is okay, reader will skip the missing entries
	 */
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
	free(feat_sec);
	return err;
}

/* Write the minimal magic+size header used in pipe mode. */
int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	int err;

	f_header = (struct perf_pipe_file_header){
.magic = PERF_MAGIC, 1461 .size = sizeof(f_header), 1462 }; 1463 1464 err = do_write(fd, &f_header, sizeof(f_header)); 1465 if (err < 0) { 1466 pr_debug("failed to write perf pipe header\n"); 1467 return err; 1468 } 1469 1470 return 0; 1471 } 1472 1473 int perf_session__write_header(struct perf_session *session, 1474 struct perf_evlist *evlist, 1475 int fd, bool at_exit) 1476 { 1477 struct perf_file_header f_header; 1478 struct perf_file_attr f_attr; 1479 struct perf_header *header = &session->header; 1480 struct perf_evsel *attr, *pair = NULL; 1481 int err; 1482 1483 lseek(fd, sizeof(f_header), SEEK_SET); 1484 1485 if (session->evlist != evlist) 1486 pair = list_entry(session->evlist->entries.next, struct perf_evsel, node); 1487 1488 list_for_each_entry(attr, &evlist->entries, node) { 1489 attr->id_offset = lseek(fd, 0, SEEK_CUR); 1490 err = do_write(fd, attr->id, attr->ids * sizeof(u64)); 1491 if (err < 0) { 1492 out_err_write: 1493 pr_debug("failed to write perf header\n"); 1494 return err; 1495 } 1496 if (session->evlist != evlist) { 1497 err = do_write(fd, pair->id, pair->ids * sizeof(u64)); 1498 if (err < 0) 1499 goto out_err_write; 1500 attr->ids += pair->ids; 1501 pair = list_entry(pair->node.next, struct perf_evsel, node); 1502 } 1503 } 1504 1505 header->attr_offset = lseek(fd, 0, SEEK_CUR); 1506 1507 list_for_each_entry(attr, &evlist->entries, node) { 1508 f_attr = (struct perf_file_attr){ 1509 .attr = attr->attr, 1510 .ids = { 1511 .offset = attr->id_offset, 1512 .size = attr->ids * sizeof(u64), 1513 } 1514 }; 1515 err = do_write(fd, &f_attr, sizeof(f_attr)); 1516 if (err < 0) { 1517 pr_debug("failed to write perf header attribute\n"); 1518 return err; 1519 } 1520 } 1521 1522 header->event_offset = lseek(fd, 0, SEEK_CUR); 1523 header->event_size = event_count * sizeof(struct perf_trace_event_type); 1524 if (events) { 1525 err = do_write(fd, events, header->event_size); 1526 if (err < 0) { 1527 pr_debug("failed to write perf header events\n"); 1528 return 
err; 1529 } 1530 } 1531 1532 header->data_offset = lseek(fd, 0, SEEK_CUR); 1533 1534 if (at_exit) { 1535 err = perf_header__adds_write(header, evlist, fd); 1536 if (err < 0) 1537 return err; 1538 } 1539 1540 f_header = (struct perf_file_header){ 1541 .magic = PERF_MAGIC, 1542 .size = sizeof(f_header), 1543 .attr_size = sizeof(f_attr), 1544 .attrs = { 1545 .offset = header->attr_offset, 1546 .size = evlist->nr_entries * sizeof(f_attr), 1547 }, 1548 .data = { 1549 .offset = header->data_offset, 1550 .size = header->data_size, 1551 }, 1552 .event_types = { 1553 .offset = header->event_offset, 1554 .size = header->event_size, 1555 }, 1556 }; 1557 1558 memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features)); 1559 1560 lseek(fd, 0, SEEK_SET); 1561 err = do_write(fd, &f_header, sizeof(f_header)); 1562 if (err < 0) { 1563 pr_debug("failed to write perf header\n"); 1564 return err; 1565 } 1566 lseek(fd, header->data_offset + header->data_size, SEEK_SET); 1567 1568 header->frozen = 1; 1569 return 0; 1570 } 1571 1572 static int perf_header__getbuffer64(struct perf_header *header, 1573 int fd, void *buf, size_t size) 1574 { 1575 if (readn(fd, buf, size) <= 0) 1576 return -1; 1577 1578 if (header->needs_swap) 1579 mem_bswap_64(buf, size); 1580 1581 return 0; 1582 } 1583 1584 int perf_header__process_sections(struct perf_header *header, int fd, 1585 void *data, 1586 int (*process)(struct perf_file_section *section, 1587 struct perf_header *ph, 1588 int feat, int fd, void *data)) 1589 { 1590 struct perf_file_section *feat_sec, *sec; 1591 int nr_sections; 1592 int sec_size; 1593 int feat; 1594 int err; 1595 1596 nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS); 1597 if (!nr_sections) 1598 return 0; 1599 1600 feat_sec = sec = calloc(sizeof(*feat_sec), nr_sections); 1601 if (!feat_sec) 1602 return -1; 1603 1604 sec_size = sizeof(*feat_sec) * nr_sections; 1605 1606 lseek(fd, header->data_offset + header->data_size, SEEK_SET); 1607 
1608 err = perf_header__getbuffer64(header, fd, feat_sec, sec_size); 1609 if (err < 0) 1610 goto out_free; 1611 1612 for_each_set_bit(feat, header->adds_features, HEADER_LAST_FEATURE) { 1613 err = process(sec++, header, feat, fd, data); 1614 if (err < 0) 1615 goto out_free; 1616 } 1617 err = 0; 1618 out_free: 1619 free(feat_sec); 1620 return err; 1621 } 1622 1623 int perf_file_header__read(struct perf_file_header *header, 1624 struct perf_header *ph, int fd) 1625 { 1626 lseek(fd, 0, SEEK_SET); 1627 1628 if (readn(fd, header, sizeof(*header)) <= 0 || 1629 memcmp(&header->magic, __perf_magic, sizeof(header->magic))) 1630 return -1; 1631 1632 if (header->attr_size != sizeof(struct perf_file_attr)) { 1633 u64 attr_size = bswap_64(header->attr_size); 1634 1635 if (attr_size != sizeof(struct perf_file_attr)) 1636 return -1; 1637 1638 mem_bswap_64(header, offsetof(struct perf_file_header, 1639 adds_features)); 1640 ph->needs_swap = true; 1641 } 1642 1643 if (header->size != sizeof(*header)) { 1644 /* Support the previous format */ 1645 if (header->size == offsetof(typeof(*header), adds_features)) 1646 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 1647 else 1648 return -1; 1649 } else if (ph->needs_swap) { 1650 unsigned int i; 1651 /* 1652 * feature bitmap is declared as an array of unsigned longs -- 1653 * not good since its size can differ between the host that 1654 * generated the data file and the host analyzing the file. 1655 * 1656 * We need to handle endianness, but we don't know the size of 1657 * the unsigned long where the file was generated. Take a best 1658 * guess at determining it: try 64-bit swap first (ie., file 1659 * created on a 64-bit host), and check if the hostname feature 1660 * bit is set (this feature bit is forced on as of fbe96f2). 1661 * If the bit is not, undo the 64-bit swap and try a 32-bit 1662 * swap. 
If the hostname bit is still not set (e.g., older data 1663 * file), punt and fallback to the original behavior -- 1664 * clearing all feature bits and setting buildid. 1665 */ 1666 for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) 1667 header->adds_features[i] = bswap_64(header->adds_features[i]); 1668 1669 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 1670 for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) { 1671 header->adds_features[i] = bswap_64(header->adds_features[i]); 1672 header->adds_features[i] = bswap_32(header->adds_features[i]); 1673 } 1674 } 1675 1676 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 1677 bitmap_zero(header->adds_features, HEADER_FEAT_BITS); 1678 set_bit(HEADER_BUILD_ID, header->adds_features); 1679 } 1680 } 1681 1682 memcpy(&ph->adds_features, &header->adds_features, 1683 sizeof(ph->adds_features)); 1684 1685 ph->event_offset = header->event_types.offset; 1686 ph->event_size = header->event_types.size; 1687 ph->data_offset = header->data.offset; 1688 ph->data_size = header->data.size; 1689 return 0; 1690 } 1691 1692 static int __event_process_build_id(struct build_id_event *bev, 1693 char *filename, 1694 struct perf_session *session) 1695 { 1696 int err = -1; 1697 struct list_head *head; 1698 struct machine *machine; 1699 u16 misc; 1700 struct dso *dso; 1701 enum dso_kernel_type dso_type; 1702 1703 machine = perf_session__findnew_machine(session, bev->pid); 1704 if (!machine) 1705 goto out; 1706 1707 misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1708 1709 switch (misc) { 1710 case PERF_RECORD_MISC_KERNEL: 1711 dso_type = DSO_TYPE_KERNEL; 1712 head = &machine->kernel_dsos; 1713 break; 1714 case PERF_RECORD_MISC_GUEST_KERNEL: 1715 dso_type = DSO_TYPE_GUEST_KERNEL; 1716 head = &machine->kernel_dsos; 1717 break; 1718 case PERF_RECORD_MISC_USER: 1719 case PERF_RECORD_MISC_GUEST_USER: 1720 dso_type = DSO_TYPE_USER; 1721 head = &machine->user_dsos; 1722 break; 1723 default: 1724 goto out; 1725 } 1726 
1727 dso = __dsos__findnew(head, filename); 1728 if (dso != NULL) { 1729 char sbuild_id[BUILD_ID_SIZE * 2 + 1]; 1730 1731 dso__set_build_id(dso, &bev->build_id); 1732 1733 if (filename[0] == '[') 1734 dso->kernel = dso_type; 1735 1736 build_id__sprintf(dso->build_id, sizeof(dso->build_id), 1737 sbuild_id); 1738 pr_debug("build id event received for %s: %s\n", 1739 dso->long_name, sbuild_id); 1740 } 1741 1742 err = 0; 1743 out: 1744 return err; 1745 } 1746 1747 static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, 1748 int input, u64 offset, u64 size) 1749 { 1750 struct perf_session *session = container_of(header, struct perf_session, header); 1751 struct { 1752 struct perf_event_header header; 1753 u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; 1754 char filename[0]; 1755 } old_bev; 1756 struct build_id_event bev; 1757 char filename[PATH_MAX]; 1758 u64 limit = offset + size; 1759 1760 while (offset < limit) { 1761 ssize_t len; 1762 1763 if (read(input, &old_bev, sizeof(old_bev)) != sizeof(old_bev)) 1764 return -1; 1765 1766 if (header->needs_swap) 1767 perf_event_header__bswap(&old_bev.header); 1768 1769 len = old_bev.header.size - sizeof(old_bev); 1770 if (read(input, filename, len) != len) 1771 return -1; 1772 1773 bev.header = old_bev.header; 1774 1775 /* 1776 * As the pid is the missing value, we need to fill 1777 * it properly. The header.misc value give us nice hint. 
1778 */ 1779 bev.pid = HOST_KERNEL_ID; 1780 if (bev.header.misc == PERF_RECORD_MISC_GUEST_USER || 1781 bev.header.misc == PERF_RECORD_MISC_GUEST_KERNEL) 1782 bev.pid = DEFAULT_GUEST_KERNEL_ID; 1783 1784 memcpy(bev.build_id, old_bev.build_id, sizeof(bev.build_id)); 1785 __event_process_build_id(&bev, filename, session); 1786 1787 offset += bev.header.size; 1788 } 1789 1790 return 0; 1791 } 1792 1793 static int perf_header__read_build_ids(struct perf_header *header, 1794 int input, u64 offset, u64 size) 1795 { 1796 struct perf_session *session = container_of(header, struct perf_session, header); 1797 struct build_id_event bev; 1798 char filename[PATH_MAX]; 1799 u64 limit = offset + size, orig_offset = offset; 1800 int err = -1; 1801 1802 while (offset < limit) { 1803 ssize_t len; 1804 1805 if (read(input, &bev, sizeof(bev)) != sizeof(bev)) 1806 goto out; 1807 1808 if (header->needs_swap) 1809 perf_event_header__bswap(&bev.header); 1810 1811 len = bev.header.size - sizeof(bev); 1812 if (read(input, filename, len) != len) 1813 goto out; 1814 /* 1815 * The a1645ce1 changeset: 1816 * 1817 * "perf: 'perf kvm' tool for monitoring guest performance from host" 1818 * 1819 * Added a field to struct build_id_event that broke the file 1820 * format. 1821 * 1822 * Since the kernel build-id is the first entry, process the 1823 * table using the old format if the well known 1824 * '[kernel.kallsyms]' string for the kernel build-id has the 1825 * first 4 characters chopped off (where the pid_t sits). 
1826 */ 1827 if (memcmp(filename, "nel.kallsyms]", 13) == 0) { 1828 if (lseek(input, orig_offset, SEEK_SET) == (off_t)-1) 1829 return -1; 1830 return perf_header__read_build_ids_abi_quirk(header, input, offset, size); 1831 } 1832 1833 __event_process_build_id(&bev, filename, session); 1834 1835 offset += bev.header.size; 1836 } 1837 err = 0; 1838 out: 1839 return err; 1840 } 1841 1842 static int perf_file_section__process(struct perf_file_section *section, 1843 struct perf_header *ph, 1844 int feat, int fd, void *data __used) 1845 { 1846 if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) { 1847 pr_debug("Failed to lseek to %" PRIu64 " offset for feature " 1848 "%d, continuing...\n", section->offset, feat); 1849 return 0; 1850 } 1851 1852 if (feat >= HEADER_LAST_FEATURE) { 1853 pr_debug("unknown feature %d, continuing...\n", feat); 1854 return 0; 1855 } 1856 1857 switch (feat) { 1858 case HEADER_TRACE_INFO: 1859 trace_report(fd, false); 1860 break; 1861 case HEADER_BUILD_ID: 1862 if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) 1863 pr_debug("Failed to read buildids, continuing...\n"); 1864 break; 1865 default: 1866 break; 1867 } 1868 1869 return 0; 1870 } 1871 1872 static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, 1873 struct perf_header *ph, int fd, 1874 bool repipe) 1875 { 1876 if (readn(fd, header, sizeof(*header)) <= 0 || 1877 memcmp(&header->magic, __perf_magic, sizeof(header->magic))) 1878 return -1; 1879 1880 if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0) 1881 return -1; 1882 1883 if (header->size != sizeof(*header)) { 1884 u64 size = bswap_64(header->size); 1885 1886 if (size != sizeof(*header)) 1887 return -1; 1888 1889 ph->needs_swap = true; 1890 } 1891 1892 return 0; 1893 } 1894 1895 static int perf_header__read_pipe(struct perf_session *session, int fd) 1896 { 1897 struct perf_header *header = &session->header; 1898 struct perf_pipe_file_header f_header; 1899 1900 if 
(perf_file_header__read_pipe(&f_header, header, fd, 1901 session->repipe) < 0) { 1902 pr_debug("incompatible file format\n"); 1903 return -EINVAL; 1904 } 1905 1906 session->fd = fd; 1907 1908 return 0; 1909 } 1910 1911 int perf_session__read_header(struct perf_session *session, int fd) 1912 { 1913 struct perf_header *header = &session->header; 1914 struct perf_file_header f_header; 1915 struct perf_file_attr f_attr; 1916 u64 f_id; 1917 int nr_attrs, nr_ids, i, j; 1918 1919 session->evlist = perf_evlist__new(NULL, NULL); 1920 if (session->evlist == NULL) 1921 return -ENOMEM; 1922 1923 if (session->fd_pipe) 1924 return perf_header__read_pipe(session, fd); 1925 1926 if (perf_file_header__read(&f_header, header, fd) < 0) { 1927 pr_debug("incompatible file format\n"); 1928 return -EINVAL; 1929 } 1930 1931 nr_attrs = f_header.attrs.size / sizeof(f_attr); 1932 lseek(fd, f_header.attrs.offset, SEEK_SET); 1933 1934 for (i = 0; i < nr_attrs; i++) { 1935 struct perf_evsel *evsel; 1936 off_t tmp; 1937 1938 if (readn(fd, &f_attr, sizeof(f_attr)) <= 0) 1939 goto out_errno; 1940 1941 if (header->needs_swap) 1942 perf_event__attr_swap(&f_attr.attr); 1943 1944 tmp = lseek(fd, 0, SEEK_CUR); 1945 evsel = perf_evsel__new(&f_attr.attr, i); 1946 1947 if (evsel == NULL) 1948 goto out_delete_evlist; 1949 /* 1950 * Do it before so that if perf_evsel__alloc_id fails, this 1951 * entry gets purged too at perf_evlist__delete(). 1952 */ 1953 perf_evlist__add(session->evlist, evsel); 1954 1955 nr_ids = f_attr.ids.size / sizeof(u64); 1956 /* 1957 * We don't have the cpu and thread maps on the header, so 1958 * for allocating the perf_sample_id table we fake 1 cpu and 1959 * hattr->ids threads. 
1960 */ 1961 if (perf_evsel__alloc_id(evsel, 1, nr_ids)) 1962 goto out_delete_evlist; 1963 1964 lseek(fd, f_attr.ids.offset, SEEK_SET); 1965 1966 for (j = 0; j < nr_ids; j++) { 1967 if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id))) 1968 goto out_errno; 1969 1970 perf_evlist__id_add(session->evlist, evsel, 0, j, f_id); 1971 } 1972 1973 lseek(fd, tmp, SEEK_SET); 1974 } 1975 1976 symbol_conf.nr_events = nr_attrs; 1977 1978 if (f_header.event_types.size) { 1979 lseek(fd, f_header.event_types.offset, SEEK_SET); 1980 events = malloc(f_header.event_types.size); 1981 if (events == NULL) 1982 return -ENOMEM; 1983 if (perf_header__getbuffer64(header, fd, events, 1984 f_header.event_types.size)) 1985 goto out_errno; 1986 event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); 1987 } 1988 1989 perf_header__process_sections(header, fd, NULL, 1990 perf_file_section__process); 1991 1992 lseek(fd, header->data_offset, SEEK_SET); 1993 1994 header->frozen = 1; 1995 return 0; 1996 out_errno: 1997 return -errno; 1998 1999 out_delete_evlist: 2000 perf_evlist__delete(session->evlist); 2001 session->evlist = NULL; 2002 return -ENOMEM; 2003 } 2004 2005 int perf_event__synthesize_attr(struct perf_tool *tool, 2006 struct perf_event_attr *attr, u16 ids, u64 *id, 2007 perf_event__handler_t process) 2008 { 2009 union perf_event *ev; 2010 size_t size; 2011 int err; 2012 2013 size = sizeof(struct perf_event_attr); 2014 size = ALIGN(size, sizeof(u64)); 2015 size += sizeof(struct perf_event_header); 2016 size += ids * sizeof(u64); 2017 2018 ev = malloc(size); 2019 2020 if (ev == NULL) 2021 return -ENOMEM; 2022 2023 ev->attr.attr = *attr; 2024 memcpy(ev->attr.id, id, ids * sizeof(u64)); 2025 2026 ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 2027 ev->attr.header.size = size; 2028 2029 err = process(tool, ev, NULL, NULL); 2030 2031 free(ev); 2032 2033 return err; 2034 } 2035 2036 int perf_event__synthesize_attrs(struct perf_tool *tool, 2037 struct perf_session 
*session, 2038 perf_event__handler_t process) 2039 { 2040 struct perf_evsel *attr; 2041 int err = 0; 2042 2043 list_for_each_entry(attr, &session->evlist->entries, node) { 2044 err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids, 2045 attr->id, process); 2046 if (err) { 2047 pr_debug("failed to create perf header attribute\n"); 2048 return err; 2049 } 2050 } 2051 2052 return err; 2053 } 2054 2055 int perf_event__process_attr(union perf_event *event, 2056 struct perf_evlist **pevlist) 2057 { 2058 unsigned int i, ids, n_ids; 2059 struct perf_evsel *evsel; 2060 struct perf_evlist *evlist = *pevlist; 2061 2062 if (evlist == NULL) { 2063 *pevlist = evlist = perf_evlist__new(NULL, NULL); 2064 if (evlist == NULL) 2065 return -ENOMEM; 2066 } 2067 2068 evsel = perf_evsel__new(&event->attr.attr, evlist->nr_entries); 2069 if (evsel == NULL) 2070 return -ENOMEM; 2071 2072 perf_evlist__add(evlist, evsel); 2073 2074 ids = event->header.size; 2075 ids -= (void *)&event->attr.id - (void *)event; 2076 n_ids = ids / sizeof(u64); 2077 /* 2078 * We don't have the cpu and thread maps on the header, so 2079 * for allocating the perf_sample_id table we fake 1 cpu and 2080 * hattr->ids threads. 
2081 */ 2082 if (perf_evsel__alloc_id(evsel, 1, n_ids)) 2083 return -ENOMEM; 2084 2085 for (i = 0; i < n_ids; i++) { 2086 perf_evlist__id_add(evlist, evsel, 0, i, event->attr.id[i]); 2087 } 2088 2089 return 0; 2090 } 2091 2092 int perf_event__synthesize_event_type(struct perf_tool *tool, 2093 u64 event_id, char *name, 2094 perf_event__handler_t process, 2095 struct machine *machine) 2096 { 2097 union perf_event ev; 2098 size_t size = 0; 2099 int err = 0; 2100 2101 memset(&ev, 0, sizeof(ev)); 2102 2103 ev.event_type.event_type.event_id = event_id; 2104 memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME); 2105 strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1); 2106 2107 ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; 2108 size = strlen(name); 2109 size = ALIGN(size, sizeof(u64)); 2110 ev.event_type.header.size = sizeof(ev.event_type) - 2111 (sizeof(ev.event_type.event_type.name) - size); 2112 2113 err = process(tool, &ev, NULL, machine); 2114 2115 return err; 2116 } 2117 2118 int perf_event__synthesize_event_types(struct perf_tool *tool, 2119 perf_event__handler_t process, 2120 struct machine *machine) 2121 { 2122 struct perf_trace_event_type *type; 2123 int i, err = 0; 2124 2125 for (i = 0; i < event_count; i++) { 2126 type = &events[i]; 2127 2128 err = perf_event__synthesize_event_type(tool, type->event_id, 2129 type->name, process, 2130 machine); 2131 if (err) { 2132 pr_debug("failed to create perf header event type\n"); 2133 return err; 2134 } 2135 } 2136 2137 return err; 2138 } 2139 2140 int perf_event__process_event_type(struct perf_tool *tool __unused, 2141 union perf_event *event) 2142 { 2143 if (perf_header__push_event(event->event_type.event_type.event_id, 2144 event->event_type.event_type.name) < 0) 2145 return -ENOMEM; 2146 2147 return 0; 2148 } 2149 2150 int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, 2151 struct perf_evlist *evlist, 2152 perf_event__handler_t process) 2153 { 2154 union perf_event 
ev; 2155 struct tracing_data *tdata; 2156 ssize_t size = 0, aligned_size = 0, padding; 2157 int err __used = 0; 2158 2159 /* 2160 * We are going to store the size of the data followed 2161 * by the data contents. Since the fd descriptor is a pipe, 2162 * we cannot seek back to store the size of the data once 2163 * we know it. Instead we: 2164 * 2165 * - write the tracing data to the temp file 2166 * - get/write the data size to pipe 2167 * - write the tracing data from the temp file 2168 * to the pipe 2169 */ 2170 tdata = tracing_data_get(&evlist->entries, fd, true); 2171 if (!tdata) 2172 return -1; 2173 2174 memset(&ev, 0, sizeof(ev)); 2175 2176 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 2177 size = tdata->size; 2178 aligned_size = ALIGN(size, sizeof(u64)); 2179 padding = aligned_size - size; 2180 ev.tracing_data.header.size = sizeof(ev.tracing_data); 2181 ev.tracing_data.size = aligned_size; 2182 2183 process(tool, &ev, NULL, NULL); 2184 2185 /* 2186 * The put function will copy all the tracing data 2187 * stored in temp file to the pipe. 
2188 */ 2189 tracing_data_put(tdata); 2190 2191 write_padded(fd, NULL, 0, padding); 2192 2193 return aligned_size; 2194 } 2195 2196 int perf_event__process_tracing_data(union perf_event *event, 2197 struct perf_session *session) 2198 { 2199 ssize_t size_read, padding, size = event->tracing_data.size; 2200 off_t offset = lseek(session->fd, 0, SEEK_CUR); 2201 char buf[BUFSIZ]; 2202 2203 /* setup for reading amidst mmap */ 2204 lseek(session->fd, offset + sizeof(struct tracing_data_event), 2205 SEEK_SET); 2206 2207 size_read = trace_report(session->fd, session->repipe); 2208 2209 padding = ALIGN(size_read, sizeof(u64)) - size_read; 2210 2211 if (read(session->fd, buf, padding) < 0) 2212 die("reading input file"); 2213 if (session->repipe) { 2214 int retw = write(STDOUT_FILENO, buf, padding); 2215 if (retw <= 0 || retw != padding) 2216 die("repiping tracing data padding"); 2217 } 2218 2219 if (size_read + padding != size) 2220 die("tracing data size mismatch"); 2221 2222 return size_read + padding; 2223 } 2224 2225 int perf_event__synthesize_build_id(struct perf_tool *tool, 2226 struct dso *pos, u16 misc, 2227 perf_event__handler_t process, 2228 struct machine *machine) 2229 { 2230 union perf_event ev; 2231 size_t len; 2232 int err = 0; 2233 2234 if (!pos->hit) 2235 return err; 2236 2237 memset(&ev, 0, sizeof(ev)); 2238 2239 len = pos->long_name_len + 1; 2240 len = ALIGN(len, NAME_ALIGN); 2241 memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); 2242 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 2243 ev.build_id.header.misc = misc; 2244 ev.build_id.pid = machine->pid; 2245 ev.build_id.header.size = sizeof(ev.build_id) + len; 2246 memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len); 2247 2248 err = process(tool, &ev, NULL, machine); 2249 2250 return err; 2251 } 2252 2253 int perf_event__process_build_id(struct perf_tool *tool __used, 2254 union perf_event *event, 2255 struct perf_session *session) 2256 { 2257 
__event_process_build_id(&event->build_id, 2258 event->build_id.filename, 2259 session); 2260 return 0; 2261 } 2262 2263 void disable_buildid_cache(void) 2264 { 2265 no_buildid_cache = true; 2266 } 2267