1 // SPDX-License-Identifier: GPL-2.0-only 2 3 #include "util/cgroup.h" 4 #include "util/data.h" 5 #include "util/debug.h" 6 #include "util/dso.h" 7 #include "util/event.h" 8 #include "util/evlist.h" 9 #include "util/machine.h" 10 #include "util/map.h" 11 #include "util/map_symbol.h" 12 #include "util/branch.h" 13 #include "util/memswap.h" 14 #include "util/namespaces.h" 15 #include "util/session.h" 16 #include "util/stat.h" 17 #include "util/symbol.h" 18 #include "util/synthetic-events.h" 19 #include "util/target.h" 20 #include "util/time-utils.h" 21 #include <linux/bitops.h> 22 #include <linux/kernel.h> 23 #include <linux/string.h> 24 #include <linux/zalloc.h> 25 #include <linux/perf_event.h> 26 #include <asm/bug.h> 27 #include <perf/evsel.h> 28 #include <perf/cpumap.h> 29 #include <internal/lib.h> // page_size 30 #include <internal/threadmap.h> 31 #include <perf/threadmap.h> 32 #include <symbol/kallsyms.h> 33 #include <dirent.h> 34 #include <errno.h> 35 #include <inttypes.h> 36 #include <stdio.h> 37 #include <string.h> 38 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */ 39 #include <api/fs/fs.h> 40 #include <api/io.h> 41 #include <api/io_dir.h> 42 #include <sys/types.h> 43 #include <sys/stat.h> 44 #include <fcntl.h> 45 #include <unistd.h> 46 47 #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500 48 49 unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT; 50 51 int perf_tool__process_synth_event(const struct perf_tool *tool, 52 union perf_event *event, 53 struct machine *machine, 54 perf_event__handler_t process) 55 { 56 struct perf_sample synth_sample = { 57 .pid = -1, 58 .tid = -1, 59 .time = -1, 60 .stream_id = -1, 61 .cpu = -1, 62 .period = 1, 63 .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK, 64 }; 65 66 return process(tool, event, &synth_sample, machine); 67 }; 68 69 /* 70 * Assumes that the first 4095 bytes of /proc/pid/stat contains 71 * the comm, tgid and ppid. 72 */ 73 static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len, 74 pid_t *tgid, pid_t *ppid, bool *kernel) 75 { 76 char bf[4096]; 77 int fd; 78 size_t size = 0; 79 ssize_t n; 80 char *name, *tgids, *ppids, *vmpeak, *threads; 81 82 *tgid = -1; 83 *ppid = -1; 84 85 if (pid) 86 snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid); 87 else 88 snprintf(bf, sizeof(bf), "/proc/%d/status", tid); 89 90 fd = open(bf, O_RDONLY); 91 if (fd < 0) { 92 pr_debug("couldn't open %s\n", bf); 93 return -1; 94 } 95 96 n = read(fd, bf, sizeof(bf) - 1); 97 close(fd); 98 if (n <= 0) { 99 pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n", 100 tid); 101 return -1; 102 } 103 bf[n] = '\0'; 104 105 name = strstr(bf, "Name:"); 106 tgids = strstr(name ?: bf, "Tgid:"); 107 ppids = strstr(tgids ?: bf, "PPid:"); 108 vmpeak = strstr(ppids ?: bf, "VmPeak:"); 109 110 if (vmpeak) 111 threads = NULL; 112 else 113 threads = strstr(ppids ?: bf, "Threads:"); 114 115 if (name) { 116 char *nl; 117 118 name = skip_spaces(name + 5); /* strlen("Name:") */ 119 nl = strchr(name, '\n'); 120 if (nl) 121 *nl = '\0'; 122 123 size = strlen(name); 124 if (size >= len) 125 size = len - 1; 126 memcpy(comm, name, size); 127 comm[size] = '\0'; 128 } else { 129 pr_debug("Name: string not found for pid %d\n", tid); 130 } 131 132 if (tgids) { 133 tgids += 5; /* strlen("Tgid:") */ 134 *tgid = atoi(tgids); 135 } else { 136 pr_debug("Tgid: string not found for pid %d\n", tid); 137 } 138 139 if (ppids) { 140 ppids += 5; /* strlen("PPid:") */ 141 *ppid = atoi(ppids); 142 } else { 143 pr_debug("PPid: string not found for pid %d\n", tid); 144 } 145 146 if (!vmpeak && threads) 147 *kernel = true; 148 else 149 *kernel = false; 150 151 return 0; 152 } 153 154 static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid, 155 struct machine *machine, 156 pid_t *tgid, pid_t *ppid, bool *kernel) 157 { 158 size_t size; 159 160 *ppid = -1; 161 162 memset(&event->comm, 0, sizeof(event->comm)); 163 164 if (machine__is_host(machine)) { 165 if (perf_event__get_comm_ids(pid, tid, event->comm.comm, 166 sizeof(event->comm.comm), 167 tgid, ppid, kernel) != 0) { 168 return -1; 169 } 170 } else { 171 *tgid = machine->pid; 172 } 173 174 if (*tgid < 0) 175 return -1; 176 177 event->comm.pid = *tgid; 178 event->comm.header.type = PERF_RECORD_COMM; 179 180 size = strlen(event->comm.comm) + 1; 181 size = PERF_ALIGN(size, sizeof(u64)); 182 memset(event->comm.comm + size, 0, machine->id_hdr_size); 183 event->comm.header.size = (sizeof(event->comm) - 184 (sizeof(event->comm.comm) - size) + 185 machine->id_hdr_size); 186 event->comm.tid = tid; 187 188 return 0; 189 } 190 191 pid_t perf_event__synthesize_comm(const struct perf_tool *tool, 192 union perf_event *event, pid_t pid, 193 perf_event__handler_t process, 194 struct machine *machine) 195 { 196 pid_t tgid, ppid; 197 bool kernel_thread; 198 199 if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid, 200 &kernel_thread) != 0) 201 return -1; 202 203 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 204 return -1; 205 206 return tgid; 207 } 208 209 static void perf_event__get_ns_link_info(pid_t pid, const char *ns, 210 struct perf_ns_link_info *ns_link_info) 211 { 212 struct stat64 st; 213 char proc_ns[128]; 214 215 sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns); 216 if (stat64(proc_ns, &st) == 0) { 217 ns_link_info->dev = st.st_dev; 218 ns_link_info->ino = st.st_ino; 219 } 220 } 221 222 int perf_event__synthesize_namespaces(const struct perf_tool *tool, 223 union perf_event *event, 224 pid_t pid, pid_t tgid, 225 perf_event__handler_t process, 226 struct machine *machine) 227 { 228 u32 idx; 229 struct perf_ns_link_info *ns_link_info; 230 231 if (!tool || !tool->namespace_events) 232 return 0; 233 234 memset(&event->namespaces, 0, (sizeof(event->namespaces) + 235 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 236 machine->id_hdr_size)); 237 238 event->namespaces.pid = tgid; 239 event->namespaces.tid = pid; 240 241 event->namespaces.nr_namespaces = NR_NAMESPACES; 242 243 ns_link_info = event->namespaces.link_info; 244 245 for (idx = 0; idx < event->namespaces.nr_namespaces; idx++) 246 perf_event__get_ns_link_info(pid, perf_ns__name(idx), 247 &ns_link_info[idx]); 248 249 event->namespaces.header.type = PERF_RECORD_NAMESPACES; 250 251 event->namespaces.header.size = (sizeof(event->namespaces) + 252 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 253 machine->id_hdr_size); 254 255 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 256 return -1; 257 258 return 0; 259 } 260 261 static int perf_event__synthesize_fork(const struct perf_tool *tool, 262 union perf_event *event, 263 pid_t pid, pid_t tgid, pid_t ppid, 264 perf_event__handler_t process, 265 struct machine *machine) 266 { 267 memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); 268 269 /* 270 * for main thread set parent to ppid from status file. For other 271 * threads set parent pid to main thread. ie., assume main thread 272 * spawns all threads in a process 273 */ 274 if (tgid == pid) { 275 event->fork.ppid = ppid; 276 event->fork.ptid = ppid; 277 } else { 278 event->fork.ppid = tgid; 279 event->fork.ptid = tgid; 280 } 281 event->fork.pid = tgid; 282 event->fork.tid = pid; 283 event->fork.header.type = PERF_RECORD_FORK; 284 event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC; 285 286 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); 287 288 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 289 return -1; 290 291 return 0; 292 } 293 294 static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end, 295 u32 *prot, u32 *flags, __u64 *offset, 296 u32 *maj, u32 *min, 297 __u64 *inode, 298 ssize_t pathname_size, char *pathname) 299 { 300 __u64 temp; 301 int ch; 302 char *start_pathname = pathname; 303 304 if (io__get_hex(io, start) != '-') 305 return false; 306 if (io__get_hex(io, end) != ' ') 307 return false; 308 309 /* map protection and flags bits */ 310 *prot = 0; 311 ch = io__get_char(io); 312 if (ch == 'r') 313 *prot |= PROT_READ; 314 else if (ch != '-') 315 return false; 316 ch = io__get_char(io); 317 if (ch == 'w') 318 *prot |= PROT_WRITE; 319 else if (ch != '-') 320 return false; 321 ch = io__get_char(io); 322 if (ch == 'x') 323 *prot |= PROT_EXEC; 324 else if (ch != '-') 325 return false; 326 ch = io__get_char(io); 327 if (ch == 's') 328 *flags = MAP_SHARED; 329 else if (ch == 'p') 330 *flags = MAP_PRIVATE; 331 else 332 return false; 333 if (io__get_char(io) != ' ') 334 return false; 335 336 if (io__get_hex(io, offset) != ' ') 337 return false; 338 339 if (io__get_hex(io, &temp) != ':') 340 return false; 341 *maj = temp; 342 if (io__get_hex(io, &temp) != ' ') 343 return false; 344 *min = temp; 345 346 ch = io__get_dec(io, inode); 347 if (ch != ' ') { 348 *pathname = '\0'; 349 return ch == '\n'; 350 } 351 do { 352 ch = io__get_char(io); 353 } while (ch == ' '); 354 while (true) { 355 if (ch < 0) 356 return false; 357 if (ch == '\0' || ch == '\n' || 358 (pathname + 1 - start_pathname) >= pathname_size) { 359 *pathname = '\0'; 360 return true; 361 } 362 *pathname++ = ch; 363 ch = io__get_char(io); 364 } 365 } 366 367 static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event, 368 struct machine *machine, 369 bool is_kernel) 370 { 371 struct build_id bid; 372 struct nsinfo *nsi; 373 struct nscookie nc; 374 struct dso *dso = NULL; 375 struct dso_id id; 376 int rc; 377 378 if (is_kernel) { 379 rc = sysfs__read_build_id("/sys/kernel/notes", &bid); 380 goto out; 381 } 382 383 id.maj = event->maj; 384 id.min = event->min; 385 id.ino = event->ino; 386 id.ino_generation = event->ino_generation; 387 388 dso = dsos__findnew_id(&machine->dsos, event->filename, &id); 389 if (dso && dso__has_build_id(dso)) { 390 bid = *dso__bid(dso); 391 rc = 0; 392 goto out; 393 } 394 395 nsi = nsinfo__new(event->pid); 396 nsinfo__mountns_enter(nsi, &nc); 397 398 rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1; 399 400 nsinfo__mountns_exit(&nc); 401 nsinfo__put(nsi); 402 403 out: 404 if (rc == 0) { 405 memcpy(event->build_id, bid.data, sizeof(bid.data)); 406 event->build_id_size = (u8) bid.size; 407 event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID; 408 event->__reserved_1 = 0; 409 event->__reserved_2 = 0; 410 411 if (dso && !dso__has_build_id(dso)) 412 dso__set_build_id(dso, &bid); 413 } else { 414 if (event->filename[0] == '/') { 415 pr_debug2("Failed to read build ID for %s\n", 416 event->filename); 417 } 418 } 419 dso__put(dso); 420 } 421 422 int perf_event__synthesize_mmap_events(const struct perf_tool *tool, 423 union perf_event *event, 424 pid_t pid, pid_t tgid, 425 perf_event__handler_t process, 426 struct machine *machine, 427 bool mmap_data) 428 { 429 unsigned long long t; 430 char bf[BUFSIZ]; 431 struct io io; 432 bool truncation = false; 433 unsigned long long timeout = proc_map_timeout * 1000000ULL; 434 int rc = 0; 435 const char *hugetlbfs_mnt = hugetlbfs__mountpoint(); 436 int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0; 437 438 if (machine__is_default_guest(machine)) 439 return 0; 440 441 snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps", 442 machine->root_dir, pid, pid); 443 444 io.fd = open(bf, O_RDONLY, 0); 445 if (io.fd < 0) { 446 /* 447 * We raced with a task exiting - just return: 448 */ 449 pr_debug("couldn't open %s\n", bf); 450 return -1; 451 } 452 io__init(&io, io.fd, bf, sizeof(bf)); 453 454 event->header.type = PERF_RECORD_MMAP2; 455 t = rdclock(); 456 457 while (!io.eof) { 458 static const char anonstr[] = "//anon"; 459 size_t size, aligned_size; 460 461 /* ensure null termination since stack will be reused. */ 462 event->mmap2.filename[0] = '\0'; 463 464 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ 465 if (!read_proc_maps_line(&io, 466 &event->mmap2.start, 467 &event->mmap2.len, 468 &event->mmap2.prot, 469 &event->mmap2.flags, 470 &event->mmap2.pgoff, 471 &event->mmap2.maj, 472 &event->mmap2.min, 473 &event->mmap2.ino, 474 sizeof(event->mmap2.filename), 475 event->mmap2.filename)) 476 continue; 477 478 if ((rdclock() - t) > timeout) { 479 pr_warning("Reading %s/proc/%d/task/%d/maps time out. " 480 "You may want to increase " 481 "the time limit by --proc-map-timeout\n", 482 machine->root_dir, pid, pid); 483 truncation = true; 484 goto out; 485 } 486 487 event->mmap2.ino_generation = 0; 488 489 /* 490 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c 491 */ 492 if (machine__is_host(machine)) 493 event->header.misc = PERF_RECORD_MISC_USER; 494 else 495 event->header.misc = PERF_RECORD_MISC_GUEST_USER; 496 497 if ((event->mmap2.prot & PROT_EXEC) == 0) { 498 if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0) 499 continue; 500 501 event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; 502 } 503 504 out: 505 if (truncation) 506 event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT; 507 508 if (!strcmp(event->mmap2.filename, "")) 509 strcpy(event->mmap2.filename, anonstr); 510 511 if (hugetlbfs_mnt_len && 512 !strncmp(event->mmap2.filename, hugetlbfs_mnt, 513 hugetlbfs_mnt_len)) { 514 strcpy(event->mmap2.filename, anonstr); 515 event->mmap2.flags |= MAP_HUGETLB; 516 } 517 518 size = strlen(event->mmap2.filename) + 1; 519 aligned_size = PERF_ALIGN(size, sizeof(u64)); 520 event->mmap2.len -= event->mmap.start; 521 event->mmap2.header.size = (sizeof(event->mmap2) - 522 (sizeof(event->mmap2.filename) - aligned_size)); 523 memset(event->mmap2.filename + size, 0, machine->id_hdr_size + 524 (aligned_size - size)); 525 event->mmap2.header.size += machine->id_hdr_size; 526 event->mmap2.pid = tgid; 527 event->mmap2.tid = pid; 528 529 if (symbol_conf.buildid_mmap2) 530 perf_record_mmap2__read_build_id(&event->mmap2, machine, false); 531 532 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { 533 rc = -1; 534 break; 535 } 536 537 if (truncation) 538 break; 539 } 540 541 close(io.fd); 542 return rc; 543 } 544 545 #ifdef HAVE_FILE_HANDLE 546 static int perf_event__synthesize_cgroup(const struct perf_tool *tool, 547 union perf_event *event, 548 char *path, size_t mount_len, 549 perf_event__handler_t process, 550 struct machine *machine) 551 { 552 size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path); 553 size_t path_len = strlen(path) - mount_len + 1; 554 struct { 555 struct file_handle fh; 556 uint64_t cgroup_id; 557 } handle; 558 int mount_id; 559 560 while (path_len % sizeof(u64)) 561 path[mount_len + path_len++] = '\0'; 562 563 memset(&event->cgroup, 0, event_size); 564 565 event->cgroup.header.type = PERF_RECORD_CGROUP; 566 event->cgroup.header.size = event_size + path_len + machine->id_hdr_size; 567 568 handle.fh.handle_bytes = sizeof(handle.cgroup_id); 569 if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) { 570 pr_debug("stat failed: %s\n", path); 571 return -1; 572 } 573 574 event->cgroup.id = handle.cgroup_id; 575 strncpy(event->cgroup.path, path + mount_len, path_len); 576 memset(event->cgroup.path + path_len, 0, machine->id_hdr_size); 577 578 if (perf_tool__process_synth_event(tool, event, machine, process) < 0) { 579 pr_debug("process synth event failed\n"); 580 return -1; 581 } 582 583 return 0; 584 } 585 586 static int perf_event__walk_cgroup_tree(const struct perf_tool *tool, 587 union perf_event *event, 588 char *path, size_t mount_len, 589 perf_event__handler_t process, 590 struct machine *machine) 591 { 592 size_t pos = strlen(path); 593 DIR *d; 594 struct dirent *dent; 595 int ret = 0; 596 597 if (perf_event__synthesize_cgroup(tool, event, path, mount_len, 598 process, machine) < 0) 599 return -1; 600 601 d = opendir(path); 602 if (d == NULL) { 603 pr_debug("failed to open directory: %s\n", path); 604 return -1; 605 } 606 607 while ((dent = readdir(d)) != NULL) { 608 if (dent->d_type != DT_DIR) 609 continue; 610 if (!strcmp(dent->d_name, ".") || 611 !strcmp(dent->d_name, "..")) 612 continue; 613 614 /* any sane path should be less than PATH_MAX */ 615 if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX) 616 continue; 617 618 if (path[pos - 1] != '/') 619 strcat(path, "/"); 620 strcat(path, dent->d_name); 621 622 ret = perf_event__walk_cgroup_tree(tool, event, path, 623 mount_len, process, machine); 624 if (ret < 0) 625 break; 626 627 path[pos] = '\0'; 628 } 629 630 closedir(d); 631 return ret; 632 } 633 634 int perf_event__synthesize_cgroups(const struct perf_tool *tool, 635 perf_event__handler_t process, 636 struct machine *machine) 637 { 638 union perf_event event; 639 char cgrp_root[PATH_MAX]; 640 size_t mount_len; /* length of mount point in the path */ 641 642 if (!tool || !tool->cgroup_events) 643 return 0; 644 645 if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) { 646 pr_debug("cannot find cgroup mount point\n"); 647 return -1; 648 } 649 650 mount_len = strlen(cgrp_root); 651 /* make sure the path starts with a slash (after mount point) */ 652 strcat(cgrp_root, "/"); 653 654 if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len, 655 process, machine) < 0) 656 return -1; 657 658 return 0; 659 } 660 #else 661 int perf_event__synthesize_cgroups(const struct perf_tool *tool __maybe_unused, 662 perf_event__handler_t process __maybe_unused, 663 struct machine *machine __maybe_unused) 664 { 665 return -1; 666 } 667 #endif 668 669 struct perf_event__synthesize_modules_maps_cb_args { 670 const struct perf_tool *tool; 671 perf_event__handler_t process; 672 struct machine *machine; 673 union perf_event *event; 674 }; 675 676 static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data) 677 { 678 struct perf_event__synthesize_modules_maps_cb_args *args = data; 679 union perf_event *event = args->event; 680 struct dso *dso; 681 size_t size; 682 683 if (!__map__is_kmodule(map)) 684 return 0; 685 686 dso = map__dso(map); 687 if (symbol_conf.buildid_mmap2) { 688 size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64)); 689 event->mmap2.header.type = PERF_RECORD_MMAP2; 690 event->mmap2.header.size = (sizeof(event->mmap2) - 691 (sizeof(event->mmap2.filename) - size)); 692 memset(event->mmap2.filename + size, 0, args->machine->id_hdr_size); 693 event->mmap2.header.size += args->machine->id_hdr_size; 694 event->mmap2.start = map__start(map); 695 event->mmap2.len = map__size(map); 696 event->mmap2.pid = args->machine->pid; 697 698 memcpy(event->mmap2.filename, dso__long_name(dso), dso__long_name_len(dso) + 1); 699 700 perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false); 701 } else { 702 size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64)); 703 event->mmap.header.type = PERF_RECORD_MMAP; 704 event->mmap.header.size = (sizeof(event->mmap) - 705 (sizeof(event->mmap.filename) - size)); 706 memset(event->mmap.filename + size, 0, args->machine->id_hdr_size); 707 event->mmap.header.size += args->machine->id_hdr_size; 708 event->mmap.start = map__start(map); 709 event->mmap.len = map__size(map); 710 event->mmap.pid = args->machine->pid; 711 712 memcpy(event->mmap.filename, dso__long_name(dso), dso__long_name_len(dso) + 1); 713 } 714 715 if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0) 716 return -1; 717 718 return 0; 719 } 720 721 int perf_event__synthesize_modules(const struct perf_tool *tool, perf_event__handler_t process, 722 struct machine *machine) 723 { 724 int rc; 725 struct maps *maps = machine__kernel_maps(machine); 726 struct perf_event__synthesize_modules_maps_cb_args args = { 727 .tool = tool, 728 .process = process, 729 .machine = machine, 730 }; 731 size_t size = symbol_conf.buildid_mmap2 732 ? sizeof(args.event->mmap2) 733 : sizeof(args.event->mmap); 734 735 args.event = zalloc(size + machine->id_hdr_size); 736 if (args.event == NULL) { 737 pr_debug("Not enough memory synthesizing mmap event " 738 "for kernel modules\n"); 739 return -1; 740 } 741 742 /* 743 * kernel uses 0 for user space maps, see kernel/perf_event.c 744 * __perf_event_mmap 745 */ 746 if (machine__is_host(machine)) 747 args.event->header.misc = PERF_RECORD_MISC_KERNEL; 748 else 749 args.event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 750 751 rc = maps__for_each_map(maps, perf_event__synthesize_modules_maps_cb, &args); 752 753 free(args.event); 754 return rc; 755 } 756 757 static int filter_task(const struct dirent *dirent) 758 { 759 return isdigit(dirent->d_name[0]); 760 } 761 762 static int __event__synthesize_thread(union perf_event *comm_event, 763 union perf_event *mmap_event, 764 union perf_event *fork_event, 765 union perf_event *namespaces_event, 766 pid_t pid, int full, perf_event__handler_t process, 767 const struct perf_tool *tool, struct machine *machine, 768 bool needs_mmap, bool mmap_data) 769 { 770 char filename[PATH_MAX]; 771 struct io_dir iod; 772 struct io_dirent64 *dent; 773 pid_t tgid, ppid; 774 int rc = 0; 775 776 /* special case: only send one comm event using passed in pid */ 777 if (!full) { 778 tgid = perf_event__synthesize_comm(tool, comm_event, pid, 779 process, machine); 780 781 if (tgid == -1) 782 return -1; 783 784 if (perf_event__synthesize_namespaces(tool, namespaces_event, pid, 785 tgid, process, machine) < 0) 786 return -1; 787 788 /* 789 * send mmap only for thread group leader 790 * see thread__init_maps() 791 */ 792 if (pid == tgid && needs_mmap && 793 perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 794 process, machine, mmap_data)) 795 return -1; 796 797 return 0; 798 } 799 800 if (machine__is_default_guest(machine)) 801 return 0; 802 803 snprintf(filename, sizeof(filename), "%s/proc/%d/task", 804 machine->root_dir, pid); 805 806 io_dir__init(&iod, open(filename, O_CLOEXEC | O_DIRECTORY | O_RDONLY)); 807 if (iod.dirfd < 0) 808 return -1; 809 810 while ((dent = io_dir__readdir(&iod)) != NULL) { 811 char *end; 812 pid_t _pid; 813 bool kernel_thread = false; 814 815 if (!isdigit(dent->d_name[0])) 816 continue; 817 818 _pid = strtol(dent->d_name, &end, 10); 819 if (*end) 820 continue; 821 822 /* some threads may exit just after scan, ignore it */ 823 if (perf_event__prepare_comm(comm_event, pid, _pid, machine, 824 &tgid, &ppid, &kernel_thread) != 0) 825 continue; 826 827 rc = -1; 828 if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid, 829 ppid, process, machine) < 0) 830 break; 831 832 if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid, 833 tgid, process, machine) < 0) 834 break; 835 836 /* 837 * Send the prepared comm event 838 */ 839 if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0) 840 break; 841 842 rc = 0; 843 if (_pid == pid && !kernel_thread && needs_mmap) { 844 /* process the parent's maps too */ 845 rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 846 process, machine, mmap_data); 847 if (rc) 848 break; 849 } 850 } 851 852 close(iod.dirfd); 853 854 return rc; 855 } 856 857 int perf_event__synthesize_thread_map(const struct perf_tool *tool, 858 struct perf_thread_map *threads, 859 perf_event__handler_t process, 860 struct machine *machine, 861 bool needs_mmap, bool mmap_data) 862 { 863 union perf_event *comm_event, *mmap_event, *fork_event; 864 union perf_event *namespaces_event; 865 int err = -1, thread, j; 866 867 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 868 if (comm_event == NULL) 869 goto out; 870 871 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 872 if (mmap_event == NULL) 873 goto out_free_comm; 874 875 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 876 if (fork_event == NULL) 877 goto out_free_mmap; 878 879 namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 880 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 881 machine->id_hdr_size); 882 if (namespaces_event == NULL) 883 goto out_free_fork; 884 885 err = 0; 886 for (thread = 0; thread < threads->nr; ++thread) { 887 if (__event__synthesize_thread(comm_event, mmap_event, 888 fork_event, namespaces_event, 889 perf_thread_map__pid(threads, thread), 0, 890 process, tool, machine, 891 needs_mmap, mmap_data)) { 892 err = -1; 893 break; 894 } 895 896 /* 897 * comm.pid is set to thread group id by 898 * perf_event__synthesize_comm 899 */ 900 if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) { 901 bool need_leader = true; 902 903 /* is thread group leader in thread_map? */ 904 for (j = 0; j < threads->nr; ++j) { 905 if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) { 906 need_leader = false; 907 break; 908 } 909 } 910 911 /* if not, generate events for it */ 912 if (need_leader && 913 __event__synthesize_thread(comm_event, mmap_event, 914 fork_event, namespaces_event, 915 comm_event->comm.pid, 0, 916 process, tool, machine, 917 needs_mmap, mmap_data)) { 918 err = -1; 919 break; 920 } 921 } 922 } 923 free(namespaces_event); 924 out_free_fork: 925 free(fork_event); 926 out_free_mmap: 927 free(mmap_event); 928 out_free_comm: 929 free(comm_event); 930 out: 931 return err; 932 } 933 934 static int __perf_event__synthesize_threads(const struct perf_tool *tool, 935 perf_event__handler_t process, 936 struct machine *machine, 937 bool needs_mmap, 938 bool mmap_data, 939 struct dirent **dirent, 940 int start, 941 int num) 942 { 943 union perf_event *comm_event, *mmap_event, *fork_event; 944 union perf_event *namespaces_event; 945 int err = -1; 946 char *end; 947 pid_t pid; 948 int i; 949 950 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 951 if (comm_event == NULL) 952 goto out; 953 954 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 955 if (mmap_event == NULL) 956 goto out_free_comm; 957 958 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 959 if (fork_event == NULL) 960 goto out_free_mmap; 961 962 namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 963 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 964 machine->id_hdr_size); 965 if (namespaces_event == NULL) 966 goto out_free_fork; 967 968 for (i = start; i < start + num; i++) { 969 if (!isdigit(dirent[i]->d_name[0])) 970 continue; 971 972 pid = (pid_t)strtol(dirent[i]->d_name, &end, 10); 973 /* only interested in proper numerical dirents */ 974 if (*end) 975 continue; 976 /* 977 * We may race with exiting thread, so don't stop just because 978 * one thread couldn't be synthesized. 979 */ 980 __event__synthesize_thread(comm_event, mmap_event, fork_event, 981 namespaces_event, pid, 1, process, 982 tool, machine, needs_mmap, mmap_data); 983 } 984 err = 0; 985 986 free(namespaces_event); 987 out_free_fork: 988 free(fork_event); 989 out_free_mmap: 990 free(mmap_event); 991 out_free_comm: 992 free(comm_event); 993 out: 994 return err; 995 } 996 997 struct synthesize_threads_arg { 998 const struct perf_tool *tool; 999 perf_event__handler_t process; 1000 struct machine *machine; 1001 bool needs_mmap; 1002 bool mmap_data; 1003 struct dirent **dirent; 1004 int num; 1005 int start; 1006 }; 1007 1008 static void *synthesize_threads_worker(void *arg) 1009 { 1010 struct synthesize_threads_arg *args = arg; 1011 1012 __perf_event__synthesize_threads(args->tool, args->process, 1013 args->machine, 1014 args->needs_mmap, args->mmap_data, 1015 args->dirent, 1016 args->start, args->num); 1017 return NULL; 1018 } 1019 1020 int perf_event__synthesize_threads(const struct perf_tool *tool, 1021 perf_event__handler_t process, 1022 struct machine *machine, 1023 bool needs_mmap, bool mmap_data, 1024 unsigned int nr_threads_synthesize) 1025 { 1026 struct synthesize_threads_arg *args = NULL; 1027 pthread_t *synthesize_threads = NULL; 1028 char proc_path[PATH_MAX]; 1029 struct dirent **dirent; 1030 int num_per_thread; 1031 int m, n, i, j; 1032 int thread_nr; 1033 int base = 0; 1034 int err = -1; 1035 1036 1037 if (machine__is_default_guest(machine)) 1038 return 0; 1039 1040 snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir); 1041 n = scandir(proc_path, &dirent, filter_task, NULL); 1042 if (n < 0) 1043 return err; 1044 1045 if (nr_threads_synthesize == UINT_MAX) 1046 thread_nr = sysconf(_SC_NPROCESSORS_ONLN); 1047 else 1048 thread_nr = nr_threads_synthesize; 1049 1050 if (thread_nr <= 1) { 1051 err = __perf_event__synthesize_threads(tool, process, 1052 machine, 1053 needs_mmap, mmap_data, 1054 dirent, base, n); 1055 goto free_dirent; 1056 } 1057 if (thread_nr > n) 1058 thread_nr = n; 1059 1060 synthesize_threads = calloc(thread_nr, sizeof(pthread_t)); 1061 if (synthesize_threads == NULL) 1062 goto free_dirent; 1063 1064 args = calloc(thread_nr, sizeof(*args)); 1065 if (args == NULL) 1066 goto free_threads; 1067 1068 num_per_thread = n / thread_nr; 1069 m = n % thread_nr; 1070 for (i = 0; i < thread_nr; i++) { 1071 args[i].tool = tool; 1072 args[i].process = process; 1073 args[i].machine = machine; 1074 args[i].needs_mmap = needs_mmap; 1075 args[i].mmap_data = mmap_data; 1076 args[i].dirent = dirent; 1077 } 1078 for (i = 0; i < m; i++) { 1079 args[i].num = num_per_thread + 1; 1080 args[i].start = i * args[i].num; 1081 } 1082 if (i != 0) 1083 base = args[i-1].start + args[i-1].num; 1084 for (j = i; j < thread_nr; j++) { 1085 args[j].num = num_per_thread; 1086 args[j].start = base + (j - i) * args[i].num; 1087 } 1088 1089 for (i = 0; i < thread_nr; i++) { 1090 if (pthread_create(&synthesize_threads[i], NULL, 1091 synthesize_threads_worker, &args[i])) 1092 goto out_join; 1093 } 1094 err = 0; 1095 out_join: 1096 for (i = 0; i < thread_nr; i++) 1097 pthread_join(synthesize_threads[i], NULL); 1098 free(args); 1099 free_threads: 1100 free(synthesize_threads); 1101 free_dirent: 1102 for (i = 0; i < n; i++) 1103 zfree(&dirent[i]); 1104 free(dirent); 1105 1106 return err; 1107 } 1108 1109 int __weak perf_event__synthesize_extra_kmaps(const struct perf_tool *tool __maybe_unused, 1110 perf_event__handler_t process __maybe_unused, 1111 struct machine *machine __maybe_unused) 1112 { 1113 return 0; 1114 } 1115 1116 static int __perf_event__synthesize_kernel_mmap(const struct perf_tool *tool, 1117 perf_event__handler_t process, 1118 struct machine *machine) 1119 { 1120 union perf_event *event; 1121 size_t size = symbol_conf.buildid_mmap2 ? 1122 sizeof(event->mmap2) : sizeof(event->mmap); 1123 struct map *map = machine__kernel_map(machine); 1124 struct kmap *kmap; 1125 int err; 1126 1127 if (map == NULL) 1128 return -1; 1129 1130 kmap = map__kmap(map); 1131 if (!kmap->ref_reloc_sym) 1132 return -1; 1133 1134 /* 1135 * We should get this from /sys/kernel/sections/.text, but till that is 1136 * available use this, and after it is use this as a fallback for older 1137 * kernels. 1138 */ 1139 event = zalloc(size + machine->id_hdr_size); 1140 if (event == NULL) { 1141 pr_debug("Not enough memory synthesizing mmap event " 1142 "for kernel modules\n"); 1143 return -1; 1144 } 1145 1146 if (machine__is_host(machine)) { 1147 /* 1148 * kernel uses PERF_RECORD_MISC_USER for user space maps, 1149 * see kernel/perf_event.c __perf_event_mmap 1150 */ 1151 event->header.misc = PERF_RECORD_MISC_KERNEL; 1152 } else { 1153 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 1154 } 1155 1156 if (symbol_conf.buildid_mmap2) { 1157 size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename), 1158 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; 1159 size = PERF_ALIGN(size, sizeof(u64)); 1160 event->mmap2.header.type = PERF_RECORD_MMAP2; 1161 event->mmap2.header.size = (sizeof(event->mmap2) - 1162 (sizeof(event->mmap2.filename) - size) + machine->id_hdr_size); 1163 event->mmap2.pgoff = kmap->ref_reloc_sym->addr; 1164 event->mmap2.start = map__start(map); 1165 event->mmap2.len = map__end(map) - event->mmap.start; 1166 event->mmap2.pid = machine->pid; 1167 1168 perf_record_mmap2__read_build_id(&event->mmap2, machine, true); 1169 } else { 1170 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), 1171 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; 1172 size = PERF_ALIGN(size, sizeof(u64)); 1173 event->mmap.header.type = PERF_RECORD_MMAP; 1174 event->mmap.header.size = (sizeof(event->mmap) - 1175 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); 1176 event->mmap.pgoff = kmap->ref_reloc_sym->addr; 1177 event->mmap.start = map__start(map); 1178 event->mmap.len = map__end(map) - event->mmap.start; 1179 event->mmap.pid = machine->pid; 1180 } 1181 1182 err = perf_tool__process_synth_event(tool, event, machine, process); 1183 free(event); 1184 1185 return err; 1186 } 1187 1188 int perf_event__synthesize_kernel_mmap(const struct perf_tool *tool, 1189 perf_event__handler_t process, 1190 struct machine *machine) 1191 { 1192 int err; 1193 1194 err = __perf_event__synthesize_kernel_mmap(tool, process, machine); 1195 if (err < 0) 1196 return err; 1197 1198 return perf_event__synthesize_extra_kmaps(tool, process, machine); 1199 } 1200 1201 int perf_event__synthesize_thread_map2(const struct perf_tool *tool, 1202 struct perf_thread_map *threads, 1203 perf_event__handler_t process, 1204 struct machine *machine) 1205 { 1206 union perf_event *event; 1207 int i, err, size; 1208 1209 size = sizeof(event->thread_map); 1210 size += threads->nr * sizeof(event->thread_map.entries[0]); 1211 1212 event = zalloc(size); 1213 if (!event) 1214 return -ENOMEM; 1215 1216 event->header.type = PERF_RECORD_THREAD_MAP; 1217 event->header.size = size; 1218 event->thread_map.nr = threads->nr; 1219 1220 for (i = 0; i < threads->nr; i++) { 1221 struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i]; 1222 char *comm = perf_thread_map__comm(threads, i); 1223 1224 if (!comm) 1225 comm = (char *) ""; 1226 1227 entry->pid = perf_thread_map__pid(threads, i); 1228 strncpy((char *) &entry->comm, comm, sizeof(entry->comm)); 1229 } 1230 1231 err = process(tool, event, NULL, machine); 1232 1233 free(event); 1234 return err; 1235 } 1236 1237 struct synthesize_cpu_map_data { 1238 const struct perf_cpu_map *map; 1239 int nr; 1240 int min_cpu; 1241 int max_cpu; 1242 int has_any_cpu; 1243 int type; 1244 size_t size; 1245 struct perf_record_cpu_map_data *data; 1246 }; 1247 1248 static void synthesize_cpus(struct synthesize_cpu_map_data *data) 1249 { 1250 data->data->type = PERF_CPU_MAP__CPUS; 1251 data->data->cpus_data.nr = data->nr; 1252 for (int i = 0; i < data->nr; i++) 1253 data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu; 1254 } 1255 1256 static void synthesize_mask(struct synthesize_cpu_map_data *data) 1257 { 1258 int idx; 1259 struct perf_cpu cpu; 1260 1261 /* Due to padding, the 4bytes per entry mask variant is always smaller. */ 1262 data->data->type = PERF_CPU_MAP__MASK; 1263 data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu); 1264 data->data->mask32_data.long_size = 4; 1265 1266 perf_cpu_map__for_each_cpu(cpu, idx, data->map) { 1267 int bit_word = cpu.cpu / 32; 1268 u32 bit_mask = 1U << (cpu.cpu & 31); 1269 1270 data->data->mask32_data.mask[bit_word] |= bit_mask; 1271 } 1272 } 1273 1274 static void synthesize_range_cpus(struct synthesize_cpu_map_data *data) 1275 { 1276 data->data->type = PERF_CPU_MAP__RANGE_CPUS; 1277 data->data->range_cpu_data.any_cpu = data->has_any_cpu; 1278 data->data->range_cpu_data.start_cpu = data->min_cpu; 1279 data->data->range_cpu_data.end_cpu = data->max_cpu; 1280 } 1281 1282 static void *cpu_map_data__alloc(struct synthesize_cpu_map_data *syn_data, 1283 size_t header_size) 1284 { 1285 size_t size_cpus, size_mask; 1286 1287 syn_data->nr = perf_cpu_map__nr(syn_data->map); 1288 syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0; 1289 1290 syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu; 1291 syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu; 1292 if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) { 1293 /* A consecutive range of CPUs can be encoded using a range. */ 1294 assert(sizeof(u16) + sizeof(struct perf_record_range_cpu_map) == sizeof(u64)); 1295 syn_data->type = PERF_CPU_MAP__RANGE_CPUS; 1296 syn_data->size = header_size + sizeof(u64); 1297 return zalloc(syn_data->size); 1298 } 1299 1300 size_cpus = sizeof(u16) + sizeof(struct cpu_map_entries) + syn_data->nr * sizeof(u16); 1301 /* Due to padding, the 4bytes per entry mask variant is always smaller. */ 1302 size_mask = sizeof(u16) + sizeof(struct perf_record_mask_cpu_map32) + 1303 BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32); 1304 if (syn_data->has_any_cpu || size_cpus < size_mask) { 1305 /* Follow the CPU map encoding. */ 1306 syn_data->type = PERF_CPU_MAP__CPUS; 1307 syn_data->size = header_size + PERF_ALIGN(size_cpus, sizeof(u64)); 1308 return zalloc(syn_data->size); 1309 } 1310 /* Encode using a bitmask. */ 1311 syn_data->type = PERF_CPU_MAP__MASK; 1312 syn_data->size = header_size + PERF_ALIGN(size_mask, sizeof(u64)); 1313 return zalloc(syn_data->size); 1314 } 1315 1316 static void cpu_map_data__synthesize(struct synthesize_cpu_map_data *data) 1317 { 1318 switch (data->type) { 1319 case PERF_CPU_MAP__CPUS: 1320 synthesize_cpus(data); 1321 break; 1322 case PERF_CPU_MAP__MASK: 1323 synthesize_mask(data); 1324 break; 1325 case PERF_CPU_MAP__RANGE_CPUS: 1326 synthesize_range_cpus(data); 1327 break; 1328 default: 1329 break; 1330 } 1331 } 1332 1333 static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map) 1334 { 1335 struct synthesize_cpu_map_data syn_data = { .map = map }; 1336 struct perf_record_cpu_map *event; 1337 1338 1339 event = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header)); 1340 if (!event) 1341 return NULL; 1342 1343 syn_data.data = &event->data; 1344 event->header.type = PERF_RECORD_CPU_MAP; 1345 event->header.size = syn_data.size; 1346 cpu_map_data__synthesize(&syn_data); 1347 return event; 1348 } 1349 1350 1351 int perf_event__synthesize_cpu_map(const struct perf_tool *tool, 1352 const struct perf_cpu_map *map, 1353 perf_event__handler_t process, 1354 struct machine *machine) 1355 { 1356 struct perf_record_cpu_map *event; 1357 int err; 1358 1359 event = cpu_map_event__new(map); 1360 if (!event) 1361 return -ENOMEM; 1362 1363 err = process(tool, (union perf_event *) event, NULL, machine); 1364 1365 free(event); 1366 return err; 1367 } 1368 1369 int perf_event__synthesize_stat_config(const struct perf_tool *tool, 1370 struct perf_stat_config *config, 1371 perf_event__handler_t process, 1372 struct machine *machine) 1373 { 1374 struct perf_record_stat_config *event; 1375 int size, i = 0, err; 1376 1377 size = sizeof(*event); 1378 size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0])); 1379 1380 event = zalloc(size); 1381 if (!event) 1382 return -ENOMEM; 1383 1384 event->header.type = PERF_RECORD_STAT_CONFIG; 1385 event->header.size = size; 1386 event->nr = PERF_STAT_CONFIG_TERM__MAX; 1387 1388 #define ADD(__term, __val) \ 1389 event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \ 1390 event->data[i].val = __val; \ 1391 i++; 1392 1393 ADD(AGGR_MODE, config->aggr_mode) 1394 ADD(INTERVAL, config->interval) 1395 ADD(SCALE, config->scale) 1396 ADD(AGGR_LEVEL, config->aggr_level) 1397 1398 WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX, 1399 "stat config terms unbalanced\n"); 1400 #undef ADD 1401 1402 err = process(tool, (union perf_event *) event, NULL, machine); 1403 1404 free(event); 1405 return err; 1406 } 1407 1408 int perf_event__synthesize_stat(const struct perf_tool *tool, 1409 struct perf_cpu cpu, u32 thread, u64 id, 1410 struct perf_counts_values *count, 1411 perf_event__handler_t process, 1412 struct machine *machine) 1413 { 1414 struct perf_record_stat event; 1415 1416 event.header.type = PERF_RECORD_STAT; 1417 event.header.size = sizeof(event); 1418 event.header.misc = 0; 1419 1420 event.id = id; 1421 event.cpu = cpu.cpu; 1422 event.thread = thread; 1423 event.val = count->val; 1424 event.ena = count->ena; 1425 event.run = count->run; 1426 1427 return process(tool, (union perf_event *) &event, NULL, machine); 1428 } 1429 1430 int perf_event__synthesize_stat_round(const struct perf_tool *tool, 1431 u64 evtime, u64 type, 1432 perf_event__handler_t process, 1433 struct machine *machine) 1434 { 1435 struct perf_record_stat_round event; 1436 1437 event.header.type = PERF_RECORD_STAT_ROUND; 1438 event.header.size = sizeof(event); 1439 event.header.misc = 0; 1440 1441 event.time = evtime; 1442 event.type = type; 1443 1444 return process(tool, (union perf_event *) &event, NULL, machine); 1445 } 1446 1447 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format) 1448 { 1449 size_t sz, result = sizeof(struct perf_record_sample); 1450 1451 if (type & PERF_SAMPLE_IDENTIFIER) 1452 result += sizeof(u64); 1453 1454 if (type & PERF_SAMPLE_IP) 1455 result += sizeof(u64); 1456 1457 if (type & PERF_SAMPLE_TID) 1458 result += sizeof(u64); 1459 1460 if (type & PERF_SAMPLE_TIME) 1461 result += sizeof(u64); 1462 1463 if (type & PERF_SAMPLE_ADDR) 1464 result += sizeof(u64); 1465 1466 if (type & PERF_SAMPLE_ID) 1467 result += sizeof(u64); 1468 1469 if (type & PERF_SAMPLE_STREAM_ID) 1470 result += sizeof(u64); 1471 1472 if (type & PERF_SAMPLE_CPU) 1473 result += sizeof(u64); 1474 1475 if (type & PERF_SAMPLE_PERIOD) 1476 result += sizeof(u64); 1477 1478 if (type & PERF_SAMPLE_READ) { 1479 result += sizeof(u64); 1480 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1481 result += sizeof(u64); 1482 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1483 result += sizeof(u64); 1484 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1485 if (read_format & PERF_FORMAT_GROUP) { 1486 sz = sample_read_value_size(read_format); 1487 result += sz * sample->read.group.nr; 1488 } else { 1489 result += sizeof(u64); 1490 if (read_format & PERF_FORMAT_LOST) 1491 result += sizeof(u64); 1492 } 1493 } 1494 1495 if (type & PERF_SAMPLE_CALLCHAIN) { 1496 sz = (sample->callchain->nr + 1) * sizeof(u64); 1497 result += sz; 1498 } 1499 1500 if (type & PERF_SAMPLE_RAW) { 1501 result += sizeof(u32); 1502 result += sample->raw_size; 1503 } 1504 1505 if (type & PERF_SAMPLE_BRANCH_STACK) { 1506 sz = sample->branch_stack->nr * sizeof(struct branch_entry); 1507 /* nr, hw_idx */ 1508 sz += 2 * sizeof(u64); 1509 result += sz; 1510 } 1511 1512 if (type & PERF_SAMPLE_REGS_USER) { 1513 if (sample->user_regs && sample->user_regs->abi) { 1514 result += sizeof(u64); 1515 sz = hweight64(sample->user_regs->mask) * sizeof(u64); 1516 result += sz; 1517 } else { 1518 result += sizeof(u64); 1519 } 1520 } 1521 1522 if (type & PERF_SAMPLE_STACK_USER) { 1523 sz = sample->user_stack.size; 1524 result += sizeof(u64); 1525 if (sz) { 1526 result += sz; 1527 result += sizeof(u64); 1528 } 1529 } 1530 1531 if (type & PERF_SAMPLE_WEIGHT_TYPE) 1532 result += sizeof(u64); 1533 1534 if (type & PERF_SAMPLE_DATA_SRC) 1535 result += sizeof(u64); 1536 1537 if (type & PERF_SAMPLE_TRANSACTION) 1538 result += sizeof(u64); 1539 1540 if (type & PERF_SAMPLE_REGS_INTR) { 1541 if (sample->intr_regs && sample->intr_regs->abi) { 1542 result += sizeof(u64); 1543 sz = hweight64(sample->intr_regs->mask) * sizeof(u64); 1544 result += sz; 1545 } else { 1546 result += sizeof(u64); 1547 } 1548 } 1549 1550 if (type & PERF_SAMPLE_PHYS_ADDR) 1551 result += sizeof(u64); 1552 1553 if (type & PERF_SAMPLE_CGROUP) 1554 result += sizeof(u64); 1555 1556 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) 1557 result += sizeof(u64); 1558 1559 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) 1560 result += sizeof(u64); 1561 1562 if (type & PERF_SAMPLE_AUX) { 1563 result += sizeof(u64); 1564 result += sample->aux_sample.size; 1565 } 1566 1567 return result; 1568 } 1569 1570 void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data, 1571 __u64 *array, u64 type __maybe_unused) 1572 { 1573 *array = data->weight; 1574 } 1575 1576 static __u64 *copy_read_group_values(__u64 *array, __u64 read_format, 1577 const struct perf_sample *sample) 1578 { 1579 size_t sz = sample_read_value_size(read_format); 1580 struct sample_read_value *v = sample->read.group.values; 1581 1582 sample_read_group__for_each(v, sample->read.group.nr, read_format) { 1583 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1584 memcpy(array, v, sz); 1585 array = (void *)array + sz; 1586 } 1587 return array; 1588 } 1589 1590 int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, 1591 const struct perf_sample *sample) 1592 { 1593 __u64 *array; 1594 size_t sz; 1595 /* 1596 * used for cross-endian analysis. See git commit 65014ab3 1597 * for why this goofiness is needed. 1598 */ 1599 union u64_swap u; 1600 1601 array = event->sample.array; 1602 1603 if (type & PERF_SAMPLE_IDENTIFIER) { 1604 *array = sample->id; 1605 array++; 1606 } 1607 1608 if (type & PERF_SAMPLE_IP) { 1609 *array = sample->ip; 1610 array++; 1611 } 1612 1613 if (type & PERF_SAMPLE_TID) { 1614 u.val32[0] = sample->pid; 1615 u.val32[1] = sample->tid; 1616 *array = u.val64; 1617 array++; 1618 } 1619 1620 if (type & PERF_SAMPLE_TIME) { 1621 *array = sample->time; 1622 array++; 1623 } 1624 1625 if (type & PERF_SAMPLE_ADDR) { 1626 *array = sample->addr; 1627 array++; 1628 } 1629 1630 if (type & PERF_SAMPLE_ID) { 1631 *array = sample->id; 1632 array++; 1633 } 1634 1635 if (type & PERF_SAMPLE_STREAM_ID) { 1636 *array = sample->stream_id; 1637 array++; 1638 } 1639 1640 if (type & PERF_SAMPLE_CPU) { 1641 u.val32[0] = sample->cpu; 1642 u.val32[1] = 0; 1643 *array = u.val64; 1644 array++; 1645 } 1646 1647 if (type & PERF_SAMPLE_PERIOD) { 1648 *array = sample->period; 1649 array++; 1650 } 1651 1652 if (type & PERF_SAMPLE_READ) { 1653 if (read_format & PERF_FORMAT_GROUP) 1654 *array = sample->read.group.nr; 1655 else 1656 *array = sample->read.one.value; 1657 array++; 1658 1659 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1660 *array = sample->read.time_enabled; 1661 array++; 1662 } 1663 1664 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 1665 *array = sample->read.time_running; 1666 array++; 1667 } 1668 1669 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1670 if (read_format & PERF_FORMAT_GROUP) { 1671 array = copy_read_group_values(array, read_format, 1672 sample); 1673 } else { 1674 *array = sample->read.one.id; 1675 array++; 1676 1677 if (read_format & PERF_FORMAT_LOST) { 1678 *array = sample->read.one.lost; 1679 array++; 1680 } 1681 } 1682 } 1683 1684 if (type & PERF_SAMPLE_CALLCHAIN) { 1685 sz = (sample->callchain->nr + 1) * sizeof(u64); 1686 memcpy(array, sample->callchain, sz); 1687 array = (void *)array + sz; 1688 } 1689 1690 if (type & PERF_SAMPLE_RAW) { 1691 u32 *array32 = (void *)array; 1692 1693 *array32 = sample->raw_size; 1694 array32++; 1695 1696 memcpy(array32, sample->raw_data, sample->raw_size); 1697 array = (void *)(array32 + (sample->raw_size / sizeof(u32))); 1698 1699 /* make sure the array is 64-bit aligned */ 1700 BUG_ON(((long)array) % sizeof(u64)); 1701 } 1702 1703 if (type & PERF_SAMPLE_BRANCH_STACK) { 1704 sz = sample->branch_stack->nr * sizeof(struct branch_entry); 1705 /* nr, hw_idx */ 1706 sz += 2 * sizeof(u64); 1707 memcpy(array, sample->branch_stack, sz); 1708 array = (void *)array + sz; 1709 } 1710 1711 if (type & PERF_SAMPLE_REGS_USER) { 1712 if (sample->user_regs && sample->user_regs->abi) { 1713 *array++ = sample->user_regs->abi; 1714 sz = hweight64(sample->user_regs->mask) * sizeof(u64); 1715 memcpy(array, sample->user_regs->regs, sz); 1716 array = (void *)array + sz; 1717 } else { 1718 *array++ = 0; 1719 } 1720 } 1721 1722 if (type & PERF_SAMPLE_STACK_USER) { 1723 sz = sample->user_stack.size; 1724 *array++ = sz; 1725 if (sz) { 1726 memcpy(array, sample->user_stack.data, sz); 1727 array = (void *)array + sz; 1728 *array++ = sz; 1729 } 1730 } 1731 1732 if (type & PERF_SAMPLE_WEIGHT_TYPE) { 1733 arch_perf_synthesize_sample_weight(sample, array, type); 1734 array++; 1735 } 1736 1737 if (type & PERF_SAMPLE_DATA_SRC) { 1738 *array = sample->data_src; 1739 array++; 1740 } 1741 1742 if (type & PERF_SAMPLE_TRANSACTION) { 1743 *array = sample->transaction; 1744 array++; 1745 } 1746 1747 if (type & PERF_SAMPLE_REGS_INTR) { 1748 if (sample->intr_regs && sample->intr_regs->abi) { 1749 *array++ = sample->intr_regs->abi; 1750 sz = hweight64(sample->intr_regs->mask) * sizeof(u64); 1751 memcpy(array, sample->intr_regs->regs, sz); 1752 array = (void *)array + sz; 1753 } else { 1754 *array++ = 0; 1755 } 1756 } 1757 1758 if (type & PERF_SAMPLE_PHYS_ADDR) { 1759 *array = sample->phys_addr; 1760 array++; 1761 } 1762 1763 if (type & PERF_SAMPLE_CGROUP) { 1764 *array = sample->cgroup; 1765 array++; 1766 } 1767 1768 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) { 1769 *array = sample->data_page_size; 1770 array++; 1771 } 1772 1773 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) { 1774 *array = sample->code_page_size; 1775 array++; 1776 } 1777 1778 if (type & PERF_SAMPLE_AUX) { 1779 sz = sample->aux_sample.size; 1780 *array++ = sz; 1781 memcpy(array, sample->aux_sample.data, sz); 1782 array = (void *)array + sz; 1783 } 1784 1785 return 0; 1786 } 1787 1788 int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample) 1789 { 1790 __u64 *start = array; 1791 1792 /* 1793 * used for cross-endian analysis. See git commit 65014ab3 1794 * for why this goofiness is needed. 1795 */ 1796 union u64_swap u; 1797 1798 if (type & PERF_SAMPLE_TID) { 1799 u.val32[0] = sample->pid; 1800 u.val32[1] = sample->tid; 1801 *array = u.val64; 1802 array++; 1803 } 1804 1805 if (type & PERF_SAMPLE_TIME) { 1806 *array = sample->time; 1807 array++; 1808 } 1809 1810 if (type & PERF_SAMPLE_ID) { 1811 *array = sample->id; 1812 array++; 1813 } 1814 1815 if (type & PERF_SAMPLE_STREAM_ID) { 1816 *array = sample->stream_id; 1817 array++; 1818 } 1819 1820 if (type & PERF_SAMPLE_CPU) { 1821 u.val32[0] = sample->cpu; 1822 u.val32[1] = 0; 1823 *array = u.val64; 1824 array++; 1825 } 1826 1827 if (type & PERF_SAMPLE_IDENTIFIER) { 1828 *array = sample->id; 1829 array++; 1830 } 1831 1832 return (void *)array - (void *)start; 1833 } 1834 1835 int __perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process, 1836 struct evlist *evlist, struct machine *machine, size_t from) 1837 { 1838 union perf_event *ev; 1839 struct evsel *evsel; 1840 size_t nr = 0, i = 0, sz, max_nr, n, pos; 1841 size_t e1_sz = sizeof(struct id_index_entry); 1842 size_t e2_sz = sizeof(struct id_index_entry_2); 1843 size_t etot_sz = e1_sz + e2_sz; 1844 bool e2_needed = false; 1845 int err; 1846 1847 max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz; 1848 1849 pos = 0; 1850 evlist__for_each_entry(evlist, evsel) { 1851 if (pos++ < from) 1852 continue; 1853 nr += evsel->core.ids; 1854 } 1855 1856 if (!nr) 1857 return 0; 1858 1859 pr_debug2("Synthesizing id index\n"); 1860 1861 n = nr > max_nr ? max_nr : nr; 1862 sz = sizeof(struct perf_record_id_index) + n * etot_sz; 1863 ev = zalloc(sz); 1864 if (!ev) 1865 return -ENOMEM; 1866 1867 sz = sizeof(struct perf_record_id_index) + n * e1_sz; 1868 1869 ev->id_index.header.type = PERF_RECORD_ID_INDEX; 1870 ev->id_index.nr = n; 1871 1872 pos = 0; 1873 evlist__for_each_entry(evlist, evsel) { 1874 u32 j; 1875 1876 if (pos++ < from) 1877 continue; 1878 for (j = 0; j < evsel->core.ids; j++, i++) { 1879 struct id_index_entry *e; 1880 struct id_index_entry_2 *e2; 1881 struct perf_sample_id *sid; 1882 1883 if (i >= n) { 1884 ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0); 1885 err = process(tool, ev, NULL, machine); 1886 if (err) 1887 goto out_err; 1888 nr -= n; 1889 i = 0; 1890 e2_needed = false; 1891 } 1892 1893 e = &ev->id_index.entries[i]; 1894 1895 e->id = evsel->core.id[j]; 1896 1897 sid = evlist__id2sid(evlist, e->id); 1898 if (!sid) { 1899 free(ev); 1900 return -ENOENT; 1901 } 1902 1903 e->idx = sid->idx; 1904 e->cpu = sid->cpu.cpu; 1905 e->tid = sid->tid; 1906 1907 if (sid->machine_pid) 1908 e2_needed = true; 1909 1910 e2 = (void *)ev + sz; 1911 e2[i].machine_pid = sid->machine_pid; 1912 e2[i].vcpu = sid->vcpu.cpu; 1913 } 1914 } 1915 1916 sz = sizeof(struct perf_record_id_index) + nr * e1_sz; 1917 ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0); 1918 ev->id_index.nr = nr; 1919 1920 err = process(tool, ev, NULL, machine); 1921 out_err: 1922 free(ev); 1923 1924 return err; 1925 } 1926 1927 int perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process, 1928 struct evlist *evlist, struct machine *machine) 1929 { 1930 return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0); 1931 } 1932 1933 int __machine__synthesize_threads(struct machine *machine, const struct perf_tool *tool, 1934 struct target *target, struct perf_thread_map *threads, 1935 perf_event__handler_t process, bool needs_mmap, 1936 bool data_mmap, unsigned int nr_threads_synthesize) 1937 { 1938 /* 1939 * When perf runs in non-root PID namespace, and the namespace's proc FS 1940 * is not mounted, nsinfo__is_in_root_namespace() returns false. 1941 * In this case, the proc FS is coming for the parent namespace, thus 1942 * perf tool will wrongly gather process info from its parent PID 1943 * namespace. 1944 * 1945 * To avoid the confusion that the perf tool runs in a child PID 1946 * namespace but it synthesizes thread info from its parent PID 1947 * namespace, returns failure with warning. 1948 */ 1949 if (!nsinfo__is_in_root_namespace()) { 1950 pr_err("Perf runs in non-root PID namespace but it tries to "); 1951 pr_err("gather process info from its parent PID namespace.\n"); 1952 pr_err("Please mount the proc file system properly, e.g. "); 1953 pr_err("add the option '--mount-proc' for unshare command.\n"); 1954 return -EPERM; 1955 } 1956 1957 if (target__has_task(target)) 1958 return perf_event__synthesize_thread_map(tool, threads, process, machine, 1959 needs_mmap, data_mmap); 1960 else if (target__has_cpu(target)) 1961 return perf_event__synthesize_threads(tool, process, machine, 1962 needs_mmap, data_mmap, 1963 nr_threads_synthesize); 1964 /* command specified */ 1965 return 0; 1966 } 1967 1968 int machine__synthesize_threads(struct machine *machine, struct target *target, 1969 struct perf_thread_map *threads, bool needs_mmap, 1970 bool data_mmap, unsigned int nr_threads_synthesize) 1971 { 1972 return __machine__synthesize_threads(machine, NULL, target, threads, 1973 perf_event__process, needs_mmap, 1974 data_mmap, nr_threads_synthesize); 1975 } 1976 1977 static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id) 1978 { 1979 struct perf_record_event_update *ev; 1980 1981 size += sizeof(*ev); 1982 size = PERF_ALIGN(size, sizeof(u64)); 1983 1984 ev = zalloc(size); 1985 if (ev) { 1986 ev->header.type = PERF_RECORD_EVENT_UPDATE; 1987 ev->header.size = (u16)size; 1988 ev->type = type; 1989 ev->id = id; 1990 } 1991 return ev; 1992 } 1993 1994 int perf_event__synthesize_event_update_unit(const struct perf_tool *tool, struct evsel *evsel, 1995 perf_event__handler_t process) 1996 { 1997 size_t size = strlen(evsel->unit); 1998 struct perf_record_event_update *ev; 1999 int err; 2000 2001 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]); 2002 if (ev == NULL) 2003 return -ENOMEM; 2004 2005 strlcpy(ev->unit, evsel->unit, size + 1); 2006 err = process(tool, (union perf_event *)ev, NULL, NULL); 2007 free(ev); 2008 return err; 2009 } 2010 2011 int perf_event__synthesize_event_update_scale(const struct perf_tool *tool, struct evsel *evsel, 2012 perf_event__handler_t process) 2013 { 2014 struct perf_record_event_update *ev; 2015 struct perf_record_event_update_scale *ev_data; 2016 int err; 2017 2018 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]); 2019 if (ev == NULL) 2020 return -ENOMEM; 2021 2022 ev->scale.scale = evsel->scale; 2023 err = process(tool, (union perf_event *)ev, NULL, NULL); 2024 free(ev); 2025 return err; 2026 } 2027 2028 int perf_event__synthesize_event_update_name(const struct perf_tool *tool, struct evsel *evsel, 2029 perf_event__handler_t process) 2030 { 2031 struct perf_record_event_update *ev; 2032 size_t len = strlen(evsel__name(evsel)); 2033 int err; 2034 2035 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]); 2036 if (ev == NULL) 2037 return -ENOMEM; 2038 2039 strlcpy(ev->name, evsel->name, len + 1); 2040 err = process(tool, (union perf_event *)ev, NULL, NULL); 2041 free(ev); 2042 return err; 2043 } 2044 2045 int perf_event__synthesize_event_update_cpus(const struct perf_tool *tool, struct evsel *evsel, 2046 perf_event__handler_t process) 2047 { 2048 struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus }; 2049 struct perf_record_event_update *ev; 2050 int err; 2051 2052 ev = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header) + 2 * sizeof(u64)); 2053 if (!ev) 2054 return -ENOMEM; 2055 2056 syn_data.data = &ev->cpus.cpus; 2057 ev->header.type = PERF_RECORD_EVENT_UPDATE; 2058 ev->header.size = (u16)syn_data.size; 2059 ev->type = PERF_EVENT_UPDATE__CPUS; 2060 ev->id = evsel->core.id[0]; 2061 cpu_map_data__synthesize(&syn_data); 2062 2063 err = process(tool, (union perf_event *)ev, NULL, NULL); 2064 free(ev); 2065 return err; 2066 } 2067 2068 int perf_event__synthesize_attrs(const struct perf_tool *tool, struct evlist *evlist, 2069 perf_event__handler_t process) 2070 { 2071 struct evsel *evsel; 2072 int err = 0; 2073 2074 evlist__for_each_entry(evlist, evsel) { 2075 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids, 2076 evsel->core.id, process); 2077 if (err) { 2078 pr_debug("failed to create perf header attribute\n"); 2079 return err; 2080 } 2081 } 2082 2083 return err; 2084 } 2085 2086 static bool has_unit(struct evsel *evsel) 2087 { 2088 return evsel->unit && *evsel->unit; 2089 } 2090 2091 static bool has_scale(struct evsel *evsel) 2092 { 2093 return evsel->scale != 1; 2094 } 2095 2096 int perf_event__synthesize_extra_attr(const struct perf_tool *tool, struct evlist *evsel_list, 2097 perf_event__handler_t process, bool is_pipe) 2098 { 2099 struct evsel *evsel; 2100 int err; 2101 2102 /* 2103 * Synthesize other events stuff not carried within 2104 * attr event - unit, scale, name 2105 */ 2106 evlist__for_each_entry(evsel_list, evsel) { 2107 if (!evsel->supported) 2108 continue; 2109 2110 /* 2111 * Synthesize unit and scale only if it's defined. 2112 */ 2113 if (has_unit(evsel)) { 2114 err = perf_event__synthesize_event_update_unit(tool, evsel, process); 2115 if (err < 0) { 2116 pr_err("Couldn't synthesize evsel unit.\n"); 2117 return err; 2118 } 2119 } 2120 2121 if (has_scale(evsel)) { 2122 err = perf_event__synthesize_event_update_scale(tool, evsel, process); 2123 if (err < 0) { 2124 pr_err("Couldn't synthesize evsel evsel.\n"); 2125 return err; 2126 } 2127 } 2128 2129 if (evsel->core.own_cpus) { 2130 err = perf_event__synthesize_event_update_cpus(tool, evsel, process); 2131 if (err < 0) { 2132 pr_err("Couldn't synthesize evsel cpus.\n"); 2133 return err; 2134 } 2135 } 2136 2137 /* 2138 * Name is needed only for pipe output, 2139 * perf.data carries event names. 2140 */ 2141 if (is_pipe) { 2142 err = perf_event__synthesize_event_update_name(tool, evsel, process); 2143 if (err < 0) { 2144 pr_err("Couldn't synthesize evsel name.\n"); 2145 return err; 2146 } 2147 } 2148 } 2149 return 0; 2150 } 2151 2152 int perf_event__synthesize_attr(const struct perf_tool *tool, struct perf_event_attr *attr, 2153 u32 ids, u64 *id, perf_event__handler_t process) 2154 { 2155 union perf_event *ev; 2156 size_t size; 2157 int err; 2158 2159 size = sizeof(struct perf_event_attr); 2160 size = PERF_ALIGN(size, sizeof(u64)); 2161 size += sizeof(struct perf_event_header); 2162 size += ids * sizeof(u64); 2163 2164 ev = zalloc(size); 2165 2166 if (ev == NULL) 2167 return -ENOMEM; 2168 2169 ev->attr.attr = *attr; 2170 memcpy(perf_record_header_attr_id(ev), id, ids * sizeof(u64)); 2171 2172 ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 2173 ev->attr.header.size = (u16)size; 2174 2175 if (ev->attr.header.size == size) 2176 err = process(tool, ev, NULL, NULL); 2177 else 2178 err = -E2BIG; 2179 2180 free(ev); 2181 2182 return err; 2183 } 2184 2185 #ifdef HAVE_LIBTRACEEVENT 2186 int perf_event__synthesize_tracing_data(const struct perf_tool *tool, int fd, struct evlist *evlist, 2187 perf_event__handler_t process) 2188 { 2189 union perf_event ev; 2190 struct tracing_data *tdata; 2191 ssize_t size = 0, aligned_size = 0, padding; 2192 struct feat_fd ff; 2193 2194 /* 2195 * We are going to store the size of the data followed 2196 * by the data contents. Since the fd descriptor is a pipe, 2197 * we cannot seek back to store the size of the data once 2198 * we know it. Instead we: 2199 * 2200 * - write the tracing data to the temp file 2201 * - get/write the data size to pipe 2202 * - write the tracing data from the temp file 2203 * to the pipe 2204 */ 2205 tdata = tracing_data_get(&evlist->core.entries, fd, true); 2206 if (!tdata) 2207 return -1; 2208 2209 memset(&ev, 0, sizeof(ev.tracing_data)); 2210 2211 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 2212 size = tdata->size; 2213 aligned_size = PERF_ALIGN(size, sizeof(u64)); 2214 padding = aligned_size - size; 2215 ev.tracing_data.header.size = sizeof(ev.tracing_data); 2216 ev.tracing_data.size = aligned_size; 2217 2218 process(tool, &ev, NULL, NULL); 2219 2220 /* 2221 * The put function will copy all the tracing data 2222 * stored in temp file to the pipe. 2223 */ 2224 tracing_data_put(tdata); 2225 2226 ff = (struct feat_fd){ .fd = fd }; 2227 if (write_padded(&ff, NULL, 0, padding)) 2228 return -1; 2229 2230 return aligned_size; 2231 } 2232 #endif 2233 2234 int perf_event__synthesize_build_id(const struct perf_tool *tool, 2235 struct perf_sample *sample, 2236 struct machine *machine, 2237 perf_event__handler_t process, 2238 const struct evsel *evsel, 2239 __u16 misc, 2240 const struct build_id *bid, 2241 const char *filename) 2242 { 2243 union perf_event ev; 2244 size_t len; 2245 2246 len = sizeof(ev.build_id) + strlen(filename) + 1; 2247 len = PERF_ALIGN(len, sizeof(u64)); 2248 2249 memset(&ev, 0, len); 2250 2251 ev.build_id.size = min(bid->size, sizeof(ev.build_id.build_id)); 2252 memcpy(ev.build_id.build_id, bid->data, ev.build_id.size); 2253 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 2254 ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE; 2255 ev.build_id.pid = machine->pid; 2256 ev.build_id.header.size = len; 2257 strcpy(ev.build_id.filename, filename); 2258 2259 if (evsel) { 2260 void *array = &ev; 2261 int ret; 2262 2263 array += ev.header.size; 2264 ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample); 2265 if (ret < 0) 2266 return ret; 2267 2268 if (ret & 7) { 2269 pr_err("Bad id sample size %d\n", ret); 2270 return -EINVAL; 2271 } 2272 2273 ev.header.size += ret; 2274 } 2275 2276 return process(tool, &ev, sample, machine); 2277 } 2278 2279 int perf_event__synthesize_mmap2_build_id(const struct perf_tool *tool, 2280 struct perf_sample *sample, 2281 struct machine *machine, 2282 perf_event__handler_t process, 2283 const struct evsel *evsel, 2284 __u16 misc, 2285 __u32 pid, __u32 tid, 2286 __u64 start, __u64 len, __u64 pgoff, 2287 const struct build_id *bid, 2288 __u32 prot, __u32 flags, 2289 const char *filename) 2290 { 2291 union perf_event ev; 2292 size_t ev_len; 2293 void *array; 2294 int ret; 2295 2296 ev_len = sizeof(ev.mmap2) - sizeof(ev.mmap2.filename) + strlen(filename) + 1; 2297 ev_len = PERF_ALIGN(ev_len, sizeof(u64)); 2298 2299 memset(&ev, 0, ev_len); 2300 2301 ev.mmap2.header.type = PERF_RECORD_MMAP2; 2302 ev.mmap2.header.misc = misc | PERF_RECORD_MISC_MMAP_BUILD_ID; 2303 ev.mmap2.header.size = ev_len; 2304 2305 ev.mmap2.pid = pid; 2306 ev.mmap2.tid = tid; 2307 ev.mmap2.start = start; 2308 ev.mmap2.len = len; 2309 ev.mmap2.pgoff = pgoff; 2310 2311 ev.mmap2.build_id_size = min(bid->size, sizeof(ev.mmap2.build_id)); 2312 memcpy(ev.mmap2.build_id, bid->data, ev.mmap2.build_id_size); 2313 2314 ev.mmap2.prot = prot; 2315 ev.mmap2.flags = flags; 2316 2317 memcpy(ev.mmap2.filename, filename, min(strlen(filename), sizeof(ev.mmap.filename))); 2318 2319 array = &ev; 2320 array += ev.header.size; 2321 ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample); 2322 if (ret < 0) 2323 return ret; 2324 2325 if (ret & 7) { 2326 pr_err("Bad id sample size %d\n", ret); 2327 return -EINVAL; 2328 } 2329 2330 ev.header.size += ret; 2331 2332 return process(tool, &ev, sample, machine); 2333 } 2334 2335 int perf_event__synthesize_stat_events(struct perf_stat_config *config, const struct perf_tool *tool, 2336 struct evlist *evlist, perf_event__handler_t process, bool attrs) 2337 { 2338 int err; 2339 2340 if (attrs) { 2341 err = perf_event__synthesize_attrs(tool, evlist, process); 2342 if (err < 0) { 2343 pr_err("Couldn't synthesize attrs.\n"); 2344 return err; 2345 } 2346 } 2347 2348 err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs); 2349 err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL); 2350 if (err < 0) { 2351 pr_err("Couldn't synthesize thread map.\n"); 2352 return err; 2353 } 2354 2355 err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL); 2356 if (err < 0) { 2357 pr_err("Couldn't synthesize thread map.\n"); 2358 return err; 2359 } 2360 2361 err = perf_event__synthesize_stat_config(tool, config, process, NULL); 2362 if (err < 0) { 2363 pr_err("Couldn't synthesize config.\n"); 2364 return err; 2365 } 2366 2367 return 0; 2368 } 2369 2370 extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE]; 2371 2372 int perf_event__synthesize_features(const struct perf_tool *tool, struct perf_session *session, 2373 struct evlist *evlist, perf_event__handler_t process) 2374 { 2375 struct perf_header *header = &session->header; 2376 struct perf_record_header_feature *fe; 2377 struct feat_fd ff; 2378 size_t sz, sz_hdr; 2379 int feat, ret; 2380 2381 sz_hdr = sizeof(fe->header); 2382 sz = sizeof(union perf_event); 2383 /* get a nice alignment */ 2384 sz = PERF_ALIGN(sz, page_size); 2385 2386 memset(&ff, 0, sizeof(ff)); 2387 2388 ff.buf = malloc(sz); 2389 if (!ff.buf) 2390 return -ENOMEM; 2391 2392 ff.size = sz - sz_hdr; 2393 ff.ph = &session->header; 2394 2395 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 2396 if (!feat_ops[feat].synthesize) { 2397 pr_debug("No record header feature for header :%d\n", feat); 2398 continue; 2399 } 2400 2401 ff.offset = sizeof(*fe); 2402 2403 ret = feat_ops[feat].write(&ff, evlist); 2404 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) { 2405 pr_debug("Error writing feature\n"); 2406 continue; 2407 } 2408 /* ff.buf may have changed due to realloc in do_write() */ 2409 fe = ff.buf; 2410 memset(fe, 0, sizeof(*fe)); 2411 2412 fe->feat_id = feat; 2413 fe->header.type = PERF_RECORD_HEADER_FEATURE; 2414 fe->header.size = ff.offset; 2415 2416 ret = process(tool, ff.buf, NULL, NULL); 2417 if (ret) { 2418 free(ff.buf); 2419 return ret; 2420 } 2421 } 2422 2423 /* Send HEADER_LAST_FEATURE mark. */ 2424 fe = ff.buf; 2425 fe->feat_id = HEADER_LAST_FEATURE; 2426 fe->header.type = PERF_RECORD_HEADER_FEATURE; 2427 fe->header.size = sizeof(*fe); 2428 2429 ret = process(tool, ff.buf, NULL, NULL); 2430 2431 free(ff.buf); 2432 return ret; 2433 } 2434 2435 int perf_event__synthesize_for_pipe(const struct perf_tool *tool, 2436 struct perf_session *session, 2437 struct perf_data *data, 2438 perf_event__handler_t process) 2439 { 2440 int err; 2441 int ret = 0; 2442 struct evlist *evlist = session->evlist; 2443 2444 /* 2445 * We need to synthesize events first, because some 2446 * features works on top of them (on report side). 2447 */ 2448 err = perf_event__synthesize_attrs(tool, evlist, process); 2449 if (err < 0) { 2450 pr_err("Couldn't synthesize attrs.\n"); 2451 return err; 2452 } 2453 ret += err; 2454 2455 err = perf_event__synthesize_features(tool, session, evlist, process); 2456 if (err < 0) { 2457 pr_err("Couldn't synthesize features.\n"); 2458 return err; 2459 } 2460 ret += err; 2461 2462 #ifdef HAVE_LIBTRACEEVENT 2463 if (have_tracepoints(&evlist->core.entries)) { 2464 int fd = perf_data__fd(data); 2465 2466 /* 2467 * FIXME err <= 0 here actually means that 2468 * there were no tracepoints so its not really 2469 * an error, just that we don't need to 2470 * synthesize anything. We really have to 2471 * return this more properly and also 2472 * propagate errors that now are calling die() 2473 */ 2474 err = perf_event__synthesize_tracing_data(tool, fd, evlist, 2475 process); 2476 if (err <= 0) { 2477 pr_err("Couldn't record tracing data.\n"); 2478 return err; 2479 } 2480 ret += err; 2481 } 2482 #else 2483 (void)data; 2484 #endif 2485 2486 return ret; 2487 } 2488 2489 int parse_synth_opt(char *synth) 2490 { 2491 char *p, *q; 2492 int ret = 0; 2493 2494 if (synth == NULL) 2495 return -1; 2496 2497 for (q = synth; (p = strsep(&q, ",")); p = q) { 2498 if (!strcasecmp(p, "no") || !strcasecmp(p, "none")) 2499 return 0; 2500 2501 if (!strcasecmp(p, "all")) 2502 return PERF_SYNTH_ALL; 2503 2504 if (!strcasecmp(p, "task")) 2505 ret |= PERF_SYNTH_TASK; 2506 else if (!strcasecmp(p, "mmap")) 2507 ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP; 2508 else if (!strcasecmp(p, "cgroup")) 2509 ret |= PERF_SYNTH_CGROUP; 2510 else 2511 return -1; 2512 } 2513 2514 return ret; 2515 } 2516