1 // SPDX-License-Identifier: GPL-2.0-only 2 3 #include "util/cgroup.h" 4 #include "util/data.h" 5 #include "util/debug.h" 6 #include "util/dso.h" 7 #include "util/event.h" 8 #include "util/evlist.h" 9 #include "util/machine.h" 10 #include "util/map.h" 11 #include "util/map_symbol.h" 12 #include "util/branch.h" 13 #include "util/memswap.h" 14 #include "util/namespaces.h" 15 #include "util/session.h" 16 #include "util/stat.h" 17 #include "util/symbol.h" 18 #include "util/synthetic-events.h" 19 #include "util/target.h" 20 #include "util/time-utils.h" 21 #include <linux/bitops.h> 22 #include <linux/kernel.h> 23 #include <linux/string.h> 24 #include <linux/zalloc.h> 25 #include <linux/perf_event.h> 26 #include <asm/bug.h> 27 #include <perf/evsel.h> 28 #include <perf/cpumap.h> 29 #include <internal/lib.h> // page_size 30 #include <internal/threadmap.h> 31 #include <perf/threadmap.h> 32 #include <symbol/kallsyms.h> 33 #include <dirent.h> 34 #include <errno.h> 35 #include <inttypes.h> 36 #include <stdio.h> 37 #include <string.h> 38 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */ 39 #include <api/fs/fs.h> 40 #include <api/io.h> 41 #include <sys/types.h> 42 #include <sys/stat.h> 43 #include <fcntl.h> 44 #include <unistd.h> 45 46 #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500 47 48 unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT; 49 50 int perf_tool__process_synth_event(const struct perf_tool *tool, 51 union perf_event *event, 52 struct machine *machine, 53 perf_event__handler_t process) 54 { 55 struct perf_sample synth_sample = { 56 .pid = -1, 57 .tid = -1, 58 .time = -1, 59 .stream_id = -1, 60 .cpu = -1, 61 .period = 1, 62 .cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK, 63 }; 64 65 return process(tool, event, &synth_sample, machine); 66 }; 67 68 /* 69 * Assumes that the first 4095 bytes of /proc/pid/stat contains 70 * the comm, tgid and ppid. 71 */ 72 static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len, 73 pid_t *tgid, pid_t *ppid, bool *kernel) 74 { 75 char bf[4096]; 76 int fd; 77 size_t size = 0; 78 ssize_t n; 79 char *name, *tgids, *ppids, *vmpeak, *threads; 80 81 *tgid = -1; 82 *ppid = -1; 83 84 if (pid) 85 snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid); 86 else 87 snprintf(bf, sizeof(bf), "/proc/%d/status", tid); 88 89 fd = open(bf, O_RDONLY); 90 if (fd < 0) { 91 pr_debug("couldn't open %s\n", bf); 92 return -1; 93 } 94 95 n = read(fd, bf, sizeof(bf) - 1); 96 close(fd); 97 if (n <= 0) { 98 pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n", 99 tid); 100 return -1; 101 } 102 bf[n] = '\0'; 103 104 name = strstr(bf, "Name:"); 105 tgids = strstr(name ?: bf, "Tgid:"); 106 ppids = strstr(tgids ?: bf, "PPid:"); 107 vmpeak = strstr(ppids ?: bf, "VmPeak:"); 108 109 if (vmpeak) 110 threads = NULL; 111 else 112 threads = strstr(ppids ?: bf, "Threads:"); 113 114 if (name) { 115 char *nl; 116 117 name = skip_spaces(name + 5); /* strlen("Name:") */ 118 nl = strchr(name, '\n'); 119 if (nl) 120 *nl = '\0'; 121 122 size = strlen(name); 123 if (size >= len) 124 size = len - 1; 125 memcpy(comm, name, size); 126 comm[size] = '\0'; 127 } else { 128 pr_debug("Name: string not found for pid %d\n", tid); 129 } 130 131 if (tgids) { 132 tgids += 5; /* strlen("Tgid:") */ 133 *tgid = atoi(tgids); 134 } else { 135 pr_debug("Tgid: string not found for pid %d\n", tid); 136 } 137 138 if (ppids) { 139 ppids += 5; /* strlen("PPid:") */ 140 *ppid = atoi(ppids); 141 } else { 142 pr_debug("PPid: string not found for pid %d\n", tid); 143 } 144 145 if (!vmpeak && threads) 146 *kernel = true; 147 else 148 *kernel = false; 149 150 return 0; 151 } 152 153 static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid, 154 struct machine *machine, 155 pid_t *tgid, pid_t *ppid, bool *kernel) 156 { 157 size_t size; 158 159 *ppid = -1; 160 161 memset(&event->comm, 0, sizeof(event->comm)); 162 163 if (machine__is_host(machine)) { 164 if (perf_event__get_comm_ids(pid, tid, event->comm.comm, 165 sizeof(event->comm.comm), 166 tgid, ppid, kernel) != 0) { 167 return -1; 168 } 169 } else { 170 *tgid = machine->pid; 171 } 172 173 if (*tgid < 0) 174 return -1; 175 176 event->comm.pid = *tgid; 177 event->comm.header.type = PERF_RECORD_COMM; 178 179 size = strlen(event->comm.comm) + 1; 180 size = PERF_ALIGN(size, sizeof(u64)); 181 memset(event->comm.comm + size, 0, machine->id_hdr_size); 182 event->comm.header.size = (sizeof(event->comm) - 183 (sizeof(event->comm.comm) - size) + 184 machine->id_hdr_size); 185 event->comm.tid = tid; 186 187 return 0; 188 } 189 190 pid_t perf_event__synthesize_comm(const struct perf_tool *tool, 191 union perf_event *event, pid_t pid, 192 perf_event__handler_t process, 193 struct machine *machine) 194 { 195 pid_t tgid, ppid; 196 bool kernel_thread; 197 198 if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid, 199 &kernel_thread) != 0) 200 return -1; 201 202 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 203 return -1; 204 205 return tgid; 206 } 207 208 static void perf_event__get_ns_link_info(pid_t pid, const char *ns, 209 struct perf_ns_link_info *ns_link_info) 210 { 211 struct stat64 st; 212 char proc_ns[128]; 213 214 sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns); 215 if (stat64(proc_ns, &st) == 0) { 216 ns_link_info->dev = st.st_dev; 217 ns_link_info->ino = st.st_ino; 218 } 219 } 220 221 int perf_event__synthesize_namespaces(const struct perf_tool *tool, 222 union perf_event *event, 223 pid_t pid, pid_t tgid, 224 perf_event__handler_t process, 225 struct machine *machine) 226 { 227 u32 idx; 228 struct perf_ns_link_info *ns_link_info; 229 230 if (!tool || !tool->namespace_events) 231 return 0; 232 233 memset(&event->namespaces, 0, (sizeof(event->namespaces) + 234 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 235 machine->id_hdr_size)); 236 237 event->namespaces.pid = tgid; 238 event->namespaces.tid = pid; 239 240 event->namespaces.nr_namespaces = NR_NAMESPACES; 241 242 ns_link_info = event->namespaces.link_info; 243 244 for (idx = 0; idx < event->namespaces.nr_namespaces; idx++) 245 perf_event__get_ns_link_info(pid, perf_ns__name(idx), 246 &ns_link_info[idx]); 247 248 event->namespaces.header.type = PERF_RECORD_NAMESPACES; 249 250 event->namespaces.header.size = (sizeof(event->namespaces) + 251 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 252 machine->id_hdr_size); 253 254 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 255 return -1; 256 257 return 0; 258 } 259 260 static int perf_event__synthesize_fork(const struct perf_tool *tool, 261 union perf_event *event, 262 pid_t pid, pid_t tgid, pid_t ppid, 263 perf_event__handler_t process, 264 struct machine *machine) 265 { 266 memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); 267 268 /* 269 * for main thread set parent to ppid from status file. For other 270 * threads set parent pid to main thread. ie., assume main thread 271 * spawns all threads in a process 272 */ 273 if (tgid == pid) { 274 event->fork.ppid = ppid; 275 event->fork.ptid = ppid; 276 } else { 277 event->fork.ppid = tgid; 278 event->fork.ptid = tgid; 279 } 280 event->fork.pid = tgid; 281 event->fork.tid = pid; 282 event->fork.header.type = PERF_RECORD_FORK; 283 event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC; 284 285 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); 286 287 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) 288 return -1; 289 290 return 0; 291 } 292 293 static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end, 294 u32 *prot, u32 *flags, __u64 *offset, 295 u32 *maj, u32 *min, 296 __u64 *inode, 297 ssize_t pathname_size, char *pathname) 298 { 299 __u64 temp; 300 int ch; 301 char *start_pathname = pathname; 302 303 if (io__get_hex(io, start) != '-') 304 return false; 305 if (io__get_hex(io, end) != ' ') 306 return false; 307 308 /* map protection and flags bits */ 309 *prot = 0; 310 ch = io__get_char(io); 311 if (ch == 'r') 312 *prot |= PROT_READ; 313 else if (ch != '-') 314 return false; 315 ch = io__get_char(io); 316 if (ch == 'w') 317 *prot |= PROT_WRITE; 318 else if (ch != '-') 319 return false; 320 ch = io__get_char(io); 321 if (ch == 'x') 322 *prot |= PROT_EXEC; 323 else if (ch != '-') 324 return false; 325 ch = io__get_char(io); 326 if (ch == 's') 327 *flags = MAP_SHARED; 328 else if (ch == 'p') 329 *flags = MAP_PRIVATE; 330 else 331 return false; 332 if (io__get_char(io) != ' ') 333 return false; 334 335 if (io__get_hex(io, offset) != ' ') 336 return false; 337 338 if (io__get_hex(io, &temp) != ':') 339 return false; 340 *maj = temp; 341 if (io__get_hex(io, &temp) != ' ') 342 return false; 343 *min = temp; 344 345 ch = io__get_dec(io, inode); 346 if (ch != ' ') { 347 *pathname = '\0'; 348 return ch == '\n'; 349 } 350 do { 351 ch = io__get_char(io); 352 } while (ch == ' '); 353 while (true) { 354 if (ch < 0) 355 return false; 356 if (ch == '\0' || ch == '\n' || 357 (pathname + 1 - start_pathname) >= pathname_size) { 358 *pathname = '\0'; 359 return true; 360 } 361 *pathname++ = ch; 362 ch = io__get_char(io); 363 } 364 } 365 366 static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event, 367 struct machine *machine, 368 bool is_kernel) 369 { 370 struct build_id bid; 371 struct nsinfo *nsi; 372 struct nscookie nc; 373 struct dso *dso = NULL; 374 struct dso_id id; 375 int rc; 376 377 if (is_kernel) { 378 rc = sysfs__read_build_id("/sys/kernel/notes", &bid); 379 goto out; 380 } 381 382 id.maj = event->maj; 383 id.min = event->min; 384 id.ino = event->ino; 385 id.ino_generation = event->ino_generation; 386 387 dso = dsos__findnew_id(&machine->dsos, event->filename, &id); 388 if (dso && dso__has_build_id(dso)) { 389 bid = *dso__bid(dso); 390 rc = 0; 391 goto out; 392 } 393 394 nsi = nsinfo__new(event->pid); 395 nsinfo__mountns_enter(nsi, &nc); 396 397 rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1; 398 399 nsinfo__mountns_exit(&nc); 400 nsinfo__put(nsi); 401 402 out: 403 if (rc == 0) { 404 memcpy(event->build_id, bid.data, sizeof(bid.data)); 405 event->build_id_size = (u8) bid.size; 406 event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID; 407 event->__reserved_1 = 0; 408 event->__reserved_2 = 0; 409 410 if (dso && !dso__has_build_id(dso)) 411 dso__set_build_id(dso, &bid); 412 } else { 413 if (event->filename[0] == '/') { 414 pr_debug2("Failed to read build ID for %s\n", 415 event->filename); 416 } 417 } 418 dso__put(dso); 419 } 420 421 int perf_event__synthesize_mmap_events(const struct perf_tool *tool, 422 union perf_event *event, 423 pid_t pid, pid_t tgid, 424 perf_event__handler_t process, 425 struct machine *machine, 426 bool mmap_data) 427 { 428 unsigned long long t; 429 char bf[BUFSIZ]; 430 struct io io; 431 bool truncation = false; 432 unsigned long long timeout = proc_map_timeout * 1000000ULL; 433 int rc = 0; 434 const char *hugetlbfs_mnt = hugetlbfs__mountpoint(); 435 int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0; 436 437 if (machine__is_default_guest(machine)) 438 return 0; 439 440 snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps", 441 machine->root_dir, pid, pid); 442 443 io.fd = open(bf, O_RDONLY, 0); 444 if (io.fd < 0) { 445 /* 446 * We raced with a task exiting - just return: 447 */ 448 pr_debug("couldn't open %s\n", bf); 449 return -1; 450 } 451 io__init(&io, io.fd, bf, sizeof(bf)); 452 453 event->header.type = PERF_RECORD_MMAP2; 454 t = rdclock(); 455 456 while (!io.eof) { 457 static const char anonstr[] = "//anon"; 458 size_t size, aligned_size; 459 460 /* ensure null termination since stack will be reused. */ 461 event->mmap2.filename[0] = '\0'; 462 463 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ 464 if (!read_proc_maps_line(&io, 465 &event->mmap2.start, 466 &event->mmap2.len, 467 &event->mmap2.prot, 468 &event->mmap2.flags, 469 &event->mmap2.pgoff, 470 &event->mmap2.maj, 471 &event->mmap2.min, 472 &event->mmap2.ino, 473 sizeof(event->mmap2.filename), 474 event->mmap2.filename)) 475 continue; 476 477 if ((rdclock() - t) > timeout) { 478 pr_warning("Reading %s/proc/%d/task/%d/maps time out. " 479 "You may want to increase " 480 "the time limit by --proc-map-timeout\n", 481 machine->root_dir, pid, pid); 482 truncation = true; 483 goto out; 484 } 485 486 event->mmap2.ino_generation = 0; 487 488 /* 489 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c 490 */ 491 if (machine__is_host(machine)) 492 event->header.misc = PERF_RECORD_MISC_USER; 493 else 494 event->header.misc = PERF_RECORD_MISC_GUEST_USER; 495 496 if ((event->mmap2.prot & PROT_EXEC) == 0) { 497 if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0) 498 continue; 499 500 event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; 501 } 502 503 out: 504 if (truncation) 505 event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT; 506 507 if (!strcmp(event->mmap2.filename, "")) 508 strcpy(event->mmap2.filename, anonstr); 509 510 if (hugetlbfs_mnt_len && 511 !strncmp(event->mmap2.filename, hugetlbfs_mnt, 512 hugetlbfs_mnt_len)) { 513 strcpy(event->mmap2.filename, anonstr); 514 event->mmap2.flags |= MAP_HUGETLB; 515 } 516 517 size = strlen(event->mmap2.filename) + 1; 518 aligned_size = PERF_ALIGN(size, sizeof(u64)); 519 event->mmap2.len -= event->mmap.start; 520 event->mmap2.header.size = (sizeof(event->mmap2) - 521 (sizeof(event->mmap2.filename) - aligned_size)); 522 memset(event->mmap2.filename + size, 0, machine->id_hdr_size + 523 (aligned_size - size)); 524 event->mmap2.header.size += machine->id_hdr_size; 525 event->mmap2.pid = tgid; 526 event->mmap2.tid = pid; 527 528 if (symbol_conf.buildid_mmap2) 529 perf_record_mmap2__read_build_id(&event->mmap2, machine, false); 530 531 if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { 532 rc = -1; 533 break; 534 } 535 536 if (truncation) 537 break; 538 } 539 540 close(io.fd); 541 return rc; 542 } 543 544 #ifdef HAVE_FILE_HANDLE 545 static int perf_event__synthesize_cgroup(const struct perf_tool *tool, 546 union perf_event *event, 547 char *path, size_t mount_len, 548 perf_event__handler_t process, 549 struct machine *machine) 550 { 551 size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path); 552 size_t path_len = strlen(path) - mount_len + 1; 553 struct { 554 struct file_handle fh; 555 uint64_t cgroup_id; 556 } handle; 557 int mount_id; 558 559 while (path_len % sizeof(u64)) 560 path[mount_len + path_len++] = '\0'; 561 562 memset(&event->cgroup, 0, event_size); 563 564 event->cgroup.header.type = PERF_RECORD_CGROUP; 565 event->cgroup.header.size = event_size + path_len + machine->id_hdr_size; 566 567 handle.fh.handle_bytes = sizeof(handle.cgroup_id); 568 if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) { 569 pr_debug("stat failed: %s\n", path); 570 return -1; 571 } 572 573 event->cgroup.id = handle.cgroup_id; 574 strncpy(event->cgroup.path, path + mount_len, path_len); 575 memset(event->cgroup.path + path_len, 0, machine->id_hdr_size); 576 577 if (perf_tool__process_synth_event(tool, event, machine, process) < 0) { 578 pr_debug("process synth event failed\n"); 579 return -1; 580 } 581 582 return 0; 583 } 584 585 static int perf_event__walk_cgroup_tree(const struct perf_tool *tool, 586 union perf_event *event, 587 char *path, size_t mount_len, 588 perf_event__handler_t process, 589 struct machine *machine) 590 { 591 size_t pos = strlen(path); 592 DIR *d; 593 struct dirent *dent; 594 int ret = 0; 595 596 if (perf_event__synthesize_cgroup(tool, event, path, mount_len, 597 process, machine) < 0) 598 return -1; 599 600 d = opendir(path); 601 if (d == NULL) { 602 pr_debug("failed to open directory: %s\n", path); 603 return -1; 604 } 605 606 while ((dent = readdir(d)) != NULL) { 607 if (dent->d_type != DT_DIR) 608 continue; 609 if (!strcmp(dent->d_name, ".") || 610 !strcmp(dent->d_name, "..")) 611 continue; 612 613 /* any sane path should be less than PATH_MAX */ 614 if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX) 615 continue; 616 617 if (path[pos - 1] != '/') 618 strcat(path, "/"); 619 strcat(path, dent->d_name); 620 621 ret = perf_event__walk_cgroup_tree(tool, event, path, 622 mount_len, process, machine); 623 if (ret < 0) 624 break; 625 626 path[pos] = '\0'; 627 } 628 629 closedir(d); 630 return ret; 631 } 632 633 int perf_event__synthesize_cgroups(const struct perf_tool *tool, 634 perf_event__handler_t process, 635 struct machine *machine) 636 { 637 union perf_event event; 638 char cgrp_root[PATH_MAX]; 639 size_t mount_len; /* length of mount point in the path */ 640 641 if (!tool || !tool->cgroup_events) 642 return 0; 643 644 if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) { 645 pr_debug("cannot find cgroup mount point\n"); 646 return -1; 647 } 648 649 mount_len = strlen(cgrp_root); 650 /* make sure the path starts with a slash (after mount point) */ 651 strcat(cgrp_root, "/"); 652 653 if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len, 654 process, machine) < 0) 655 return -1; 656 657 return 0; 658 } 659 #else 660 int perf_event__synthesize_cgroups(const struct perf_tool *tool __maybe_unused, 661 perf_event__handler_t process __maybe_unused, 662 struct machine *machine __maybe_unused) 663 { 664 return -1; 665 } 666 #endif 667 668 struct perf_event__synthesize_modules_maps_cb_args { 669 const struct perf_tool *tool; 670 perf_event__handler_t process; 671 struct machine *machine; 672 union perf_event *event; 673 }; 674 675 static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data) 676 { 677 struct perf_event__synthesize_modules_maps_cb_args *args = data; 678 union perf_event *event = args->event; 679 struct dso *dso; 680 size_t size; 681 682 if (!__map__is_kmodule(map)) 683 return 0; 684 685 dso = map__dso(map); 686 if (symbol_conf.buildid_mmap2) { 687 size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64)); 688 event->mmap2.header.type = PERF_RECORD_MMAP2; 689 event->mmap2.header.size = (sizeof(event->mmap2) - 690 (sizeof(event->mmap2.filename) - size)); 691 memset(event->mmap2.filename + size, 0, args->machine->id_hdr_size); 692 event->mmap2.header.size += args->machine->id_hdr_size; 693 event->mmap2.start = map__start(map); 694 event->mmap2.len = map__size(map); 695 event->mmap2.pid = args->machine->pid; 696 697 memcpy(event->mmap2.filename, dso__long_name(dso), dso__long_name_len(dso) + 1); 698 699 perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false); 700 } else { 701 size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64)); 702 event->mmap.header.type = PERF_RECORD_MMAP; 703 event->mmap.header.size = (sizeof(event->mmap) - 704 (sizeof(event->mmap.filename) - size)); 705 memset(event->mmap.filename + size, 0, args->machine->id_hdr_size); 706 event->mmap.header.size += args->machine->id_hdr_size; 707 event->mmap.start = map__start(map); 708 event->mmap.len = map__size(map); 709 event->mmap.pid = args->machine->pid; 710 711 memcpy(event->mmap.filename, dso__long_name(dso), dso__long_name_len(dso) + 1); 712 } 713 714 if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0) 715 return -1; 716 717 return 0; 718 } 719 720 int perf_event__synthesize_modules(const struct perf_tool *tool, perf_event__handler_t process, 721 struct machine *machine) 722 { 723 int rc; 724 struct maps *maps = machine__kernel_maps(machine); 725 struct perf_event__synthesize_modules_maps_cb_args args = { 726 .tool = tool, 727 .process = process, 728 .machine = machine, 729 }; 730 size_t size = symbol_conf.buildid_mmap2 731 ? sizeof(args.event->mmap2) 732 : sizeof(args.event->mmap); 733 734 args.event = zalloc(size + machine->id_hdr_size); 735 if (args.event == NULL) { 736 pr_debug("Not enough memory synthesizing mmap event " 737 "for kernel modules\n"); 738 return -1; 739 } 740 741 /* 742 * kernel uses 0 for user space maps, see kernel/perf_event.c 743 * __perf_event_mmap 744 */ 745 if (machine__is_host(machine)) 746 args.event->header.misc = PERF_RECORD_MISC_KERNEL; 747 else 748 args.event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 749 750 rc = maps__for_each_map(maps, perf_event__synthesize_modules_maps_cb, &args); 751 752 free(args.event); 753 return rc; 754 } 755 756 static int filter_task(const struct dirent *dirent) 757 { 758 return isdigit(dirent->d_name[0]); 759 } 760 761 static int __event__synthesize_thread(union perf_event *comm_event, 762 union perf_event *mmap_event, 763 union perf_event *fork_event, 764 union perf_event *namespaces_event, 765 pid_t pid, int full, perf_event__handler_t process, 766 const struct perf_tool *tool, struct machine *machine, 767 bool needs_mmap, bool mmap_data) 768 { 769 char filename[PATH_MAX]; 770 struct dirent **dirent; 771 pid_t tgid, ppid; 772 int rc = 0; 773 int i, n; 774 775 /* special case: only send one comm event using passed in pid */ 776 if (!full) { 777 tgid = perf_event__synthesize_comm(tool, comm_event, pid, 778 process, machine); 779 780 if (tgid == -1) 781 return -1; 782 783 if (perf_event__synthesize_namespaces(tool, namespaces_event, pid, 784 tgid, process, machine) < 0) 785 return -1; 786 787 /* 788 * send mmap only for thread group leader 789 * see thread__init_maps() 790 */ 791 if (pid == tgid && needs_mmap && 792 perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 793 process, machine, mmap_data)) 794 return -1; 795 796 return 0; 797 } 798 799 if (machine__is_default_guest(machine)) 800 return 0; 801 802 snprintf(filename, sizeof(filename), "%s/proc/%d/task", 803 machine->root_dir, pid); 804 805 n = scandir(filename, &dirent, filter_task, NULL); 806 if (n < 0) 807 return n; 808 809 for (i = 0; i < n; i++) { 810 char *end; 811 pid_t _pid; 812 bool kernel_thread = false; 813 814 _pid = strtol(dirent[i]->d_name, &end, 10); 815 if (*end) 816 continue; 817 818 /* some threads may exit just after scan, ignore it */ 819 if (perf_event__prepare_comm(comm_event, pid, _pid, machine, 820 &tgid, &ppid, &kernel_thread) != 0) 821 continue; 822 823 rc = -1; 824 if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid, 825 ppid, process, machine) < 0) 826 break; 827 828 if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid, 829 tgid, process, machine) < 0) 830 break; 831 832 /* 833 * Send the prepared comm event 834 */ 835 if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0) 836 break; 837 838 rc = 0; 839 if (_pid == pid && !kernel_thread && needs_mmap) { 840 /* process the parent's maps too */ 841 rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 842 process, machine, mmap_data); 843 if (rc) 844 break; 845 } 846 } 847 848 for (i = 0; i < n; i++) 849 zfree(&dirent[i]); 850 free(dirent); 851 852 return rc; 853 } 854 855 int perf_event__synthesize_thread_map(const struct perf_tool *tool, 856 struct perf_thread_map *threads, 857 perf_event__handler_t process, 858 struct machine *machine, 859 bool needs_mmap, bool mmap_data) 860 { 861 union perf_event *comm_event, *mmap_event, *fork_event; 862 union perf_event *namespaces_event; 863 int err = -1, thread, j; 864 865 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 866 if (comm_event == NULL) 867 goto out; 868 869 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 870 if (mmap_event == NULL) 871 goto out_free_comm; 872 873 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 874 if (fork_event == NULL) 875 goto out_free_mmap; 876 877 namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 878 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 879 machine->id_hdr_size); 880 if (namespaces_event == NULL) 881 goto out_free_fork; 882 883 err = 0; 884 for (thread = 0; thread < threads->nr; ++thread) { 885 if (__event__synthesize_thread(comm_event, mmap_event, 886 fork_event, namespaces_event, 887 perf_thread_map__pid(threads, thread), 0, 888 process, tool, machine, 889 needs_mmap, mmap_data)) { 890 err = -1; 891 break; 892 } 893 894 /* 895 * comm.pid is set to thread group id by 896 * perf_event__synthesize_comm 897 */ 898 if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) { 899 bool need_leader = true; 900 901 /* is thread group leader in thread_map? */ 902 for (j = 0; j < threads->nr; ++j) { 903 if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) { 904 need_leader = false; 905 break; 906 } 907 } 908 909 /* if not, generate events for it */ 910 if (need_leader && 911 __event__synthesize_thread(comm_event, mmap_event, 912 fork_event, namespaces_event, 913 comm_event->comm.pid, 0, 914 process, tool, machine, 915 needs_mmap, mmap_data)) { 916 err = -1; 917 break; 918 } 919 } 920 } 921 free(namespaces_event); 922 out_free_fork: 923 free(fork_event); 924 out_free_mmap: 925 free(mmap_event); 926 out_free_comm: 927 free(comm_event); 928 out: 929 return err; 930 } 931 932 static int __perf_event__synthesize_threads(const struct perf_tool *tool, 933 perf_event__handler_t process, 934 struct machine *machine, 935 bool needs_mmap, 936 bool mmap_data, 937 struct dirent **dirent, 938 int start, 939 int num) 940 { 941 union perf_event *comm_event, *mmap_event, *fork_event; 942 union perf_event *namespaces_event; 943 int err = -1; 944 char *end; 945 pid_t pid; 946 int i; 947 948 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 949 if (comm_event == NULL) 950 goto out; 951 952 mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size); 953 if (mmap_event == NULL) 954 goto out_free_comm; 955 956 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 957 if (fork_event == NULL) 958 goto out_free_mmap; 959 960 namespaces_event = malloc(sizeof(namespaces_event->namespaces) + 961 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) + 962 machine->id_hdr_size); 963 if (namespaces_event == NULL) 964 goto out_free_fork; 965 966 for (i = start; i < start + num; i++) { 967 if (!isdigit(dirent[i]->d_name[0])) 968 continue; 969 970 pid = (pid_t)strtol(dirent[i]->d_name, &end, 10); 971 /* only interested in proper numerical dirents */ 972 if (*end) 973 continue; 974 /* 975 * We may race with exiting thread, so don't stop just because 976 * one thread couldn't be synthesized. 977 */ 978 __event__synthesize_thread(comm_event, mmap_event, fork_event, 979 namespaces_event, pid, 1, process, 980 tool, machine, needs_mmap, mmap_data); 981 } 982 err = 0; 983 984 free(namespaces_event); 985 out_free_fork: 986 free(fork_event); 987 out_free_mmap: 988 free(mmap_event); 989 out_free_comm: 990 free(comm_event); 991 out: 992 return err; 993 } 994 995 struct synthesize_threads_arg { 996 const struct perf_tool *tool; 997 perf_event__handler_t process; 998 struct machine *machine; 999 bool needs_mmap; 1000 bool mmap_data; 1001 struct dirent **dirent; 1002 int num; 1003 int start; 1004 }; 1005 1006 static void *synthesize_threads_worker(void *arg) 1007 { 1008 struct synthesize_threads_arg *args = arg; 1009 1010 __perf_event__synthesize_threads(args->tool, args->process, 1011 args->machine, 1012 args->needs_mmap, args->mmap_data, 1013 args->dirent, 1014 args->start, args->num); 1015 return NULL; 1016 } 1017 1018 int perf_event__synthesize_threads(const struct perf_tool *tool, 1019 perf_event__handler_t process, 1020 struct machine *machine, 1021 bool needs_mmap, bool mmap_data, 1022 unsigned int nr_threads_synthesize) 1023 { 1024 struct synthesize_threads_arg *args = NULL; 1025 pthread_t *synthesize_threads = NULL; 1026 char proc_path[PATH_MAX]; 1027 struct dirent **dirent; 1028 int num_per_thread; 1029 int m, n, i, j; 1030 int thread_nr; 1031 int base = 0; 1032 int err = -1; 1033 1034 1035 if (machine__is_default_guest(machine)) 1036 return 0; 1037 1038 snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir); 1039 n = scandir(proc_path, &dirent, filter_task, NULL); 1040 if (n < 0) 1041 return err; 1042 1043 if (nr_threads_synthesize == UINT_MAX) 1044 thread_nr = sysconf(_SC_NPROCESSORS_ONLN); 1045 else 1046 thread_nr = nr_threads_synthesize; 1047 1048 if (thread_nr <= 1) { 1049 err = __perf_event__synthesize_threads(tool, process, 1050 machine, 1051 needs_mmap, mmap_data, 1052 dirent, base, n); 1053 goto free_dirent; 1054 } 1055 if (thread_nr > n) 1056 thread_nr = n; 1057 1058 synthesize_threads = calloc(thread_nr, sizeof(pthread_t)); 1059 if (synthesize_threads == NULL) 1060 goto free_dirent; 1061 1062 args = calloc(thread_nr, sizeof(*args)); 1063 if (args == NULL) 1064 goto free_threads; 1065 1066 num_per_thread = n / thread_nr; 1067 m = n % thread_nr; 1068 for (i = 0; i < thread_nr; i++) { 1069 args[i].tool = tool; 1070 args[i].process = process; 1071 args[i].machine = machine; 1072 args[i].needs_mmap = needs_mmap; 1073 args[i].mmap_data = mmap_data; 1074 args[i].dirent = dirent; 1075 } 1076 for (i = 0; i < m; i++) { 1077 args[i].num = num_per_thread + 1; 1078 args[i].start = i * args[i].num; 1079 } 1080 if (i != 0) 1081 base = args[i-1].start + args[i-1].num; 1082 for (j = i; j < thread_nr; j++) { 1083 args[j].num = num_per_thread; 1084 args[j].start = base + (j - i) * args[i].num; 1085 } 1086 1087 for (i = 0; i < thread_nr; i++) { 1088 if (pthread_create(&synthesize_threads[i], NULL, 1089 synthesize_threads_worker, &args[i])) 1090 goto out_join; 1091 } 1092 err = 0; 1093 out_join: 1094 for (i = 0; i < thread_nr; i++) 1095 pthread_join(synthesize_threads[i], NULL); 1096 free(args); 1097 free_threads: 1098 free(synthesize_threads); 1099 free_dirent: 1100 for (i = 0; i < n; i++) 1101 zfree(&dirent[i]); 1102 free(dirent); 1103 1104 return err; 1105 } 1106 1107 int __weak perf_event__synthesize_extra_kmaps(const struct perf_tool *tool __maybe_unused, 1108 perf_event__handler_t process __maybe_unused, 1109 struct machine *machine __maybe_unused) 1110 { 1111 return 0; 1112 } 1113 1114 static int __perf_event__synthesize_kernel_mmap(const struct perf_tool *tool, 1115 perf_event__handler_t process, 1116 struct machine *machine) 1117 { 1118 union perf_event *event; 1119 size_t size = symbol_conf.buildid_mmap2 ? 1120 sizeof(event->mmap2) : sizeof(event->mmap); 1121 struct map *map = machine__kernel_map(machine); 1122 struct kmap *kmap; 1123 int err; 1124 1125 if (map == NULL) 1126 return -1; 1127 1128 kmap = map__kmap(map); 1129 if (!kmap->ref_reloc_sym) 1130 return -1; 1131 1132 /* 1133 * We should get this from /sys/kernel/sections/.text, but till that is 1134 * available use this, and after it is use this as a fallback for older 1135 * kernels. 1136 */ 1137 event = zalloc(size + machine->id_hdr_size); 1138 if (event == NULL) { 1139 pr_debug("Not enough memory synthesizing mmap event " 1140 "for kernel modules\n"); 1141 return -1; 1142 } 1143 1144 if (machine__is_host(machine)) { 1145 /* 1146 * kernel uses PERF_RECORD_MISC_USER for user space maps, 1147 * see kernel/perf_event.c __perf_event_mmap 1148 */ 1149 event->header.misc = PERF_RECORD_MISC_KERNEL; 1150 } else { 1151 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 1152 } 1153 1154 if (symbol_conf.buildid_mmap2) { 1155 size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename), 1156 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; 1157 size = PERF_ALIGN(size, sizeof(u64)); 1158 event->mmap2.header.type = PERF_RECORD_MMAP2; 1159 event->mmap2.header.size = (sizeof(event->mmap2) - 1160 (sizeof(event->mmap2.filename) - size) + machine->id_hdr_size); 1161 event->mmap2.pgoff = kmap->ref_reloc_sym->addr; 1162 event->mmap2.start = map__start(map); 1163 event->mmap2.len = map__end(map) - event->mmap.start; 1164 event->mmap2.pid = machine->pid; 1165 1166 perf_record_mmap2__read_build_id(&event->mmap2, machine, true); 1167 } else { 1168 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), 1169 "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; 1170 size = PERF_ALIGN(size, sizeof(u64)); 1171 event->mmap.header.type = PERF_RECORD_MMAP; 1172 event->mmap.header.size = (sizeof(event->mmap) - 1173 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); 1174 event->mmap.pgoff = kmap->ref_reloc_sym->addr; 1175 event->mmap.start = map__start(map); 1176 event->mmap.len = map__end(map) - event->mmap.start; 1177 event->mmap.pid = machine->pid; 1178 } 1179 1180 err = perf_tool__process_synth_event(tool, event, machine, process); 1181 free(event); 1182 1183 return err; 1184 } 1185 1186 int perf_event__synthesize_kernel_mmap(const struct perf_tool *tool, 1187 perf_event__handler_t process, 1188 struct machine *machine) 1189 { 1190 int err; 1191 1192 err = __perf_event__synthesize_kernel_mmap(tool, process, machine); 1193 if (err < 0) 1194 return err; 1195 1196 return perf_event__synthesize_extra_kmaps(tool, process, machine); 1197 } 1198 1199 int perf_event__synthesize_thread_map2(const struct perf_tool *tool, 1200 struct perf_thread_map *threads, 1201 perf_event__handler_t process, 1202 struct machine *machine) 1203 { 1204 union perf_event *event; 1205 int i, err, size; 1206 1207 size = sizeof(event->thread_map); 1208 size += threads->nr * sizeof(event->thread_map.entries[0]); 1209 1210 event = zalloc(size); 1211 if (!event) 1212 return -ENOMEM; 1213 1214 event->header.type = PERF_RECORD_THREAD_MAP; 1215 event->header.size = size; 1216 event->thread_map.nr = threads->nr; 1217 1218 for (i = 0; i < threads->nr; i++) { 1219 struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i]; 1220 char *comm = perf_thread_map__comm(threads, i); 1221 1222 if (!comm) 1223 comm = (char *) ""; 1224 1225 entry->pid = perf_thread_map__pid(threads, i); 1226 strncpy((char *) &entry->comm, comm, sizeof(entry->comm)); 1227 } 1228 1229 err = process(tool, event, NULL, machine); 1230 1231 free(event); 1232 return err; 1233 } 1234 1235 struct synthesize_cpu_map_data { 1236 const struct perf_cpu_map *map; 1237 int nr; 1238 int min_cpu; 1239 int max_cpu; 1240 int has_any_cpu; 1241 int type; 1242 size_t size; 1243 struct perf_record_cpu_map_data *data; 1244 }; 1245 1246 static void synthesize_cpus(struct synthesize_cpu_map_data *data) 1247 { 1248 data->data->type = PERF_CPU_MAP__CPUS; 1249 data->data->cpus_data.nr = data->nr; 1250 for (int i = 0; i < data->nr; i++) 1251 data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu; 1252 } 1253 1254 static void synthesize_mask(struct synthesize_cpu_map_data *data) 1255 { 1256 int idx; 1257 struct perf_cpu cpu; 1258 1259 /* Due to padding, the 4bytes per entry mask variant is always smaller. */ 1260 data->data->type = PERF_CPU_MAP__MASK; 1261 data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu); 1262 data->data->mask32_data.long_size = 4; 1263 1264 perf_cpu_map__for_each_cpu(cpu, idx, data->map) { 1265 int bit_word = cpu.cpu / 32; 1266 u32 bit_mask = 1U << (cpu.cpu & 31); 1267 1268 data->data->mask32_data.mask[bit_word] |= bit_mask; 1269 } 1270 } 1271 1272 static void synthesize_range_cpus(struct synthesize_cpu_map_data *data) 1273 { 1274 data->data->type = PERF_CPU_MAP__RANGE_CPUS; 1275 data->data->range_cpu_data.any_cpu = data->has_any_cpu; 1276 data->data->range_cpu_data.start_cpu = data->min_cpu; 1277 data->data->range_cpu_data.end_cpu = data->max_cpu; 1278 } 1279 1280 static void *cpu_map_data__alloc(struct synthesize_cpu_map_data *syn_data, 1281 size_t header_size) 1282 { 1283 size_t size_cpus, size_mask; 1284 1285 syn_data->nr = perf_cpu_map__nr(syn_data->map); 1286 syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0; 1287 1288 syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu; 1289 syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu; 1290 if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) { 1291 /* A consecutive range of CPUs can be encoded using a range. */ 1292 assert(sizeof(u16) + sizeof(struct perf_record_range_cpu_map) == sizeof(u64)); 1293 syn_data->type = PERF_CPU_MAP__RANGE_CPUS; 1294 syn_data->size = header_size + sizeof(u64); 1295 return zalloc(syn_data->size); 1296 } 1297 1298 size_cpus = sizeof(u16) + sizeof(struct cpu_map_entries) + syn_data->nr * sizeof(u16); 1299 /* Due to padding, the 4bytes per entry mask variant is always smaller. */ 1300 size_mask = sizeof(u16) + sizeof(struct perf_record_mask_cpu_map32) + 1301 BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32); 1302 if (syn_data->has_any_cpu || size_cpus < size_mask) { 1303 /* Follow the CPU map encoding. */ 1304 syn_data->type = PERF_CPU_MAP__CPUS; 1305 syn_data->size = header_size + PERF_ALIGN(size_cpus, sizeof(u64)); 1306 return zalloc(syn_data->size); 1307 } 1308 /* Encode using a bitmask. */ 1309 syn_data->type = PERF_CPU_MAP__MASK; 1310 syn_data->size = header_size + PERF_ALIGN(size_mask, sizeof(u64)); 1311 return zalloc(syn_data->size); 1312 } 1313 1314 static void cpu_map_data__synthesize(struct synthesize_cpu_map_data *data) 1315 { 1316 switch (data->type) { 1317 case PERF_CPU_MAP__CPUS: 1318 synthesize_cpus(data); 1319 break; 1320 case PERF_CPU_MAP__MASK: 1321 synthesize_mask(data); 1322 break; 1323 case PERF_CPU_MAP__RANGE_CPUS: 1324 synthesize_range_cpus(data); 1325 break; 1326 default: 1327 break; 1328 } 1329 } 1330 1331 static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map) 1332 { 1333 struct synthesize_cpu_map_data syn_data = { .map = map }; 1334 struct perf_record_cpu_map *event; 1335 1336 1337 event = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header)); 1338 if (!event) 1339 return NULL; 1340 1341 syn_data.data = &event->data; 1342 event->header.type = PERF_RECORD_CPU_MAP; 1343 event->header.size = syn_data.size; 1344 cpu_map_data__synthesize(&syn_data); 1345 return event; 1346 } 1347 1348 1349 int perf_event__synthesize_cpu_map(const struct perf_tool *tool, 1350 const struct perf_cpu_map *map, 1351 perf_event__handler_t process, 1352 struct machine *machine) 1353 { 1354 struct perf_record_cpu_map *event; 1355 int err; 1356 1357 event = cpu_map_event__new(map); 1358 if (!event) 1359 return -ENOMEM; 1360 1361 err = process(tool, (union perf_event *) event, NULL, machine); 1362 1363 free(event); 1364 return err; 1365 } 1366 1367 int perf_event__synthesize_stat_config(const struct perf_tool *tool, 1368 struct perf_stat_config *config, 1369 perf_event__handler_t process, 1370 struct machine *machine) 1371 { 1372 struct perf_record_stat_config *event; 1373 int size, i = 0, err; 1374 1375 size = sizeof(*event); 1376 size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0])); 1377 1378 event = zalloc(size); 1379 if (!event) 1380 return -ENOMEM; 1381 1382 event->header.type = PERF_RECORD_STAT_CONFIG; 1383 event->header.size = size; 1384 event->nr = PERF_STAT_CONFIG_TERM__MAX; 1385 1386 #define ADD(__term, __val) \ 1387 event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term; \ 1388 event->data[i].val = __val; \ 1389 i++; 1390 1391 ADD(AGGR_MODE, config->aggr_mode) 1392 ADD(INTERVAL, config->interval) 1393 ADD(SCALE, config->scale) 1394 ADD(AGGR_LEVEL, config->aggr_level) 1395 1396 WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX, 1397 "stat config terms unbalanced\n"); 1398 #undef ADD 1399 1400 err = process(tool, (union perf_event *) event, NULL, machine); 1401 1402 free(event); 1403 return err; 1404 } 1405 1406 int perf_event__synthesize_stat(const struct perf_tool *tool, 1407 struct perf_cpu cpu, u32 thread, u64 id, 1408 struct perf_counts_values *count, 1409 perf_event__handler_t process, 1410 struct machine *machine) 1411 { 1412 struct perf_record_stat event; 1413 1414 event.header.type = PERF_RECORD_STAT; 1415 event.header.size = sizeof(event); 1416 event.header.misc = 0; 1417 1418 event.id = id; 1419 event.cpu = cpu.cpu; 1420 event.thread = thread; 1421 event.val = count->val; 1422 event.ena = count->ena; 1423 event.run = count->run; 1424 1425 return process(tool, (union perf_event *) &event, NULL, machine); 1426 } 1427 1428 int perf_event__synthesize_stat_round(const struct perf_tool *tool, 1429 u64 evtime, u64 type, 1430 perf_event__handler_t process, 1431 struct machine *machine) 1432 { 1433 struct perf_record_stat_round event; 1434 1435 event.header.type = PERF_RECORD_STAT_ROUND; 1436 event.header.size = sizeof(event); 1437 event.header.misc = 0; 1438 1439 event.time = evtime; 1440 event.type = type; 1441 1442 return process(tool, (union perf_event *) &event, NULL, machine); 1443 } 1444 1445 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format) 1446 { 1447 size_t sz, result = sizeof(struct perf_record_sample); 1448 1449 if (type & PERF_SAMPLE_IDENTIFIER) 1450 result += sizeof(u64); 1451 1452 if (type & PERF_SAMPLE_IP) 1453 result += sizeof(u64); 1454 1455 if (type & PERF_SAMPLE_TID) 1456 result += sizeof(u64); 1457 1458 if (type & PERF_SAMPLE_TIME) 1459 result += sizeof(u64); 1460 1461 if (type & PERF_SAMPLE_ADDR) 1462 result += sizeof(u64); 1463 1464 if (type & PERF_SAMPLE_ID) 1465 result += sizeof(u64); 1466 1467 if (type & PERF_SAMPLE_STREAM_ID) 1468 result += sizeof(u64); 1469 1470 if (type & PERF_SAMPLE_CPU) 1471 result += sizeof(u64); 1472 1473 if (type & PERF_SAMPLE_PERIOD) 1474 result += sizeof(u64); 1475 1476 if (type & PERF_SAMPLE_READ) { 1477 result += sizeof(u64); 1478 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1479 result += sizeof(u64); 1480 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1481 result += sizeof(u64); 1482 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1483 if (read_format & PERF_FORMAT_GROUP) { 1484 sz = sample_read_value_size(read_format); 1485 result += sz * sample->read.group.nr; 1486 } else { 1487 result += sizeof(u64); 1488 if (read_format & PERF_FORMAT_LOST) 1489 result += sizeof(u64); 1490 } 1491 } 1492 1493 if (type & PERF_SAMPLE_CALLCHAIN) { 1494 sz = (sample->callchain->nr + 1) * sizeof(u64); 1495 result += sz; 1496 } 1497 1498 if (type & PERF_SAMPLE_RAW) { 1499 result += sizeof(u32); 1500 result += sample->raw_size; 1501 } 1502 1503 if (type & PERF_SAMPLE_BRANCH_STACK) { 1504 sz = sample->branch_stack->nr * sizeof(struct branch_entry); 1505 /* nr, hw_idx */ 1506 sz += 2 * sizeof(u64); 1507 result += sz; 1508 } 1509 1510 if (type & PERF_SAMPLE_REGS_USER) { 1511 if (sample->user_regs.abi) { 1512 result += sizeof(u64); 1513 sz = hweight64(sample->user_regs.mask) * sizeof(u64); 1514 result += sz; 1515 } else { 1516 result += sizeof(u64); 1517 } 1518 } 1519 1520 if (type & PERF_SAMPLE_STACK_USER) { 1521 sz = sample->user_stack.size; 1522 result += sizeof(u64); 1523 if (sz) { 1524 result += sz; 1525 result += sizeof(u64); 1526 } 1527 } 1528 1529 if (type & PERF_SAMPLE_WEIGHT_TYPE) 1530 result += sizeof(u64); 1531 1532 if (type & PERF_SAMPLE_DATA_SRC) 1533 result += sizeof(u64); 1534 1535 if (type & PERF_SAMPLE_TRANSACTION) 1536 result += sizeof(u64); 1537 1538 if (type & PERF_SAMPLE_REGS_INTR) { 1539 if (sample->intr_regs.abi) { 1540 result += sizeof(u64); 1541 sz = hweight64(sample->intr_regs.mask) * sizeof(u64); 1542 result += sz; 1543 } else { 1544 result += sizeof(u64); 1545 } 1546 } 1547 1548 if (type & PERF_SAMPLE_PHYS_ADDR) 1549 result += sizeof(u64); 1550 1551 if (type & PERF_SAMPLE_CGROUP) 1552 result += sizeof(u64); 1553 1554 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) 1555 result += sizeof(u64); 1556 1557 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) 1558 result += sizeof(u64); 1559 1560 if (type & PERF_SAMPLE_AUX) { 1561 result += sizeof(u64); 1562 result += sample->aux_sample.size; 1563 } 1564 1565 return result; 1566 } 1567 1568 void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data, 1569 __u64 *array, u64 type __maybe_unused) 1570 { 1571 *array = data->weight; 1572 } 1573 1574 static __u64 *copy_read_group_values(__u64 *array, __u64 read_format, 1575 const struct perf_sample *sample) 1576 { 1577 size_t sz = sample_read_value_size(read_format); 1578 struct sample_read_value *v = sample->read.group.values; 1579 1580 sample_read_group__for_each(v, sample->read.group.nr, read_format) { 1581 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1582 memcpy(array, v, sz); 1583 array = (void *)array + sz; 1584 } 1585 return array; 1586 } 1587 1588 int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format, 1589 const struct perf_sample *sample) 1590 { 1591 __u64 *array; 1592 size_t sz; 1593 /* 1594 * used for cross-endian analysis. See git commit 65014ab3 1595 * for why this goofiness is needed. 1596 */ 1597 union u64_swap u; 1598 1599 array = event->sample.array; 1600 1601 if (type & PERF_SAMPLE_IDENTIFIER) { 1602 *array = sample->id; 1603 array++; 1604 } 1605 1606 if (type & PERF_SAMPLE_IP) { 1607 *array = sample->ip; 1608 array++; 1609 } 1610 1611 if (type & PERF_SAMPLE_TID) { 1612 u.val32[0] = sample->pid; 1613 u.val32[1] = sample->tid; 1614 *array = u.val64; 1615 array++; 1616 } 1617 1618 if (type & PERF_SAMPLE_TIME) { 1619 *array = sample->time; 1620 array++; 1621 } 1622 1623 if (type & PERF_SAMPLE_ADDR) { 1624 *array = sample->addr; 1625 array++; 1626 } 1627 1628 if (type & PERF_SAMPLE_ID) { 1629 *array = sample->id; 1630 array++; 1631 } 1632 1633 if (type & PERF_SAMPLE_STREAM_ID) { 1634 *array = sample->stream_id; 1635 array++; 1636 } 1637 1638 if (type & PERF_SAMPLE_CPU) { 1639 u.val32[0] = sample->cpu; 1640 u.val32[1] = 0; 1641 *array = u.val64; 1642 array++; 1643 } 1644 1645 if (type & PERF_SAMPLE_PERIOD) { 1646 *array = sample->period; 1647 array++; 1648 } 1649 1650 if (type & PERF_SAMPLE_READ) { 1651 if (read_format & PERF_FORMAT_GROUP) 1652 *array = sample->read.group.nr; 1653 else 1654 *array = sample->read.one.value; 1655 array++; 1656 1657 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1658 *array = sample->read.time_enabled; 1659 array++; 1660 } 1661 1662 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 1663 *array = sample->read.time_running; 1664 array++; 1665 } 1666 1667 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1668 if (read_format & PERF_FORMAT_GROUP) { 1669 array = copy_read_group_values(array, read_format, 1670 sample); 1671 } else { 1672 *array = sample->read.one.id; 1673 array++; 1674 1675 if (read_format & PERF_FORMAT_LOST) { 1676 *array = sample->read.one.lost; 1677 array++; 1678 } 1679 } 1680 } 1681 1682 if (type & PERF_SAMPLE_CALLCHAIN) { 1683 sz = (sample->callchain->nr + 1) * sizeof(u64); 1684 memcpy(array, sample->callchain, sz); 1685 array = (void *)array + sz; 1686 } 1687 1688 if (type & PERF_SAMPLE_RAW) { 1689 u32 *array32 = (void *)array; 1690 1691 *array32 = sample->raw_size; 1692 array32++; 1693 1694 memcpy(array32, sample->raw_data, sample->raw_size); 1695 array = (void *)(array32 + (sample->raw_size / sizeof(u32))); 1696 1697 /* make sure the array is 64-bit aligned */ 1698 BUG_ON(((long)array) % sizeof(u64)); 1699 } 1700 1701 if (type & PERF_SAMPLE_BRANCH_STACK) { 1702 sz = sample->branch_stack->nr * sizeof(struct branch_entry); 1703 /* nr, hw_idx */ 1704 sz += 2 * sizeof(u64); 1705 memcpy(array, sample->branch_stack, sz); 1706 array = (void *)array + sz; 1707 } 1708 1709 if (type & PERF_SAMPLE_REGS_USER) { 1710 if (sample->user_regs.abi) { 1711 *array++ = sample->user_regs.abi; 1712 sz = hweight64(sample->user_regs.mask) * sizeof(u64); 1713 memcpy(array, sample->user_regs.regs, sz); 1714 array = (void *)array + sz; 1715 } else { 1716 *array++ = 0; 1717 } 1718 } 1719 1720 if (type & PERF_SAMPLE_STACK_USER) { 1721 sz = sample->user_stack.size; 1722 *array++ = sz; 1723 if (sz) { 1724 memcpy(array, sample->user_stack.data, sz); 1725 array = (void *)array + sz; 1726 *array++ = sz; 1727 } 1728 } 1729 1730 if (type & PERF_SAMPLE_WEIGHT_TYPE) { 1731 arch_perf_synthesize_sample_weight(sample, array, type); 1732 array++; 1733 } 1734 1735 if (type & PERF_SAMPLE_DATA_SRC) { 1736 *array = sample->data_src; 1737 array++; 1738 } 1739 1740 if (type & PERF_SAMPLE_TRANSACTION) { 1741 *array = sample->transaction; 1742 array++; 1743 } 1744 1745 if (type & PERF_SAMPLE_REGS_INTR) { 1746 if (sample->intr_regs.abi) { 1747 *array++ = sample->intr_regs.abi; 1748 sz = hweight64(sample->intr_regs.mask) * sizeof(u64); 1749 memcpy(array, sample->intr_regs.regs, sz); 1750 array = (void *)array + sz; 1751 } else { 1752 *array++ = 0; 1753 } 1754 } 1755 1756 if (type & PERF_SAMPLE_PHYS_ADDR) { 1757 *array = sample->phys_addr; 1758 array++; 1759 } 1760 1761 if (type & PERF_SAMPLE_CGROUP) { 1762 *array = sample->cgroup; 1763 array++; 1764 } 1765 1766 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) { 1767 *array = sample->data_page_size; 1768 array++; 1769 } 1770 1771 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) { 1772 *array = sample->code_page_size; 1773 array++; 1774 } 1775 1776 if (type & PERF_SAMPLE_AUX) { 1777 sz = sample->aux_sample.size; 1778 *array++ = sz; 1779 memcpy(array, sample->aux_sample.data, sz); 1780 array = (void *)array + sz; 1781 } 1782 1783 return 0; 1784 } 1785 1786 int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample) 1787 { 1788 __u64 *start = array; 1789 1790 /* 1791 * used for cross-endian analysis. See git commit 65014ab3 1792 * for why this goofiness is needed. 1793 */ 1794 union u64_swap u; 1795 1796 if (type & PERF_SAMPLE_TID) { 1797 u.val32[0] = sample->pid; 1798 u.val32[1] = sample->tid; 1799 *array = u.val64; 1800 array++; 1801 } 1802 1803 if (type & PERF_SAMPLE_TIME) { 1804 *array = sample->time; 1805 array++; 1806 } 1807 1808 if (type & PERF_SAMPLE_ID) { 1809 *array = sample->id; 1810 array++; 1811 } 1812 1813 if (type & PERF_SAMPLE_STREAM_ID) { 1814 *array = sample->stream_id; 1815 array++; 1816 } 1817 1818 if (type & PERF_SAMPLE_CPU) { 1819 u.val32[0] = sample->cpu; 1820 u.val32[1] = 0; 1821 *array = u.val64; 1822 array++; 1823 } 1824 1825 if (type & PERF_SAMPLE_IDENTIFIER) { 1826 *array = sample->id; 1827 array++; 1828 } 1829 1830 return (void *)array - (void *)start; 1831 } 1832 1833 int __perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process, 1834 struct evlist *evlist, struct machine *machine, size_t from) 1835 { 1836 union perf_event *ev; 1837 struct evsel *evsel; 1838 size_t nr = 0, i = 0, sz, max_nr, n, pos; 1839 size_t e1_sz = sizeof(struct id_index_entry); 1840 size_t e2_sz = sizeof(struct id_index_entry_2); 1841 size_t etot_sz = e1_sz + e2_sz; 1842 bool e2_needed = false; 1843 int err; 1844 1845 max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz; 1846 1847 pos = 0; 1848 evlist__for_each_entry(evlist, evsel) { 1849 if (pos++ < from) 1850 continue; 1851 nr += evsel->core.ids; 1852 } 1853 1854 if (!nr) 1855 return 0; 1856 1857 pr_debug2("Synthesizing id index\n"); 1858 1859 n = nr > max_nr ? max_nr : nr; 1860 sz = sizeof(struct perf_record_id_index) + n * etot_sz; 1861 ev = zalloc(sz); 1862 if (!ev) 1863 return -ENOMEM; 1864 1865 sz = sizeof(struct perf_record_id_index) + n * e1_sz; 1866 1867 ev->id_index.header.type = PERF_RECORD_ID_INDEX; 1868 ev->id_index.nr = n; 1869 1870 pos = 0; 1871 evlist__for_each_entry(evlist, evsel) { 1872 u32 j; 1873 1874 if (pos++ < from) 1875 continue; 1876 for (j = 0; j < evsel->core.ids; j++, i++) { 1877 struct id_index_entry *e; 1878 struct id_index_entry_2 *e2; 1879 struct perf_sample_id *sid; 1880 1881 if (i >= n) { 1882 ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0); 1883 err = process(tool, ev, NULL, machine); 1884 if (err) 1885 goto out_err; 1886 nr -= n; 1887 i = 0; 1888 e2_needed = false; 1889 } 1890 1891 e = &ev->id_index.entries[i]; 1892 1893 e->id = evsel->core.id[j]; 1894 1895 sid = evlist__id2sid(evlist, e->id); 1896 if (!sid) { 1897 free(ev); 1898 return -ENOENT; 1899 } 1900 1901 e->idx = sid->idx; 1902 e->cpu = sid->cpu.cpu; 1903 e->tid = sid->tid; 1904 1905 if (sid->machine_pid) 1906 e2_needed = true; 1907 1908 e2 = (void *)ev + sz; 1909 e2[i].machine_pid = sid->machine_pid; 1910 e2[i].vcpu = sid->vcpu.cpu; 1911 } 1912 } 1913 1914 sz = sizeof(struct perf_record_id_index) + nr * e1_sz; 1915 ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0); 1916 ev->id_index.nr = nr; 1917 1918 err = process(tool, ev, NULL, machine); 1919 out_err: 1920 free(ev); 1921 1922 return err; 1923 } 1924 1925 int perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process, 1926 struct evlist *evlist, struct machine *machine) 1927 { 1928 return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0); 1929 } 1930 1931 int __machine__synthesize_threads(struct machine *machine, const struct perf_tool *tool, 1932 struct target *target, struct perf_thread_map *threads, 1933 perf_event__handler_t process, bool needs_mmap, 1934 bool data_mmap, unsigned int nr_threads_synthesize) 1935 { 1936 /* 1937 * When perf runs in non-root PID namespace, and the namespace's proc FS 1938 * is not mounted, nsinfo__is_in_root_namespace() returns false. 1939 * In this case, the proc FS is coming for the parent namespace, thus 1940 * perf tool will wrongly gather process info from its parent PID 1941 * namespace. 1942 * 1943 * To avoid the confusion that the perf tool runs in a child PID 1944 * namespace but it synthesizes thread info from its parent PID 1945 * namespace, returns failure with warning. 1946 */ 1947 if (!nsinfo__is_in_root_namespace()) { 1948 pr_err("Perf runs in non-root PID namespace but it tries to "); 1949 pr_err("gather process info from its parent PID namespace.\n"); 1950 pr_err("Please mount the proc file system properly, e.g. "); 1951 pr_err("add the option '--mount-proc' for unshare command.\n"); 1952 return -EPERM; 1953 } 1954 1955 if (target__has_task(target)) 1956 return perf_event__synthesize_thread_map(tool, threads, process, machine, 1957 needs_mmap, data_mmap); 1958 else if (target__has_cpu(target)) 1959 return perf_event__synthesize_threads(tool, process, machine, 1960 needs_mmap, data_mmap, 1961 nr_threads_synthesize); 1962 /* command specified */ 1963 return 0; 1964 } 1965 1966 int machine__synthesize_threads(struct machine *machine, struct target *target, 1967 struct perf_thread_map *threads, bool needs_mmap, 1968 bool data_mmap, unsigned int nr_threads_synthesize) 1969 { 1970 return __machine__synthesize_threads(machine, NULL, target, threads, 1971 perf_event__process, needs_mmap, 1972 data_mmap, nr_threads_synthesize); 1973 } 1974 1975 static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id) 1976 { 1977 struct perf_record_event_update *ev; 1978 1979 size += sizeof(*ev); 1980 size = PERF_ALIGN(size, sizeof(u64)); 1981 1982 ev = zalloc(size); 1983 if (ev) { 1984 ev->header.type = PERF_RECORD_EVENT_UPDATE; 1985 ev->header.size = (u16)size; 1986 ev->type = type; 1987 ev->id = id; 1988 } 1989 return ev; 1990 } 1991 1992 int perf_event__synthesize_event_update_unit(const struct perf_tool *tool, struct evsel *evsel, 1993 perf_event__handler_t process) 1994 { 1995 size_t size = strlen(evsel->unit); 1996 struct perf_record_event_update *ev; 1997 int err; 1998 1999 ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]); 2000 if (ev == NULL) 2001 return -ENOMEM; 2002 2003 strlcpy(ev->unit, evsel->unit, size + 1); 2004 err = process(tool, (union perf_event *)ev, NULL, NULL); 2005 free(ev); 2006 return err; 2007 } 2008 2009 int perf_event__synthesize_event_update_scale(const struct perf_tool *tool, struct evsel *evsel, 2010 perf_event__handler_t process) 2011 { 2012 struct perf_record_event_update *ev; 2013 struct perf_record_event_update_scale *ev_data; 2014 int err; 2015 2016 ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]); 2017 if (ev == NULL) 2018 return -ENOMEM; 2019 2020 ev->scale.scale = evsel->scale; 2021 err = process(tool, (union perf_event *)ev, NULL, NULL); 2022 free(ev); 2023 return err; 2024 } 2025 2026 int perf_event__synthesize_event_update_name(const struct perf_tool *tool, struct evsel *evsel, 2027 perf_event__handler_t process) 2028 { 2029 struct perf_record_event_update *ev; 2030 size_t len = strlen(evsel__name(evsel)); 2031 int err; 2032 2033 ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]); 2034 if (ev == NULL) 2035 return -ENOMEM; 2036 2037 strlcpy(ev->name, evsel->name, len + 1); 2038 err = process(tool, (union perf_event *)ev, NULL, NULL); 2039 free(ev); 2040 return err; 2041 } 2042 2043 int perf_event__synthesize_event_update_cpus(const struct perf_tool *tool, struct evsel *evsel, 2044 perf_event__handler_t process) 2045 { 2046 struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus }; 2047 struct perf_record_event_update *ev; 2048 int err; 2049 2050 ev = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header) + 2 * sizeof(u64)); 2051 if (!ev) 2052 return -ENOMEM; 2053 2054 syn_data.data = &ev->cpus.cpus; 2055 ev->header.type = PERF_RECORD_EVENT_UPDATE; 2056 ev->header.size = (u16)syn_data.size; 2057 ev->type = PERF_EVENT_UPDATE__CPUS; 2058 ev->id = evsel->core.id[0]; 2059 cpu_map_data__synthesize(&syn_data); 2060 2061 err = process(tool, (union perf_event *)ev, NULL, NULL); 2062 free(ev); 2063 return err; 2064 } 2065 2066 int perf_event__synthesize_attrs(const struct perf_tool *tool, struct evlist *evlist, 2067 perf_event__handler_t process) 2068 { 2069 struct evsel *evsel; 2070 int err = 0; 2071 2072 evlist__for_each_entry(evlist, evsel) { 2073 err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids, 2074 evsel->core.id, process); 2075 if (err) { 2076 pr_debug("failed to create perf header attribute\n"); 2077 return err; 2078 } 2079 } 2080 2081 return err; 2082 } 2083 2084 static bool has_unit(struct evsel *evsel) 2085 { 2086 return evsel->unit && *evsel->unit; 2087 } 2088 2089 static bool has_scale(struct evsel *evsel) 2090 { 2091 return evsel->scale != 1; 2092 } 2093 2094 int perf_event__synthesize_extra_attr(const struct perf_tool *tool, struct evlist *evsel_list, 2095 perf_event__handler_t process, bool is_pipe) 2096 { 2097 struct evsel *evsel; 2098 int err; 2099 2100 /* 2101 * Synthesize other events stuff not carried within 2102 * attr event - unit, scale, name 2103 */ 2104 evlist__for_each_entry(evsel_list, evsel) { 2105 if (!evsel->supported) 2106 continue; 2107 2108 /* 2109 * Synthesize unit and scale only if it's defined. 2110 */ 2111 if (has_unit(evsel)) { 2112 err = perf_event__synthesize_event_update_unit(tool, evsel, process); 2113 if (err < 0) { 2114 pr_err("Couldn't synthesize evsel unit.\n"); 2115 return err; 2116 } 2117 } 2118 2119 if (has_scale(evsel)) { 2120 err = perf_event__synthesize_event_update_scale(tool, evsel, process); 2121 if (err < 0) { 2122 pr_err("Couldn't synthesize evsel evsel.\n"); 2123 return err; 2124 } 2125 } 2126 2127 if (evsel->core.own_cpus) { 2128 err = perf_event__synthesize_event_update_cpus(tool, evsel, process); 2129 if (err < 0) { 2130 pr_err("Couldn't synthesize evsel cpus.\n"); 2131 return err; 2132 } 2133 } 2134 2135 /* 2136 * Name is needed only for pipe output, 2137 * perf.data carries event names. 2138 */ 2139 if (is_pipe) { 2140 err = perf_event__synthesize_event_update_name(tool, evsel, process); 2141 if (err < 0) { 2142 pr_err("Couldn't synthesize evsel name.\n"); 2143 return err; 2144 } 2145 } 2146 } 2147 return 0; 2148 } 2149 2150 int perf_event__synthesize_attr(const struct perf_tool *tool, struct perf_event_attr *attr, 2151 u32 ids, u64 *id, perf_event__handler_t process) 2152 { 2153 union perf_event *ev; 2154 size_t size; 2155 int err; 2156 2157 size = sizeof(struct perf_event_attr); 2158 size = PERF_ALIGN(size, sizeof(u64)); 2159 size += sizeof(struct perf_event_header); 2160 size += ids * sizeof(u64); 2161 2162 ev = zalloc(size); 2163 2164 if (ev == NULL) 2165 return -ENOMEM; 2166 2167 ev->attr.attr = *attr; 2168 memcpy(perf_record_header_attr_id(ev), id, ids * sizeof(u64)); 2169 2170 ev->attr.header.type = PERF_RECORD_HEADER_ATTR; 2171 ev->attr.header.size = (u16)size; 2172 2173 if (ev->attr.header.size == size) 2174 err = process(tool, ev, NULL, NULL); 2175 else 2176 err = -E2BIG; 2177 2178 free(ev); 2179 2180 return err; 2181 } 2182 2183 #ifdef HAVE_LIBTRACEEVENT 2184 int perf_event__synthesize_tracing_data(const struct perf_tool *tool, int fd, struct evlist *evlist, 2185 perf_event__handler_t process) 2186 { 2187 union perf_event ev; 2188 struct tracing_data *tdata; 2189 ssize_t size = 0, aligned_size = 0, padding; 2190 struct feat_fd ff; 2191 2192 /* 2193 * We are going to store the size of the data followed 2194 * by the data contents. Since the fd descriptor is a pipe, 2195 * we cannot seek back to store the size of the data once 2196 * we know it. Instead we: 2197 * 2198 * - write the tracing data to the temp file 2199 * - get/write the data size to pipe 2200 * - write the tracing data from the temp file 2201 * to the pipe 2202 */ 2203 tdata = tracing_data_get(&evlist->core.entries, fd, true); 2204 if (!tdata) 2205 return -1; 2206 2207 memset(&ev, 0, sizeof(ev.tracing_data)); 2208 2209 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 2210 size = tdata->size; 2211 aligned_size = PERF_ALIGN(size, sizeof(u64)); 2212 padding = aligned_size - size; 2213 ev.tracing_data.header.size = sizeof(ev.tracing_data); 2214 ev.tracing_data.size = aligned_size; 2215 2216 process(tool, &ev, NULL, NULL); 2217 2218 /* 2219 * The put function will copy all the tracing data 2220 * stored in temp file to the pipe. 2221 */ 2222 tracing_data_put(tdata); 2223 2224 ff = (struct feat_fd){ .fd = fd }; 2225 if (write_padded(&ff, NULL, 0, padding)) 2226 return -1; 2227 2228 return aligned_size; 2229 } 2230 #endif 2231 2232 int perf_event__synthesize_build_id(const struct perf_tool *tool, 2233 struct perf_sample *sample, 2234 struct machine *machine, 2235 perf_event__handler_t process, 2236 const struct evsel *evsel, 2237 __u16 misc, 2238 const struct build_id *bid, 2239 const char *filename) 2240 { 2241 union perf_event ev; 2242 size_t len; 2243 2244 len = sizeof(ev.build_id) + strlen(filename) + 1; 2245 len = PERF_ALIGN(len, sizeof(u64)); 2246 2247 memset(&ev, 0, len); 2248 2249 ev.build_id.size = min(bid->size, sizeof(ev.build_id.build_id)); 2250 memcpy(ev.build_id.build_id, bid->data, ev.build_id.size); 2251 ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; 2252 ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE; 2253 ev.build_id.pid = machine->pid; 2254 ev.build_id.header.size = len; 2255 strcpy(ev.build_id.filename, filename); 2256 2257 if (evsel) { 2258 void *array = &ev; 2259 int ret; 2260 2261 array += ev.header.size; 2262 ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample); 2263 if (ret < 0) 2264 return ret; 2265 2266 if (ret & 7) { 2267 pr_err("Bad id sample size %d\n", ret); 2268 return -EINVAL; 2269 } 2270 2271 ev.header.size += ret; 2272 } 2273 2274 return process(tool, &ev, sample, machine); 2275 } 2276 2277 int perf_event__synthesize_mmap2_build_id(const struct perf_tool *tool, 2278 struct perf_sample *sample, 2279 struct machine *machine, 2280 perf_event__handler_t process, 2281 const struct evsel *evsel, 2282 __u16 misc, 2283 __u32 pid, __u32 tid, 2284 __u64 start, __u64 len, __u64 pgoff, 2285 const struct build_id *bid, 2286 __u32 prot, __u32 flags, 2287 const char *filename) 2288 { 2289 union perf_event ev; 2290 size_t ev_len; 2291 void *array; 2292 int ret; 2293 2294 ev_len = sizeof(ev.mmap2) - sizeof(ev.mmap2.filename) + strlen(filename) + 1; 2295 ev_len = PERF_ALIGN(ev_len, sizeof(u64)); 2296 2297 memset(&ev, 0, ev_len); 2298 2299 ev.mmap2.header.type = PERF_RECORD_MMAP2; 2300 ev.mmap2.header.misc = misc | PERF_RECORD_MISC_MMAP_BUILD_ID; 2301 ev.mmap2.header.size = ev_len; 2302 2303 ev.mmap2.pid = pid; 2304 ev.mmap2.tid = tid; 2305 ev.mmap2.start = start; 2306 ev.mmap2.len = len; 2307 ev.mmap2.pgoff = pgoff; 2308 2309 ev.mmap2.build_id_size = min(bid->size, sizeof(ev.mmap2.build_id)); 2310 memcpy(ev.mmap2.build_id, bid->data, ev.mmap2.build_id_size); 2311 2312 ev.mmap2.prot = prot; 2313 ev.mmap2.flags = flags; 2314 2315 memcpy(ev.mmap2.filename, filename, min(strlen(filename), sizeof(ev.mmap.filename))); 2316 2317 array = &ev; 2318 array += ev.header.size; 2319 ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample); 2320 if (ret < 0) 2321 return ret; 2322 2323 if (ret & 7) { 2324 pr_err("Bad id sample size %d\n", ret); 2325 return -EINVAL; 2326 } 2327 2328 ev.header.size += ret; 2329 2330 return process(tool, &ev, sample, machine); 2331 } 2332 2333 int perf_event__synthesize_stat_events(struct perf_stat_config *config, const struct perf_tool *tool, 2334 struct evlist *evlist, perf_event__handler_t process, bool attrs) 2335 { 2336 int err; 2337 2338 if (attrs) { 2339 err = perf_event__synthesize_attrs(tool, evlist, process); 2340 if (err < 0) { 2341 pr_err("Couldn't synthesize attrs.\n"); 2342 return err; 2343 } 2344 } 2345 2346 err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs); 2347 err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL); 2348 if (err < 0) { 2349 pr_err("Couldn't synthesize thread map.\n"); 2350 return err; 2351 } 2352 2353 err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL); 2354 if (err < 0) { 2355 pr_err("Couldn't synthesize thread map.\n"); 2356 return err; 2357 } 2358 2359 err = perf_event__synthesize_stat_config(tool, config, process, NULL); 2360 if (err < 0) { 2361 pr_err("Couldn't synthesize config.\n"); 2362 return err; 2363 } 2364 2365 return 0; 2366 } 2367 2368 extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE]; 2369 2370 int perf_event__synthesize_features(const struct perf_tool *tool, struct perf_session *session, 2371 struct evlist *evlist, perf_event__handler_t process) 2372 { 2373 struct perf_header *header = &session->header; 2374 struct perf_record_header_feature *fe; 2375 struct feat_fd ff; 2376 size_t sz, sz_hdr; 2377 int feat, ret; 2378 2379 sz_hdr = sizeof(fe->header); 2380 sz = sizeof(union perf_event); 2381 /* get a nice alignment */ 2382 sz = PERF_ALIGN(sz, page_size); 2383 2384 memset(&ff, 0, sizeof(ff)); 2385 2386 ff.buf = malloc(sz); 2387 if (!ff.buf) 2388 return -ENOMEM; 2389 2390 ff.size = sz - sz_hdr; 2391 ff.ph = &session->header; 2392 2393 for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) { 2394 if (!feat_ops[feat].synthesize) { 2395 pr_debug("No record header feature for header :%d\n", feat); 2396 continue; 2397 } 2398 2399 ff.offset = sizeof(*fe); 2400 2401 ret = feat_ops[feat].write(&ff, evlist); 2402 if (ret || ff.offset <= (ssize_t)sizeof(*fe)) { 2403 pr_debug("Error writing feature\n"); 2404 continue; 2405 } 2406 /* ff.buf may have changed due to realloc in do_write() */ 2407 fe = ff.buf; 2408 memset(fe, 0, sizeof(*fe)); 2409 2410 fe->feat_id = feat; 2411 fe->header.type = PERF_RECORD_HEADER_FEATURE; 2412 fe->header.size = ff.offset; 2413 2414 ret = process(tool, ff.buf, NULL, NULL); 2415 if (ret) { 2416 free(ff.buf); 2417 return ret; 2418 } 2419 } 2420 2421 /* Send HEADER_LAST_FEATURE mark. */ 2422 fe = ff.buf; 2423 fe->feat_id = HEADER_LAST_FEATURE; 2424 fe->header.type = PERF_RECORD_HEADER_FEATURE; 2425 fe->header.size = sizeof(*fe); 2426 2427 ret = process(tool, ff.buf, NULL, NULL); 2428 2429 free(ff.buf); 2430 return ret; 2431 } 2432 2433 int perf_event__synthesize_for_pipe(const struct perf_tool *tool, 2434 struct perf_session *session, 2435 struct perf_data *data, 2436 perf_event__handler_t process) 2437 { 2438 int err; 2439 int ret = 0; 2440 struct evlist *evlist = session->evlist; 2441 2442 /* 2443 * We need to synthesize events first, because some 2444 * features works on top of them (on report side). 2445 */ 2446 err = perf_event__synthesize_attrs(tool, evlist, process); 2447 if (err < 0) { 2448 pr_err("Couldn't synthesize attrs.\n"); 2449 return err; 2450 } 2451 ret += err; 2452 2453 err = perf_event__synthesize_features(tool, session, evlist, process); 2454 if (err < 0) { 2455 pr_err("Couldn't synthesize features.\n"); 2456 return err; 2457 } 2458 ret += err; 2459 2460 #ifdef HAVE_LIBTRACEEVENT 2461 if (have_tracepoints(&evlist->core.entries)) { 2462 int fd = perf_data__fd(data); 2463 2464 /* 2465 * FIXME err <= 0 here actually means that 2466 * there were no tracepoints so its not really 2467 * an error, just that we don't need to 2468 * synthesize anything. We really have to 2469 * return this more properly and also 2470 * propagate errors that now are calling die() 2471 */ 2472 err = perf_event__synthesize_tracing_data(tool, fd, evlist, 2473 process); 2474 if (err <= 0) { 2475 pr_err("Couldn't record tracing data.\n"); 2476 return err; 2477 } 2478 ret += err; 2479 } 2480 #else 2481 (void)data; 2482 #endif 2483 2484 return ret; 2485 } 2486 2487 int parse_synth_opt(char *synth) 2488 { 2489 char *p, *q; 2490 int ret = 0; 2491 2492 if (synth == NULL) 2493 return -1; 2494 2495 for (q = synth; (p = strsep(&q, ",")); p = q) { 2496 if (!strcasecmp(p, "no") || !strcasecmp(p, "none")) 2497 return 0; 2498 2499 if (!strcasecmp(p, "all")) 2500 return PERF_SYNTH_ALL; 2501 2502 if (!strcasecmp(p, "task")) 2503 ret |= PERF_SYNTH_TASK; 2504 else if (!strcasecmp(p, "mmap")) 2505 ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP; 2506 else if (!strcasecmp(p, "cgroup")) 2507 ret |= PERF_SYNTH_CGROUP; 2508 else 2509 return -1; 2510 } 2511 2512 return ret; 2513 } 2514