#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <linux/perf_event.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string2.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "sane_ctype.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
	[PERF_RECORD_HEADER_FEATURE]		= "FEATURE",
};

static const char *perf_ns__names[] = {
	[NET_NS_INDEX]		= "net",
	[UTS_NS_INDEX]		= "uts",
	[IPC_NS_INDEX]		= "ipc",
	[PID_NS_INDEX]		= "pid",
	[USER_NS_INDEX]		= "user",
	[MNT_NS_INDEX]		= "mnt",
	[CGROUP_NS_INDEX]	= "cgroup",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static const char *perf_ns__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_ns__names))
		return "UNKNOWN";
	return perf_ns__names[id];
}

static int perf_tool__process_synth_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct machine *machine,
					  perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}

/*
 * Assumes that the first 4095 bytes of /proc/pid/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char filename[PATH_MAX];
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		char *nl;

		name += 5;  /* strlen("Name:") */
		name = ltrim(name);

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}

static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[128];

	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}

int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
		(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
		machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file.  For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid = tgid;
	event->fork.tid = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data,
				       unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	FILE *fp;
	unsigned long long t;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
		 machine->root_dir, pid, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s timed out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   filename);
			truncation = true;
			goto out;
		}

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
		       &event->mmap2.start, &event->mmap2.len, prot,
		       &event->mmap2.pgoff, &event->mmap2.maj,
		       &event->mmap2.min,
		       &ino, execname);

		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		if (hugetlbfs_mnt_len &&
		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
			strcpy(execname, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	fclose(fp);
	return rc;
}

int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (__map__is_kernel(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data, proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

static int __perf_event__synthesize_threads(struct perf_tool *tool,
					    perf_event__handler_t process,
					    struct machine *machine,
					    bool mmap_data,
					    unsigned int proc_map_timeout,
					    struct dirent **dirent,
					    int start,
					    int num)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;
	char *end;
	pid_t pid;
	int i;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	for (i = start; i < start + num; i++) {
		if (!isdigit(dirent[i]->d_name[0]))
			continue;

		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
		/* only interested in proper numerical dirents */
		if (*end)
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data,
					   proc_map_timeout);
	}
	err = 0;

	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct synthesize_threads_arg {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	bool mmap_data;
	unsigned int proc_map_timeout;
	struct dirent **dirent;
	int num;
	int start;
};

static void *synthesize_threads_worker(void *arg)
{
	struct synthesize_threads_arg *args = arg;

	__perf_event__synthesize_threads(args->tool, args->process,
					 args->machine, args->mmap_data,
					 args->proc_map_timeout, args->dirent,
					 args->start, args->num);
	return NULL;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout,
				   unsigned int nr_threads_synthesize)
{
	struct synthesize_threads_arg *args = NULL;
	pthread_t *synthesize_threads = NULL;
	char proc_path[PATH_MAX];
	struct dirent **dirent;
	int num_per_thread;
	int m, n, i, j;
	int thread_nr;
	int base = 0;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	n = scandir(proc_path, &dirent, 0, alphasort);
	if (n < 0)
		return err;

	if (nr_threads_synthesize == UINT_MAX)
		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
	else
		thread_nr = nr_threads_synthesize;

	if (thread_nr <= 1) {
		err = __perf_event__synthesize_threads(tool, process,
						       machine, mmap_data,
						       proc_map_timeout,
						       dirent, base, n);
		goto free_dirent;
	}
	if (thread_nr > n)
		thread_nr = n;

	synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
	if (synthesize_threads == NULL)
		goto free_dirent;

	args = calloc(sizeof(*args), thread_nr);
	if (args == NULL)
		goto free_threads;

	num_per_thread = n / thread_nr;
	m = n % thread_nr;
	for (i = 0; i < thread_nr; i++) {
		args[i].tool = tool;
		args[i].process = process;
		args[i].machine = machine;
		args[i].mmap_data = mmap_data;
		args[i].proc_map_timeout = proc_map_timeout;
		args[i].dirent = dirent;
	}
	/* The first 'm' workers take one extra dirent each to cover the remainder. */
	for (i = 0; i < m; i++) {
		args[i].num = num_per_thread + 1;
		args[i].start = i * args[i].num;
	}
	if (i != 0)
		base = args[i-1].start + args[i-1].num;
	for (j = i; j < thread_nr; j++) {
		args[j].num = num_per_thread;
		args[j].start = base + (j - i) * args[i].num;
	}

	for (i = 0; i < thread_nr; i++) {
		if (pthread_create(&synthesize_threads[i], NULL,
				   synthesize_threads_worker, &args[i]))
			goto out_join;
	}
	err = 0;
out_join:
	for (i = 0; i < thread_nr; i++)
		pthread_join(synthesize_threads[i], NULL);
	free(args);
free_threads:
	free(synthesize_threads);
free_dirent:
	for (i = 0; i < n; i++)
		free(dirent[i]);
	free(dirent);

	return err;
}

struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where
	 * "_text" is an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

int kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name, u64 *addr)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return -1;

	*addr = args.start;
	return 0;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (symbol_conf.kptr_restrict)
		return -1;
	if (map == NULL)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *   array = size of 'struct cpu_map_entries' +
	 *           number of cpus * sizeof(u16)
	 *
	 *   mask  = size of 'struct cpu_map_mask' +
	 *           maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id     = id;
	event.cpu    = cpu;
	event.thread = thread;
	event.val    = count->val;
	event.ena    = count->ena;
	event.run    = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
	size_t ret = 0;
	struct perf_ns_link_info *ns_link_info;
	u32 nr_namespaces, idx;

	ns_link_info = event->namespaces.link_info;
	nr_namespaces = event->namespaces.nr_namespaces;

	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
		       event->namespaces.pid,
		       event->namespaces.tid,
		       nr_namespaces);

	for (idx = 0; idx < nr_namespaces; idx++) {
		if (idx && (idx % 4 == 0))
			ret += fprintf(fp, "\n\t\t ");

		ret += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
			       perf_ns__name(idx), (u64)ns_link_info[idx].dev,
			       (u64)ns_link_info[idx].ino,
			       ((idx + 1) != nr_namespaces) ? ", " : "]\n");
	}

	return ret;
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, ": ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr);
	else
		al->sym = NULL;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	}

	if (symbol_conf.sym_list &&
	    (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
					     al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

/*
 * The preprocess_sample method will return with reference counts for the
 * entries in the addr_location it fills in; when done using them (and
 * perhaps after getting extra ref counts, if a pointer to one of those
 * entries is to be kept), it must be paired with addr_location__put(), so
 * that the refcounts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}

bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE &&
	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
		return true;

	if (is_bts_event(attr))
		return true;

	return false;
}

void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
	if (!al->map)
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr);
}