#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"

const char *event__name[] = {
        [0] = "TOTAL",
        [PERF_RECORD_MMAP] = "MMAP",
        [PERF_RECORD_LOST] = "LOST",
        [PERF_RECORD_COMM] = "COMM",
        [PERF_RECORD_EXIT] = "EXIT",
        [PERF_RECORD_THROTTLE] = "THROTTLE",
        [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
        [PERF_RECORD_FORK] = "FORK",
        [PERF_RECORD_READ] = "READ",
        [PERF_RECORD_SAMPLE] = "SAMPLE",
        [PERF_RECORD_HEADER_ATTR] = "ATTR",
        [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
        [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
        [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
};

/*
 * Synthesize a PERF_RECORD_COMM event for @pid by parsing /proc/<pid>/status;
 * if @full, also emit one event per thread found in /proc/<pid>/task.
 * Returns the tgid, 0 if we raced with the task exiting, or -1 on malformed
 * input.
 */
static pid_t event__synthesize_comm(pid_t pid, int full,
                                    event__handler_t process,
                                    struct perf_session *session)
{
        event_t ev;
        char filename[PATH_MAX];
        char bf[BUFSIZ];
        FILE *fp;
        size_t size = 0;
        DIR *tasks;
        struct dirent dirent, *next;
        pid_t tgid = 0;

        snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
out_race:
                /*
                 * We raced with a task exiting - just return:
                 */
                pr_debug("couldn't open %s\n", filename);
                return 0;
        }

        memset(&ev.comm, 0, sizeof(ev.comm));
        while (!ev.comm.comm[0] || !ev.comm.pid) {
                if (fgets(bf, sizeof(bf), fp) == NULL)
                        goto out_failure;

                if (memcmp(bf, "Name:", 5) == 0) {
                        char *name = bf + 5;
                        while (*name && isspace(*name))
                                ++name;
                        size = strlen(name) - 1;
                        memcpy(ev.comm.comm, name, size++);
                } else if (memcmp(bf, "Tgid:", 5) == 0) {
                        char *tgids = bf + 5;
                        while (*tgids && isspace(*tgids))
                                ++tgids;
                        tgid = ev.comm.pid = atoi(tgids);
                }
        }

        ev.comm.header.type = PERF_RECORD_COMM;
        size = ALIGN(size, sizeof(u64));
        ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);

        if (!full) {
                ev.comm.tid = pid;

                process(&ev, session);
                goto out_fclose;
        }

        snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

        tasks = opendir(filename);
        if (tasks == NULL)
                goto out_race;

        while (!readdir_r(tasks, &dirent, &next) && next) {
                char *end;
                pid = strtol(dirent.d_name, &end, 10);
                if (*end)
                        continue;

                ev.comm.tid = pid;

                process(&ev, session);
        }
        closedir(tasks);

out_fclose:
        fclose(fp);
        return tgid;

out_failure:
        pr_warning("couldn't get COMM and tgid, malformed %s\n", filename);
        return -1;
}

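/*
 * Synthesize one PERF_RECORD_MMAP event per executable mapping listed in
 * /proc/<pid>/maps, so that samples in already running processes can be
 * resolved to the right DSO.
 */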
static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
                                         event__handler_t process,
                                         struct perf_session *session)
{
        char filename[PATH_MAX];
        FILE *fp;

        snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

        fp = fopen(filename, "r");
        if (fp == NULL) {
                /*
                 * We raced with a task exiting - just return:
                 */
                pr_debug("couldn't open %s\n", filename);
                return -1;
        }

        while (1) {
                char bf[BUFSIZ], *pbf = bf;
                event_t ev = {
                        .header = {
                                .type = PERF_RECORD_MMAP,
                                /*
                                 * Just like the kernel, see __perf_event_mmap
                                 * in kernel/perf_event.c
                                 */
                                .misc = PERF_RECORD_MISC_USER,
                        },
                };
                int n;
                size_t size;
                if (fgets(bf, sizeof(bf), fp) == NULL)
                        break;

                /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
                n = hex2u64(pbf, &ev.mmap.start);
                if (n < 0)
                        continue;
                pbf += n + 1;
                n = hex2u64(pbf, &ev.mmap.len);
                if (n < 0)
                        continue;
                pbf += n + 3;
                if (*pbf == 'x') { /* vm_exec */
                        u64 vm_pgoff;
                        char *execname = strchr(bf, '/');

                        /* Catch VDSO */
                        if (execname == NULL)
                                execname = strstr(bf, "[vdso]");

                        if (execname == NULL)
                                continue;

                        pbf += 3;
                        n = hex2u64(pbf, &vm_pgoff);
                        /* pgoff is in bytes, not pages */
                        if (n >= 0)
                                ev.mmap.pgoff = vm_pgoff << getpagesize();
                        else
                                ev.mmap.pgoff = 0;

                        size = strlen(execname);
                        execname[size - 1] = '\0'; /* Remove \n */
                        memcpy(ev.mmap.filename, execname, size);
                        size = ALIGN(size, sizeof(u64));
                        ev.mmap.len -= ev.mmap.start;
                        ev.mmap.header.size = (sizeof(ev.mmap) -
                                               (sizeof(ev.mmap.filename) - size));
                        ev.mmap.pid = tgid;
                        ev.mmap.tid = pid;

                        process(&ev, session);
                }
        }

        fclose(fp);
        return 0;
}

int event__synthesize_modules(event__handler_t process,
                              struct perf_session *session,
                              struct machine *machine)
{
        struct rb_node *nd;
        struct map_groups *kmaps = &machine->kmaps;
        u16 misc;

        /*
         * kernel uses 0 for user space maps, see kernel/perf_event.c
         * __perf_event_mmap
         */
        if (machine__is_host(machine))
                misc = PERF_RECORD_MISC_KERNEL;
        else
                misc = PERF_RECORD_MISC_GUEST_KERNEL;

        for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
             nd; nd = rb_next(nd)) {
                event_t ev;
                size_t size;
                struct map *pos = rb_entry(nd, struct map, rb_node);

                if (pos->dso->kernel)
                        continue;

                size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
                memset(&ev, 0, sizeof(ev));
                ev.mmap.header.misc = misc;
                ev.mmap.header.type = PERF_RECORD_MMAP;
                ev.mmap.header.size = (sizeof(ev.mmap) -
                                       (sizeof(ev.mmap.filename) - size));
                ev.mmap.start = pos->start;
                ev.mmap.len = pos->end - pos->start;
                ev.mmap.pid = machine->pid;

                memcpy(ev.mmap.filename, pos->dso->long_name,
                       pos->dso->long_name_len + 1);
                process(&ev, session);
        }

        return 0;
}

int event__synthesize_thread(pid_t pid, event__handler_t process,
                             struct perf_session *session)
{
        pid_t tgid = event__synthesize_comm(pid, 1, process, session);
        if (tgid == -1)
                return -1;
        return event__synthesize_mmap_events(pid, tgid, process, session);
}

void event__synthesize_threads(event__handler_t process,
                               struct perf_session *session)
{
        DIR *proc;
        struct dirent dirent, *next;

        proc = opendir("/proc");

        while (!readdir_r(proc, &dirent, &next) && next) {
                char *end;
                pid_t pid = strtol(dirent.d_name, &end, 10);

                if (*end) /* only interested in proper numerical dirents */
                        continue;

                event__synthesize_thread(pid, process, session);
        }

        closedir(proc);
}

struct process_symbol_args {
        const char *name;
        u64 start;
};

static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
{
        struct process_symbol_args *args = arg;

        /*
         * Must be a function or at least an alias, as in PARISC64, where "_text" is
         * an 'A' to the same address as "_stext".
         */
        if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
              type == 'A') || strcmp(name, args->name))
                return 0;

        args->start = start;
        return 1;
}

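/*
 * Synthesize the PERF_RECORD_MMAP event covering the kernel text map:
 * @symbol_name is resolved through kallsyms and its address is stored in
 * pgoff, to be used later as the reference relocation symbol.
 */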
int event__synthesize_kernel_mmap(event__handler_t process,
                                  struct perf_session *session,
                                  struct machine *machine,
                                  const char *symbol_name)
{
        size_t size;
        const char *filename, *mmap_name;
        char path[PATH_MAX];
        char name_buff[PATH_MAX];
        struct map *map;

        event_t ev = {
                .header = {
                        .type = PERF_RECORD_MMAP,
                },
        };
        /*
         * We should get this from /sys/kernel/sections/.text, but till that is
         * available use this, and after it is use this as a fallback for older
         * kernels.
         */
        struct process_symbol_args args = { .name = symbol_name, };

        mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
        if (machine__is_host(machine)) {
                /*
                 * kernel uses PERF_RECORD_MISC_USER for user space maps,
                 * see kernel/perf_event.c __perf_event_mmap
                 */
                ev.header.misc = PERF_RECORD_MISC_KERNEL;
                filename = "/proc/kallsyms";
        } else {
                ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
                if (machine__is_default_guest(machine))
                        filename = (char *) symbol_conf.default_guest_kallsyms;
                else {
                        sprintf(path, "%s/proc/kallsyms", machine->root_dir);
                        filename = path;
                }
        }

        if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
                return -ENOENT;

        map = machine->vmlinux_maps[MAP__FUNCTION];
        size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
                        "%s%s", mmap_name, symbol_name) + 1;
        size = ALIGN(size, sizeof(u64));
        ev.mmap.header.size = (sizeof(ev.mmap) -
                               (sizeof(ev.mmap.filename) - size));
        ev.mmap.pgoff = args.start;
        ev.mmap.start = map->start;
        ev.mmap.len = map->end - ev.mmap.start;
        ev.mmap.pid = machine->pid;

        return process(&ev, session);
}

static void thread__comm_adjust(struct thread *self)
{
        char *comm = self->comm;

        if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            (!symbol_conf.comm_list ||
             strlist__has_entry(symbol_conf.comm_list, comm))) {
                unsigned int slen = strlen(comm);

                if (slen > comms__col_width) {
                        comms__col_width = slen;
                        threads__col_width = slen + 6;
                }
        }
}

static int thread__set_comm_adjust(struct thread *self, const char *comm)
{
        int ret = thread__set_comm(self, comm);

        if (ret)
                return ret;

        thread__comm_adjust(self);

        return 0;
}

int event__process_comm(event_t *self, struct perf_session *session)
{
        struct thread *thread = perf_session__findnew(session, self->comm.pid);

        dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid);

        if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) {
                dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
                return -1;
        }

        return 0;
}

int event__process_lost(event_t *self, struct perf_session *session)
{
        dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
        session->hists.stats.total_lost += self->lost.lost;
        return 0;
}

static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
{
        maps[MAP__FUNCTION]->start = self->mmap.start;
        maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
        /*
         * Be a bit paranoid here, some perf.data file came with
         * a zero sized synthesized MMAP event for the kernel.
         */
        if (maps[MAP__FUNCTION]->end == 0)
                maps[MAP__FUNCTION]->end = ~0UL;
}

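/*
 * Handle an MMAP event whose filename denotes the kernel or one of its
 * modules: create a module map, or, for the kernel proper, create the kernel
 * maps, fix up their length and record the ref_reloc_sym taken from pgoff.
 */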
static int event__process_kernel_mmap(event_t *self,
                                      struct perf_session *session)
{
        struct map *map;
        char kmmap_prefix[PATH_MAX];
        struct machine *machine;
        enum dso_kernel_type kernel_type;
        bool is_kernel_mmap;

        machine = perf_session__findnew_machine(session, self->mmap.pid);
        if (!machine) {
                pr_err("Can't find id %d's machine\n", self->mmap.pid);
                goto out_problem;
        }

        machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
        if (machine__is_host(machine))
                kernel_type = DSO_TYPE_KERNEL;
        else
                kernel_type = DSO_TYPE_GUEST_KERNEL;

        is_kernel_mmap = memcmp(self->mmap.filename,
                                kmmap_prefix,
                                strlen(kmmap_prefix)) == 0;
        if (self->mmap.filename[0] == '/' ||
            (!is_kernel_mmap && self->mmap.filename[0] == '[')) {

                char short_module_name[1024];
                char *name, *dot;

                if (self->mmap.filename[0] == '/') {
                        name = strrchr(self->mmap.filename, '/');
                        if (name == NULL)
                                goto out_problem;

                        ++name; /* skip / */
                        dot = strrchr(name, '.');
                        if (dot == NULL)
                                goto out_problem;
                        snprintf(short_module_name, sizeof(short_module_name),
                                 "[%.*s]", (int)(dot - name), name);
                        strxfrchar(short_module_name, '-', '_');
                } else
                        strcpy(short_module_name, self->mmap.filename);

                map = machine__new_module(machine, self->mmap.start,
                                          self->mmap.filename);
                if (map == NULL)
                        goto out_problem;

                name = strdup(short_module_name);
                if (name == NULL)
                        goto out_problem;

                map->dso->short_name = name;
                map->end = map->start + self->mmap.len;
        } else if (is_kernel_mmap) {
                const char *symbol_name = (self->mmap.filename +
                                           strlen(kmmap_prefix));
                /*
                 * Should be there already, from the build-id table in
                 * the header.
                 */
                struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
                                                     kmmap_prefix);
                if (kernel == NULL)
                        goto out_problem;

                kernel->kernel = kernel_type;
                if (__machine__create_kernel_maps(machine, kernel) < 0)
                        goto out_problem;

                event_set_kernel_mmap_len(machine->vmlinux_maps, self);
                perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
                                                         symbol_name,
                                                         self->mmap.pgoff);
                if (machine__is_default_guest(machine)) {
                        /*
                         * preload dso of guest kernel and modules
                         */
                        dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
                                  NULL);
                }
        }
        return 0;
out_problem:
        return -1;
}

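/*
 * Entry point for PERF_RECORD_MMAP: kernel and guest-kernel mappings are
 * handed to event__process_kernel_mmap(), user space mappings get a new map
 * created and inserted into the owning thread.
 */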
int event__process_mmap(event_t *self, struct perf_session *session)
{
        struct machine *machine;
        struct thread *thread;
        struct map *map;
        u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        int ret = 0;

        dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
                    self->mmap.pid, self->mmap.tid, self->mmap.start,
                    self->mmap.len, self->mmap.pgoff, self->mmap.filename);

        if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
            cpumode == PERF_RECORD_MISC_KERNEL) {
                ret = event__process_kernel_mmap(self, session);
                if (ret < 0)
                        goto out_problem;
                return 0;
        }

        machine = perf_session__find_host_machine(session);
        if (machine == NULL)
                goto out_problem;
        thread = perf_session__findnew(session, self->mmap.pid);
        map = map__new(&machine->user_dsos, self->mmap.start,
                       self->mmap.len, self->mmap.pgoff,
                       self->mmap.pid, self->mmap.filename,
                       MAP__FUNCTION, session->cwd, session->cwdlen);

        if (thread == NULL || map == NULL)
                goto out_problem;

        thread__insert_map(thread, map);
        return 0;

out_problem:
        dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
        return 0;
}

int event__process_task(event_t *self, struct perf_session *session)
{
        struct thread *thread = perf_session__findnew(session, self->fork.pid);
        struct thread *parent = perf_session__findnew(session, self->fork.ppid);

        dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
                    self->fork.ppid, self->fork.ptid);
        /*
         * A thread clone will have the same PID for both parent and child.
         */
        if (thread == parent)
                return 0;

        if (self->header.type == PERF_RECORD_EXIT)
                return 0;

        if (thread == NULL || parent == NULL ||
            thread__fork(thread, parent) < 0) {
                dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
                return -1;
        }

        return 0;
}

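/*
 * Resolve @addr to a map: pick the host or guest machine and the kernel or
 * user map group from @cpumode, mark addresses we cannot attribute (e.g.
 * guest samples when guest support is off) as filtered, and convert the
 * address to a map-relative one when a map is found.
 */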
void thread__find_addr_map(struct thread *self,
                           struct perf_session *session, u8 cpumode,
                           enum map_type type, pid_t pid, u64 addr,
                           struct addr_location *al)
{
        struct map_groups *mg = &self->mg;
        struct machine *machine = NULL;

        al->thread = self;
        al->addr = addr;
        al->cpumode = cpumode;
        al->filtered = false;

        if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
                al->level = 'k';
                machine = perf_session__find_host_machine(session);
                if (machine == NULL) {
                        al->map = NULL;
                        return;
                }
                mg = &machine->kmaps;
        } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
                al->level = '.';
                machine = perf_session__find_host_machine(session);
        } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
                al->level = 'g';
                machine = perf_session__find_machine(session, pid);
                if (machine == NULL) {
                        al->map = NULL;
                        return;
                }
                mg = &machine->kmaps;
        } else {
                /*
                 * 'u' means guest os user space.
                 * TODO: We don't support guest user space. Might support it later.
                 */
                if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
                        al->level = 'u';
                else
                        al->level = 'H';
                al->map = NULL;

                if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
                     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
                    !perf_guest)
                        al->filtered = true;
                if ((cpumode == PERF_RECORD_MISC_USER ||
                     cpumode == PERF_RECORD_MISC_KERNEL) &&
                    !perf_host)
                        al->filtered = true;

                return;
        }
try_again:
        al->map = map_groups__find(mg, type, al->addr);
        if (al->map == NULL) {
                /*
                 * If this is outside of all known maps, and is a negative
                 * address, try to look it up in the kernel dso, as it might be
                 * a vsyscall or vdso (which executes in user-mode).
                 *
                 * XXX This is nasty, we should have a symbol list in the
                 * "[vdso]" dso, but for now let's use the old trick of looking
                 * in the whole kernel symbol list.
                 */
                if ((long long)al->addr < 0 &&
                    cpumode == PERF_RECORD_MISC_KERNEL &&
                    machine && mg != &machine->kmaps) {
                        mg = &machine->kmaps;
                        goto try_again;
                }
        } else
                al->addr = al->map->map_ip(al->map, al->addr);
}

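/*
 * Like thread__find_addr_map(), but also resolve the address to a symbol in
 * the map that was found, if any, honouring @filter.
 */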
void thread__find_addr_location(struct thread *self,
                                struct perf_session *session, u8 cpumode,
                                enum map_type type, pid_t pid, u64 addr,
                                struct addr_location *al,
                                symbol_filter_t filter)
{
        thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
        if (al->map != NULL)
                al->sym = map__find_symbol(al->map, al->addr, filter);
        else
                al->sym = NULL;
}

static void dso__calc_col_width(struct dso *self)
{
        if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            (!symbol_conf.dso_list ||
             strlist__has_entry(symbol_conf.dso_list, self->name))) {
                u16 slen = self->short_name_len;
                if (verbose)
                        slen = self->long_name_len;
                if (dsos__col_width < slen)
                        dsos__col_width = slen;
        }

        self->slen_calculated = 1;
}

int event__preprocess_sample(const event_t *self, struct perf_session *session,
                             struct addr_location *al, symbol_filter_t filter)
{
        u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
        struct thread *thread = perf_session__findnew(session, self->ip.pid);

        if (thread == NULL)
                return -1;

        if (symbol_conf.comm_list &&
            !strlist__has_entry(symbol_conf.comm_list, thread->comm))
                goto out_filtered;

        dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
        /*
         * Have we already created the kernel maps for the host machine?
         *
         * This should have happened earlier, when we processed the kernel MMAP
         * events, but for older perf.data files there was no such thing, so do
         * it now.
         */
        if (cpumode == PERF_RECORD_MISC_KERNEL &&
            session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
                machine__create_kernel_maps(&session->host_machine);

        thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
                              self->ip.pid, self->ip.ip, al);
        dump_printf(" ...... dso: %s\n",
                    al->map ? al->map->dso->long_name :
                        al->level == 'H' ? "[hypervisor]" : "<not found>");
        al->sym = NULL;

        if (al->map) {
                if (symbol_conf.dso_list &&
                    (!al->map || !al->map->dso ||
                     !(strlist__has_entry(symbol_conf.dso_list,
                                          al->map->dso->short_name) ||
                       (al->map->dso->short_name != al->map->dso->long_name &&
                        strlist__has_entry(symbol_conf.dso_list,
                                           al->map->dso->long_name)))))
                        goto out_filtered;
                /*
                 * We have to do this here as we may have a dso with no symbol
                 * hit that has a name longer than the ones with symbols
                 * sampled.
                 */
                if (!sort_dso.elide && !al->map->dso->slen_calculated)
                        dso__calc_col_width(al->map->dso);

                al->sym = map__find_symbol(al->map, al->addr, filter);
        } else {
                const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

                if (dsos__col_width < unresolved_col_width &&
                    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
                    !symbol_conf.dso_list)
                        dsos__col_width = unresolved_col_width;
        }

        if (symbol_conf.sym_list && al->sym &&
            !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
                goto out_filtered;

        return 0;

out_filtered:
        al->filtered = true;
        return 0;
}

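/*
 * Decode a raw sample payload: walk the u64 array in the order implied by the
 * sample_type bits in @type, filling in @data field by field.
 * PERF_SAMPLE_READ is not handled and makes the parse fail.
 */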
int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
{
        u64 *array = event->sample.array;

        if (type & PERF_SAMPLE_IP) {
                data->ip = event->ip.ip;
                array++;
        }

        if (type & PERF_SAMPLE_TID) {
                u32 *p = (u32 *)array;
                data->pid = p[0];
                data->tid = p[1];
                array++;
        }

        if (type & PERF_SAMPLE_TIME) {
                data->time = *array;
                array++;
        }

        if (type & PERF_SAMPLE_ADDR) {
                data->addr = *array;
                array++;
        }

        data->id = -1ULL;
        if (type & PERF_SAMPLE_ID) {
                data->id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                data->stream_id = *array;
                array++;
        }

        if (type & PERF_SAMPLE_CPU) {
                u32 *p = (u32 *)array;
                data->cpu = *p;
                array++;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                data->period = *array;
                array++;
        }

        if (type & PERF_SAMPLE_READ) {
                pr_debug("PERF_SAMPLE_READ is unsupported for now\n");
                return -1;
        }

        if (type & PERF_SAMPLE_CALLCHAIN) {
                data->callchain = (struct ip_callchain *)array;
                array += 1 + data->callchain->nr;
        }

        if (type & PERF_SAMPLE_RAW) {
                u32 *p = (u32 *)array;
                data->raw_size = *p;
                p++;
                data->raw_data = p;
        }

        return 0;
}