1 #include <linux/types.h> 2 #include "event.h" 3 #include "debug.h" 4 #include "session.h" 5 #include "sort.h" 6 #include "string.h" 7 #include "strlist.h" 8 #include "thread.h" 9 10 const char *event__name[] = { 11 [0] = "TOTAL", 12 [PERF_RECORD_MMAP] = "MMAP", 13 [PERF_RECORD_LOST] = "LOST", 14 [PERF_RECORD_COMM] = "COMM", 15 [PERF_RECORD_EXIT] = "EXIT", 16 [PERF_RECORD_THROTTLE] = "THROTTLE", 17 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", 18 [PERF_RECORD_FORK] = "FORK", 19 [PERF_RECORD_READ] = "READ", 20 [PERF_RECORD_SAMPLE] = "SAMPLE", 21 [PERF_RECORD_HEADER_ATTR] = "ATTR", 22 [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE", 23 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA", 24 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID", 25 }; 26 27 static pid_t event__synthesize_comm(pid_t pid, int full, 28 event__handler_t process, 29 struct perf_session *session) 30 { 31 event_t ev; 32 char filename[PATH_MAX]; 33 char bf[BUFSIZ]; 34 FILE *fp; 35 size_t size = 0; 36 DIR *tasks; 37 struct dirent dirent, *next; 38 pid_t tgid = 0; 39 40 snprintf(filename, sizeof(filename), "/proc/%d/status", pid); 41 42 fp = fopen(filename, "r"); 43 if (fp == NULL) { 44 out_race: 45 /* 46 * We raced with a task exiting - just return: 47 */ 48 pr_debug("couldn't open %s\n", filename); 49 return 0; 50 } 51 52 memset(&ev.comm, 0, sizeof(ev.comm)); 53 while (!ev.comm.comm[0] || !ev.comm.pid) { 54 if (fgets(bf, sizeof(bf), fp) == NULL) 55 goto out_failure; 56 57 if (memcmp(bf, "Name:", 5) == 0) { 58 char *name = bf + 5; 59 while (*name && isspace(*name)) 60 ++name; 61 size = strlen(name) - 1; 62 memcpy(ev.comm.comm, name, size++); 63 } else if (memcmp(bf, "Tgid:", 5) == 0) { 64 char *tgids = bf + 5; 65 while (*tgids && isspace(*tgids)) 66 ++tgids; 67 tgid = ev.comm.pid = atoi(tgids); 68 } 69 } 70 71 ev.comm.header.type = PERF_RECORD_COMM; 72 size = ALIGN(size, sizeof(u64)); 73 ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size); 74 75 if (!full) { 76 ev.comm.tid = pid; 77 78 
process(&ev, session); 79 goto out_fclose; 80 } 81 82 snprintf(filename, sizeof(filename), "/proc/%d/task", pid); 83 84 tasks = opendir(filename); 85 if (tasks == NULL) 86 goto out_race; 87 88 while (!readdir_r(tasks, &dirent, &next) && next) { 89 char *end; 90 pid = strtol(dirent.d_name, &end, 10); 91 if (*end) 92 continue; 93 94 ev.comm.tid = pid; 95 96 process(&ev, session); 97 } 98 closedir(tasks); 99 100 out_fclose: 101 fclose(fp); 102 return tgid; 103 104 out_failure: 105 pr_warning("couldn't get COMM and pgid, malformed %s\n", filename); 106 return -1; 107 } 108 109 static int event__synthesize_mmap_events(pid_t pid, pid_t tgid, 110 event__handler_t process, 111 struct perf_session *session) 112 { 113 char filename[PATH_MAX]; 114 FILE *fp; 115 116 snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); 117 118 fp = fopen(filename, "r"); 119 if (fp == NULL) { 120 /* 121 * We raced with a task exiting - just return: 122 */ 123 pr_debug("couldn't open %s\n", filename); 124 return -1; 125 } 126 127 while (1) { 128 char bf[BUFSIZ], *pbf = bf; 129 event_t ev = { 130 .header = { 131 .type = PERF_RECORD_MMAP, 132 /* 133 * Just like the kernel, see __perf_event_mmap 134 * in kernel/perf_event.c 135 */ 136 .misc = PERF_RECORD_MISC_USER, 137 }, 138 }; 139 int n; 140 size_t size; 141 if (fgets(bf, sizeof(bf), fp) == NULL) 142 break; 143 144 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ 145 n = hex2u64(pbf, &ev.mmap.start); 146 if (n < 0) 147 continue; 148 pbf += n + 1; 149 n = hex2u64(pbf, &ev.mmap.len); 150 if (n < 0) 151 continue; 152 pbf += n + 3; 153 if (*pbf == 'x') { /* vm_exec */ 154 u64 vm_pgoff; 155 char *execname = strchr(bf, '/'); 156 157 /* Catch VDSO */ 158 if (execname == NULL) 159 execname = strstr(bf, "[vdso]"); 160 161 if (execname == NULL) 162 continue; 163 164 pbf += 3; 165 n = hex2u64(pbf, &vm_pgoff); 166 /* pgoff is in bytes, not pages */ 167 if (n >= 0) 168 ev.mmap.pgoff = vm_pgoff << getpagesize(); 169 else 170 ev.mmap.pgoff = 0; 
171 172 size = strlen(execname); 173 execname[size - 1] = '\0'; /* Remove \n */ 174 memcpy(ev.mmap.filename, execname, size); 175 size = ALIGN(size, sizeof(u64)); 176 ev.mmap.len -= ev.mmap.start; 177 ev.mmap.header.size = (sizeof(ev.mmap) - 178 (sizeof(ev.mmap.filename) - size)); 179 ev.mmap.pid = tgid; 180 ev.mmap.tid = pid; 181 182 process(&ev, session); 183 } 184 } 185 186 fclose(fp); 187 return 0; 188 } 189 190 int event__synthesize_modules(event__handler_t process, 191 struct perf_session *session, 192 struct machine *machine) 193 { 194 struct rb_node *nd; 195 struct map_groups *kmaps = &machine->kmaps; 196 u16 misc; 197 198 /* 199 * kernel uses 0 for user space maps, see kernel/perf_event.c 200 * __perf_event_mmap 201 */ 202 if (machine__is_host(machine)) 203 misc = PERF_RECORD_MISC_KERNEL; 204 else 205 misc = PERF_RECORD_MISC_GUEST_KERNEL; 206 207 for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]); 208 nd; nd = rb_next(nd)) { 209 event_t ev; 210 size_t size; 211 struct map *pos = rb_entry(nd, struct map, rb_node); 212 213 if (pos->dso->kernel) 214 continue; 215 216 size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); 217 memset(&ev, 0, sizeof(ev)); 218 ev.mmap.header.misc = misc; 219 ev.mmap.header.type = PERF_RECORD_MMAP; 220 ev.mmap.header.size = (sizeof(ev.mmap) - 221 (sizeof(ev.mmap.filename) - size)); 222 ev.mmap.start = pos->start; 223 ev.mmap.len = pos->end - pos->start; 224 ev.mmap.pid = machine->pid; 225 226 memcpy(ev.mmap.filename, pos->dso->long_name, 227 pos->dso->long_name_len + 1); 228 process(&ev, session); 229 } 230 231 return 0; 232 } 233 234 int event__synthesize_thread(pid_t pid, event__handler_t process, 235 struct perf_session *session) 236 { 237 pid_t tgid = event__synthesize_comm(pid, 1, process, session); 238 if (tgid == -1) 239 return -1; 240 return event__synthesize_mmap_events(pid, tgid, process, session); 241 } 242 243 void event__synthesize_threads(event__handler_t process, 244 struct perf_session *session) 245 { 246 DIR *proc; 
247 struct dirent dirent, *next; 248 249 proc = opendir("/proc"); 250 251 while (!readdir_r(proc, &dirent, &next) && next) { 252 char *end; 253 pid_t pid = strtol(dirent.d_name, &end, 10); 254 255 if (*end) /* only interested in proper numerical dirents */ 256 continue; 257 258 event__synthesize_thread(pid, process, session); 259 } 260 261 closedir(proc); 262 } 263 264 struct process_symbol_args { 265 const char *name; 266 u64 start; 267 }; 268 269 static int find_symbol_cb(void *arg, const char *name, char type, u64 start) 270 { 271 struct process_symbol_args *args = arg; 272 273 /* 274 * Must be a function or at least an alias, as in PARISC64, where "_text" is 275 * an 'A' to the same address as "_stext". 276 */ 277 if (!(symbol_type__is_a(type, MAP__FUNCTION) || 278 type == 'A') || strcmp(name, args->name)) 279 return 0; 280 281 args->start = start; 282 return 1; 283 } 284 285 int event__synthesize_kernel_mmap(event__handler_t process, 286 struct perf_session *session, 287 struct machine *machine, 288 const char *symbol_name) 289 { 290 size_t size; 291 const char *filename, *mmap_name; 292 char path[PATH_MAX]; 293 char name_buff[PATH_MAX]; 294 struct map *map; 295 296 event_t ev = { 297 .header = { 298 .type = PERF_RECORD_MMAP, 299 }, 300 }; 301 /* 302 * We should get this from /sys/kernel/sections/.text, but till that is 303 * available use this, and after it is use this as a fallback for older 304 * kernels. 
305 */ 306 struct process_symbol_args args = { .name = symbol_name, }; 307 308 mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff)); 309 if (machine__is_host(machine)) { 310 /* 311 * kernel uses PERF_RECORD_MISC_USER for user space maps, 312 * see kernel/perf_event.c __perf_event_mmap 313 */ 314 ev.header.misc = PERF_RECORD_MISC_KERNEL; 315 filename = "/proc/kallsyms"; 316 } else { 317 ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 318 if (machine__is_default_guest(machine)) 319 filename = (char *) symbol_conf.default_guest_kallsyms; 320 else { 321 sprintf(path, "%s/proc/kallsyms", machine->root_dir); 322 filename = path; 323 } 324 } 325 326 if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) 327 return -ENOENT; 328 329 map = machine->vmlinux_maps[MAP__FUNCTION]; 330 size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename), 331 "%s%s", mmap_name, symbol_name) + 1; 332 size = ALIGN(size, sizeof(u64)); 333 ev.mmap.header.size = (sizeof(ev.mmap) - 334 (sizeof(ev.mmap.filename) - size)); 335 ev.mmap.pgoff = args.start; 336 ev.mmap.start = map->start; 337 ev.mmap.len = map->end - ev.mmap.start; 338 ev.mmap.pid = machine->pid; 339 340 return process(&ev, session); 341 } 342 343 static void thread__comm_adjust(struct thread *self) 344 { 345 char *comm = self->comm; 346 347 if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep && 348 (!symbol_conf.comm_list || 349 strlist__has_entry(symbol_conf.comm_list, comm))) { 350 unsigned int slen = strlen(comm); 351 352 if (slen > comms__col_width) { 353 comms__col_width = slen; 354 threads__col_width = slen + 6; 355 } 356 } 357 } 358 359 static int thread__set_comm_adjust(struct thread *self, const char *comm) 360 { 361 int ret = thread__set_comm(self, comm); 362 363 if (ret) 364 return ret; 365 366 thread__comm_adjust(self); 367 368 return 0; 369 } 370 371 int event__process_comm(event_t *self, struct perf_session *session) 372 { 373 struct thread *thread = perf_session__findnew(session, 
self->comm.tid); 374 375 dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid); 376 377 if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) { 378 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n"); 379 return -1; 380 } 381 382 return 0; 383 } 384 385 int event__process_lost(event_t *self, struct perf_session *session) 386 { 387 dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost); 388 session->hists.stats.total_lost += self->lost.lost; 389 return 0; 390 } 391 392 static void event_set_kernel_mmap_len(struct map **maps, event_t *self) 393 { 394 maps[MAP__FUNCTION]->start = self->mmap.start; 395 maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len; 396 /* 397 * Be a bit paranoid here, some perf.data file came with 398 * a zero sized synthesized MMAP event for the kernel. 399 */ 400 if (maps[MAP__FUNCTION]->end == 0) 401 maps[MAP__FUNCTION]->end = ~0UL; 402 } 403 404 static int event__process_kernel_mmap(event_t *self, 405 struct perf_session *session) 406 { 407 struct map *map; 408 char kmmap_prefix[PATH_MAX]; 409 struct machine *machine; 410 enum dso_kernel_type kernel_type; 411 bool is_kernel_mmap; 412 413 machine = perf_session__findnew_machine(session, self->mmap.pid); 414 if (!machine) { 415 pr_err("Can't find id %d's machine\n", self->mmap.pid); 416 goto out_problem; 417 } 418 419 machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); 420 if (machine__is_host(machine)) 421 kernel_type = DSO_TYPE_KERNEL; 422 else 423 kernel_type = DSO_TYPE_GUEST_KERNEL; 424 425 is_kernel_mmap = memcmp(self->mmap.filename, 426 kmmap_prefix, 427 strlen(kmmap_prefix)) == 0; 428 if (self->mmap.filename[0] == '/' || 429 (!is_kernel_mmap && self->mmap.filename[0] == '[')) { 430 431 char short_module_name[1024]; 432 char *name, *dot; 433 434 if (self->mmap.filename[0] == '/') { 435 name = strrchr(self->mmap.filename, '/'); 436 if (name == NULL) 437 goto out_problem; 438 439 ++name; /* skip / */ 440 dot = 
strrchr(name, '.'); 441 if (dot == NULL) 442 goto out_problem; 443 snprintf(short_module_name, sizeof(short_module_name), 444 "[%.*s]", (int)(dot - name), name); 445 strxfrchar(short_module_name, '-', '_'); 446 } else 447 strcpy(short_module_name, self->mmap.filename); 448 449 map = machine__new_module(machine, self->mmap.start, 450 self->mmap.filename); 451 if (map == NULL) 452 goto out_problem; 453 454 name = strdup(short_module_name); 455 if (name == NULL) 456 goto out_problem; 457 458 map->dso->short_name = name; 459 map->end = map->start + self->mmap.len; 460 } else if (is_kernel_mmap) { 461 const char *symbol_name = (self->mmap.filename + 462 strlen(kmmap_prefix)); 463 /* 464 * Should be there already, from the build-id table in 465 * the header. 466 */ 467 struct dso *kernel = __dsos__findnew(&machine->kernel_dsos, 468 kmmap_prefix); 469 if (kernel == NULL) 470 goto out_problem; 471 472 kernel->kernel = kernel_type; 473 if (__machine__create_kernel_maps(machine, kernel) < 0) 474 goto out_problem; 475 476 event_set_kernel_mmap_len(machine->vmlinux_maps, self); 477 perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, 478 symbol_name, 479 self->mmap.pgoff); 480 if (machine__is_default_guest(machine)) { 481 /* 482 * preload dso of guest kernel and modules 483 */ 484 dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION], 485 NULL); 486 } 487 } 488 return 0; 489 out_problem: 490 return -1; 491 } 492 493 int event__process_mmap(event_t *self, struct perf_session *session) 494 { 495 struct machine *machine; 496 struct thread *thread; 497 struct map *map; 498 u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 499 int ret = 0; 500 501 dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n", 502 self->mmap.pid, self->mmap.tid, self->mmap.start, 503 self->mmap.len, self->mmap.pgoff, self->mmap.filename); 504 505 if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL || 506 cpumode == PERF_RECORD_MISC_KERNEL) { 507 ret = event__process_kernel_mmap(self, session); 
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	machine = perf_session__find_host_machine(session);
	if (machine == NULL)
		goto out_problem;
	thread = perf_session__findnew(session, self->mmap.pid);
	map = map__new(&machine->user_dsos, self->mmap.start,
		       self->mmap.len, self->mmap.pgoff,
		       self->mmap.pid, self->mmap.filename,
		       MAP__FUNCTION, session->cwd, session->cwdlen);

	if (thread == NULL || map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	/* Non-fatal: log and keep processing the stream. */
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

/*
 * Handle PERF_RECORD_FORK / PERF_RECORD_EXIT.  EXIT is a no-op here;
 * FORK links the child thread to its parent.  Returns -1 on failure.
 */
int event__process_task(event_t *self, struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->fork.tid);
	struct thread *parent = perf_session__findnew(session, self->fork.ptid);

	dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
		    self->fork.ppid, self->fork.ptid);

	/* EXIT records carry no work to do here. */
	if (self->header.type == PERF_RECORD_EXIT)
		return 0;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

/*
 * Resolve @addr to a map in the appropriate map group, selected by
 * @cpumode (host kernel/user, guest kernel) and the perf_host /
 * perf_guest settings.  Fills al->level ('k', '.', 'g', 'u' or 'H'),
 * al->map (NULL when unresolved), al->addr (map-relative on success)
 * and al->filtered.
 */
void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	struct machine *machine = NULL;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		machine = perf_session__find_host_machine(session);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
		machine = perf_session__find_host_machine(session);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		machine = perf_session__find_machine(session, pid);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support late.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		/* Mark samples from the "other side" as filtered. */
		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}

/*
 * Like thread__find_addr_map(), but also resolve the symbol inside the
 * found map (al->sym is NULL when no map was found).
 */
void thread__find_addr_location(struct thread *self,
				struct perf_session *session, u8 cpumode,
				enum map_type type, pid_t pid, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}

/*
 * Widen the dso display column for this dso if needed (long name when
 * verbose, short name otherwise) and mark the width as calculated.
 */
static void dso__calc_col_width(struct dso *self)
{
	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.dso_list ||
	     strlist__has_entry(symbol_conf.dso_list, self->name))) {
		u16 slen = self->short_name_len;
		if (verbose)
			slen = self->long_name_len;
		if (dsos__col_width < slen)
			dsos__col_width = slen;
	}

	self->slen_calculated = 1;
}

/*
 * Resolve a sample event's ip to thread/map/symbol in @al, applying the
 * comm/dso/sym filter lists (filtered samples get al->filtered = true).
 * Returns -1 only when the thread cannot be found/created.
 */
int event__preprocess_sample(const event_t *self, struct perf_session *session,
			     struct addr_location *al, symbol_filter_t filter)
{
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = perf_session__findnew(session, self->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for the host machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(&session->host_machine);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      self->ip.pid, self->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;

	if (al->map) {
		/* dso filter matches either the short or the long name. */
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;
		/*
		 * We have to do this here as we may have a dso with no symbol
		 * hit that has a name longer than the ones with symbols
		 * sampled.
		 */
		if (!sort_dso.elide && !al->map->dso->slen_calculated)
			dso__calc_col_width(al->map->dso);

		al->sym = map__find_symbol(al->map, al->addr, filter);
	} else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (dsos__col_width < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			dsos__col_width = unresolved_col_width;
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}

/*
 * Unpack the variable-layout sample record into @data.  The fields
 * appear in the array in a fixed order, gated by the bits set in @type,
 * so these checks must stay in this exact order.
 */
int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
{
	u64 *array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		/* pid and tid are packed as two u32s in one u64 slot. */
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	/* Default id when PERF_SAMPLE_ID is absent. */
	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		/* cpu occupies the low u32 of this u64 slot. */
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		/* Skip the nr field plus nr callchain entries. */
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		/* A u32 size header precedes the raw payload. */
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}