#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"

const char *event__name[] = {
	[0]                               = "TOTAL",
	[PERF_RECORD_MMAP]                = "MMAP",
	[PERF_RECORD_LOST]                = "LOST",
	[PERF_RECORD_COMM]                = "COMM",
	[PERF_RECORD_EXIT]                = "EXIT",
	[PERF_RECORD_THROTTLE]            = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]          = "UNTHROTTLE",
	[PERF_RECORD_FORK]                = "FORK",
	[PERF_RECORD_READ]                = "READ",
	[PERF_RECORD_SAMPLE]              = "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]         = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]   = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]     = "BUILD_ID",
};

static pid_t event__synthesize_comm(pid_t pid, int full,
				    event__handler_t process,
				    struct perf_session *session)
{
	event_t ev;
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
out_race:
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	memset(&ev.comm, 0, sizeof(ev.comm));
	while (!ev.comm.comm[0] || !ev.comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL)
			goto out_failure;

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			memcpy(ev.comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = ev.comm.pid = atoi(tgids);
		}
	}

	ev.comm.header.type = PERF_RECORD_COMM;
	size = ALIGN(size, sizeof(u64));
	ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);

	if (!full) {
		ev.comm.tid = pid;

		process(&ev, session);
		goto out_fclose;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL)
		goto out_race;

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		ev.comm.tid = pid;

		process(&ev, session);
	}
	closedir(tasks);

out_fclose:
	fclose(fp);
	return tgid;

out_failure:
	pr_warning("couldn't get COMM and tgid, malformed %s\n", filename);
	return -1;
}

static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
					 event__handler_t process,
					 struct perf_session *session)
{
	char filename[PATH_MAX];
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	while (1) {
		char bf[BUFSIZ], *pbf = bf;
		event_t ev = {
			.header = {
				.type = PERF_RECORD_MMAP,
				/*
				 * Just like the kernel, see __perf_event_mmap
				 * in kernel/perf_event.c
				 */
				.misc = PERF_RECORD_MISC_USER,
			},
		};
		int n;
		size_t size;
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = hex2u64(pbf, &ev.mmap.start);
		if (n < 0)
			continue;
		pbf += n + 1;
		n = hex2u64(pbf, &ev.mmap.len);
		if (n < 0)
			continue;
		pbf += n + 3;
		if (*pbf == 'x') { /* vm_exec */
			char *execname = strchr(bf, '/');

			/* Catch VDSO */
			if (execname == NULL)
				execname = strstr(bf, "[vdso]");

			if (execname == NULL)
				continue;

			pbf += 3;
			n = hex2u64(pbf, &ev.mmap.pgoff);

			size = strlen(execname);
			execname[size - 1] = '\0'; /* Remove \n */
			memcpy(ev.mmap.filename, execname, size);
			size = ALIGN(size, sizeof(u64));
			ev.mmap.len -= ev.mmap.start;
			ev.mmap.header.size = (sizeof(ev.mmap) -
					       (sizeof(ev.mmap.filename) - size));
			ev.mmap.pid = tgid;
			ev.mmap.tid = pid;

			process(&ev, session);
		}
	}

	fclose(fp);
	return 0;
}

int event__synthesize_modules(event__handler_t process,
			      struct perf_session *session,
			      struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	u16 misc;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		misc = PERF_RECORD_MISC_KERNEL;
	else
		misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		event_t ev;
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		memset(&ev, 0, sizeof(ev));
		ev.mmap.header.misc = misc;
		ev.mmap.header.type = PERF_RECORD_MMAP;
		ev.mmap.header.size = (sizeof(ev.mmap) -
				       (sizeof(ev.mmap.filename) - size));
		ev.mmap.start = pos->start;
		ev.mmap.len = pos->end - pos->start;
		ev.mmap.pid = machine->pid;

		memcpy(ev.mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(&ev, session);
	}

	return 0;
}

int event__synthesize_thread(pid_t pid, event__handler_t process,
			     struct perf_session *session)
{
	pid_t tgid = event__synthesize_comm(pid, 1, process, session);
	if (tgid == -1)
		return -1;
	return event__synthesize_mmap_events(pid, tgid, process, session);
}

void event__synthesize_threads(event__handler_t process,
			       struct perf_session *session)
{
	DIR *proc;
	struct dirent dirent, *next;

	proc = opendir("/proc");

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		event__synthesize_thread(pid, process, session);
	}

	closedir(proc);
}

struct process_symbol_args {
	const char *name;
	u64	    start;
};

static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

int event__synthesize_kernel_mmap(event__handler_t process,
				  struct perf_session *session,
				  struct machine *machine,
				  const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;

	event_t ev = {
		.header = {
			.type = PERF_RECORD_MMAP,
		},
	};
	/*
	 * We should get this from /sys/kernel/sections/.text, but until that
	 * is available use this, and after it is, use this as a fallback for
	 * older kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		ev.header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
		return -ENOENT;

	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	ev.mmap.header.size = (sizeof(ev.mmap) -
			       (sizeof(ev.mmap.filename) - size));
	ev.mmap.pgoff = args.start;
	ev.mmap.start = map->start;
	ev.mmap.len = map->end - ev.mmap.start;
	ev.mmap.pid = machine->pid;

	return process(&ev, session);
}

static void thread__comm_adjust(struct thread *self, struct hists *hists)
{
	char *comm = self->comm;

	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.comm_list ||
	     strlist__has_entry(symbol_conf.comm_list, comm))) {
		u16 slen = strlen(comm);

		if (hists__new_col_len(hists, HISTC_COMM, slen))
			hists__set_col_len(hists, HISTC_THREAD, slen + 6);
	}
}

static int thread__set_comm_adjust(struct thread *self, const char *comm,
				   struct hists *hists)
{
	int ret = thread__set_comm(self, comm);

	if (ret)
		return ret;

	thread__comm_adjust(self, hists);

	return 0;
}

int event__process_comm(event_t *self, struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->comm.tid);

	dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);

	if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm,
						      &session->hists)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int event__process_lost(event_t *self, struct perf_session *session)
{
	dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
	session->hists.stats.total_lost += self->lost.lost;
	return 0;
}

static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
{
	maps[MAP__FUNCTION]->start = self->mmap.start;
	maps[MAP__FUNCTION]->end   = self->mmap.start + self->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data
	 * file came with a zero sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0UL;
}

static int event__process_kernel_mmap(event_t *self,
				      struct perf_session *session)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	struct machine *machine;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine = perf_session__findnew_machine(session, self->mmap.pid);
	if (!machine) {
		pr_err("Can't find id %d's machine\n", self->mmap.pid);
		goto out_problem;
	}

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(self->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (self->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && self->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (self->mmap.filename[0] == '/') {
			name = strrchr(self->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, self->mmap.filename);

		map = machine__new_module(machine, self->mmap.start,
					  self->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->dso->sname_alloc = 1;
		map->end = map->start + self->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (self->mmap.filename +
					   strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		event_set_kernel_mmap_len(machine->vmlinux_maps, self);
		perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 self->mmap.pgoff);
		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

int event__process_mmap(event_t *self, struct perf_session *session)
{
	struct machine *machine;
	struct thread *thread;
	struct map *map;
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
		    self->mmap.pid, self->mmap.tid, self->mmap.start,
		    self->mmap.len, self->mmap.pgoff, self->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = event__process_kernel_mmap(self, session);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	machine = perf_session__find_host_machine(session);
	if (machine == NULL)
		goto out_problem;
	thread = perf_session__findnew(session, self->mmap.pid);
	if (thread == NULL)
		goto out_problem;
	map = map__new(&machine->user_dsos, self->mmap.start,
		       self->mmap.len, self->mmap.pgoff,
		       self->mmap.pid, self->mmap.filename,
		       MAP__FUNCTION);
	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

int event__process_task(event_t *self, struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, self->fork.tid);
	struct thread *parent = perf_session__findnew(session, self->fork.ptid);

	dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
		    self->fork.ppid, self->fork.ptid);

	if (self->header.type == PERF_RECORD_EXIT) {
		perf_session__remove_thread(session, thread);
		return 0;
	}

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int event__process(event_t *event, struct perf_session *session)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		event__process_comm(event, session);
		break;
	case PERF_RECORD_MMAP:
		event__process_mmap(event, session);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		event__process_task(event, session);
		break;
	default:
		break;
	}

	return 0;
}

void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	struct machine *machine = NULL;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		machine = perf_session__find_host_machine(session);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
		machine = perf_session__find_host_machine(session);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		machine = perf_session__find_machine(session, pid);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest OS user space.
		 * TODO: We don't support guest user space yet, but might
		 * support it later.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now let's use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *self,
				struct perf_session *session, u8 cpumode,
				enum map_type type, pid_t pid, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}

static void dso__calc_col_width(struct dso *self, struct hists *hists)
{
	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    (!symbol_conf.dso_list ||
	     strlist__has_entry(symbol_conf.dso_list, self->name))) {
		u16 slen = dso__name_len(self);
		hists__new_col_len(hists, HISTC_DSO, slen);
	}

	self->slen_calculated = 1;
}

int event__preprocess_sample(const event_t *self, struct perf_session *session,
			     struct addr_location *al, struct sample_data *data,
			     symbol_filter_t filter)
{
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;

	event__parse_sample(self, session->sample_type, data);

	dump_printf("(IP, %d): %d/%d: %#Lx period: %Ld cpu:%d\n",
		    self->header.misc, data->pid, data->tid, data->ip,
		    data->period, data->cpu);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN) {
		unsigned int i;

		dump_printf("... chain: nr:%Lu\n", data->callchain->nr);

		if (!ip_callchain__valid(data->callchain, self)) {
			pr_debug("call-chain problem with event, "
				 "skipping it.\n");
			goto out_filtered;
		}

		if (dump_trace) {
			for (i = 0; i < data->callchain->nr; i++)
				dump_printf("..... %2d: %016Lx\n",
					    i, data->callchain->ips[i]);
		}
	}
	thread = perf_session__findnew(session, self->ip.pid);
	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for the host machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(&session->host_machine);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      self->ip.pid, self->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;
	al->cpu = data->cpu;

	if (al->map) {
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;
		/*
		 * We have to do this here as we may have a dso with no symbol
		 * hit that has a name longer than the ones with symbols
		 * sampled.
		 */
		if (!sort_dso.elide && !al->map->dso->slen_calculated)
			dso__calc_col_width(al->map->dso, &session->hists);

		al->sym = map__find_symbol(al->map, al->addr, filter);
	} else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (hists__col_len(&session->hists, HISTC_DSO) < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			hists__set_col_len(&session->hists, HISTC_DSO,
					   unresolved_col_width);
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}

int event__parse_sample(const event_t *event, u64 type, struct sample_data *data)
{
	const u64 *array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	} else
		data->cpu = -1;

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		pr_debug("PERF_SAMPLE_READ is unsupported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}