xref: /linux/tools/perf/util/event.c (revision 36239c6704b71da7fb8e2a9429e159a84d0c5a3e)
1 #include <linux/types.h>
2 #include "event.h"
3 #include "debug.h"
4 #include "session.h"
5 #include "sort.h"
6 #include "string.h"
7 #include "strlist.h"
8 #include "thread.h"
9 
/*
 * Human-readable names for the perf record types, indexed by the
 * PERF_RECORD_* value. Slot 0 is not a real record type; it is used
 * for the aggregate "TOTAL" row when printing event statistics.
 */
const char *event__name[] = {
	[0]			 = "TOTAL",
	[PERF_RECORD_MMAP]	 = "MMAP",
	[PERF_RECORD_LOST]	 = "LOST",
	[PERF_RECORD_COMM]	 = "COMM",
	[PERF_RECORD_EXIT]	 = "EXIT",
	[PERF_RECORD_THROTTLE]	 = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]	 = "FORK",
	[PERF_RECORD_READ]	 = "READ",
	[PERF_RECORD_SAMPLE]	 = "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]	 = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]	 = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	 = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]	 = "BUILD_ID",
];
26 
27 static pid_t event__synthesize_comm(pid_t pid, int full,
28 				    event__handler_t process,
29 				    struct perf_session *session)
30 {
31 	event_t ev;
32 	char filename[PATH_MAX];
33 	char bf[BUFSIZ];
34 	FILE *fp;
35 	size_t size = 0;
36 	DIR *tasks;
37 	struct dirent dirent, *next;
38 	pid_t tgid = 0;
39 
40 	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
41 
42 	fp = fopen(filename, "r");
43 	if (fp == NULL) {
44 out_race:
45 		/*
46 		 * We raced with a task exiting - just return:
47 		 */
48 		pr_debug("couldn't open %s\n", filename);
49 		return 0;
50 	}
51 
52 	memset(&ev.comm, 0, sizeof(ev.comm));
53 	while (!ev.comm.comm[0] || !ev.comm.pid) {
54 		if (fgets(bf, sizeof(bf), fp) == NULL)
55 			goto out_failure;
56 
57 		if (memcmp(bf, "Name:", 5) == 0) {
58 			char *name = bf + 5;
59 			while (*name && isspace(*name))
60 				++name;
61 			size = strlen(name) - 1;
62 			memcpy(ev.comm.comm, name, size++);
63 		} else if (memcmp(bf, "Tgid:", 5) == 0) {
64 			char *tgids = bf + 5;
65 			while (*tgids && isspace(*tgids))
66 				++tgids;
67 			tgid = ev.comm.pid = atoi(tgids);
68 		}
69 	}
70 
71 	ev.comm.header.type = PERF_RECORD_COMM;
72 	size = ALIGN(size, sizeof(u64));
73 	ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);
74 
75 	if (!full) {
76 		ev.comm.tid = pid;
77 
78 		process(&ev, session);
79 		goto out_fclose;
80 	}
81 
82 	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
83 
84 	tasks = opendir(filename);
85 	if (tasks == NULL)
86 		goto out_race;
87 
88 	while (!readdir_r(tasks, &dirent, &next) && next) {
89 		char *end;
90 		pid = strtol(dirent.d_name, &end, 10);
91 		if (*end)
92 			continue;
93 
94 		ev.comm.tid = pid;
95 
96 		process(&ev, session);
97 	}
98 	closedir(tasks);
99 
100 out_fclose:
101 	fclose(fp);
102 	return tgid;
103 
104 out_failure:
105 	pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
106 	return -1;
107 }
108 
109 static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
110 					 event__handler_t process,
111 					 struct perf_session *session)
112 {
113 	char filename[PATH_MAX];
114 	FILE *fp;
115 
116 	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);
117 
118 	fp = fopen(filename, "r");
119 	if (fp == NULL) {
120 		/*
121 		 * We raced with a task exiting - just return:
122 		 */
123 		pr_debug("couldn't open %s\n", filename);
124 		return -1;
125 	}
126 
127 	while (1) {
128 		char bf[BUFSIZ], *pbf = bf;
129 		event_t ev = {
130 			.header = {
131 				.type = PERF_RECORD_MMAP,
132 				/*
133 				 * Just like the kernel, see __perf_event_mmap
134 				 * in kernel/perf_event.c
135 				 */
136 				.misc = PERF_RECORD_MISC_USER,
137 			 },
138 		};
139 		int n;
140 		size_t size;
141 		if (fgets(bf, sizeof(bf), fp) == NULL)
142 			break;
143 
144 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
145 		n = hex2u64(pbf, &ev.mmap.start);
146 		if (n < 0)
147 			continue;
148 		pbf += n + 1;
149 		n = hex2u64(pbf, &ev.mmap.len);
150 		if (n < 0)
151 			continue;
152 		pbf += n + 3;
153 		if (*pbf == 'x') { /* vm_exec */
154 			u64 vm_pgoff;
155 			char *execname = strchr(bf, '/');
156 
157 			/* Catch VDSO */
158 			if (execname == NULL)
159 				execname = strstr(bf, "[vdso]");
160 
161 			if (execname == NULL)
162 				continue;
163 
164 			pbf += 3;
165 			n = hex2u64(pbf, &vm_pgoff);
166 			/* pgoff is in bytes, not pages */
167 			if (n >= 0)
168 				ev.mmap.pgoff = vm_pgoff << getpagesize();
169 			else
170 				ev.mmap.pgoff = 0;
171 
172 			size = strlen(execname);
173 			execname[size - 1] = '\0'; /* Remove \n */
174 			memcpy(ev.mmap.filename, execname, size);
175 			size = ALIGN(size, sizeof(u64));
176 			ev.mmap.len -= ev.mmap.start;
177 			ev.mmap.header.size = (sizeof(ev.mmap) -
178 					       (sizeof(ev.mmap.filename) - size));
179 			ev.mmap.pid = tgid;
180 			ev.mmap.tid = pid;
181 
182 			process(&ev, session);
183 		}
184 	}
185 
186 	fclose(fp);
187 	return 0;
188 }
189 
190 int event__synthesize_modules(event__handler_t process,
191 			      struct perf_session *session,
192 			      struct machine *machine)
193 {
194 	struct rb_node *nd;
195 	struct map_groups *kmaps = &machine->kmaps;
196 	u16 misc;
197 
198 	/*
199 	 * kernel uses 0 for user space maps, see kernel/perf_event.c
200 	 * __perf_event_mmap
201 	 */
202 	if (machine__is_host(machine))
203 		misc = PERF_RECORD_MISC_KERNEL;
204 	else
205 		misc = PERF_RECORD_MISC_GUEST_KERNEL;
206 
207 	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
208 	     nd; nd = rb_next(nd)) {
209 		event_t ev;
210 		size_t size;
211 		struct map *pos = rb_entry(nd, struct map, rb_node);
212 
213 		if (pos->dso->kernel)
214 			continue;
215 
216 		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
217 		memset(&ev, 0, sizeof(ev));
218 		ev.mmap.header.misc = misc;
219 		ev.mmap.header.type = PERF_RECORD_MMAP;
220 		ev.mmap.header.size = (sizeof(ev.mmap) -
221 				        (sizeof(ev.mmap.filename) - size));
222 		ev.mmap.start = pos->start;
223 		ev.mmap.len   = pos->end - pos->start;
224 		ev.mmap.pid   = machine->pid;
225 
226 		memcpy(ev.mmap.filename, pos->dso->long_name,
227 		       pos->dso->long_name_len + 1);
228 		process(&ev, session);
229 	}
230 
231 	return 0;
232 }
233 
234 int event__synthesize_thread(pid_t pid, event__handler_t process,
235 			     struct perf_session *session)
236 {
237 	pid_t tgid = event__synthesize_comm(pid, 1, process, session);
238 	if (tgid == -1)
239 		return -1;
240 	return event__synthesize_mmap_events(pid, tgid, process, session);
241 }
242 
243 void event__synthesize_threads(event__handler_t process,
244 			       struct perf_session *session)
245 {
246 	DIR *proc;
247 	struct dirent dirent, *next;
248 
249 	proc = opendir("/proc");
250 
251 	while (!readdir_r(proc, &dirent, &next) && next) {
252 		char *end;
253 		pid_t pid = strtol(dirent.d_name, &end, 10);
254 
255 		if (*end) /* only interested in proper numerical dirents */
256 			continue;
257 
258 		event__synthesize_thread(pid, process, session);
259 	}
260 
261 	closedir(proc);
262 }
263 
/* Argument bundle for find_symbol_cb(): the symbol to look for (input)
 * and its resolved start address (output). */
struct process_symbol_args {
	const char *name;
	u64	   start;
};
268 
269 static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
270 {
271 	struct process_symbol_args *args = arg;
272 
273 	/*
274 	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
275 	 * an 'A' to the same address as "_stext".
276 	 */
277 	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
278 	      type == 'A') || strcmp(name, args->name))
279 		return 0;
280 
281 	args->start = start;
282 	return 1;
283 }
284 
285 int event__synthesize_kernel_mmap(event__handler_t process,
286 				  struct perf_session *session,
287 				  struct machine *machine,
288 				  const char *symbol_name)
289 {
290 	size_t size;
291 	const char *filename, *mmap_name;
292 	char path[PATH_MAX];
293 	char name_buff[PATH_MAX];
294 	struct map *map;
295 
296 	event_t ev = {
297 		.header = {
298 			.type = PERF_RECORD_MMAP,
299 		},
300 	};
301 	/*
302 	 * We should get this from /sys/kernel/sections/.text, but till that is
303 	 * available use this, and after it is use this as a fallback for older
304 	 * kernels.
305 	 */
306 	struct process_symbol_args args = { .name = symbol_name, };
307 
308 	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
309 	if (machine__is_host(machine)) {
310 		/*
311 		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
312 		 * see kernel/perf_event.c __perf_event_mmap
313 		 */
314 		ev.header.misc = PERF_RECORD_MISC_KERNEL;
315 		filename = "/proc/kallsyms";
316 	} else {
317 		ev.header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
318 		if (machine__is_default_guest(machine))
319 			filename = (char *) symbol_conf.default_guest_kallsyms;
320 		else {
321 			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
322 			filename = path;
323 		}
324 	}
325 
326 	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
327 		return -ENOENT;
328 
329 	map = machine->vmlinux_maps[MAP__FUNCTION];
330 	size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
331 			"%s%s", mmap_name, symbol_name) + 1;
332 	size = ALIGN(size, sizeof(u64));
333 	ev.mmap.header.size = (sizeof(ev.mmap) -
334 			(sizeof(ev.mmap.filename) - size));
335 	ev.mmap.pgoff = args.start;
336 	ev.mmap.start = map->start;
337 	ev.mmap.len   = map->end - ev.mmap.start;
338 	ev.mmap.pid   = machine->pid;
339 
340 	return process(&ev, session);
341 }
342 
343 static void thread__comm_adjust(struct thread *self)
344 {
345 	char *comm = self->comm;
346 
347 	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
348 	    (!symbol_conf.comm_list ||
349 	     strlist__has_entry(symbol_conf.comm_list, comm))) {
350 		unsigned int slen = strlen(comm);
351 
352 		if (slen > comms__col_width) {
353 			comms__col_width = slen;
354 			threads__col_width = slen + 6;
355 		}
356 	}
357 }
358 
/*
 * Set the thread's comm and, on success, update the global column
 * widths. Propagates thread__set_comm()'s error code, 0 on success.
 */
static int thread__set_comm_adjust(struct thread *self, const char *comm)
{
	int err = thread__set_comm(self, comm);

	if (err != 0)
		return err;

	thread__comm_adjust(self);
	return 0;
}
370 
371 int event__process_comm(event_t *self, struct perf_session *session)
372 {
373 	struct thread *thread = perf_session__findnew(session, self->comm.tid);
374 
375 	dump_printf(": %s:%d\n", self->comm.comm, self->comm.tid);
376 
377 	if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) {
378 		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
379 		return -1;
380 	}
381 
382 	return 0;
383 }
384 
/*
 * Handle a PERF_RECORD_LOST event: accumulate the reported number of
 * lost samples into the session-wide total. Always returns 0.
 */
int event__process_lost(event_t *self, struct perf_session *session)
{
	dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
	session->hists.stats.total_lost += self->lost.lost;
	return 0;
}
391 
392 static void event_set_kernel_mmap_len(struct map **maps, event_t *self)
393 {
394 	maps[MAP__FUNCTION]->start = self->mmap.start;
395 	maps[MAP__FUNCTION]->end   = self->mmap.start + self->mmap.len;
396 	/*
397 	 * Be a bit paranoid here, some perf.data file came with
398 	 * a zero sized synthesized MMAP event for the kernel.
399 	 */
400 	if (maps[MAP__FUNCTION]->end == 0)
401 		maps[MAP__FUNCTION]->end = ~0UL;
402 }
403 
/*
 * Handle a kernel-side MMAP event: classify it as either a module map
 * (absolute path, or a bracketed name that isn't the kernel prefix) or
 * the main kernel map (filename starting with the machine's mmap-name
 * prefix, e.g. "[kernel.kallsyms]"), and update the machine's maps
 * accordingly. Returns 0 on success, -1 on any failure.
 */
static int event__process_kernel_mmap(event_t *self,
			struct perf_session *session)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	struct machine *machine;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine = perf_session__findnew_machine(session, self->mmap.pid);
	if (!machine) {
		pr_err("Can't find id %d's machine\n", self->mmap.pid);
		goto out_problem;
	}

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	/* Kernel maps carry the machine's mmap-name as filename prefix. */
	is_kernel_mmap = memcmp(self->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (self->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && self->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (self->mmap.filename[0] == '/') {
			/* Derive "[module_name]" from the .ko path. */
			name = strrchr(self->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
					"[%.*s]", (int)(dot - name), name);
			/* Module names use '_' where filenames use '-'. */
			strxfrchar(short_module_name, '-', '_');
		} else
			/* NOTE(review): unbounded strcpy; filename is assumed
			 * shorter than 1024 here - verify against event ABI. */
			strcpy(short_module_name, self->mmap.filename);

		map = machine__new_module(machine, self->mmap.start,
					  self->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		/* The dso takes ownership of the strdup'ed name. */
		map->dso->short_name = name;
		map->end = map->start + self->mmap.len;
	} else if (is_kernel_mmap) {
		/* Text after the prefix is the ref-reloc symbol name. */
		const char *symbol_name = (self->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		event_set_kernel_mmap_len(machine->vmlinux_maps, self);
		perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 self->mmap.pgoff);
		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}
492 
/*
 * Handle a PERF_RECORD_MMAP event. Kernel-space mmaps (per the cpumode
 * bits in the header) are routed to event__process_kernel_mmap();
 * user-space mmaps create a new map in the host machine's user dsos
 * and insert it into the owning thread's map groups.
 *
 * Always returns 0; problems are reported via dump_printf and the
 * event is skipped.
 */
int event__process_mmap(event_t *self, struct perf_session *session)
{
	struct machine *machine;
	struct thread *thread;
	struct map *map;
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
			self->mmap.pid, self->mmap.tid, self->mmap.start,
			self->mmap.len, self->mmap.pgoff, self->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = event__process_kernel_mmap(self, session);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	machine = perf_session__find_host_machine(session);
	if (machine == NULL)
		goto out_problem;
	thread = perf_session__findnew(session, self->mmap.pid);
	map = map__new(&machine->user_dsos, self->mmap.start,
			self->mmap.len, self->mmap.pgoff,
			self->mmap.pid, self->mmap.filename,
			MAP__FUNCTION, session->cwd, session->cwdlen);

	if (thread == NULL || map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
532 
533 int event__process_task(event_t *self, struct perf_session *session)
534 {
535 	struct thread *thread = perf_session__findnew(session, self->fork.tid);
536 	struct thread *parent = perf_session__findnew(session, self->fork.ptid);
537 
538 	dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
539 		    self->fork.ppid, self->fork.ptid);
540 
541 	if (self->header.type == PERF_RECORD_EXIT) {
542 		perf_session__remove_thread(session, thread);
543 		return 0;
544 	}
545 
546 	if (thread == NULL || parent == NULL ||
547 	    thread__fork(thread, parent) < 0) {
548 		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
549 		return -1;
550 	}
551 
552 	return 0;
553 }
554 
/*
 * Resolve @addr to a map in the appropriate map groups for @cpumode:
 * host kernel ('k'), host user ('.'), guest kernel ('g'), guest user
 * ('u', unsupported) or hypervisor ('H'). Fills al->map (NULL if not
 * found), al->addr (map-relative on success), al->level and
 * al->filtered; samples from a side (host/guest) we aren't collecting
 * are marked filtered.
 */
void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	struct machine *machine = NULL;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		machine = perf_session__find_host_machine(session);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
		machine = perf_session__find_host_machine(session);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		machine = perf_session__find_machine(session, pid);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support late.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		/* Samples from the side we aren't recording are filtered. */
		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}
630 
631 void thread__find_addr_location(struct thread *self,
632 				struct perf_session *session, u8 cpumode,
633 				enum map_type type, pid_t pid, u64 addr,
634 				struct addr_location *al,
635 				symbol_filter_t filter)
636 {
637 	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
638 	if (al->map != NULL)
639 		al->sym = map__find_symbol(al->map, al->addr, filter);
640 	else
641 		al->sym = NULL;
642 }
643 
644 static void dso__calc_col_width(struct dso *self)
645 {
646 	if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
647 	    (!symbol_conf.dso_list ||
648 	     strlist__has_entry(symbol_conf.dso_list, self->name))) {
649 		u16 slen = self->short_name_len;
650 		if (verbose)
651 			slen = self->long_name_len;
652 		if (dsos__col_width < slen)
653 			dsos__col_width = slen;
654 	}
655 
656 	self->slen_calculated = 1;
657 }
658 
/*
 * Resolve a sample event's ip to thread, map and symbol, filling @al.
 * Applies the comm/dso/sym filter lists; filtered samples get
 * al->filtered = true and still return 0. Returns -1 only when the
 * owning thread can't be found or created.
 */
int event__preprocess_sample(const event_t *self, struct perf_session *session,
			     struct addr_location *al, symbol_filter_t filter)
{
	u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = perf_session__findnew(session, self->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for the host machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(&session->host_machine);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      self->ip.pid, self->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;

	if (al->map) {
		/* NOTE(review): the '!al->map' test below is redundant
		 * inside this branch - al->map is known non-NULL here. */
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;
		/*
		 * We have to do this here as we may have a dso with no symbol
		 * hit that has a name longer than the ones with symbols
		 * sampled.
		 */
		if (!sort_dso.elide && !al->map->dso->slen_calculated)
			dso__calc_col_width(al->map->dso);

		al->sym = map__find_symbol(al->map, al->addr, filter);
	} else {
		/* Unresolved addresses print as hex; size the column for it. */
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (dsos__col_width < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			dsos__col_width = unresolved_col_width;
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}
728 
/*
 * Decode the variable layout of a PERF_RECORD_SAMPLE into @data.
 *
 * The fields appear in the record in the fixed order defined by the
 * perf ABI (IP, TID, TIME, ADDR, ID, STREAM_ID, CPU, PERIOD, READ,
 * CALLCHAIN, RAW), each present only if its PERF_SAMPLE_* bit is set
 * in @type, so the checks below must stay in this exact order.
 * Each slot in the record is u64-sized, even for u32 payloads.
 *
 * Returns 0 on success, -1 if PERF_SAMPLE_READ is requested (not
 * supported).
 */
int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
{
	u64 *array = event->sample.array;

	if (type & PERF_SAMPLE_IP) {
		data->ip = event->ip.ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		/* pid and tid share one u64 slot as two u32s. */
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	/* Sentinel so callers can tell whether an id was present. */
	data->id = -1ULL;
	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		/* cpu occupies the low u32 of its u64 slot. */
		u32 *p = (u32 *)array;
		data->cpu = *p;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
		return -1;
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		/* nr entries follow the u64 count itself. */
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;
	}

	if (type & PERF_SAMPLE_RAW) {
		/* A u32 size prefixes the raw payload. */
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		p++;
		data->raw_data = p;
	}

	return 0;
}
796