xref: /linux/tools/perf/util/machine.c (revision 37fbe0a4a0a9afe3b0fe843a4775a85ccf430deb)
1 #include "callchain.h"
2 #include "debug.h"
3 #include "event.h"
4 #include "evsel.h"
5 #include "hist.h"
6 #include "machine.h"
7 #include "map.h"
8 #include "sort.h"
9 #include "strlist.h"
10 #include "thread.h"
11 #include "vdso.h"
12 #include <stdbool.h>
13 #include <symbol/kallsyms.h>
14 #include "unwind.h"
15 #include "linux/hash.h"
16 
17 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
18 
19 static void dsos__init(struct dsos *dsos)
20 {
21 	INIT_LIST_HEAD(&dsos->head);
22 	dsos->root = RB_ROOT;
23 }
24 
25 int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
26 {
27 	map_groups__init(&machine->kmaps, machine);
28 	RB_CLEAR_NODE(&machine->rb_node);
29 	dsos__init(&machine->dsos);
30 
31 	machine->threads = RB_ROOT;
32 	pthread_rwlock_init(&machine->threads_lock, NULL);
33 	INIT_LIST_HEAD(&machine->dead_threads);
34 	machine->last_match = NULL;
35 
36 	machine->vdso_info = NULL;
37 
38 	machine->pid = pid;
39 
40 	machine->symbol_filter = NULL;
41 	machine->id_hdr_size = 0;
42 	machine->comm_exec = false;
43 	machine->kernel_start = 0;
44 
45 	machine->root_dir = strdup(root_dir);
46 	if (machine->root_dir == NULL)
47 		return -ENOMEM;
48 
49 	if (pid != HOST_KERNEL_ID) {
50 		struct thread *thread = machine__findnew_thread(machine, -1,
51 								pid);
52 		char comm[64];
53 
54 		if (thread == NULL)
55 			return -ENOMEM;
56 
57 		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
58 		thread__set_comm(thread, comm, 0);
59 		thread__put(thread);
60 	}
61 
62 	machine->current_tid = NULL;
63 
64 	return 0;
65 }
66 
67 struct machine *machine__new_host(void)
68 {
69 	struct machine *machine = malloc(sizeof(*machine));
70 
71 	if (machine != NULL) {
72 		machine__init(machine, "", HOST_KERNEL_ID);
73 
74 		if (machine__create_kernel_maps(machine) < 0)
75 			goto out_delete;
76 	}
77 
78 	return machine;
79 out_delete:
80 	free(machine);
81 	return NULL;
82 }
83 
84 static void dsos__delete(struct dsos *dsos)
85 {
86 	struct dso *pos, *n;
87 
88 	list_for_each_entry_safe(pos, n, &dsos->head, node) {
89 		RB_CLEAR_NODE(&pos->rb_node);
90 		list_del(&pos->node);
91 		dso__delete(pos);
92 	}
93 }
94 
/*
 * Remove every thread from the machine's rb tree.  The writer lock is
 * taken once for the whole walk, so the helper is told not to re-take
 * it (lock == false).
 */
void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;

	pthread_rwlock_wrlock(&machine->threads_lock);
	nd = rb_first(&machine->threads);
	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/* Advance before removal invalidates the current node. */
		nd = rb_next(nd);
		__machine__remove_thread(machine, t, false);
	}
	pthread_rwlock_unlock(&machine->threads_lock);
}
109 
/*
 * Tear down everything machine__init() set up.  Does not free the
 * machine struct itself -- see machine__delete() for that.
 */
void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->current_tid);
	pthread_rwlock_destroy(&machine->threads_lock);
}
119 
/* Destroy a heap-allocated machine: release resources, then the struct. */
void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}
125 
/* Initialize the host machine and an empty rb tree of guest machines. */
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}
132 
/* Tear down the host machine.  Guests are not cleaned up here yet. */
void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}
138 
139 struct machine *machines__add(struct machines *machines, pid_t pid,
140 			      const char *root_dir)
141 {
142 	struct rb_node **p = &machines->guests.rb_node;
143 	struct rb_node *parent = NULL;
144 	struct machine *pos, *machine = malloc(sizeof(*machine));
145 
146 	if (machine == NULL)
147 		return NULL;
148 
149 	if (machine__init(machine, root_dir, pid) != 0) {
150 		free(machine);
151 		return NULL;
152 	}
153 
154 	machine->symbol_filter = machines->symbol_filter;
155 
156 	while (*p != NULL) {
157 		parent = *p;
158 		pos = rb_entry(parent, struct machine, rb_node);
159 		if (pid < pos->pid)
160 			p = &(*p)->rb_left;
161 		else
162 			p = &(*p)->rb_right;
163 	}
164 
165 	rb_link_node(&machine->rb_node, parent, p);
166 	rb_insert_color(&machine->rb_node, &machines->guests);
167 
168 	return machine;
169 }
170 
171 void machines__set_symbol_filter(struct machines *machines,
172 				 symbol_filter_t symbol_filter)
173 {
174 	struct rb_node *nd;
175 
176 	machines->symbol_filter = symbol_filter;
177 	machines->host.symbol_filter = symbol_filter;
178 
179 	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
180 		struct machine *machine = rb_entry(nd, struct machine, rb_node);
181 
182 		machine->symbol_filter = symbol_filter;
183 	}
184 }
185 
186 void machines__set_comm_exec(struct machines *machines, bool comm_exec)
187 {
188 	struct rb_node *nd;
189 
190 	machines->host.comm_exec = comm_exec;
191 
192 	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
193 		struct machine *machine = rb_entry(nd, struct machine, rb_node);
194 
195 		machine->comm_exec = comm_exec;
196 	}
197 }
198 
199 struct machine *machines__find(struct machines *machines, pid_t pid)
200 {
201 	struct rb_node **p = &machines->guests.rb_node;
202 	struct rb_node *parent = NULL;
203 	struct machine *machine;
204 	struct machine *default_machine = NULL;
205 
206 	if (pid == HOST_KERNEL_ID)
207 		return &machines->host;
208 
209 	while (*p != NULL) {
210 		parent = *p;
211 		machine = rb_entry(parent, struct machine, rb_node);
212 		if (pid < machine->pid)
213 			p = &(*p)->rb_left;
214 		else if (pid > machine->pid)
215 			p = &(*p)->rb_right;
216 		else
217 			return machine;
218 		if (!machine->pid)
219 			default_machine = machine;
220 	}
221 
222 	return default_machine;
223 }
224 
225 struct machine *machines__findnew(struct machines *machines, pid_t pid)
226 {
227 	char path[PATH_MAX];
228 	const char *root_dir = "";
229 	struct machine *machine = machines__find(machines, pid);
230 
231 	if (machine && (machine->pid == pid))
232 		goto out;
233 
234 	if ((pid != HOST_KERNEL_ID) &&
235 	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
236 	    (symbol_conf.guestmount)) {
237 		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
238 		if (access(path, R_OK)) {
239 			static struct strlist *seen;
240 
241 			if (!seen)
242 				seen = strlist__new(true, NULL);
243 
244 			if (!strlist__has_entry(seen, path)) {
245 				pr_err("Can't access file %s\n", path);
246 				strlist__add(seen, path);
247 			}
248 			machine = NULL;
249 			goto out;
250 		}
251 		root_dir = path;
252 	}
253 
254 	machine = machines__add(machines, pid, root_dir);
255 out:
256 	return machine;
257 }
258 
259 void machines__process_guests(struct machines *machines,
260 			      machine__process_t process, void *data)
261 {
262 	struct rb_node *nd;
263 
264 	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
265 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
266 		process(pos, data);
267 	}
268 }
269 
270 char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
271 {
272 	if (machine__is_host(machine))
273 		snprintf(bf, size, "[%s]", "kernel.kallsyms");
274 	else if (machine__is_default_guest(machine))
275 		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
276 	else {
277 		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
278 			 machine->pid);
279 	}
280 
281 	return bf;
282 }
283 
284 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
285 {
286 	struct rb_node *node;
287 	struct machine *machine;
288 
289 	machines->host.id_hdr_size = id_hdr_size;
290 
291 	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
292 		machine = rb_entry(node, struct machine, rb_node);
293 		machine->id_hdr_size = id_hdr_size;
294 	}
295 
296 	return;
297 }
298 
/*
 * Late-assign a pid to a thread created with an unknown pid (-1), then
 * share the thread-group leader's map groups with it.  NOTE(review):
 * can create the leader via __machine__findnew_thread(), the unlocked
 * variant, so callers appear to rely on threads_lock being held --
 * confirm against the call sites.
 */
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	/* Only act when a real pid arrives for a previously unknown one. */
	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	/* pid == tid: this thread is the group leader, nothing to share. */
	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		return;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid.  Consequently there never should be any maps on a thread
		 * with an unknown pid.  Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	/* Share the leader's address space with this thread. */
	th->mg = map_groups__get(leader->mg);

	return;

out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
}
344 
/*
 * Core thread lookup: find the thread with 'tid' in the machine's rb
 * tree (consulting the last_match cache first) or, when 'create' is
 * set, insert a new one.  Also late-binds the pid via
 * machine__update_thread_pid().  Takes no locks itself but mutates
 * machine->last_match; callers serialize via machine->threads_lock.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we dont have to look up
	 * the full rbtree:
	 */
	th = machine->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		machine->last_match = NULL;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			machine__update_thread_pid(machine, th, pid);
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screwed the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			/* Roll back the insertion and drop the thread. */
			rb_erase_init(&th->rb_node, &machine->threads);
			RB_CLEAR_NODE(&th->rb_node);
			thread__delete(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		machine->last_match = th;
	}

	return th;
}
415 
/*
 * Lookup-or-create without taking threads_lock; callers serialize
 * access themselves (see machine__findnew_thread()).
 */
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, pid, tid, true);
}
420 
/*
 * Find or create the thread for pid/tid under the writer lock and
 * return it with an extra reference the caller must thread__put().
 */
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct thread *th;

	pthread_rwlock_wrlock(&machine->threads_lock);
	th = thread__get(__machine__findnew_thread(machine, pid, tid));
	pthread_rwlock_unlock(&machine->threads_lock);
	return th;
}
431 
432 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
433 				    pid_t tid)
434 {
435 	struct thread *th;
436 	pthread_rwlock_rdlock(&machine->threads_lock);
437 	th =  thread__get(____machine__findnew_thread(machine, pid, tid, false));
438 	pthread_rwlock_unlock(&machine->threads_lock);
439 	return th;
440 }
441 
442 struct comm *machine__thread_exec_comm(struct machine *machine,
443 				       struct thread *thread)
444 {
445 	if (machine->comm_exec)
446 		return thread__exec_comm(thread);
447 	else
448 		return thread__comm(thread);
449 }
450 
451 int machine__process_comm_event(struct machine *machine, union perf_event *event,
452 				struct perf_sample *sample)
453 {
454 	struct thread *thread = machine__findnew_thread(machine,
455 							event->comm.pid,
456 							event->comm.tid);
457 	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
458 	int err = 0;
459 
460 	if (exec)
461 		machine->comm_exec = true;
462 
463 	if (dump_trace)
464 		perf_event__fprintf_comm(event, stdout);
465 
466 	if (thread == NULL ||
467 	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
468 		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
469 		err = -1;
470 	}
471 
472 	thread__put(thread);
473 
474 	return err;
475 }
476 
/* PERF_RECORD_LOST: report the lost-sample count; always succeeds. */
int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}
484 
/*
 * Find the dso for a kernel module, adding it to the machine's dsos if
 * missing.  New dsos get a host/guest kmodule symtab type (bumped to
 * the _COMP variant for compressed modules) and their names set.
 */
static struct dso*
machine__module_dso(struct machine *machine, struct kmod_path *m,
		    const char *filename)
{
	struct dso *dso;

	dso = dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			return NULL;

		if (machine__is_host(machine))
			dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
		else
			dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;

		/* _KMODULE_COMP should be next to _KMODULE */
		if (m->kmod && m->comp)
			dso->symtab_type++;

		dso__set_short_name(dso, strdup(m->name), true);
		dso__set_long_name(dso, strdup(filename), true);
	}

	return dso;
}
512 
/* PERF_RECORD_AUX: only dumped when tracing; nothing else to do here. */
int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}
520 
/* PERF_RECORD_ITRACE_START: only dumped when tracing. */
int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}
528 
529 struct map *machine__new_module(struct machine *machine, u64 start,
530 				const char *filename)
531 {
532 	struct map *map = NULL;
533 	struct dso *dso;
534 	struct kmod_path m;
535 
536 	if (kmod_path__parse_name(&m, filename))
537 		return NULL;
538 
539 	map = map_groups__find_by_name(&machine->kmaps, MAP__FUNCTION,
540 				       m.name);
541 	if (map)
542 		goto out;
543 
544 	dso = machine__module_dso(machine, &m, filename);
545 	if (dso == NULL)
546 		goto out;
547 
548 	map = map__new2(start, dso, MAP__FUNCTION);
549 	if (map == NULL)
550 		goto out;
551 
552 	map_groups__insert(&machine->kmaps, map);
553 
554 out:
555 	free(m.name);
556 	return map;
557 }
558 
559 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
560 {
561 	struct rb_node *nd;
562 	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
563 
564 	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
565 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
566 		ret += __dsos__fprintf(&pos->dsos.head, fp);
567 	}
568 
569 	return ret;
570 }
571 
/* Print build ids of this machine's dsos, except those 'skip' rejects. */
size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}
577 
578 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
579 				     bool (skip)(struct dso *dso, int parm), int parm)
580 {
581 	struct rb_node *nd;
582 	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
583 
584 	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
585 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
586 		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
587 	}
588 	return ret;
589 }
590 
/*
 * List vmlinux candidates for this machine: the kernel dso's build-id
 * cache file first (when it has a build id), then every vmlinux_path
 * entry, numbered consecutively.
 */
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		/* Entry [0] is the build-id cache copy, when resolvable. */
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	/* has_build_id offsets the numbering when entry [0] was printed. */
	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}
609 
/* Print every thread of the machine, holding the reader lock. */
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	pthread_rwlock_rdlock(&machine->threads_lock);

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	pthread_rwlock_unlock(&machine->threads_lock);

	return ret;
}
627 
628 static struct dso *machine__get_kernel(struct machine *machine)
629 {
630 	const char *vmlinux_name = NULL;
631 	struct dso *kernel;
632 
633 	if (machine__is_host(machine)) {
634 		vmlinux_name = symbol_conf.vmlinux_name;
635 		if (!vmlinux_name)
636 			vmlinux_name = "[kernel.kallsyms]";
637 
638 		kernel = machine__findnew_kernel(machine, vmlinux_name,
639 						 "[kernel]", DSO_TYPE_KERNEL);
640 	} else {
641 		char bf[PATH_MAX];
642 
643 		if (machine__is_default_guest(machine))
644 			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
645 		if (!vmlinux_name)
646 			vmlinux_name = machine__mmap_name(machine, bf,
647 							  sizeof(bf));
648 
649 		kernel = machine__findnew_kernel(machine, vmlinux_name,
650 						 "[guest.kernel]",
651 						 DSO_TYPE_GUEST_KERNEL);
652 	}
653 
654 	if (kernel != NULL && (!kernel->has_build_id))
655 		dso__read_running_kernel_build_id(kernel, machine);
656 
657 	return kernel;
658 }
659 
/* Argument block for kallsyms-walking callbacks: kernel start address. */
struct process_args {
	u64 start;
};
663 
664 static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
665 					   size_t bufsz)
666 {
667 	if (machine__is_default_guest(machine))
668 		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
669 	else
670 		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
671 }
672 
/* Candidate kernel start symbols, tried in order; NULL-terminated. */
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
674 
675 /* Figure out the start address of kernel map from /proc/kallsyms.
676  * Returns the name of the start symbol in *symbol_name. Pass in NULL as
677  * symbol_name if it's not that important.
678  */
679 static u64 machine__get_running_kernel_start(struct machine *machine,
680 					     const char **symbol_name)
681 {
682 	char filename[PATH_MAX];
683 	int i;
684 	const char *name;
685 	u64 addr = 0;
686 
687 	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
688 
689 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
690 		return 0;
691 
692 	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
693 		addr = kallsyms__get_function_start(filename, name);
694 		if (addr)
695 			break;
696 	}
697 
698 	if (symbol_name)
699 		*symbol_name = name;
700 
701 	return addr;
702 }
703 
/*
 * Create one vmlinux map per map type for 'kernel', starting at the
 * running kernel's start address, with identity ip mapping, and insert
 * them into the machine's kmaps.  Returns 0, or -1 on failure.
 */
int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_running_kernel_start(machine, NULL);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		/* Kernel maps: ip and map ip are the same (identity). */
		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		if (!kmap)
			return -1;

		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}
730 
/*
 * Remove and release the per-type vmlinux maps created by
 * __machine__create_kernel_maps(), freeing the shared ref_reloc_sym
 * exactly once.
 */
void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap && kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		machine->vmlinux_maps[type] = NULL;
	}
}
759 
760 int machines__create_guest_kernel_maps(struct machines *machines)
761 {
762 	int ret = 0;
763 	struct dirent **namelist = NULL;
764 	int i, items = 0;
765 	char path[PATH_MAX];
766 	pid_t pid;
767 	char *endp;
768 
769 	if (symbol_conf.default_guest_vmlinux_name ||
770 	    symbol_conf.default_guest_modules ||
771 	    symbol_conf.default_guest_kallsyms) {
772 		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
773 	}
774 
775 	if (symbol_conf.guestmount) {
776 		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
777 		if (items <= 0)
778 			return -ENOENT;
779 		for (i = 0; i < items; i++) {
780 			if (!isdigit(namelist[i]->d_name[0])) {
781 				/* Filter out . and .. */
782 				continue;
783 			}
784 			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
785 			if ((*endp != '\0') ||
786 			    (endp == namelist[i]->d_name) ||
787 			    (errno == ERANGE)) {
788 				pr_debug("invalid directory (%s). Skipping.\n",
789 					 namelist[i]->d_name);
790 				continue;
791 			}
792 			sprintf(path, "%s/%s/proc/kallsyms",
793 				symbol_conf.guestmount,
794 				namelist[i]->d_name);
795 			ret = access(path, R_OK);
796 			if (ret) {
797 				pr_debug("Can't access file %s\n", path);
798 				goto failure;
799 			}
800 			machines__create_kernel_maps(machines, pid);
801 		}
802 failure:
803 		free(namelist);
804 	}
805 
806 	return ret;
807 }
808 
809 void machines__destroy_kernel_maps(struct machines *machines)
810 {
811 	struct rb_node *next = rb_first(&machines->guests);
812 
813 	machine__destroy_kernel_maps(&machines->host);
814 
815 	while (next) {
816 		struct machine *pos = rb_entry(next, struct machine, rb_node);
817 
818 		next = rb_next(&pos->rb_node);
819 		rb_erase(&pos->rb_node, &machines->guests);
820 		machine__delete(pos);
821 	}
822 }
823 
824 int machines__create_kernel_maps(struct machines *machines, pid_t pid)
825 {
826 	struct machine *machine = machines__findnew(machines, pid);
827 
828 	if (machine == NULL)
829 		return -1;
830 
831 	return machine__create_kernel_maps(machine);
832 }
833 
/*
 * Load kernel symbols from a kallsyms file into the map of the given
 * type.  On success (ret > 0) the dso is marked loaded and section
 * ends are fixed up.  Returns dso__load_kallsyms()'s result.
 */
int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sessions for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}
852 
853 int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
854 			       symbol_filter_t filter)
855 {
856 	struct map *map = machine->vmlinux_maps[type];
857 	int ret = dso__load_vmlinux_path(map->dso, map, filter);
858 
859 	if (ret > 0)
860 		dso__set_loaded(map->dso, type);
861 
862 	return ret;
863 }
864 
865 static void map_groups__fixup_end(struct map_groups *mg)
866 {
867 	int i;
868 	for (i = 0; i < MAP__NR_TYPES; ++i)
869 		__map_groups__fixup_end(mg, i);
870 }
871 
872 static char *get_kernel_version(const char *root_dir)
873 {
874 	char version[PATH_MAX];
875 	FILE *file;
876 	char *name, *tmp;
877 	const char *prefix = "Linux version ";
878 
879 	sprintf(version, "%s/proc/version", root_dir);
880 	file = fopen(version, "r");
881 	if (!file)
882 		return NULL;
883 
884 	version[0] = '\0';
885 	tmp = fgets(version, sizeof(version), file);
886 	fclose(file);
887 
888 	name = strstr(version, prefix);
889 	if (!name)
890 		return NULL;
891 	name += strlen(prefix);
892 	tmp = strchr(name, ' ');
893 	if (tmp)
894 		*tmp = '\0';
895 
896 	return strdup(name);
897 }
898 
899 static bool is_kmod_dso(struct dso *dso)
900 {
901 	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
902 	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
903 }
904 
/*
 * If a map named after module 'm' exists, point its dso's long name at
 * 'path' on disk, refresh the build id, and bump the symtab type to
 * the compressed variant when the filename says the module is
 * compressed.  Returns 0 (also when no such map exists) or -ENOMEM.
 */
static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	struct map *map;
	char *long_name;

	map = map_groups__find_by_name(mg, MAP__FUNCTION, m->name);
	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * Full name could reveal us kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso))
		map->dso->symtab_type++;

	return 0;
}
931 
/*
 * Recursively walk a /lib/modules/<version> tree, wiring each module
 * file found to its in-kernel map via map_groups__set_module_path().
 * 'depth' distinguishes the top level, whose source/build symlinks
 * must not be followed.  Returns 0 or a negative error.
 */
static int map_groups__set_modules_path_dir(struct map_groups *mg,
				const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/*sshfs might return bad dent->d_type, so we have to stat*/
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			/* Only module files get their path recorded. */
			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			free(m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}
990 
991 static int machine__set_modules_path(struct machine *machine)
992 {
993 	char *version;
994 	char modules_path[PATH_MAX];
995 
996 	version = get_kernel_version(machine->root_dir);
997 	if (!version)
998 		return -1;
999 
1000 	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1001 		 machine->root_dir, version);
1002 	free(version);
1003 
1004 	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
1005 }
1006 
1007 static int machine__create_module(void *arg, const char *name, u64 start)
1008 {
1009 	struct machine *machine = arg;
1010 	struct map *map;
1011 
1012 	map = machine__new_module(machine, start, name);
1013 	if (map == NULL)
1014 		return -1;
1015 
1016 	dso__kernel_module_get_build_id(map->dso, machine->root_dir);
1017 
1018 	return 0;
1019 }
1020 
1021 static int machine__create_modules(struct machine *machine)
1022 {
1023 	const char *modules;
1024 	char path[PATH_MAX];
1025 
1026 	if (machine__is_default_guest(machine)) {
1027 		modules = symbol_conf.default_guest_modules;
1028 	} else {
1029 		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1030 		modules = path;
1031 	}
1032 
1033 	if (symbol__restricted_filename(modules, "/proc/modules"))
1034 		return -1;
1035 
1036 	if (modules__parse(modules, machine, machine__create_module))
1037 		return -1;
1038 
1039 	if (!machine__set_modules_path(machine))
1040 		return 0;
1041 
1042 	pr_debug("Problems setting modules path maps, continuing anyway...\n");
1043 
1044 	return 0;
1045 }
1046 
/*
 * Build the kernel (and, when requested, module) maps for this
 * machine.  Requires the running kernel's start address to be
 * discoverable from kallsyms; module problems are reported but not
 * fatal.  Finally map ends are fixed up and the ref reloc symbol is
 * recorded.  Returns 0 or -1.
 */
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name;
	u64 addr = machine__get_running_kernel_start(machine, &name);
	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		/* Not fatal: symbols from modules will just be missing. */
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}
1081 
1082 static void machine__set_kernel_mmap_len(struct machine *machine,
1083 					 union perf_event *event)
1084 {
1085 	int i;
1086 
1087 	for (i = 0; i < MAP__NR_TYPES; i++) {
1088 		machine->vmlinux_maps[i]->start = event->mmap.start;
1089 		machine->vmlinux_maps[i]->end   = (event->mmap.start +
1090 						   event->mmap.len);
1091 		/*
1092 		 * Be a bit paranoid here, some perf.data file came with
1093 		 * a zero sized synthesized MMAP event for the kernel.
1094 		 */
1095 		if (machine->vmlinux_maps[i]->end == 0)
1096 			machine->vmlinux_maps[i]->end = ~0ULL;
1097 	}
1098 }
1099 
1100 static bool machine__uses_kcore(struct machine *machine)
1101 {
1102 	struct dso *dso;
1103 
1104 	list_for_each_entry(dso, &machine->dsos.head, node) {
1105 		if (dso__is_kcore(dso))
1106 			return true;
1107 	}
1108 
1109 	return false;
1110 }
1111 
/*
 * Handle an MMAP event in kernel space: module maps (absolute paths or
 * bracketed names other than the kernel prefix) and the kernel map
 * proper, which also triggers ref-reloc-symbol setup and, for the
 * default guest, symbol preloading.  Returns 0 on success, -1 on
 * failure.
 */
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	/* Compare without the trailing ']' of the prefix. */
	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		list_for_each_entry(dso, &machine->dsos.head, node) {
			if (!dso->kernel || is_kernel_module(dso->long_name))
				continue;

			kernel = dso;
			break;
		}

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}
1196 
1197 int machine__process_mmap2_event(struct machine *machine,
1198 				 union perf_event *event,
1199 				 struct perf_sample *sample __maybe_unused)
1200 {
1201 	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1202 	struct thread *thread;
1203 	struct map *map;
1204 	enum map_type type;
1205 	int ret = 0;
1206 
1207 	if (dump_trace)
1208 		perf_event__fprintf_mmap2(event, stdout);
1209 
1210 	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1211 	    cpumode == PERF_RECORD_MISC_KERNEL) {
1212 		ret = machine__process_kernel_mmap_event(machine, event);
1213 		if (ret < 0)
1214 			goto out_problem;
1215 		return 0;
1216 	}
1217 
1218 	thread = machine__findnew_thread(machine, event->mmap2.pid,
1219 					event->mmap2.tid);
1220 	if (thread == NULL)
1221 		goto out_problem;
1222 
1223 	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
1224 		type = MAP__VARIABLE;
1225 	else
1226 		type = MAP__FUNCTION;
1227 
1228 	map = map__new(machine, event->mmap2.start,
1229 			event->mmap2.len, event->mmap2.pgoff,
1230 			event->mmap2.pid, event->mmap2.maj,
1231 			event->mmap2.min, event->mmap2.ino,
1232 			event->mmap2.ino_generation,
1233 			event->mmap2.prot,
1234 			event->mmap2.flags,
1235 			event->mmap2.filename, type, thread);
1236 
1237 	if (map == NULL)
1238 		goto out_problem_map;
1239 
1240 	thread__insert_map(thread, map);
1241 	thread__put(thread);
1242 	map__put(map);
1243 	return 0;
1244 
1245 out_problem_map:
1246 	thread__put(thread);
1247 out_problem:
1248 	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1249 	return 0;
1250 }
1251 
1252 int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1253 				struct perf_sample *sample __maybe_unused)
1254 {
1255 	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1256 	struct thread *thread;
1257 	struct map *map;
1258 	enum map_type type;
1259 	int ret = 0;
1260 
1261 	if (dump_trace)
1262 		perf_event__fprintf_mmap(event, stdout);
1263 
1264 	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1265 	    cpumode == PERF_RECORD_MISC_KERNEL) {
1266 		ret = machine__process_kernel_mmap_event(machine, event);
1267 		if (ret < 0)
1268 			goto out_problem;
1269 		return 0;
1270 	}
1271 
1272 	thread = machine__findnew_thread(machine, event->mmap.pid,
1273 					 event->mmap.tid);
1274 	if (thread == NULL)
1275 		goto out_problem;
1276 
1277 	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
1278 		type = MAP__VARIABLE;
1279 	else
1280 		type = MAP__FUNCTION;
1281 
1282 	map = map__new(machine, event->mmap.start,
1283 			event->mmap.len, event->mmap.pgoff,
1284 			event->mmap.pid, 0, 0, 0, 0, 0, 0,
1285 			event->mmap.filename,
1286 			type, thread);
1287 
1288 	if (map == NULL)
1289 		goto out_problem_map;
1290 
1291 	thread__insert_map(thread, map);
1292 	thread__put(thread);
1293 	map__put(map);
1294 	return 0;
1295 
1296 out_problem_map:
1297 	thread__put(thread);
1298 out_problem:
1299 	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1300 	return 0;
1301 }
1302 
/*
 * Unlink @th from the machine's live-thread rbtree and park it on the
 * dead_threads list, dropping one reference.  @lock selects whether this
 * function takes threads_lock itself (pass false when the caller already
 * holds it as a writer).
 */
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	/* machine->last_match may still point at th; clear so it can't dangle. */
	if (machine->last_match == th)
		machine->last_match = NULL;

	BUG_ON(atomic_read(&th->refcnt) == 0);
	if (lock)
		pthread_rwlock_wrlock(&machine->threads_lock);
	rb_erase_init(&th->rb_node, &machine->threads);
	RB_CLEAR_NODE(&th->rb_node);
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
	if (lock)
		pthread_rwlock_unlock(&machine->threads_lock);
	thread__put(th);
}
1323 
1324 void machine__remove_thread(struct machine *machine, struct thread *th)
1325 {
1326 	return __machine__remove_thread(machine, th, true);
1327 }
1328 
/*
 * Handle PERF_RECORD_FORK: (re)create the child thread and link it to its
 * parent via thread__fork().  If a thread with the same tid already
 * exists it is removed first — the tid has been recycled.
 *
 * Returns 0 on success, -1 if the threads could not be set up.
 */
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	int err = 0;

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);	/* drop the find's own reference */
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	/* NOTE(review): puts below run even when thread/parent is NULL —
	 * presumably thread__put() tolerates NULL; confirm in thread.h. */
	thread__put(thread);
	thread__put(parent);

	return err;
}
1361 
1362 int machine__process_exit_event(struct machine *machine, union perf_event *event,
1363 				struct perf_sample *sample __maybe_unused)
1364 {
1365 	struct thread *thread = machine__find_thread(machine,
1366 						     event->fork.pid,
1367 						     event->fork.tid);
1368 
1369 	if (dump_trace)
1370 		perf_event__fprintf_task(event, stdout);
1371 
1372 	if (thread != NULL) {
1373 		thread__exited(thread);
1374 		thread__put(thread);
1375 	}
1376 
1377 	return 0;
1378 }
1379 
1380 int machine__process_event(struct machine *machine, union perf_event *event,
1381 			   struct perf_sample *sample)
1382 {
1383 	int ret;
1384 
1385 	switch (event->header.type) {
1386 	case PERF_RECORD_COMM:
1387 		ret = machine__process_comm_event(machine, event, sample); break;
1388 	case PERF_RECORD_MMAP:
1389 		ret = machine__process_mmap_event(machine, event, sample); break;
1390 	case PERF_RECORD_MMAP2:
1391 		ret = machine__process_mmap2_event(machine, event, sample); break;
1392 	case PERF_RECORD_FORK:
1393 		ret = machine__process_fork_event(machine, event, sample); break;
1394 	case PERF_RECORD_EXIT:
1395 		ret = machine__process_exit_event(machine, event, sample); break;
1396 	case PERF_RECORD_LOST:
1397 		ret = machine__process_lost_event(machine, event, sample); break;
1398 	case PERF_RECORD_AUX:
1399 		ret = machine__process_aux_event(machine, event); break;
1400 	case PERF_RECORD_ITRACE_START:
1401 		ret = machine__process_itrace_start_event(machine, event);
1402 		break;
1403 	default:
1404 		ret = -1;
1405 		break;
1406 	}
1407 
1408 	return ret;
1409 }
1410 
1411 static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
1412 {
1413 	if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
1414 		return 1;
1415 	return 0;
1416 }
1417 
1418 static void ip__resolve_ams(struct thread *thread,
1419 			    struct addr_map_symbol *ams,
1420 			    u64 ip)
1421 {
1422 	struct addr_location al;
1423 
1424 	memset(&al, 0, sizeof(al));
1425 	/*
1426 	 * We cannot use the header.misc hint to determine whether a
1427 	 * branch stack address is user, kernel, guest, hypervisor.
1428 	 * Branches may straddle the kernel/user/hypervisor boundaries.
1429 	 * Thus, we have to try consecutively until we find a match
1430 	 * or else, the symbol is unknown
1431 	 */
1432 	thread__find_cpumode_addr_location(thread, MAP__FUNCTION, ip, &al);
1433 
1434 	ams->addr = ip;
1435 	ams->al_addr = al.addr;
1436 	ams->sym = al.sym;
1437 	ams->map = al.map;
1438 }
1439 
1440 static void ip__resolve_data(struct thread *thread,
1441 			     u8 m, struct addr_map_symbol *ams, u64 addr)
1442 {
1443 	struct addr_location al;
1444 
1445 	memset(&al, 0, sizeof(al));
1446 
1447 	thread__find_addr_location(thread, m, MAP__VARIABLE, addr, &al);
1448 	if (al.map == NULL) {
1449 		/*
1450 		 * some shared data regions have execute bit set which puts
1451 		 * their mapping in the MAP__FUNCTION type array.
1452 		 * Check there as a fallback option before dropping the sample.
1453 		 */
1454 		thread__find_addr_location(thread, m, MAP__FUNCTION, addr, &al);
1455 	}
1456 
1457 	ams->addr = addr;
1458 	ams->al_addr = al.addr;
1459 	ams->sym = al.sym;
1460 	ams->map = al.map;
1461 }
1462 
1463 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
1464 				     struct addr_location *al)
1465 {
1466 	struct mem_info *mi = zalloc(sizeof(*mi));
1467 
1468 	if (!mi)
1469 		return NULL;
1470 
1471 	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
1472 	ip__resolve_data(al->thread, al->cpumode, &mi->daddr, sample->addr);
1473 	mi->data_src.val = sample->data_src;
1474 
1475 	return mi;
1476 }
1477 
/*
 * Resolve one callchain entry @ip to a map/symbol and append it to the
 * global callchain_cursor.
 *
 * @cpumode: when non-NULL it carries the current cpumode across entries;
 * context-marker ips (>= PERF_CONTEXT_MAX) update it and are not
 * appended.  When NULL, every cpumode is tried for each entry.
 *
 * Returns 0 on success (including a consumed context marker), 1 when a
 * corrupt marker caused the whole chain to be discarded, or the negative
 * error from callchain_cursor_append().
 */
static int add_callchain_ip(struct thread *thread,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip)
{
	struct addr_location al;

	/* NOTE(review): al is only partially initialized here; al.map and
	 * al.addr are presumed to be filled by the lookups below — confirm
	 * before relying on them in new code paths. */
	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
						   ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			/* Context marker: switch cpumode, append nothing. */
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 1;
			}
			return 0;
		}
		thread__find_addr_location(thread, *cpumode, MAP__FUNCTION,
					   ip, &al);
	}

	if (al.sym != NULL) {
		/* Record the first symbol matching the parent pattern. */
		if (sort__has_parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(&callchain_cursor);
		}
	}

	return callchain_cursor_append(&callchain_cursor, al.addr, al.map, al.sym);
}
1534 
1535 struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
1536 					   struct addr_location *al)
1537 {
1538 	unsigned int i;
1539 	const struct branch_stack *bs = sample->branch_stack;
1540 	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
1541 
1542 	if (!bi)
1543 		return NULL;
1544 
1545 	for (i = 0; i < bs->nr; i++) {
1546 		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
1547 		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
1548 		bi[i].flags = bs->entries[i].flags;
1549 	}
1550 	return bi;
1551 }
1552 
#define CHASHSZ 127		/* size of the from-address hash table */
#define CHASHBITS 7		/* bits requested from hash_64() */
#define NO_ENTRY 0xff		/* marks an empty chash slot */

#define PERF_MAX_BRANCH_DEPTH 127

/* Remove loops. */
/*
 * Collapse repeated cycles in a branch-entry array: when the same 'from'
 * address reappears and the entries in between repeat verbatim, one
 * iteration of the cycle is dropped via memmove().  Returns the new
 * entry count (<= nr).  Hash collisions are not handled — a collision
 * only means a loop may go undetected.
 */
static int remove_loops(struct branch_entry *l, int nr)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	/* Indexes are stored in unsigned char slots, so they must fit in 8 bits. */
	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				/* Drop one cycle by shifting the tail down. */
				memmove(l + i, l + i + off,
					(nr - (i + off)) * sizeof(*l));
				nr -= off;
			}
		}
	}
	return nr;
}
1593 
/*
 * Resolve an LBR callstack chain sample.
 * Return:
 * 1 on success, LBR callchain information was appended
 * 0 no available LBR callchain information, caller should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	u64 ip;

	/* Find the PERF_CONTEXT_USER marker; everything before it is kernel. */
	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		if (mix_chain_nr > PERF_MAX_STACK_DEPTH + PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted callchain. skipping...\n");
			return 0;
		}

		/*
		 * Interleave: kernel chain entries (incl. the USER marker),
		 * then LBR 'from' addresses, with entries[0].to supplying
		 * the innermost user frame.  ORDER_CALLER reverses this.
		 */
		for (j = 0; j < mix_chain_nr; j++) {
			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1)
					ip = lbr_stack->entries[j - i - 2].from;
				else
					ip = lbr_stack->entries[0].to;
			} else {
				if (j < lbr_nr)
					ip = lbr_stack->entries[lbr_nr - j - 1].from;
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else
					ip = lbr_stack->entries[0].to;
			}

			err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}
1665 
/*
 * Resolve @sample's callchain into the global callchain_cursor.
 * Tries, in order: the LBR callstack (when the evsel recorded one), a
 * branch-stack prepend (callchain_param.branch_callstack), then the
 * regular frame-pointer chain.  Returns 0 on success or when a corrupt
 * chain was skipped; negative error codes otherwise.
 */
static int thread__resolve_callchain_sample(struct thread *thread,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct symbol **parent,
					    struct addr_location *root_al,
					    int max_stack)
{
	struct branch_stack *branch = sample->branch_stack;
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr);
	u8 cpumode = PERF_RECORD_MISC_USER;
	int i, j, err;
	int skip_idx = -1;
	int first_call = 0;

	callchain_cursor_reset(&callchain_cursor);

	if (has_branch_callstack(evsel)) {
		err = resolve_lbr_callchain_sample(thread, sample, parent,
						   root_al, max_stack);
		if (err)
			return (err < 0) ? err : 0;
		/* err == 0: no LBR info, fall through to fp chain. */
	}

	/*
	 * Based on DWARF debug information, some architectures skip
	 * a callchain entry saved by the kernel.
	 */
	if (chain->nr < PERF_MAX_STACK_DEPTH)
		skip_idx = arch_skip_callchain_idx(thread, chain);

	/*
	 * Add branches to call stack for easier browsing. This gives
	 * more context for a sample than just the callers.
	 *
	 * This uses individual histograms of paths compared to the
	 * aggregated histograms the normal LBR mode uses.
	 *
	 * Limitations for now:
	 * - No extra filters
	 * - No annotations (should annotate somehow)
	 */

	if (branch && callchain_param.branch_callstack) {
		int nr = min(max_stack, (int)branch->nr);
		struct branch_entry be[nr];

		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
			pr_warning("corrupted branch chain. skipping...\n");
			goto check_calls;
		}

		for (i = 0; i < nr; i++) {
			if (callchain_param.order == ORDER_CALLEE) {
				be[i] = branch->entries[i];
				/*
				 * Check for overlap into the callchain.
				 * The return address is one off compared to
				 * the branch entry. To adjust for this
				 * assume the calling instruction is not longer
				 * than 8 bytes.
				 */
				if (i == skip_idx ||
				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
					first_call++;
				else if (be[i].from < chain->ips[first_call] &&
				    be[i].from >= chain->ips[first_call] - 8)
					first_call++;
			} else
				be[i] = branch->entries[branch->nr - i - 1];
		}

		nr = remove_loops(be, nr);

		for (i = 0; i < nr; i++) {
			/* Append the return target, then the branch source. */
			err = add_callchain_ip(thread, parent, root_al,
					       NULL, be[i].to);
			if (!err)
				err = add_callchain_ip(thread, parent, root_al,
						       NULL, be[i].from);
			if (err == -EINVAL)
				break;
			if (err)
				return err;
		}
		/* The branch entries replace this many fp chain entries. */
		chain_nr -= nr;
	}

check_calls:
	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = first_call; i < chain_nr; i++) {
		u64 ip;

		/* ORDER_CALLER walks the chain from the outermost caller. */
		if (callchain_param.order == ORDER_CALLEE)
			j = i;
		else
			j = chain->nr - i - 1;

#ifdef HAVE_SKIP_CALLCHAIN_IDX
		if (j == skip_idx)
			continue;
#endif
		ip = chain->ips[j];

		err = add_callchain_ip(thread, parent, root_al, &cpumode, ip);

		if (err)
			return (err < 0) ? err : 0;
	}

	return 0;
}
1782 
1783 static int unwind_entry(struct unwind_entry *entry, void *arg)
1784 {
1785 	struct callchain_cursor *cursor = arg;
1786 	return callchain_cursor_append(cursor, entry->ip,
1787 				       entry->map, entry->sym);
1788 }
1789 
1790 int thread__resolve_callchain(struct thread *thread,
1791 			      struct perf_evsel *evsel,
1792 			      struct perf_sample *sample,
1793 			      struct symbol **parent,
1794 			      struct addr_location *root_al,
1795 			      int max_stack)
1796 {
1797 	int ret = thread__resolve_callchain_sample(thread, evsel,
1798 						   sample, parent,
1799 						   root_al, max_stack);
1800 	if (ret)
1801 		return ret;
1802 
1803 	/* Can we do dwarf post unwind? */
1804 	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
1805 	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
1806 		return 0;
1807 
1808 	/* Bail out if nothing was captured. */
1809 	if ((!sample->user_regs.regs) ||
1810 	    (!sample->user_stack.size))
1811 		return 0;
1812 
1813 	return unwind__get_entries(unwind_entry, &callchain_cursor,
1814 				   thread, sample, max_stack);
1815 
1816 }
1817 
1818 int machine__for_each_thread(struct machine *machine,
1819 			     int (*fn)(struct thread *thread, void *p),
1820 			     void *priv)
1821 {
1822 	struct rb_node *nd;
1823 	struct thread *thread;
1824 	int rc = 0;
1825 
1826 	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
1827 		thread = rb_entry(nd, struct thread, rb_node);
1828 		rc = fn(thread, priv);
1829 		if (rc != 0)
1830 			return rc;
1831 	}
1832 
1833 	list_for_each_entry(thread, &machine->dead_threads, node) {
1834 		rc = fn(thread, priv);
1835 		if (rc != 0)
1836 			return rc;
1837 	}
1838 	return rc;
1839 }
1840 
1841 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1842 				  struct target *target, struct thread_map *threads,
1843 				  perf_event__handler_t process, bool data_mmap)
1844 {
1845 	if (target__has_task(target))
1846 		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
1847 	else if (target__has_cpu(target))
1848 		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
1849 	/* command specified */
1850 	return 0;
1851 }
1852 
1853 pid_t machine__get_current_tid(struct machine *machine, int cpu)
1854 {
1855 	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
1856 		return -1;
1857 
1858 	return machine->current_tid[cpu];
1859 }
1860 
1861 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
1862 			     pid_t tid)
1863 {
1864 	struct thread *thread;
1865 
1866 	if (cpu < 0)
1867 		return -EINVAL;
1868 
1869 	if (!machine->current_tid) {
1870 		int i;
1871 
1872 		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
1873 		if (!machine->current_tid)
1874 			return -ENOMEM;
1875 		for (i = 0; i < MAX_NR_CPUS; i++)
1876 			machine->current_tid[i] = -1;
1877 	}
1878 
1879 	if (cpu >= MAX_NR_CPUS) {
1880 		pr_err("Requested CPU %d too large. ", cpu);
1881 		pr_err("Consider raising MAX_NR_CPUS\n");
1882 		return -EINVAL;
1883 	}
1884 
1885 	machine->current_tid[cpu] = tid;
1886 
1887 	thread = machine__findnew_thread(machine, pid, tid);
1888 	if (!thread)
1889 		return -ENOMEM;
1890 
1891 	thread->cpu = cpu;
1892 	thread__put(thread);
1893 
1894 	return 0;
1895 }
1896 
1897 int machine__get_kernel_start(struct machine *machine)
1898 {
1899 	struct map *map = machine__kernel_map(machine, MAP__FUNCTION);
1900 	int err = 0;
1901 
1902 	/*
1903 	 * The only addresses above 2^63 are kernel addresses of a 64-bit
1904 	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
1905 	 * all addresses including kernel addresses are less than 2^32.  In
1906 	 * that case (32-bit system), if the kernel mapping is unknown, all
1907 	 * addresses will be assumed to be in user space - see
1908 	 * machine__kernel_ip().
1909 	 */
1910 	machine->kernel_start = 1ULL << 63;
1911 	if (map) {
1912 		err = map__load(map, machine->symbol_filter);
1913 		if (map->start)
1914 			machine->kernel_start = map->start;
1915 	}
1916 	return err;
1917 }
1918 
/*
 * Look up @filename among this machine's dsos, creating a new dso entry
 * if it is not there yet.
 */
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return __dsos__findnew(&machine->dsos, filename);
}
1923