xref: /linux/tools/perf/util/machine.c (revision 00a6d7b6762c27d441e9ac8faff36384bc0fc180)
#include "callchain.h"
#include "debug.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "sort.h"
#include "strlist.h"
#include "thread.h"
#include <stdbool.h>
#include <symbol/kallsyms.h>
#include "unwind.h"

int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	map_groups__init(&machine->kmaps);
	RB_CLEAR_NODE(&machine->rb_node);
	INIT_LIST_HEAD(&machine->user_dsos);
	INIT_LIST_HEAD(&machine->kernel_dsos);

	machine->threads = RB_ROOT;
	INIT_LIST_HEAD(&machine->dead_threads);
	machine->last_match = NULL;

	machine->kmaps.machine = machine;
	machine->pid = pid;

	machine->symbol_filter = NULL;
	machine->id_hdr_size = 0;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, 0,
								pid);
		char comm[64];

		if (thread == NULL) {
			/* Don't leak the root_dir copy on this error path. */
			zfree(&machine->root_dir);
			return -ENOMEM;
		}

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
	}

	return 0;
}

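/*
 * Example (editorial sketch, not part of the original file): callers
 * embed or allocate a struct machine and initialize it with a guest's
 * root directory and kernel id, e.g.:
 *
 *	struct machine guest;
 *
 *	if (machine__init(&guest, "/guests/8080", 8080) < 0)
 *		return -1;
 *	...
 *	machine__exit(&guest);
 */
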
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		if (machine__init(machine, "", HOST_KERNEL_ID) < 0)
			goto out_delete;

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

static void dsos__delete(struct list_head *dsos)
{
	struct dso *pos, *n;

	list_for_each_entry_safe(pos, n, dsos, node) {
		list_del(&pos->node);
		dso__delete(pos);
	}
}

void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd = rb_first(&machine->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/*
		 * Advance the iterator before erasing: rb_erase() may
		 * rebalance the tree, making rb_next() on the removed
		 * node unreliable.
		 */
		nd = rb_next(nd);
		rb_erase(&t->rb_node, &machine->threads);
		thread__delete(t);
	}
}

void machine__exit(struct machine *machine)
{
	map_groups__exit(&machine->kmaps);
	dsos__delete(&machine->user_dsos);
	dsos__delete(&machine->kernel_dsos);
	zfree(&machine->root_dir);
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}

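/*
 * Example (editorial sketch): typical lifecycle of a heap-allocated
 * host machine, pairing machine__new_host() with machine__delete(),
 * which is machine__exit() plus free():
 *
 *	struct machine *host = machine__new_host();
 *
 *	if (host == NULL)
 *		return -1;
 *	...
 *	machine__delete(host);
 */
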
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT;
	machines->symbol_filter = NULL;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	machine->symbol_filter = machines->symbol_filter;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color(&machine->rb_node, &machines->guests);

	return machine;
}

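/*
 * Example (editorial sketch): guests are kept in an rbtree ordered by
 * pid, so adding and later finding one looks like:
 *
 *	struct machine *guest;
 *
 *	guest = machines__add(&machines, 8080, "/guests/8080");
 *	...
 *	guest = machines__find(&machines, 8080);
 */
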
void machines__set_symbol_filter(struct machines *machines,
				 symbol_filter_t symbol_filter)
{
	struct rb_node *nd;

	machines->symbol_filter = symbol_filter;
	machines->host.symbol_filter = symbol_filter;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->symbol_filter = symbol_filter;
	}
}

struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		/*
		 * Remember the default guest (pid 0) if we pass it on
		 * the way down, as a fallback when there is no exact
		 * pid match.
		 */
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		snprintf(path, sizeof(path), "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(true, NULL);

			/* Warn only once per inaccessible path. */
			if (seen && !strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

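/*
 * Note (editorial): with --guestmount, each guest's filesystem is
 * expected under <guestmount>/<pid>, e.g. /guests/8080/proc/kallsyms
 * for guest pid 8080; that per-pid directory becomes the guest
 * machine's root_dir above.
 */
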
void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

char *machine__mmap_name(struct machine *machine, char *bf, size_t size)
{
	if (machine__is_host(machine))
		snprintf(bf, size, "[%s]", "kernel.kallsyms");
	else if (machine__is_default_guest(machine))
		snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
	else {
		snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms",
			 machine->pid);
	}

	return bf;
}

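/*
 * Example (editorial): the names produced above are
 * "[kernel.kallsyms]" for the host, "[guest.kernel.kallsyms]" for the
 * default guest, and e.g. "[guest.kernel.kallsyms.8080]" for the
 * guest with pid 8080.
 */
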
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first(&machines->guests); node; node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}

static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid, pid_t tid,
						bool create)
{
	struct rb_node **p = &machine->threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - TID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (machine->last_match && machine->last_match->tid == tid) {
		if (pid && pid != machine->last_match->pid_)
			machine->last_match->pid_ = pid;
		return machine->last_match;
	}

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			machine->last_match = th;
			if (pid && pid != th->pid_)
				th->pid_ = pid;
			return th;
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &machine->threads);
		machine->last_match = th;
	}

	return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, false);
}

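/*
 * Example (editorial sketch): event processing typically resolves the
 * thread for a sample's pid/tid, creating it on first sight:
 *
 *	struct thread *t = machine__findnew_thread(machine, sample->pid,
 *						   sample->tid);
 *
 * machine__find_thread() is the non-creating variant and may return
 * NULL.
 */
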
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL || thread__set_comm(thread, event->comm.comm, sample->time)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

struct map *machine__new_module(struct machine *machine, u64 start,
				const char *filename)
{
	struct map *map;
	struct dso *dso = __dsos__findnew(&machine->kernel_dsos, filename);

	if (dso == NULL)
		return NULL;

	map = map__new2(start, dso, MAP__FUNCTION);
	if (map == NULL)
		return NULL;

	if (machine__is_host(machine))
		dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
	else
		dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE;
	map_groups__insert(&machine->kmaps, map);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.kernel_dsos, fp) +
		     __dsos__fprintf(&machines->host.user_dsos, fp);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += __dsos__fprintf(&pos->kernel_dsos, fp);
		ret += __dsos__fprintf(&pos->user_dsos, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *machine, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&machine->kernel_dsos, fp, skip, parm) +
	       __dsos__fprintf_buildid(&machine->user_dsos, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine->vmlinux_maps[MAP__FUNCTION]->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];
		if (dso__build_id_filename(kdso, filename, sizeof(filename)))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = NULL;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		vmlinux_name = symbol_conf.vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = "[kernel.kallsyms]";

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[kernel]",
					     DSO_TYPE_KERNEL);
	} else {
		char bf[PATH_MAX];

		if (machine__is_default_guest(machine))
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
		if (!vmlinux_name)
			vmlinux_name = machine__mmap_name(machine, bf,
							  sizeof(bf));

		kernel = dso__kernel_findnew(machine, vmlinux_name,
					     "[guest.kernel]",
					     DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

struct process_args {
	u64 start;
};

static int symbol__in_kernel(void *arg, const char *name,
			     char type __maybe_unused, u64 start)
{
	struct process_args *args = arg;

	/* Module symbols carry a "[module]" suffix; skip those. */
	if (strchr(name, '['))
		return 0;

	/* The first symbol without one is in the kernel proper: stop here. */
	args->start = start;
	return 1;
}

static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
					   size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

/* Figure out the start address of the kernel map from /proc/kallsyms */
static u64 machine__get_kernel_start_addr(struct machine *machine)
{
	char filename[PATH_MAX];
	struct process_args args;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
		return 0;

	return args.start;
}

int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	enum map_type type;
	u64 start = machine__get_kernel_start_addr(machine);

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		machine->vmlinux_maps[type] = map__new2(start, kernel, type);
		if (machine->vmlinux_maps[type] == NULL)
			return -1;

		/* Kernel maps are identity mapped: ip == map address. */
		machine->vmlinux_maps[type]->map_ip =
			machine->vmlinux_maps[type]->unmap_ip =
				identity__map_ip;
		kmap = map__kmap(machine->vmlinux_maps[type]);
		kmap->kmaps = &machine->kmaps;
		map_groups__insert(&machine->kmaps,
				   machine->vmlinux_maps[type]);
	}

	return 0;
}

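/*
 * Note (editorial): with identity__map_ip in place, map->map_ip(map, ip)
 * simply returns ip, so no relocation arithmetic is applied when
 * translating a sample address into the kernel map and back.
 */
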
void machine__destroy_kernel_maps(struct machine *machine)
{
	enum map_type type;

	for (type = 0; type < MAP__NR_TYPES; ++type) {
		struct kmap *kmap;

		if (machine->vmlinux_maps[type] == NULL)
			continue;

		kmap = map__kmap(machine->vmlinux_maps[type]);
		map_groups__remove(&machine->kmaps,
				   machine->vmlinux_maps[type]);
		if (kmap->ref_reloc_sym) {
			/*
			 * ref_reloc_sym is shared among all maps, so free just
			 * on one of them.
			 */
			if (type == MAP__FUNCTION) {
				zfree((char **)&kmap->ref_reloc_sym->name);
				zfree(&kmap->ref_reloc_sym);
			} else
				kmap->ref_reloc_sym = NULL;
		}

		map__delete(machine->vmlinux_maps[type]);
		machine->vmlinux_maps[type] = NULL;
	}
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			/* Reset errno so a stale ERANGE can't leak in. */
			errno = 0;
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			snprintf(path, sizeof(path), "%s/%s/proc/kallsyms",
				 symbol_conf.guestmount,
				 namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		/* scandir() allocates each entry as well as the array. */
		for (i = 0; i < items; i++)
			free(namelist[i]);
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename,
			   enum map_type type, symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_kallsyms(map->dso, filename, map, filter);

	if (ret > 0) {
		dso__set_loaded(map->dso, type);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		__map_groups__fixup_end(&machine->kmaps, type);
	}

	return ret;
}

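/*
 * Example (editorial sketch): loading host kernel symbols into the
 * function map:
 *
 *	int nr = machine__load_kallsyms(machine, "/proc/kallsyms",
 *					MAP__FUNCTION, NULL);
 *
 * A positive return is the number of symbols loaded.
 */
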
int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
			       symbol_filter_t filter)
{
	struct map *map = machine->vmlinux_maps[type];
	int ret = dso__load_vmlinux_path(map->dso, map, filter);

	if (ret > 0)
		dso__set_loaded(map->dso, type);

	return ret;
}

static void map_groups__fixup_end(struct map_groups *mg)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; ++i)
		__map_groups__fixup_end(mg, i);
}

static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	snprintf(version, sizeof(version), "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	version[0] = '\0';
	tmp = fgets(version, sizeof(version), file);
	fclose(file);

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

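/*
 * Example (editorial): given a /proc/version line such as
 * "Linux version 3.16.0-rc1 (build@host) ...", the function returns
 * a strdup()ed "3.16.0-rc1" - everything after the prefix, up to the
 * first space.
 */
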
static int map_groups__set_modules_path_dir(struct map_groups *mg,
				const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return a bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			char *dot = strrchr(dent->d_name, '.');
			char dso_name[PATH_MAX];
			struct map *map;
			char *long_name;

			if (dot == NULL || strcmp(dot, ".ko"))
				continue;
			snprintf(dso_name, sizeof(dso_name), "[%.*s]",
				 (int)(dot - dent->d_name), dent->d_name);

			strxfrchar(dso_name, '-', '_');
			map = map_groups__find_by_name(mg, MAP__FUNCTION,
						       dso_name);
			if (map == NULL)
				continue;

			long_name = strdup(path);
			if (long_name == NULL) {
				ret = -1;
				goto out;
			}
			dso__set_long_name(map->dso, long_name, true);
			dso__kernel_module_get_build_id(map->dso, "");
		}
	}

out:
	closedir(dir);
	return ret;
}

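/*
 * Example (editorial): a module file name is normalized into the short
 * dso name used by the module maps: "usb-storage.ko" loses its
 * extension, gets brackets, and has '-' mapped to '_', yielding
 * "[usb_storage]".
 */
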
static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}

static int machine__create_module(void *arg, const char *name, u64 start)
{
	struct machine *machine = arg;
	struct map *map;

	map = machine__new_module(machine, start, name);
	if (map == NULL)
		return -1;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (machine__set_modules_path(machine))
		pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	char filename[PATH_MAX];
	const char *name;
	u64 addr = 0;
	int i;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		addr = kallsyms__get_function_start(filename, name);
		if (addr)
			break;
	}
	if (!addr)
		return -1;

	if (kernel == NULL ||
	    __machine__create_kernel_maps(machine, kernel) < 0)
		return -1;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	/*
	 * Now that we have all the maps created, just set the ->end of them:
	 */
	map_groups__fixup_end(&machine->kmaps);

	if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
					     addr)) {
		machine__destroy_kernel_maps(machine);
		return -1;
	}

	return 0;
}

static void machine__set_kernel_mmap_len(struct machine *machine,
					 union perf_event *event)
{
	int i;

	for (i = 0; i < MAP__NR_TYPES; i++) {
		machine->vmlinux_maps[i]->start = event->mmap.start;
		machine->vmlinux_maps[i]->end   = (event->mmap.start +
						   event->mmap.len);
		/*
		 * Be a bit paranoid here, some perf.data files came with
		 * a zero sized synthesized MMAP event for the kernel.
		 */
		if (machine->vmlinux_maps[i]->end == 0)
			machine->vmlinux_maps[i]->end = ~0ULL;
	}
}

static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->kernel_dsos, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}

static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
					"[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else {
			/* Bounded copy: the event filename may be longer. */
			snprintf(short_module_name, sizeof(short_module_name),
				 "%s", event->mmap.filename);
		}

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		dso__set_short_name(map->dso, name, true);
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		machine__set_kernel_mmap_len(machine, event);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * Preload dso of guest kernel and modules.
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

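/*
 * Note (editorial): the prefix comparison above deliberately drops the
 * trailing ']' (strlen(kmmap_prefix) - 1), so an event filename such
 * as "[kernel.kallsyms]_text" also matches; the remainder after the
 * full prefix ("_text" here) names the ref reloc symbol.
 */
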
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	map = map__new(&machine->user_dsos, event->mmap2.start,
			event->mmap2.len, event->mmap2.pgoff,
			event->mmap2.pid, event->mmap2.maj,
			event->mmap2.min, event->mmap2.ino,
			event->mmap2.ino_generation,
			event->mmap2.filename, type);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}

int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread;
	struct map *map;
	enum map_type type;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_DATA)
		type = MAP__VARIABLE;
	else
		type = MAP__FUNCTION;

	/*
	 * Legacy MMAP events carry no maj/min/ino/generation info,
	 * hence the zeros.
	 */
	map = map__new(&machine->user_dsos, event->mmap.start,
			event->mmap.len, event->mmap.pgoff,
			event->mmap.pid, 0, 0, 0, 0,
			event->mmap.filename,
			type);

	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

static void machine__remove_thread(struct machine *machine, struct thread *th)
{
	machine->last_match = NULL;
	rb_erase(&th->rb_node, &machine->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &machine->dead_threads);
}

int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL)
		machine__remove_thread(machine, thread);

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	/* PERF_RECORD_EXIT shares the fork_event layout, hence ->fork. */
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL)
		thread__exited(thread);

	return 0;
}

int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	return sym->name && !regexec(regex, sym->name, 0, NULL, 0);
}

static void ip__resolve_ams(struct machine *machine, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest or hypervisor:
	 * branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we try each cpumode consecutively until we find a
	 * match, or else the symbol remains unknown.
	 */
	thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

static void ip__resolve_data(struct machine *machine, struct thread *thread,
			     u8 m, struct addr_map_symbol *ams, u64 addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
				   &al);
	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = zalloc(sizeof(*mi));

	if (!mi)
		return NULL;

	ip__resolve_ams(al->machine, al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->machine, al->thread, al->cpumode,
			 &mi->daddr, sample->addr);
	mi->data_src.val = sample->data_src;

	return mi;
}

struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->machine, al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->machine, al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static int machine__resolve_callchain_sample(struct machine *machine,
					     struct thread *thread,
					     struct ip_callchain *chain,
					     struct symbol **parent,
					     struct addr_location *root_al,
					     int max_stack)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	int chain_nr = min(max_stack, (int)chain->nr);
	int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain_nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = 0;
		thread__find_addr_location(thread, machine, cpumode,
					   MAP__FUNCTION, ip, &al);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_regex(al.sym, &parent_regex))
				*parent = al.sym;
			else if (have_ignore_callees && root_al &&
			  symbol__match_regex(al.sym, &ignore_callees_regex)) {
				/*
				 * Treat this symbol as the root,
				 * forgetting its callees.
				 */
				*root_al = al;
				callchain_cursor_reset(&callchain_cursor);
			}
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

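/*
 * Example (editorial): a raw callchain interleaves context markers and
 * addresses, e.g.
 *
 *	{ PERF_CONTEXT_KERNEL, k_ip1, k_ip2, PERF_CONTEXT_USER, u_ip1 }
 *
 * The markers compare >= PERF_CONTEXT_MAX and only switch the cpumode
 * used to resolve the addresses that follow; they are not themselves
 * appended to the cursor.
 */
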
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;

	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int machine__resolve_callchain(struct machine *machine,
			       struct perf_evsel *evsel,
			       struct thread *thread,
			       struct perf_sample *sample,
			       struct symbol **parent,
			       struct addr_location *root_al,
			       int max_stack)
{
	int ret;

	ret = machine__resolve_callchain_sample(machine, thread,
						sample->callchain, parent,
						root_al, max_stack);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
				   thread, sample, max_stack);
}

int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;

	for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
		thread = rb_entry(nd, struct thread, rb_node);
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}

	list_for_each_entry(thread, &machine->dead_threads, node) {
		rc = fn(thread, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}

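/*
 * Example (editorial sketch): iteration stops at the first non-zero
 * return from the callback; dead threads are visited too.
 *
 *	static int count_thread(struct thread *t __maybe_unused, void *p)
 *	{
 *		(*(int *)p)++;
 *		return 0;
 *	}
 *	...
 *	int n = 0;
 *	machine__for_each_thread(machine, count_thread, &n);
 */
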
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine, data_mmap);
	/* command specified */
	return 0;
}