xref: /linux/tools/perf/util/machine.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
// SPDX-License-Identifier: GPL-2.0
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <regex.h>
#include <stdlib.h>
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "env.h"
#include "event.h"
#include "evsel.h"
#include "hist.h"
#include "machine.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "mem-info.h"
#include "path.h"
#include "srcline.h"
#include "symbol.h"
#include "sort.h"
#include "strlist.h"
#include "target.h"
#include "thread.h"
#include "util.h"
#include "vdso.h"
#include <stdbool.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include "unwind.h"
#include "linux/hash.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <internal/lib.h> // page_size
#include "cgroup.h"
#include "arm64-frame-pointer-unwind-support.h"

#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/zalloc.h>

static struct dso *machine__kernel_dso(struct machine *machine)
{
	return map__dso(machine->vmlinux_map);
}

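/*
 * Set the name used for the kernel mmap: "[kernel.kallsyms]" for the host,
 * "[guest.kernel.kallsyms]" for the default guest, and
 * "[guest.kernel.kallsyms.<pid>]" for a specific guest machine.
 */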
static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}

static void thread__set_guest_comm(struct thread *thread, pid_t pid)
{
	char comm[64];

	snprintf(comm, sizeof(comm), "[guest/%d]", pid);
	thread__set_comm(thread, comm, 0);
}

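/*
 * Initialize a machine: allocate its kernel maps, DSO and thread containers,
 * set its mmap name and, for guest machines, create the initial guest thread.
 */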
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	machine->kmaps = maps__new(machine);
	if (machine->kmaps == NULL)
		return -ENOMEM;

	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	threads__init(&machine->threads);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		goto out;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);

		if (thread == NULL)
			goto out;

		thread__set_guest_comm(thread, pid);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->kmaps);
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}

struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;

		machine->env = &perf_env;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}

struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}

void machine__delete_threads(struct machine *machine)
{
	threads__remove_all_threads(&machine->threads);
}

void machine__exit(struct machine *machine)
{
	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	maps__zput(machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);
	zfree(&machine->kallsyms_filename);

	threads__exit(&machine->threads);
}

void machine__delete(struct machine *machine)
{
	if (machine) {
		machine__exit(machine);
		free(machine);
	}
}

void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT_CACHED;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}

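/*
 * Add a guest machine for 'pid', keeping machines->guests ordered by pid in
 * the cached rb tree.
 */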
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));
	bool leftmost = true;

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);

	machine->machines = machines;

	return machine;
}

void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}

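/*
 * Look up a machine by pid. The host is returned for HOST_KERNEL_ID; if no
 * exact guest match exists, fall back to the default guest machine (pid 0),
 * if one was seen during the walk.
 */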
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}

struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}

struct machine *machines__find_guest(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__find(machines, pid);

	if (!machine)
		machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
	return machine;
}

/*
 * A common case for KVM test programs is that the test program acts as the
 * hypervisor, creating, running and destroying the virtual machine, and
 * providing the guest object code from its own object code. In this case,
 * the VM is not running an OS, but only the functions loaded into it by the
 * hypervisor test program, and conveniently, loaded at the same virtual
 * addresses.
 *
 * Normally to resolve addresses, MMAP events are needed to map addresses
 * back to the object code and debug symbols for that object code.
 *
 * Currently, there is no way to get such mapping information from guests
 * but, in the scenario described above, the guest has the same mappings
 * as the hypervisor, so support for that scenario can be achieved.
 *
 * To support that, copy the host thread's maps to the guest thread's maps.
 * Note, we do not discover the guest until we encounter a guest event,
 * which works well because it is not until then that we know that the host
 * thread's maps have been set up.
 *
 * This function returns the guest thread. Apart from keeping the data
 * structures sane, using a thread belonging to the guest machine, instead
 * of the host thread, allows it to have its own comm (refer
 * thread__set_guest_comm()).
 */
static struct thread *findnew_guest_code(struct machine *machine,
					 struct machine *host_machine,
					 pid_t pid)
{
	struct thread *host_thread;
	struct thread *thread;
	int err;

	if (!machine)
		return NULL;

	thread = machine__findnew_thread(machine, -1, pid);
	if (!thread)
		return NULL;

	/* Assume maps are set up if there are any */
	if (!maps__empty(thread__maps(thread)))
		return thread;

	host_thread = machine__find_thread(host_machine, -1, pid);
	if (!host_thread)
		goto out_err;

	thread__set_guest_comm(thread, pid);

	/*
	 * Guest code can be found in hypervisor process at the same address
	 * so copy host maps.
	 */
	err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
	thread__put(host_thread);
	if (err)
		goto out_err;

	return thread;

out_err:
	thread__zput(thread);
	return NULL;
}

struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
{
	struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
	struct machine *machine = machines__findnew(machines, pid);

	return findnew_guest_code(machine, host_machine, pid);
}

struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
{
	struct machines *machines = machine->machines;
	struct machine *host_machine;

	if (!machines)
		return NULL;

	host_machine = machines__find(machines, HOST_KERNEL_ID);

	return findnew_guest_code(machine, host_machine, pid);
}

void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		process(pos, data);
	}
}

void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first_cached(&machines->guests); node;
	     node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}

	return;
}

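/*
 * A thread may be created before its pid is known (e.g. from a sample that
 * carried only a tid). Once the pid is seen, record it and share the thread
 * group leader's maps with this thread.
 */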
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
		return;

	thread__set_pid(th, pid);

	if (thread__pid(th) == thread__tid(th))
		return;

	leader = machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
	if (!leader)
		goto out_err;

	if (!thread__maps(leader))
		thread__set_maps(leader, maps__new(machine));

	if (!thread__maps(leader))
		goto out_err;

	if (thread__maps(th) == thread__maps(leader))
		goto out_put;

	if (thread__maps(th)) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid.  Consequently there never should be any maps on a thread
		 * with an unknown pid.  Just print an error if there are.
		 */
		if (!maps__empty(thread__maps(th)))
			pr_err("Discarding thread maps for %d:%d\n",
				thread__pid(th), thread__tid(th));
		maps__put(thread__maps(th));
	}

	thread__set_maps(th, maps__get(thread__maps(leader)));
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
	goto out_put;
}

/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *__machine__findnew_thread(struct machine *machine,
						pid_t pid,
						pid_t tid,
						bool create)
{
	struct thread *th = threads__find(&machine->threads, tid);
	bool created;

	if (th) {
		machine__update_thread_pid(machine, th, pid);
		return th;
	}
	if (!create)
		return NULL;

	th = threads__findnew(&machine->threads, pid, tid, &created);
	if (created) {
		/*
		 * We have to initialize maps separately after the rb tree is
		 * updated.
		 *
		 * The reason is that we call machine__findnew_thread within
		 * thread__init_maps to find the thread leader and that would
		 * mess up the rb tree.
		 */
		if (thread__init_maps(th, machine)) {
			pr_err("Thread init failed thread %d\n", pid);
			threads__remove(&machine->threads, th);
			thread__put(th);
			return NULL;
		}
	} else
		machine__update_thread_pid(machine, th, pid);

	return th;
}

struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, /*create=*/true);
}

struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	return __machine__findnew_thread(machine, pid, tid, /*create=*/false);
}

/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only 1. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
struct thread *machine__idle_thread(struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, 0, 0);

	if (!thread || thread__set_comm(thread, "swapper", 0) ||
	    thread__set_namespaces(thread, 0, NULL))
		pr_err("problem inserting idle task for machine pid %d\n", machine->pid);

	return thread;
}

struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);
	else
		return thread__comm(thread);
}

int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}

int machine__process_cgroup_event(struct machine *machine,
				  union perf_event *event,
				  struct perf_sample *sample __maybe_unused)
{
	struct cgroup *cgrp;

	if (dump_trace)
		perf_event__fprintf_cgroup(event, stdout);

	cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
	if (cgrp == NULL)
		return -ENOMEM;

	return 0;
}

int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}

int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "%s\n",
		    sample->id, event->lost_samples.lost,
		    event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF ? " (BPF)" : "");
	return 0;
}

int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
					    union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux_output_hw_id(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}

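/*
 * Handle a PERF_RECORD_KSYMBOL register event: find or create a kernel map
 * covering the symbol's address range and insert the symbol into its DSO.
 */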
static int machine__process_ksymbol_register(struct machine *machine,
					     union perf_event *event,
					     struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct dso *dso = NULL;
	struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
	int err = 0;

	if (!map) {
		dso = dso__new(event->ksymbol.name);

		if (!dso) {
			err = -ENOMEM;
			goto out;
		}
		dso__set_kernel(dso, DSO_SPACE__KERNEL);
		map = map__new2(0, dso);
		if (!map) {
			err = -ENOMEM;
			goto out;
		}
		if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
			dso__set_binary_type(dso, DSO_BINARY_TYPE__OOL);
			dso__data(dso)->file_size = event->ksymbol.len;
			dso__set_loaded(dso);
		}

		map__set_start(map, event->ksymbol.addr);
		map__set_end(map, map__start(map) + event->ksymbol.len);
		err = maps__insert(machine__kernel_maps(machine), map);
		if (err) {
			err = -ENOMEM;
			goto out;
		}

		dso__set_loaded(dso);

		if (is_bpf_image(event->ksymbol.name)) {
			dso__set_binary_type(dso, DSO_BINARY_TYPE__BPF_IMAGE);
			dso__set_long_name(dso, "", false);
		}
	} else {
		dso = dso__get(map__dso(map));
	}

	sym = symbol__new(map__map_ip(map, map__start(map)),
			  event->ksymbol.len,
			  0, 0, event->ksymbol.name);
	if (!sym) {
		err = -ENOMEM;
		goto out;
	}
	dso__insert_symbol(dso, sym);
out:
	map__put(map);
	dso__put(dso);
	return err;
}

static int machine__process_ksymbol_unregister(struct machine *machine,
					       union perf_event *event,
					       struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map;

	map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
	if (!map)
		return 0;

	if (!RC_CHK_EQUAL(map, machine->vmlinux_map))
		maps__remove(machine__kernel_maps(machine), map);
	else {
		struct dso *dso = map__dso(map);

		sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
		if (sym)
			dso__delete_symbol(dso, sym);
	}
	map__put(map);
	return 0;
}

int machine__process_ksymbol(struct machine *machine __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_ksymbol(event, stdout);

	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
		return machine__process_ksymbol_unregister(machine, event,
							   sample);
	return machine__process_ksymbol_register(machine, event, sample);
}

int machine__process_text_poke(struct machine *machine, union perf_event *event,
			       struct perf_sample *sample __maybe_unused)
{
	struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct dso *dso = map ? map__dso(map) : NULL;

	if (dump_trace)
		perf_event__fprintf_text_poke(event, machine, stdout);

	if (!event->text_poke.new_len)
		goto out;

	if (cpumode != PERF_RECORD_MISC_KERNEL) {
		pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
		goto out;
	}

	if (dso) {
		u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
		int ret;

		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		map__load(map);
		ret = dso__data_write_cache_addr(dso, map, machine,
						 event->text_poke.addr,
						 new_bytes,
						 event->text_poke.new_len);
		if (ret != event->text_poke.new_len)
			pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
				 event->text_poke.addr);
	} else {
		pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
			 event->text_poke.addr);
	}
out:
	map__put(map);
	return 0;
}

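/*
 * Create a map for a kernel module at 'start', finding or creating its DSO
 * from the parsed module path, and insert it into the kernel maps.
 */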
static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
					      const char *filename)
{
	struct map *map = NULL;
	struct kmod_path m;
	struct dso *dso;
	int err;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	dso = dsos__findnew_module_dso(&machine->dsos, machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	err = maps__insert(machine__kernel_maps(machine), map);
	/* If maps__insert failed, return NULL. */
	if (err) {
		map__put(map);
		map = NULL;
	}
out:
	/* put the dso here, corresponding to dsos__findnew_module_dso */
	dso__put(dso);
	zfree(&m.name);
	return map;
}

size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = dsos__fprintf(&machines->host.dsos, fp);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += dsos__fprintf(&pos->dsos, fp);
	}

	return ret;
}

size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return dsos__fprintf_buildid(&m->dsos, fp, skip, parm);
}

size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}
	return ret;
}

size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_dso(machine);

	if (dso__has_build_id(kdso)) {
		char filename[PATH_MAX];

		if (dso__build_id_filename(kdso, filename, sizeof(filename), false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}

	for (i = 0; i < vmlinux_path__nr_entries; ++i) {
		printed += fprintf(fp, "[%d] %s\n", i + dso__has_build_id(kdso),
				   vmlinux_path[i]);
	}
	return printed;
}

struct machine_fprintf_cb_args {
	FILE *fp;
	size_t printed;
};

static int machine_fprintf_cb(struct thread *thread, void *data)
{
	struct machine_fprintf_cb_args *args = data;

	/* TODO: handle fprintf errors. */
	args->printed += thread__fprintf(thread, args->fp);
	return 0;
}

size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct machine_fprintf_cb_args args = {
		.fp = fp,
		.printed = 0,
	};
	size_t ret = fprintf(fp, "Threads: %zu\n", threads__nr(&machine->threads));

	machine__for_each_thread(machine, machine_fprintf_cb, &args);
	return ret + args.printed;
}

static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_SPACE__KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_SPACE__KERNEL_GUEST);
	}

	if (kernel != NULL && (!dso__has_build_id(kernel)))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}

void machine__get_kallsyms_filename(struct machine *machine, char *buf,
				    size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}

const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};

/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name,
					     u64 *start, u64 *end)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;

	err = kallsyms__get_symbol_start(filename, "_edata", &addr);
	if (err)
		err = kallsyms__get_function_start(filename, "_etext", &addr);
	if (!err)
		*end = addr;

	return 0;
}

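/*
 * Add an extra kernel map (e.g. an x86_64 PTI entry trampoline) backed by the
 * kernel DSO, covering [xm->start, xm->end) at file offset xm->pgoff.
 */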
int machine__create_extra_kernel_map(struct machine *machine,
				     struct dso *kernel,
				     struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;
	int err;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -ENOMEM;

	map__set_end(map, xm->end);
	map__set_pgoff(map, xm->pgoff);

	kmap = map__kmap(map);

	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	err = maps__insert(machine__kernel_maps(machine), map);

	if (!err) {
		pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
			kmap->name, map__start(map), map__end(map));
	}

	map__put(map);

	return err;
}

static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}

/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000

struct machine__map_x86_64_entry_trampolines_args {
	struct maps *kmaps;
	bool found;
};

static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
{
	struct machine__map_x86_64_entry_trampolines_args *args = data;
	struct map *dest_map;
	struct kmap *kmap = __map__kmap(map);

	if (!kmap || !is_entry_trampoline(kmap->name))
		return 0;

	dest_map = maps__find(args->kmaps, map__pgoff(map));
	if (RC_CHK_ACCESS(dest_map) != RC_CHK_ACCESS(map))
		map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));

	map__put(dest_map);
	args->found = true;
	return 0;
}

/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct machine__map_x86_64_entry_trampolines_args args = {
		.kmaps = machine__kernel_maps(machine),
		.found = false,
	};
	int nr_cpus_avail, cpu;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args);

	if (args.found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end   = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}

int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}

static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	/* If the kernel map is being renewed, destroy the previous one */
	machine__destroy_kernel_maps(machine);

	map__put(machine->vmlinux_map);
	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -ENOMEM;

	map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY);
	return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
}

void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	maps__remove(machine__kernel_maps(machine), map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}

int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}

void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first_cached(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}

int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}

int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	struct dso *dso = map__dso(map);
	int ret = __dso__load_kallsyms(dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(dso);
		/*
		 * Since /proc/kallsyms will have multiple sections for the
		 * kernel, with modules between them, fix up the end of all
		 * sections.
		 */
		maps__fixup_end(machine__kernel_maps(machine));
	}

	return ret;
}

int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	struct dso *dso = map__dso(map);
	int ret = dso__load_vmlinux_path(dso, map);

	if (ret > 0)
		dso__set_loaded(dso);

	return ret;
}

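/*
 * Read <root_dir>/proc/version and return a copy of the version string that
 * follows the "Linux version " prefix, or NULL on failure.
 */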
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	tmp = fgets(version, sizeof(version), file);
	fclose(file);
	if (!tmp)
		return NULL;

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}

static bool is_kmod_dso(struct dso *dso)
{
	return dso__symtab_type(dso) == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso__symtab_type(dso) == DSO_BINARY_TYPE__GUEST_KMODULE;
}

static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
{
	char *long_name;
	struct dso *dso;
	struct map *map = maps__find_by_name(maps, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL) {
		map__put(map);
		return -ENOMEM;
	}

	dso = map__dso(map);
	dso__set_long_name(dso, long_name, true);
	dso__kernel_module_get_build_id(dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(dso)) {
		dso__set_symtab_type(dso, dso__symtab_type(dso)+1);
		dso__set_comp(dso, m->comp);
	}
	map__put(map);
	return 0;
}

static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		path__join(path, sizeof(path), dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = maps__set_modules_path_dir(maps, path, depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = maps__set_module_path(maps, path, &m);

			zfree(&m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}

static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0);
}

int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				u64 *size __maybe_unused,
				const char *name __maybe_unused)
{
	return 0;
}

static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, &size, name) < 0)
		return -1;

	map = machine__addnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map__set_end(map, start + size);

	dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
	map__put(map);
	return 0;
}

static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	maps__fixup_end(machine__kernel_maps(machine));

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}

static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	map__set_start(machine->vmlinux_map, start);
	map__set_end(machine->vmlinux_map, end);
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		map__set_end(machine->vmlinux_map, ~0ULL);
}

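/*
 * Resize the kernel map by removing it from the kernel maps, updating its
 * address range, and re-inserting it so the maps stay correctly sorted.
 */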
static int machine__update_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	struct map *orig, *updated;
	int err;

	orig = machine->vmlinux_map;
	updated = map__get(orig);

	machine->vmlinux_map = updated;
	maps__remove(machine__kernel_maps(machine), orig);
	machine__set_kernel_mmap(machine, start, end);
	err = maps__insert(machine__kernel_maps(machine), updated);
	map__put(orig);

	return err;
}

int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	u64 start = 0, end = ~0ULL;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/*
		 * We have a real start address now, so reorder the kmaps;
		 * assume it's the last in the kmaps.
		 */
		ret = machine__update_kernel_mmap(machine, start, end);
		if (ret < 0)
			goto out_put;
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	if (end == ~0ULL) {
		/* update end address of the kernel map using adjacent module address */
		struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
							 machine__kernel_map(machine));

		if (next) {
			machine__set_kernel_mmap(machine, start, map__start(next));
			map__put(next);
		}
	}

out_put:
	dso__put(kernel);
	return ret;
}

static int machine__uses_kcore_cb(struct dso *dso, void *data __maybe_unused)
{
	return dso__is_kcore(dso) ? 1 : 0;
}

static bool machine__uses_kcore(struct machine *machine)
{
	return dsos__for_each_dso(&machine->dsos, machine__uses_kcore_cb, NULL) != 0 ? true : false;
}

static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     struct extra_kernel_map *xm)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(xm->name);
}

static int machine__process_extra_kernel_map(struct machine *machine,
					     struct extra_kernel_map *xm)
{
	struct dso *kernel = machine__kernel_dso(machine);

	if (kernel == NULL)
		return -1;

	return machine__create_extra_kernel_map(machine, kernel, xm);
}

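/*
 * Handle a kernel-space mmap event: module and '[...]' names become module
 * maps, the kernel image itself (re)creates the kernel map, and x86_64 entry
 * trampolines are added as extra kernel maps.
 */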
static int machine__process_kernel_mmap_event(struct machine *machine,
					      struct extra_kernel_map *xm,
					      struct build_id *bid)
{
	enum dso_space_type dso_space;
	bool is_kernel_mmap;
	const char *mmap_name = machine->mmap_name;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		dso_space = DSO_SPACE__KERNEL;
	else
		dso_space = DSO_SPACE__KERNEL_GUEST;

	is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
	if (!is_kernel_mmap && !machine__is_host(machine)) {
		/*
		 * If the event was recorded inside the guest and injected into
		 * the host perf.data file, then it will match a host mmap_name,
		 * so try that - see machine__set_mmap_name().
		 */
		mmap_name = "[kernel.kallsyms]";
		is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
	}
	if (xm->name[0] == '/' ||
	    (!is_kernel_mmap && xm->name[0] == '[')) {
		struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);

		if (map == NULL)
			goto out_problem;

		map__set_end(map, map__start(map) + xm->end - xm->start);

		if (build_id__is_defined(bid))
			dso__set_build_id(map__dso(map), bid);

		map__put(map);
	} else if (is_kernel_mmap) {
		const char *symbol_name = xm->name + strlen(mmap_name);
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = dsos__find_kernel_dso(&machine->dsos);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		dso__set_kernel(kernel, dso_space);
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(dso__long_name(kernel), "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (build_id__is_defined(bid))
			dso__set_build_id(kernel, bid);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (xm->pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							xm->pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
		dso__put(kernel);
	} else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
		return machine__process_extra_kernel_map(machine, xm);
	}
	return 0;
out_problem:
	return -1;
}

machine__process_mmap2_event(struct machine * machine,union perf_event * event,struct perf_sample * sample)1695 int machine__process_mmap2_event(struct machine *machine,
1696 				 union perf_event *event,
1697 				 struct perf_sample *sample)
1698 {
1699 	struct thread *thread;
1700 	struct map *map;
1701 	struct dso_id dso_id = {
1702 		.maj = event->mmap2.maj,
1703 		.min = event->mmap2.min,
1704 		.ino = event->mmap2.ino,
1705 		.ino_generation = event->mmap2.ino_generation,
1706 	};
1707 	struct build_id __bid, *bid = NULL;
1708 	int ret = 0;
1709 
1710 	if (dump_trace)
1711 		perf_event__fprintf_mmap2(event, stdout);
1712 
1713 	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
1714 		bid = &__bid;
1715 		build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
1716 	}
1717 
1718 	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1719 	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1720 		struct extra_kernel_map xm = {
1721 			.start = event->mmap2.start,
1722 			.end   = event->mmap2.start + event->mmap2.len,
1723 			.pgoff = event->mmap2.pgoff,
1724 		};
1725 
1726 		strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
1727 		ret = machine__process_kernel_mmap_event(machine, &xm, bid);
1728 		if (ret < 0)
1729 			goto out_problem;
1730 		return 0;
1731 	}
1732 
1733 	thread = machine__findnew_thread(machine, event->mmap2.pid,
1734 					event->mmap2.tid);
1735 	if (thread == NULL)
1736 		goto out_problem;
1737 
1738 	map = map__new(machine, event->mmap2.start,
1739 			event->mmap2.len, event->mmap2.pgoff,
1740 			&dso_id, event->mmap2.prot,
1741 			event->mmap2.flags, bid,
1742 			event->mmap2.filename, thread);
1743 
1744 	if (map == NULL)
1745 		goto out_problem_map;
1746 
1747 	ret = thread__insert_map(thread, map);
1748 	if (ret)
1749 		goto out_problem_insert;
1750 
1751 	thread__put(thread);
1752 	map__put(map);
1753 	return 0;
1754 
1755 out_problem_insert:
1756 	map__put(map);
1757 out_problem_map:
1758 	thread__put(thread);
1759 out_problem:
1760 	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1761 	return 0;
1762 }
1763 
1764 int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1765 				struct perf_sample *sample)
1766 {
1767 	struct thread *thread;
1768 	struct map *map;
1769 	u32 prot = 0;
1770 	int ret = 0;
1771 
1772 	if (dump_trace)
1773 		perf_event__fprintf_mmap(event, stdout);
1774 
1775 	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1776 	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1777 		struct extra_kernel_map xm = {
1778 			.start = event->mmap.start,
1779 			.end   = event->mmap.start + event->mmap.len,
1780 			.pgoff = event->mmap.pgoff,
1781 		};
1782 
1783 		strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1784 		ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
1785 		if (ret < 0)
1786 			goto out_problem;
1787 		return 0;
1788 	}
1789 
1790 	thread = machine__findnew_thread(machine, event->mmap.pid,
1791 					 event->mmap.tid);
1792 	if (thread == NULL)
1793 		goto out_problem;
1794 
1795 	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
1796 		prot = PROT_EXEC;
1797 
1798 	map = map__new(machine, event->mmap.start,
1799 			event->mmap.len, event->mmap.pgoff,
1800 			NULL, prot, 0, NULL, event->mmap.filename, thread);
1801 
1802 	if (map == NULL)
1803 		goto out_problem_map;
1804 
1805 	ret = thread__insert_map(thread, map);
1806 	if (ret)
1807 		goto out_problem_insert;
1808 
1809 	thread__put(thread);
1810 	map__put(map);
1811 	return 0;
1812 
1813 out_problem_insert:
1814 	map__put(map);
1815 out_problem_map:
1816 	thread__put(thread);
1817 out_problem:
1818 	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1819 	return 0;
1820 }
1821 
1822 void machine__remove_thread(struct machine *machine, struct thread *th)
1823 {
1824 	return threads__remove(&machine->threads, th);
1825 }
1826 
1827 int machine__process_fork_event(struct machine *machine, union perf_event *event,
1828 				struct perf_sample *sample)
1829 {
1830 	struct thread *thread = machine__find_thread(machine,
1831 						     event->fork.pid,
1832 						     event->fork.tid);
1833 	struct thread *parent = machine__findnew_thread(machine,
1834 							event->fork.ppid,
1835 							event->fork.ptid);
1836 	bool do_maps_clone = true;
1837 	int err = 0;
1838 
1839 	if (dump_trace)
1840 		perf_event__fprintf_task(event, stdout);
1841 
1842 	/*
1843 	 * There may be an existing thread that is not actually the parent,
1844 	 * either because we are processing events out of order, or because the
1845 	 * (fork) event that would have removed the thread was lost. Assume the
1846 	 * latter case and continue on as best we can.
1847 	 */
1848 	if (thread__pid(parent) != (pid_t)event->fork.ppid) {
1849 		dump_printf("removing erroneous parent thread %d/%d\n",
1850 			    thread__pid(parent), thread__tid(parent));
1851 		machine__remove_thread(machine, parent);
1852 		thread__put(parent);
1853 		parent = machine__findnew_thread(machine, event->fork.ppid,
1854 						 event->fork.ptid);
1855 	}
1856 
1857 	/* If a thread currently exists for the thread id, remove it. */
1858 	if (thread != NULL) {
1859 		machine__remove_thread(machine, thread);
1860 		thread__put(thread);
1861 	}
1862 
1863 	thread = machine__findnew_thread(machine, event->fork.pid,
1864 					 event->fork.tid);
1865 	/*
1866 	 * When synthesizing FORK events, we are trying to create thread
1867 	 * objects for the already running tasks on the machine.
1868 	 *
1869 	 * Normally, for a kernel FORK event, we want to clone the parent's
1870 	 * maps because that is what the kernel just did.
1871 	 *
1872 	 * But when synthesizing, this should not be done.  If we do, we end up
1873 	 * with overlapping maps as we process the synthesized MMAP2 events that
1874 	 * get delivered shortly thereafter.
1875 	 *
1876 	 * Use the FORK event misc flags in an internal way to signal this
1877 	 * situation, so we can elide the map clone when appropriate.
1878 	 */
1879 	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
1880 		do_maps_clone = false;
1881 
1882 	if (thread == NULL || parent == NULL ||
1883 	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
1884 		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1885 		err = -1;
1886 	}
1887 	thread__put(thread);
1888 	thread__put(parent);
1889 
1890 	return err;
1891 }
1892 
1893 int machine__process_exit_event(struct machine *machine, union perf_event *event,
1894 				struct perf_sample *sample __maybe_unused)
1895 {
1896 	struct thread *thread = machine__find_thread(machine,
1897 						     event->fork.pid,
1898 						     event->fork.tid);
1899 
1900 	if (dump_trace)
1901 		perf_event__fprintf_task(event, stdout);
1902 
1903 	if (thread != NULL) {
1904 		if (symbol_conf.keep_exited_threads)
1905 			thread__set_exited(thread, /*exited=*/true);
1906 		else
1907 			machine__remove_thread(machine, thread);
1908 	}
1909 	thread__put(thread);
1910 	return 0;
1911 }
1912 
1913 int machine__process_event(struct machine *machine, union perf_event *event,
1914 			   struct perf_sample *sample)
1915 {
1916 	int ret;
1917 
1918 	switch (event->header.type) {
1919 	case PERF_RECORD_COMM:
1920 		ret = machine__process_comm_event(machine, event, sample); break;
1921 	case PERF_RECORD_MMAP:
1922 		ret = machine__process_mmap_event(machine, event, sample); break;
1923 	case PERF_RECORD_NAMESPACES:
1924 		ret = machine__process_namespaces_event(machine, event, sample); break;
1925 	case PERF_RECORD_CGROUP:
1926 		ret = machine__process_cgroup_event(machine, event, sample); break;
1927 	case PERF_RECORD_MMAP2:
1928 		ret = machine__process_mmap2_event(machine, event, sample); break;
1929 	case PERF_RECORD_FORK:
1930 		ret = machine__process_fork_event(machine, event, sample); break;
1931 	case PERF_RECORD_EXIT:
1932 		ret = machine__process_exit_event(machine, event, sample); break;
1933 	case PERF_RECORD_LOST:
1934 		ret = machine__process_lost_event(machine, event, sample); break;
1935 	case PERF_RECORD_AUX:
1936 		ret = machine__process_aux_event(machine, event); break;
1937 	case PERF_RECORD_ITRACE_START:
1938 		ret = machine__process_itrace_start_event(machine, event); break;
1939 	case PERF_RECORD_LOST_SAMPLES:
1940 		ret = machine__process_lost_samples_event(machine, event, sample); break;
1941 	case PERF_RECORD_SWITCH:
1942 	case PERF_RECORD_SWITCH_CPU_WIDE:
1943 		ret = machine__process_switch_event(machine, event); break;
1944 	case PERF_RECORD_KSYMBOL:
1945 		ret = machine__process_ksymbol(machine, event, sample); break;
1946 	case PERF_RECORD_BPF_EVENT:
1947 		ret = machine__process_bpf(machine, event, sample); break;
1948 	case PERF_RECORD_TEXT_POKE:
1949 		ret = machine__process_text_poke(machine, event, sample); break;
1950 	case PERF_RECORD_AUX_OUTPUT_HW_ID:
1951 		ret = machine__process_aux_output_hw_id_event(machine, event); break;
1952 	default:
1953 		ret = -1;
1954 		break;
1955 	}
1956 
1957 	return ret;
1958 }
1959 
1960 static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
1961 {
1962 	return regexec(regex, sym->name, 0, NULL, 0) == 0;
1963 }
1964 
1965 static void ip__resolve_ams(struct thread *thread,
1966 			    struct addr_map_symbol *ams,
1967 			    u64 ip)
1968 {
1969 	struct addr_location al;
1970 
1971 	addr_location__init(&al);
1972 	/*
1973 	 * We cannot use the header.misc hint to determine whether a
1974 	 * branch stack address is user, kernel, guest or hypervisor.
1975 	 * Branches may straddle the kernel/user/hypervisor boundaries.
1976 	 * Thus, we have to try each cpumode consecutively until we find
1977 	 * a match; otherwise, the symbol is unknown.
1978 	 */
1979 	thread__find_cpumode_addr_location(thread, ip, &al);
1980 
1981 	ams->addr = ip;
1982 	ams->al_addr = al.addr;
1983 	ams->al_level = al.level;
1984 	ams->ms.maps = maps__get(al.maps);
1985 	ams->ms.sym = al.sym;
1986 	ams->ms.map = map__get(al.map);
1987 	ams->phys_addr = 0;
1988 	ams->data_page_size = 0;
1989 	addr_location__exit(&al);
1990 }
1991 
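/*
 * Resolve a data access address to a map/symbol for the given cpumode
 * and fill in the addr_map_symbol, carrying the physical address and
 * data page size through from the sample.
 */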
1992 static void ip__resolve_data(struct thread *thread,
1993 			     u8 m, struct addr_map_symbol *ams,
1994 			     u64 addr, u64 phys_addr, u64 daddr_page_size)
1995 {
1996 	struct addr_location al;
1997 
1998 	addr_location__init(&al);
1999 
2000 	thread__find_symbol(thread, m, addr, &al);
2001 
2002 	ams->addr = addr;
2003 	ams->al_addr = al.addr;
2004 	ams->al_level = al.level;
2005 	ams->ms.maps = maps__get(al.maps);
2006 	ams->ms.sym = al.sym;
2007 	ams->ms.map = map__get(al.map);
2008 	ams->phys_addr = phys_addr;
2009 	ams->data_page_size = daddr_page_size;
2010 	addr_location__exit(&al);
2011 }
2012 
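/*
 * Build a mem_info for a memory access sample: resolve the instruction
 * address (sample->ip) and the data address (sample->addr), then record
 * the data source encoding. The returned mem_info is owned by the caller.
 */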
2013 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2014 				     struct addr_location *al)
2015 {
2016 	struct mem_info *mi = mem_info__new();
2017 
2018 	if (!mi)
2019 		return NULL;
2020 
2021 	ip__resolve_ams(al->thread, mem_info__iaddr(mi), sample->ip);
2022 	ip__resolve_data(al->thread, al->cpumode, mem_info__daddr(mi),
2023 			 sample->addr, sample->phys_addr,
2024 			 sample->data_page_size);
2025 	mem_info__data_src(mi)->val = sample->data_src;
2026 
2027 	return mi;
2028 }
2029 
2030 static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2031 {
2032 	struct map *map = ms->map;
2033 	char *srcline = NULL;
2034 	struct dso *dso;
2035 
2036 	if (!map || callchain_param.key == CCKEY_FUNCTION)
2037 		return srcline;
2038 
2039 	dso = map__dso(map);
2040 	srcline = srcline__tree_find(dso__srclines(dso), ip);
2041 	if (!srcline) {
2042 		bool show_sym = false;
2043 		bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2044 
2045 		srcline = get_srcline(dso, map__rip_2objdump(map, ip),
2046 				      ms->sym, show_sym, show_addr, ip);
2047 		srcline__tree_insert(dso__srclines(dso), ip, srcline);
2048 	}
2049 
2050 	return srcline;
2051 }
2052 
2053 struct iterations {
2054 	int nr_loop_iter;
2055 	u64 cycles;
2056 };
2057 
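/*
 * Resolve one callchain entry and append it to the cursor. Values at or
 * above PERF_CONTEXT_MAX are not addresses but context markers: they
 * switch the current cpumode, and an unknown marker discards the whole
 * chain.
 */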
2058 static int add_callchain_ip(struct thread *thread,
2059 			    struct callchain_cursor *cursor,
2060 			    struct symbol **parent,
2061 			    struct addr_location *root_al,
2062 			    u8 *cpumode,
2063 			    u64 ip,
2064 			    bool branch,
2065 			    struct branch_flags *flags,
2066 			    struct iterations *iter,
2067 			    u64 branch_from,
2068 			    bool symbols)
2069 {
2070 	struct map_symbol ms = {};
2071 	struct addr_location al;
2072 	int nr_loop_iter = 0, err = 0;
2073 	u64 iter_cycles = 0;
2074 	const char *srcline = NULL;
2075 
2076 	addr_location__init(&al);
2077 	al.filtered = 0;
2078 	al.sym = NULL;
2079 	al.srcline = NULL;
2080 	if (!cpumode) {
2081 		thread__find_cpumode_addr_location(thread, ip, &al);
2082 	} else {
2083 		if (ip >= PERF_CONTEXT_MAX) {
2084 			switch (ip) {
2085 			case PERF_CONTEXT_HV:
2086 				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
2087 				break;
2088 			case PERF_CONTEXT_KERNEL:
2089 				*cpumode = PERF_RECORD_MISC_KERNEL;
2090 				break;
2091 			case PERF_CONTEXT_USER:
2092 				*cpumode = PERF_RECORD_MISC_USER;
2093 				break;
2094 			default:
2095 				pr_debug("invalid callchain context: "
2096 					 "%"PRId64"\n", (s64) ip);
2097 				/*
2098 				 * It seems the callchain is corrupted.
2099 				 * Discard all.
2100 				 */
2101 				callchain_cursor_reset(cursor);
2102 				err = 1;
2103 				goto out;
2104 			}
2105 			goto out;
2106 		}
2107 		if (symbols)
2108 			thread__find_symbol(thread, *cpumode, ip, &al);
2109 	}
2110 
2111 	if (al.sym != NULL) {
2112 		if (perf_hpp_list.parent && !*parent &&
2113 		    symbol__match_regex(al.sym, &parent_regex))
2114 			*parent = al.sym;
2115 		else if (have_ignore_callees && root_al &&
2116 		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
2117 			/* Treat this symbol as the root, forgetting its callees. */
2119 			addr_location__copy(root_al, &al);
2120 			callchain_cursor_reset(cursor);
2121 		}
2122 	}
2123 
2124 	if (symbol_conf.hide_unresolved && al.sym == NULL)
2125 		goto out;
2126 
2127 	if (iter) {
2128 		nr_loop_iter = iter->nr_loop_iter;
2129 		iter_cycles = iter->cycles;
2130 	}
2131 
2132 	ms.maps = maps__get(al.maps);
2133 	ms.map = map__get(al.map);
2134 	ms.sym = al.sym;
2135 	srcline = callchain_srcline(&ms, al.addr);
2136 	err = callchain_cursor_append(cursor, ip, &ms,
2137 				      branch, flags, nr_loop_iter,
2138 				      iter_cycles, branch_from, srcline);
2139 out:
2140 	addr_location__exit(&al);
2141 	map_symbol__exit(&ms);
2142 	return err;
2143 }
2144 
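/*
 * Resolve the from/to addresses of every entry in the sample's branch
 * stack, returning a caller-owned array with one branch_info per branch.
 */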
2145 struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2146 					   struct addr_location *al)
2147 {
2148 	unsigned int i;
2149 	const struct branch_stack *bs = sample->branch_stack;
2150 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2151 	u64 *branch_stack_cntr = sample->branch_stack_cntr;
2152 	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2153 
2154 	if (!bi)
2155 		return NULL;
2156 
2157 	for (i = 0; i < bs->nr; i++) {
2158 		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2159 		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2160 		bi[i].flags = entries[i].flags;
2161 		if (branch_stack_cntr)
2162 			bi[i].branch_stack_cntr = branch_stack_cntr[i];
2163 	}
2164 	return bi;
2165 }
2166 
2167 static void save_iterations(struct iterations *iter,
2168 			    struct branch_entry *be, int nr)
2169 {
2170 	int i;
2171 
2172 	iter->nr_loop_iter++;
2173 	iter->cycles = 0;
2174 
2175 	for (i = 0; i < nr; i++)
2176 		iter->cycles += be[i].flags.cycles;
2177 }
2178 
2179 #define CHASHSZ 127
2180 #define CHASHBITS 7
2181 #define NO_ENTRY 0xff
2182 
2183 #define PERF_MAX_BRANCH_DEPTH 127
2184 
2185 /*
 * Remove loops from the branch stack in place: hash each "from" address
 * and, on a hash hit with a matching "from", verify that the entries in
 * between really repeat. If they do, collapse the repetition and record
 * the iteration count and cycles in @iter. Returns the new entry count.
 */
2186 static int remove_loops(struct branch_entry *l, int nr,
2187 			struct iterations *iter)
2188 {
2189 	int i, j, off;
2190 	unsigned char chash[CHASHSZ];
2191 
2192 	memset(chash, NO_ENTRY, sizeof(chash));
2193 
2194 	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2195 
2196 	for (i = 0; i < nr; i++) {
2197 		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2198 
2199 		/* no collision handling for now */
2200 		if (chash[h] == NO_ENTRY) {
2201 			chash[h] = i;
2202 		} else if (l[chash[h]].from == l[i].from) {
2203 			bool is_loop = true;
2204 			/* check if it is a real loop */
2205 			off = 0;
2206 			for (j = chash[h]; j < i && i + off < nr; j++, off++)
2207 				if (l[j].from != l[i + off].from) {
2208 					is_loop = false;
2209 					break;
2210 				}
2211 			if (is_loop) {
2212 				j = nr - (i + off);
2213 				if (j > 0) {
2214 					save_iterations(iter + i + off,
2215 						l + i, off);
2216 
2217 					memmove(iter + i, iter + i + off,
2218 						j * sizeof(*iter));
2219 
2220 					memmove(l + i, l + i + off,
2221 						j * sizeof(*l));
2222 				}
2223 
2224 				nr -= off;
2225 			}
2226 		}
2227 	}
2228 	return nr;
2229 }
2230 
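/*
 * Add the kernel part of the sample callchain (chain->ips[0..end]) to
 * the cursor: in callee order when @callee is set, in caller order
 * otherwise.
 */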
2231 static int lbr_callchain_add_kernel_ip(struct thread *thread,
2232 				       struct callchain_cursor *cursor,
2233 				       struct perf_sample *sample,
2234 				       struct symbol **parent,
2235 				       struct addr_location *root_al,
2236 				       u64 branch_from,
2237 				       bool callee, int end,
2238 				       bool symbols)
2239 {
2240 	struct ip_callchain *chain = sample->callchain;
2241 	u8 cpumode = PERF_RECORD_MISC_USER;
2242 	int err, i;
2243 
2244 	if (callee) {
2245 		for (i = 0; i < end + 1; i++) {
2246 			err = add_callchain_ip(thread, cursor, parent,
2247 					       root_al, &cpumode, chain->ips[i],
2248 					       false, NULL, NULL, branch_from,
2249 					       symbols);
2250 			if (err)
2251 				return err;
2252 		}
2253 		return 0;
2254 	}
2255 
2256 	for (i = end; i >= 0; i--) {
2257 		err = add_callchain_ip(thread, cursor, parent,
2258 				       root_al, &cpumode, chain->ips[i],
2259 				       false, NULL, NULL, branch_from,
2260 				       symbols);
2261 		if (err)
2262 			return err;
2263 	}
2264 
2265 	return 0;
2266 }
2267 
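/*
 * Remember the cursor node that was appended for LBR entry @idx in
 * lbr_stitch->prev_lbr_cursor, so that the next sample can be stitched
 * onto this one.
 */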
2268 static void save_lbr_cursor_node(struct thread *thread,
2269 				 struct callchain_cursor *cursor,
2270 				 int idx)
2271 {
2272 	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2273 
2274 	if (!lbr_stitch)
2275 		return;
2276 
2277 	if (cursor->pos == cursor->nr) {
2278 		lbr_stitch->prev_lbr_cursor[idx].valid = false;
2279 		return;
2280 	}
2281 
2282 	if (!cursor->curr)
2283 		cursor->curr = cursor->first;
2284 	else
2285 		cursor->curr = cursor->curr->next;
2286 
2287 	map_symbol__exit(&lbr_stitch->prev_lbr_cursor[idx].ms);
2288 	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2289 	       sizeof(struct callchain_cursor_node));
2290 	lbr_stitch->prev_lbr_cursor[idx].ms.maps = maps__get(cursor->curr->ms.maps);
2291 	lbr_stitch->prev_lbr_cursor[idx].ms.map = map__get(cursor->curr->ms.map);
2292 
2293 	lbr_stitch->prev_lbr_cursor[idx].valid = true;
2294 	cursor->pos++;
2295 }
2296 
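/*
 * Add the sample's LBR entries to the cursor: entries[0].to plus every
 * entries[i].from, in an order that depends on @callee. Each "from"
 * node is also saved via save_lbr_cursor_node() for possible stitching
 * with the next sample.
 */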
2297 static int lbr_callchain_add_lbr_ip(struct thread *thread,
2298 				    struct callchain_cursor *cursor,
2299 				    struct perf_sample *sample,
2300 				    struct symbol **parent,
2301 				    struct addr_location *root_al,
2302 				    u64 *branch_from,
2303 				    bool callee,
2304 				    bool symbols)
2305 {
2306 	struct branch_stack *lbr_stack = sample->branch_stack;
2307 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2308 	u8 cpumode = PERF_RECORD_MISC_USER;
2309 	int lbr_nr = lbr_stack->nr;
2310 	struct branch_flags *flags;
2311 	int err, i;
2312 	u64 ip;
2313 
2314 	/*
2315 	 * curr and pos are not used while a session is being written; they
2316 	 * are cleared in callchain_cursor_commit() when the write session
2317 	 * is closed. Reuse them here to track the current cursor node.
2318 	 */
2319 	if (thread__lbr_stitch(thread)) {
2320 		cursor->curr = NULL;
2321 		cursor->pos = cursor->nr;
2322 		if (cursor->nr) {
2323 			cursor->curr = cursor->first;
2324 			for (i = 0; i < (int)(cursor->nr - 1); i++)
2325 				cursor->curr = cursor->curr->next;
2326 		}
2327 	}
2328 
2329 	if (callee) {
2330 		/* Add LBR ip from first entries.to */
2331 		ip = entries[0].to;
2332 		flags = &entries[0].flags;
2333 		*branch_from = entries[0].from;
2334 		err = add_callchain_ip(thread, cursor, parent,
2335 				       root_al, &cpumode, ip,
2336 				       true, flags, NULL,
2337 				       *branch_from, symbols);
2338 		if (err)
2339 			return err;
2340 
2341 		/*
2342 		 * The number of cursor nodes has increased, so advance the
2343 		 * current cursor node. The node for entry 0 does not need to
2344 		 * be saved, though: it is impossible to stitch the whole LBR
2345 		 * stack of the previous sample onto it.
2346 		 */
2347 		if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
2348 			if (!cursor->curr)
2349 				cursor->curr = cursor->first;
2350 			else
2351 				cursor->curr = cursor->curr->next;
2352 			cursor->pos++;
2353 		}
2354 
2355 		/* Add LBR ip from entries.from one by one. */
2356 		for (i = 0; i < lbr_nr; i++) {
2357 			ip = entries[i].from;
2358 			flags = &entries[i].flags;
2359 			err = add_callchain_ip(thread, cursor, parent,
2360 					       root_al, &cpumode, ip,
2361 					       true, flags, NULL,
2362 					       *branch_from, symbols);
2363 			if (err)
2364 				return err;
2365 			save_lbr_cursor_node(thread, cursor, i);
2366 		}
2367 		return 0;
2368 	}
2369 
2370 	/* Add LBR ip from entries.from one by one. */
2371 	for (i = lbr_nr - 1; i >= 0; i--) {
2372 		ip = entries[i].from;
2373 		flags = &entries[i].flags;
2374 		err = add_callchain_ip(thread, cursor, parent,
2375 				       root_al, &cpumode, ip,
2376 				       true, flags, NULL,
2377 				       *branch_from, symbols);
2378 		if (err)
2379 			return err;
2380 		save_lbr_cursor_node(thread, cursor, i);
2381 	}
2382 
2383 	if (lbr_nr > 0) {
2384 		/* Add LBR ip from first entries.to */
2385 		ip = entries[0].to;
2386 		flags = &entries[0].flags;
2387 		*branch_from = entries[0].from;
2388 		err = add_callchain_ip(thread, cursor, parent,
2389 				root_al, &cpumode, ip,
2390 				true, flags, NULL,
2391 				*branch_from, symbols);
2392 		if (err)
2393 			return err;
2394 	}
2395 
2396 	return 0;
2397 }
2398 
2399 static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2400 					     struct callchain_cursor *cursor)
2401 {
2402 	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2403 	struct callchain_cursor_node *cnode;
2404 	struct stitch_list *stitch_node;
2405 	int err;
2406 
2407 	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2408 		cnode = &stitch_node->cursor;
2409 
2410 		err = callchain_cursor_append(cursor, cnode->ip,
2411 					      &cnode->ms,
2412 					      cnode->branch,
2413 					      &cnode->branch_flags,
2414 					      cnode->nr_loop_iter,
2415 					      cnode->iter_cycles,
2416 					      cnode->branch_from,
2417 					      cnode->srcline);
2418 		if (err)
2419 			return err;
2420 	}
2421 	return 0;
2422 }
2423 
2424 static struct stitch_list *get_stitch_node(struct thread *thread)
2425 {
2426 	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2427 	struct stitch_list *stitch_node;
2428 
2429 	if (!list_empty(&lbr_stitch->free_lists)) {
2430 		stitch_node = list_first_entry(&lbr_stitch->free_lists,
2431 					       struct stitch_list, node);
2432 		list_del(&stitch_node->node);
2433 
2434 		return stitch_node;
2435 	}
2436 
2437 	return malloc(sizeof(struct stitch_list));
2438 }
2439 
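/*
 * Decide whether the current sample's LBR stack can be stitched onto
 * the previous one. Using the hardware index, look for entries that
 * both samples captured in the same physical LBR registers with
 * identical from/to/flags, then queue the previous sample's extra
 * entries on lbr_stitch->lists for stitching later.
 */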
2440 static bool has_stitched_lbr(struct thread *thread,
2441 			     struct perf_sample *cur,
2442 			     struct perf_sample *prev,
2443 			     unsigned int max_lbr,
2444 			     bool callee)
2445 {
2446 	struct branch_stack *cur_stack = cur->branch_stack;
2447 	struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2448 	struct branch_stack *prev_stack = prev->branch_stack;
2449 	struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2450 	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2451 	int i, j, nr_identical_branches = 0;
2452 	struct stitch_list *stitch_node;
2453 	u64 cur_base, distance;
2454 
2455 	if (!cur_stack || !prev_stack)
2456 		return false;
2457 
2458 	/* Find the physical index of the base-of-stack for the current sample. */
2459 	cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2460 
2461 	distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2462 						     (max_lbr + prev_stack->hw_idx - cur_base);
2463 	/* Previous sample has shorter stack. Nothing can be stitched. */
2464 	if (distance + 1 > prev_stack->nr)
2465 		return false;
2466 
2467 	/*
2468 	 * Check if there are identical LBRs between two samples.
2469 	 * Identical LBRs must have the same from, to and flags values. They
2470 	 * also have to be saved in the same LBR registers (same physical
2471 	 * index).
2472 	 *
2473 	 * Start from the base-of-stack of the current sample.
2474 	 */
2475 	for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2476 		if ((prev_entries[i].from != cur_entries[j].from) ||
2477 		    (prev_entries[i].to != cur_entries[j].to) ||
2478 		    (prev_entries[i].flags.value != cur_entries[j].flags.value))
2479 			break;
2480 		nr_identical_branches++;
2481 	}
2482 
2483 	if (!nr_identical_branches)
2484 		return false;
2485 
2486 	/*
2487 	 * Save the LBRs between the base-of-stack of the previous sample
2488 	 * and the base-of-stack of the current sample into lbr_stitch->lists.
2489 	 * These LBRs will be stitched later.
2490 	 */
2491 	for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2492 
2493 		if (!lbr_stitch->prev_lbr_cursor[i].valid)
2494 			continue;
2495 
2496 		stitch_node = get_stitch_node(thread);
2497 		if (!stitch_node)
2498 			return false;
2499 
2500 		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2501 		       sizeof(struct callchain_cursor_node));
2502 
2503 		stitch_node->cursor.ms.maps = maps__get(lbr_stitch->prev_lbr_cursor[i].ms.maps);
2504 		stitch_node->cursor.ms.map = map__get(lbr_stitch->prev_lbr_cursor[i].ms.map);
2505 
2506 		if (callee)
2507 			list_add(&stitch_node->node, &lbr_stitch->lists);
2508 		else
2509 			list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2510 	}
2511 
2512 	return true;
2513 }
2514 
2515 static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2516 {
2517 	if (thread__lbr_stitch(thread))
2518 		return true;
2519 
2520 	thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
2521 	if (!thread__lbr_stitch(thread))
2522 		goto err;
2523 
2524 	thread__lbr_stitch(thread)->prev_lbr_cursor =
2525 		calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2526 	if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
2527 		goto free_lbr_stitch;
2528 
2529 	thread__lbr_stitch(thread)->prev_lbr_cursor_size = max_lbr + 1;
2530 
2531 	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
2532 	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
2533 
2534 	return true;
2535 
2536 free_lbr_stitch:
2537 	free(thread__lbr_stitch(thread));
2538 	thread__set_lbr_stitch(thread, NULL);
2539 err:
2540 	pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
2541 	thread__set_lbr_stitch_enable(thread, false);
2542 	return false;
2543 }
2544 
2545 /*
2546  * Resolve an LBR callstack chain sample.
2547  * Return:
2548  * 1 on success, LBR callchain information was resolved
2549  * 0 when no LBR callchain information is available (try fp instead)
2550  * negative error code on other errors.
2551  */
2552 static int resolve_lbr_callchain_sample(struct thread *thread,
2553 					struct callchain_cursor *cursor,
2554 					struct perf_sample *sample,
2555 					struct symbol **parent,
2556 					struct addr_location *root_al,
2557 					int max_stack,
2558 					unsigned int max_lbr,
2559 					bool symbols)
2560 {
2561 	bool callee = (callchain_param.order == ORDER_CALLEE);
2562 	struct ip_callchain *chain = sample->callchain;
2563 	int chain_nr = min(max_stack, (int)chain->nr), i;
2564 	struct lbr_stitch *lbr_stitch;
2565 	bool stitched_lbr = false;
2566 	u64 branch_from = 0;
2567 	int err;
2568 
2569 	for (i = 0; i < chain_nr; i++) {
2570 		if (chain->ips[i] == PERF_CONTEXT_USER)
2571 			break;
2572 	}
2573 
2574 	/* LBR only affects the user callchain */
2575 	if (i == chain_nr)
2576 		return 0;
2577 
2578 	if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
2579 	    (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2580 		lbr_stitch = thread__lbr_stitch(thread);
2581 
2582 		stitched_lbr = has_stitched_lbr(thread, sample,
2583 						&lbr_stitch->prev_sample,
2584 						max_lbr, callee);
2585 
2586 		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2587 			struct stitch_list *stitch_node;
2588 
2589 			list_for_each_entry(stitch_node, &lbr_stitch->lists, node)
2590 				map_symbol__exit(&stitch_node->cursor.ms);
2591 
2592 			list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
2593 		}
2594 		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2595 	}
2596 
2597 	if (callee) {
2598 		/* Add kernel ip */
2599 		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2600 						  parent, root_al, branch_from,
2601 						  true, i, symbols);
2602 		if (err)
2603 			goto error;
2604 
2605 		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2606 					       root_al, &branch_from, true, symbols);
2607 		if (err)
2608 			goto error;
2609 
2610 		if (stitched_lbr) {
2611 			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2612 			if (err)
2613 				goto error;
2614 		}
2615 
2616 	} else {
2617 		if (stitched_lbr) {
2618 			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2619 			if (err)
2620 				goto error;
2621 		}
2622 		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2623 					       root_al, &branch_from, false, symbols);
2624 		if (err)
2625 			goto error;
2626 
2627 		/* Add kernel ip */
2628 		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2629 						  parent, root_al, branch_from,
2630 						  false, i, symbols);
2631 		if (err)
2632 			goto error;
2633 	}
2634 	return 1;
2635 
2636 error:
2637 	return (err < 0) ? err : 0;
2638 }
2639 
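/*
 * Scan backwards from @ent for the closest PERF_CONTEXT_* marker and
 * let add_callchain_ip() update the current cpumode from it.
 */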
2640 static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2641 			     struct callchain_cursor *cursor,
2642 			     struct symbol **parent,
2643 			     struct addr_location *root_al,
2644 			     u8 *cpumode, int ent, bool symbols)
2645 {
2646 	int err = 0;
2647 
2648 	while (--ent >= 0) {
2649 		u64 ip = chain->ips[ent];
2650 
2651 		if (ip >= PERF_CONTEXT_MAX) {
2652 			err = add_callchain_ip(thread, cursor, parent,
2653 					       root_al, cpumode, ip,
2654 					       false, NULL, NULL, 0, symbols);
2655 			break;
2656 		}
2657 	}
2658 	return err;
2659 }
2660 
2661 static u64 get_leaf_frame_caller(struct perf_sample *sample,
2662 		struct thread *thread, int usr_idx)
2663 {
2664 	if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
2665 		return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
2666 	else
2667 		return 0;
2668 }
2669 
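/*
 * Resolve the callchain recorded with the sample: try the LBR callstack
 * first, optionally mix in raw branch stack entries, then walk the
 * kernel-captured callchain, adding the missing leaf frame caller on
 * arm64 when one can be recovered.
 */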
2670 static int thread__resolve_callchain_sample(struct thread *thread,
2671 					    struct callchain_cursor *cursor,
2672 					    struct evsel *evsel,
2673 					    struct perf_sample *sample,
2674 					    struct symbol **parent,
2675 					    struct addr_location *root_al,
2676 					    int max_stack,
2677 					    bool symbols)
2678 {
2679 	struct branch_stack *branch = sample->branch_stack;
2680 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2681 	struct ip_callchain *chain = sample->callchain;
2682 	int chain_nr = 0;
2683 	u8 cpumode = PERF_RECORD_MISC_USER;
2684 	int i, j, err, nr_entries, usr_idx;
2685 	int skip_idx = -1;
2686 	int first_call = 0;
2687 	u64 leaf_frame_caller;
2688 
2689 	if (chain)
2690 		chain_nr = chain->nr;
2691 
2692 	if (evsel__has_branch_callstack(evsel)) {
2693 		struct perf_env *env = evsel__env(evsel);
2694 
2695 		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2696 						   root_al, max_stack,
2697 						   !env ? 0 : env->max_branches,
2698 						   symbols);
2699 		if (err)
2700 			return (err < 0) ? err : 0;
2701 	}
2702 
2703 	/*
2704 	 * Based on DWARF debug information, some architectures skip
2705 	 * a callchain entry saved by the kernel.
2706 	 */
2707 	skip_idx = arch_skip_callchain_idx(thread, chain);
2708 
2709 	/*
2710 	 * Add branches to call stack for easier browsing. This gives
2711 	 * more context for a sample than just the callers.
2712 	 *
2713 	 * This uses individual histograms of paths compared to the
2714 	 * aggregated histograms the normal LBR mode uses.
2715 	 *
2716 	 * Limitations for now:
2717 	 * - No extra filters
2718 	 * - No annotations (should annotate somehow)
2719 	 */
2720 
2721 	if (branch && callchain_param.branch_callstack) {
2722 		int nr = min(max_stack, (int)branch->nr);
2723 		struct branch_entry be[nr];
2724 		struct iterations iter[nr];
2725 
2726 		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2727 			pr_warning("corrupted branch chain. skipping...\n");
2728 			goto check_calls;
2729 		}
2730 
2731 		for (i = 0; i < nr; i++) {
2732 			if (callchain_param.order == ORDER_CALLEE) {
2733 				be[i] = entries[i];
2734 
2735 				if (chain == NULL)
2736 					continue;
2737 
2738 				/*
2739 				 * Check for overlap into the callchain.
2740 				 * The return address is one off compared to
2741 				 * the branch entry. To adjust for this
2742 				 * assume the calling instruction is not longer
2743 				 * than 8 bytes.
2744 				 */
2745 				if (i == skip_idx ||
2746 				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
2747 					first_call++;
2748 				else if (be[i].from < chain->ips[first_call] &&
2749 				    be[i].from >= chain->ips[first_call] - 8)
2750 					first_call++;
2751 			} else
2752 				be[i] = entries[branch->nr - i - 1];
2753 		}
2754 
2755 		memset(iter, 0, sizeof(struct iterations) * nr);
2756 		nr = remove_loops(be, nr, iter);
2757 
2758 		for (i = 0; i < nr; i++) {
2759 			err = add_callchain_ip(thread, cursor, parent,
2760 					       root_al,
2761 					       NULL, be[i].to,
2762 					       true, &be[i].flags,
2763 					       NULL, be[i].from, symbols);
2764 
2765 			if (!err) {
2766 				err = add_callchain_ip(thread, cursor, parent, root_al,
2767 						       NULL, be[i].from,
2768 						       true, &be[i].flags,
2769 						       &iter[i], 0, symbols);
2770 			}
2771 			if (err == -EINVAL)
2772 				break;
2773 			if (err)
2774 				return err;
2775 		}
2776 
2777 		if (chain_nr == 0)
2778 			return 0;
2779 
2780 		chain_nr -= nr;
2781 	}
2782 
2783 check_calls:
2784 	if (chain && callchain_param.order != ORDER_CALLEE) {
2785 		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2786 					&cpumode, chain->nr - first_call, symbols);
2787 		if (err)
2788 			return (err < 0) ? err : 0;
2789 	}
2790 	for (i = first_call, nr_entries = 0;
2791 	     i < chain_nr && nr_entries < max_stack; i++) {
2792 		u64 ip;
2793 
2794 		if (callchain_param.order == ORDER_CALLEE)
2795 			j = i;
2796 		else
2797 			j = chain->nr - i - 1;
2798 
2799 #ifdef HAVE_SKIP_CALLCHAIN_IDX
2800 		if (j == skip_idx)
2801 			continue;
2802 #endif
2803 		ip = chain->ips[j];
2804 		if (ip < PERF_CONTEXT_MAX)
2805 			++nr_entries;
2806 		else if (callchain_param.order != ORDER_CALLEE) {
2807 			err = find_prev_cpumode(chain, thread, cursor, parent,
2808 						root_al, &cpumode, j, symbols);
2809 			if (err)
2810 				return (err < 0) ? err : 0;
2811 			continue;
2812 		}
2813 
2814 		/*
2815 		 * PERF_CONTEXT_USER allows us to locate where the user stack ends.
2816 		 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER,
2817 		 * the index will be different in order to add the missing frame
2818 		 * at the right place.
2819 		 */
2820 
2821 		usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1;
2822 
2823 		if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {
2824 
2825 			leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);
2826 
2827 			/*
2828 			 * Check that leaf_frame_caller != ip so we do not add
2829 			 * the same value twice.
2830 			 */
2831 
2832 			if (leaf_frame_caller && leaf_frame_caller != ip) {
2833 
2834 				err = add_callchain_ip(thread, cursor, parent,
2835 						root_al, &cpumode, leaf_frame_caller,
2836 						false, NULL, NULL, 0, symbols);
2837 				if (err)
2838 					return (err < 0) ? err : 0;
2839 			}
2840 		}
2841 
2842 		err = add_callchain_ip(thread, cursor, parent,
2843 				       root_al, &cpumode, ip,
2844 				       false, NULL, NULL, 0, symbols);
2845 
2846 		if (err)
2847 			return (err < 0) ? err : 0;
2848 	}
2849 
2850 	return 0;
2851 }
2852 
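/*
 * Expand @ip into its inline call chain, parsed from (or cached for)
 * the DSO, and append one cursor entry per inlined function. Returns 0
 * when the inline chain was appended; non-zero means the caller should
 * append the plain entry itself.
 */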
2853 static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
2854 {
2855 	struct symbol *sym = ms->sym;
2856 	struct map *map = ms->map;
2857 	struct inline_node *inline_node;
2858 	struct inline_list *ilist;
2859 	struct dso *dso;
2860 	u64 addr;
2861 	int ret = 1;
2862 	struct map_symbol ilist_ms;
2863 
2864 	if (!symbol_conf.inline_name || !map || !sym)
2865 		return ret;
2866 
2867 	addr = map__dso_map_ip(map, ip);
2868 	addr = map__rip_2objdump(map, addr);
2869 	dso = map__dso(map);
2870 
2871 	inline_node = inlines__tree_find(dso__inlined_nodes(dso), addr);
2872 	if (!inline_node) {
2873 		inline_node = dso__parse_addr_inlines(dso, addr, sym);
2874 		if (!inline_node)
2875 			return ret;
2876 		inlines__tree_insert(dso__inlined_nodes(dso), inline_node);
2877 	}
2878 
2879 	ilist_ms = (struct map_symbol) {
2880 		.maps = maps__get(ms->maps),
2881 		.map = map__get(map),
2882 	};
2883 	list_for_each_entry(ilist, &inline_node->val, list) {
2884 		ilist_ms.sym = ilist->symbol;
2885 		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
2886 					      NULL, 0, 0, 0, ilist->srcline);
2887 
2888 		if (ret != 0)
2889 			break;
2890 	}
2891 	map_symbol__exit(&ilist_ms);
2892 
2893 	return ret;
2894 }
2895 
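/*
 * Callback for the DWARF unwinder: append one unwound frame to the
 * callchain cursor, expanding inline frames and resolving the srcline
 * where possible.
 */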
2896 static int unwind_entry(struct unwind_entry *entry, void *arg)
2897 {
2898 	struct callchain_cursor *cursor = arg;
2899 	const char *srcline = NULL;
2900 	u64 addr = entry->ip;
2901 
2902 	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
2903 		return 0;
2904 
2905 	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
2906 		return 0;
2907 
2908 	/*
2909 	 * Convert entry->ip from a virtual address to an offset in
2910 	 * its corresponding binary.
2911 	 */
2912 	if (entry->ms.map)
2913 		addr = map__dso_map_ip(entry->ms.map, entry->ip);
2914 
2915 	srcline = callchain_srcline(&entry->ms, addr);
2916 	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
2917 				       false, NULL, 0, 0, 0, srcline);
2918 }
2919 
2920 static int thread__resolve_callchain_unwind(struct thread *thread,
2921 					    struct callchain_cursor *cursor,
2922 					    struct evsel *evsel,
2923 					    struct perf_sample *sample,
2924 					    int max_stack, bool symbols)
2925 {
2926 	/* Can we do DWARF post unwinding? */
2927 	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2928 	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
2929 		return 0;
2930 
2931 	/* Bail out if nothing was captured. */
2932 	if ((!sample->user_regs.regs) ||
2933 	    (!sample->user_stack.size))
2934 		return 0;
2935 
2936 	if (!symbols)
2937 		pr_debug("Skipping symbol resolution during unwind is not currently supported\n");
2938 
2939 	return unwind__get_entries(unwind_entry, cursor,
2940 				   thread, sample, max_stack, false);
2941 }
2942 
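/*
 * Combine both callchain sources in the order the output expects: for
 * ORDER_CALLEE the sampled callchain precedes the DWARF unwind results,
 * otherwise the unwind results come first.
 */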
2943 int __thread__resolve_callchain(struct thread *thread,
2944 				struct callchain_cursor *cursor,
2945 				struct evsel *evsel,
2946 				struct perf_sample *sample,
2947 				struct symbol **parent,
2948 				struct addr_location *root_al,
2949 				int max_stack,
2950 				bool symbols)
2951 {
2952 	int ret = 0;
2953 
2954 	if (cursor == NULL)
2955 		return -ENOMEM;
2956 
2957 	callchain_cursor_reset(cursor);
2958 
2959 	if (callchain_param.order == ORDER_CALLEE) {
2960 		ret = thread__resolve_callchain_sample(thread, cursor,
2961 						       evsel, sample,
2962 						       parent, root_al,
2963 						       max_stack, symbols);
2964 		if (ret)
2965 			return ret;
2966 		ret = thread__resolve_callchain_unwind(thread, cursor,
2967 						       evsel, sample,
2968 						       max_stack, symbols);
2969 	} else {
2970 		ret = thread__resolve_callchain_unwind(thread, cursor,
2971 						       evsel, sample,
2972 						       max_stack, symbols);
2973 		if (ret)
2974 			return ret;
2975 		ret = thread__resolve_callchain_sample(thread, cursor,
2976 						       evsel, sample,
2977 						       parent, root_al,
2978 						       max_stack, symbols);
2979 	}
2980 
2981 	return ret;
2982 }
2983 
2984 int machine__for_each_thread(struct machine *machine,
2985 			     int (*fn)(struct thread *thread, void *p),
2986 			     void *priv)
2987 {
2988 	return threads__for_each_thread(&machine->threads, fn, priv);
2989 }
2990 
2991 int machines__for_each_thread(struct machines *machines,
2992 			      int (*fn)(struct thread *thread, void *p),
2993 			      void *priv)
2994 {
2995 	struct rb_node *nd;
2996 	int rc = 0;
2997 
2998 	rc = machine__for_each_thread(&machines->host, fn, priv);
2999 	if (rc != 0)
3000 		return rc;
3001 
3002 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
3003 		struct machine *machine = rb_entry(nd, struct machine, rb_node);
3004 
3005 		rc = machine__for_each_thread(machine, fn, priv);
3006 		if (rc != 0)
3007 			return rc;
3008 	}
3009 	return rc;
3010 }
3011 
3013 static int thread_list_cb(struct thread *thread, void *data)
3014 {
3015 	struct list_head *list = data;
3016 	struct thread_list *entry = malloc(sizeof(*entry));
3017 
3018 	if (!entry)
3019 		return -ENOMEM;
3020 
3021 	entry->thread = thread__get(thread);
3022 	list_add_tail(&entry->list, list);
3023 	return 0;
3024 }
3025 
3026 int machine__thread_list(struct machine *machine, struct list_head *list)
3027 {
3028 	return machine__for_each_thread(machine, thread_list_cb, list);
3029 }
3030 
3031 void thread_list__delete(struct list_head *list)
3032 {
3033 	struct thread_list *pos, *next;
3034 
3035 	list_for_each_entry_safe(pos, next, list, list) {
3036 		thread__zput(pos->thread);
3037 		list_del(&pos->list);
3038 		free(pos);
3039 	}
3040 }
3041 
3042 pid_t machine__get_current_tid(struct machine *machine, int cpu)
3043 {
3044 	if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
3045 		return -1;
3046 
3047 	return machine->current_tid[cpu];
3048 }
3049 
3050 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
3051 			     pid_t tid)
3052 {
3053 	struct thread *thread;
3054 	const pid_t init_val = -1;
3055 
3056 	if (cpu < 0)
3057 		return -EINVAL;
3058 
3059 	if (realloc_array_as_needed(machine->current_tid,
3060 				    machine->current_tid_sz,
3061 				    (unsigned int)cpu,
3062 				    &init_val))
3063 		return -ENOMEM;
3064 
3065 	machine->current_tid[cpu] = tid;
3066 
3067 	thread = machine__findnew_thread(machine, pid, tid);
3068 	if (!thread)
3069 		return -ENOMEM;
3070 
3071 	thread__set_cpu(thread, cpu);
3072 	thread__put(thread);
3073 
3074 	return 0;
3075 }
3076 
3077 /*
3078  * Compares the raw arch string. N.B. see instead perf_env__arch() or
3079  * machine__normalized_is() if a normalized arch is needed.
3080  */
3081 bool machine__is(struct machine *machine, const char *arch)
3082 {
3083 	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
3084 }
3085 
3086 bool machine__normalized_is(struct machine *machine, const char *arch)
3087 {
3088 	return machine && !strcmp(perf_env__arch(machine->env), arch);
3089 }
3090 
3091 int machine__nr_cpus_avail(struct machine *machine)
3092 {
3093 	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
3094 }
3095 
3096 int machine__get_kernel_start(struct machine *machine)
3097 {
3098 	struct map *map = machine__kernel_map(machine);
3099 	int err = 0;
3100 
3101 	/*
3102 	 * The only addresses above 2^63 are kernel addresses of a 64-bit
3103 	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
3104 	 * all addresses including kernel addresses are less than 2^32.  In
3105 	 * that case (32-bit system), if the kernel mapping is unknown, all
3106 	 * addresses will be assumed to be in user space - see
3107 	 * machine__kernel_ip().
3108 	 */
3109 	machine->kernel_start = 1ULL << 63;
3110 	if (map) {
3111 		err = map__load(map);
3112 		/*
3113 		 * On x86_64, PTI entry trampolines are less than the
3114 		 * start of kernel text, but still above 2^63. So leave
3115 		 * kernel_start = 1ULL << 63 for x86_64.
3116 		 */
3117 		if (!err && !machine__is(machine, "x86_64"))
3118 			machine->kernel_start = map__start(map);
3119 	}
3120 	return err;
3121 }
3122 
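/*
 * When the machine uses a single address space, fix up the cpumode by
 * checking whether @addr actually is a kernel address, preserving the
 * host/guest distinction of the incoming cpumode.
 */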
3123 u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3124 {
3125 	u8 addr_cpumode = cpumode;
3126 	bool kernel_ip;
3127 
3128 	if (!machine->single_address_space)
3129 		goto out;
3130 
3131 	kernel_ip = machine__kernel_ip(machine, addr);
3132 	switch (cpumode) {
3133 	case PERF_RECORD_MISC_KERNEL:
3134 	case PERF_RECORD_MISC_USER:
3135 		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3136 					   PERF_RECORD_MISC_USER;
3137 		break;
3138 	case PERF_RECORD_MISC_GUEST_KERNEL:
3139 	case PERF_RECORD_MISC_GUEST_USER:
3140 		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3141 					   PERF_RECORD_MISC_GUEST_USER;
3142 		break;
3143 	default:
3144 		break;
3145 	}
3146 out:
3147 	return addr_cpumode;
3148 }
3149 
3150 struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename,
3151 				    const struct dso_id *id)
3152 {
3153 	return dsos__findnew_id(&machine->dsos, filename, id);
3154 }
3155 
3156 struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3157 {
3158 	return machine__findnew_dso_id(machine, filename, NULL);
3159 }
3160 
3161 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3162 {
3163 	struct machine *machine = vmachine;
3164 	struct map *map;
3165 	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
3166 
3167 	if (sym == NULL)
3168 		return NULL;
3169 
3170 	*modp = __map__is_kmodule(map) ? (char *)dso__short_name(map__dso(map)) : NULL;
3171 	*addrp = map__unmap_ip(map, sym->start);
3172 	return sym->name;
3173 }
3174 
3175 struct machine__for_each_dso_cb_args {
3176 	struct machine *machine;
3177 	machine__dso_t fn;
3178 	void *priv;
3179 };
3180 
3181 static int machine__for_each_dso_cb(struct dso *dso, void *data)
3182 {
3183 	struct machine__for_each_dso_cb_args *args = data;
3184 
3185 	return args->fn(dso, args->machine, args->priv);
3186 }
3187 
3188 int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
3189 {
3190 	struct machine__for_each_dso_cb_args args = {
3191 		.machine = machine,
3192 		.fn = fn,
3193 		.priv = priv,
3194 	};
3195 
3196 	return dsos__for_each_dso(&machine->dsos, machine__for_each_dso_cb, &args);
3197 }
3198 
3199 int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
3200 {
3201 	struct maps *maps = machine__kernel_maps(machine);
3202 
3203 	return maps__for_each_map(maps, fn, priv);
3204 }
3205 
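/*
 * Check whether @addr falls within a text range used for locking: the
 * sched text section (mutex and rwsem functions), the lock text section
 * (spinlock functions) or the contention tracepoint ranges. The
 * boundary symbols are looked up once and cached in the machine.
 */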
3206 bool machine__is_lock_function(struct machine *machine, u64 addr)
3207 {
3208 	if (!machine->sched.text_start) {
3209 		struct map *kmap;
3210 		struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);
3211 
3212 		if (!sym) {
3213 			/* to avoid retry */
3214 			machine->sched.text_start = 1;
3215 			return false;
3216 		}
3217 
3218 		machine->sched.text_start = map__unmap_ip(kmap, sym->start);
3219 
3220 		/* should not fail from here */
3221 		sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
3222 		machine->sched.text_end = map__unmap_ip(kmap, sym->start);
3223 
3224 		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
3225 		machine->lock.text_start = map__unmap_ip(kmap, sym->start);
3226 
3227 		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
3228 		machine->lock.text_end = map__unmap_ip(kmap, sym->start);
3229 
3230 		sym = machine__find_kernel_symbol_by_name(machine, "__traceiter_contention_begin", &kmap);
3231 		if (sym) {
3232 			machine->traceiter.text_start = map__unmap_ip(kmap, sym->start);
3233 			machine->traceiter.text_end = map__unmap_ip(kmap, sym->end);
3234 		}
3235 		sym = machine__find_kernel_symbol_by_name(machine, "trace_contention_begin", &kmap);
3236 		if (sym) {
3237 			machine->trace.text_start = map__unmap_ip(kmap, sym->start);
3238 			machine->trace.text_end = map__unmap_ip(kmap, sym->end);
3239 		}
3240 	}
3241 
3242 	/* failed to get kernel symbols */
3243 	if (machine->sched.text_start == 1)
3244 		return false;
3245 
3246 	/* mutex and rwsem functions are in sched text section */
3247 	if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
3248 		return true;
3249 
3250 	/* spinlock functions are in lock text section */
3251 	if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
3252 		return true;
3253 
3254 	/* traceiter functions currently don't have their own section,
3255 	 * but we consider them lock functions.
3256 	 */
3257 	if (machine->traceiter.text_start != 0) {
3258 		if (machine->traceiter.text_start <= addr && addr < machine->traceiter.text_end)
3259 			return true;
3260 	}
3261 
3262 	if (machine->trace.text_start != 0) {
3263 		if (machine->trace.text_start <= addr && addr < machine->trace.text_end)
3264 			return true;
3265 	}
3266 
3267 	return false;
3268 }
3269 
3270 int machine__hit_all_dsos(struct machine *machine)
3271 {
3272 	return dsos__hit_all(&machine->dsos);
3273 }
3274