xref: /linux/tools/perf/util/machine.c (revision 6c7353836a91b1479e6b81791cdc163fb04b4834)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <inttypes.h>
5 #include <regex.h>
6 #include <stdlib.h>
7 #include "callchain.h"
8 #include "debug.h"
9 #include "dso.h"
10 #include "env.h"
11 #include "event.h"
12 #include "evsel.h"
13 #include "hist.h"
14 #include "machine.h"
15 #include "map.h"
16 #include "map_symbol.h"
17 #include "branch.h"
18 #include "mem-events.h"
19 #include "path.h"
20 #include "srcline.h"
21 #include "symbol.h"
22 #include "sort.h"
23 #include "strlist.h"
24 #include "target.h"
25 #include "thread.h"
26 #include "util.h"
27 #include "vdso.h"
28 #include <stdbool.h>
29 #include <sys/types.h>
30 #include <sys/stat.h>
31 #include <unistd.h>
32 #include "unwind.h"
33 #include "linux/hash.h"
34 #include "asm/bug.h"
35 #include "bpf-event.h"
36 #include <internal/lib.h> // page_size
37 #include "cgroup.h"
38 #include "arm64-frame-pointer-unwind-support.h"
39 
40 #include <linux/ctype.h>
41 #include <symbol/kallsyms.h>
42 #include <linux/mman.h>
43 #include <linux/string.h>
44 #include <linux/zalloc.h>
45 
46 static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
47 				     struct thread *th, bool lock);
48 
49 static struct dso *machine__kernel_dso(struct machine *machine)
50 {
51 	return map__dso(machine->vmlinux_map);
52 }
53 
54 static void dsos__init(struct dsos *dsos)
55 {
56 	INIT_LIST_HEAD(&dsos->head);
57 	dsos->root = RB_ROOT;
58 	init_rwsem(&dsos->lock);
59 }
60 
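/*
 * A machine's threads are kept in THREADS__TABLE_SIZE buckets, each one an
 * rbtree protected by its own rwsem, with a count of entries and a cached
 * last_match thread.  Start them all out empty.
 */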
61 static void machine__threads_init(struct machine *machine)
62 {
63 	int i;
64 
65 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
66 		struct threads *threads = &machine->threads[i];
67 		threads->entries = RB_ROOT_CACHED;
68 		init_rwsem(&threads->lock);
69 		threads->nr = 0;
70 		threads->last_match = NULL;
71 	}
72 }
73 
74 static int thread_rb_node__cmp_tid(const void *key, const struct rb_node *nd)
75 {
76 	int to_find = (int) *((pid_t *)key);
77 
78 	return to_find - (int)thread__tid(rb_entry(nd, struct thread_rb_node, rb_node)->thread);
79 }
80 
81 static struct thread_rb_node *thread_rb_node__find(const struct thread *th,
82 						   struct rb_root *tree)
83 {
84 	pid_t to_find = thread__tid(th);
85 	struct rb_node *nd = rb_find(&to_find, tree, thread_rb_node__cmp_tid);
86 
87 	return rb_entry(nd, struct thread_rb_node, rb_node);
88 }
89 
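/*
 * Set the name used for the synthesized kernel mmap event/map:
 * "[kernel.kallsyms]" for the host, "[guest.kernel.kallsyms]" for the default
 * guest and "[guest.kernel.kallsyms.<pid>]" for other guests.
 */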
90 static int machine__set_mmap_name(struct machine *machine)
91 {
92 	if (machine__is_host(machine))
93 		machine->mmap_name = strdup("[kernel.kallsyms]");
94 	else if (machine__is_default_guest(machine))
95 		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
96 	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
97 			  machine->pid) < 0)
98 		machine->mmap_name = NULL;
99 
100 	return machine->mmap_name ? 0 : -ENOMEM;
101 }
102 
103 static void thread__set_guest_comm(struct thread *thread, pid_t pid)
104 {
105 	char comm[64];
106 
107 	snprintf(comm, sizeof(comm), "[guest/%d]", pid);
108 	thread__set_comm(thread, comm, 0);
109 }
110 
111 int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
112 {
113 	int err = -ENOMEM;
114 
115 	memset(machine, 0, sizeof(*machine));
116 	machine->kmaps = maps__new(machine);
117 	if (machine->kmaps == NULL)
118 		return -ENOMEM;
119 
120 	RB_CLEAR_NODE(&machine->rb_node);
121 	dsos__init(&machine->dsos);
122 
123 	machine__threads_init(machine);
124 
125 	machine->vdso_info = NULL;
126 	machine->env = NULL;
127 
128 	machine->pid = pid;
129 
130 	machine->id_hdr_size = 0;
131 	machine->kptr_restrict_warned = false;
132 	machine->comm_exec = false;
133 	machine->kernel_start = 0;
134 	machine->vmlinux_map = NULL;
135 
136 	machine->root_dir = strdup(root_dir);
137 	if (machine->root_dir == NULL)
138 		goto out;
139 
140 	if (machine__set_mmap_name(machine))
141 		goto out;
142 
143 	if (pid != HOST_KERNEL_ID) {
144 		struct thread *thread = machine__findnew_thread(machine, -1,
145 								pid);
146 
147 		if (thread == NULL)
148 			goto out;
149 
150 		thread__set_guest_comm(thread, pid);
151 		thread__put(thread);
152 	}
153 
154 	machine->current_tid = NULL;
155 	err = 0;
156 
157 out:
158 	if (err) {
159 		zfree(&machine->kmaps);
160 		zfree(&machine->root_dir);
161 		zfree(&machine->mmap_name);
162 	}
163 	return err;
164 }
165 
166 struct machine *machine__new_host(void)
167 {
168 	struct machine *machine = malloc(sizeof(*machine));
169 
170 	if (machine != NULL) {
171 		machine__init(machine, "", HOST_KERNEL_ID);
172 
173 		if (machine__create_kernel_maps(machine) < 0)
174 			goto out_delete;
175 	}
176 
177 	return machine;
178 out_delete:
179 	free(machine);
180 	return NULL;
181 }
182 
183 struct machine *machine__new_kallsyms(void)
184 {
185 	struct machine *machine = machine__new_host();
186 	/*
187 	 * FIXME:
188 	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
189 	 *    ask for not using the kcore parsing code, once this one is fixed
190 	 *    to create a map per module.
191 	 */
192 	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
193 		machine__delete(machine);
194 		machine = NULL;
195 	}
196 
197 	return machine;
198 }
199 
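/*
 * Drop every dso from the machine's list: clear its rbtree linkage, unlink it
 * from the list and put the reference the list held on it.
 */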
200 static void dsos__purge(struct dsos *dsos)
201 {
202 	struct dso *pos, *n;
203 
204 	down_write(&dsos->lock);
205 
206 	list_for_each_entry_safe(pos, n, &dsos->head, node) {
207 		RB_CLEAR_NODE(&pos->rb_node);
208 		pos->root = NULL;
209 		list_del_init(&pos->node);
210 		dso__put(pos);
211 	}
212 
213 	up_write(&dsos->lock);
214 }
215 
216 static void dsos__exit(struct dsos *dsos)
217 {
218 	dsos__purge(dsos);
219 	exit_rwsem(&dsos->lock);
220 }
221 
222 void machine__delete_threads(struct machine *machine)
223 {
224 	struct rb_node *nd;
225 	int i;
226 
227 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
228 		struct threads *threads = &machine->threads[i];
229 		down_write(&threads->lock);
230 		nd = rb_first_cached(&threads->entries);
231 		while (nd) {
232 			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
233 
234 			nd = rb_next(nd);
235 			__machine__remove_thread(machine, trb, trb->thread, false);
236 		}
237 		up_write(&threads->lock);
238 	}
239 }
240 
241 void machine__exit(struct machine *machine)
242 {
243 	int i;
244 
245 	if (machine == NULL)
246 		return;
247 
248 	machine__destroy_kernel_maps(machine);
249 	maps__zput(machine->kmaps);
250 	dsos__exit(&machine->dsos);
251 	machine__exit_vdso(machine);
252 	zfree(&machine->root_dir);
253 	zfree(&machine->mmap_name);
254 	zfree(&machine->current_tid);
255 	zfree(&machine->kallsyms_filename);
256 
257 	machine__delete_threads(machine);
258 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
259 		struct threads *threads = &machine->threads[i];
260 
261 		exit_rwsem(&threads->lock);
262 	}
263 }
264 
265 void machine__delete(struct machine *machine)
266 {
267 	if (machine) {
268 		machine__exit(machine);
269 		free(machine);
270 	}
271 }
272 
273 void machines__init(struct machines *machines)
274 {
275 	machine__init(&machines->host, "", HOST_KERNEL_ID);
276 	machines->guests = RB_ROOT_CACHED;
277 }
278 
279 void machines__exit(struct machines *machines)
280 {
281 	machine__exit(&machines->host);
282 	/* XXX exit guest */
283 }
284 
285 struct machine *machines__add(struct machines *machines, pid_t pid,
286 			      const char *root_dir)
287 {
288 	struct rb_node **p = &machines->guests.rb_root.rb_node;
289 	struct rb_node *parent = NULL;
290 	struct machine *pos, *machine = malloc(sizeof(*machine));
291 	bool leftmost = true;
292 
293 	if (machine == NULL)
294 		return NULL;
295 
296 	if (machine__init(machine, root_dir, pid) != 0) {
297 		free(machine);
298 		return NULL;
299 	}
300 
301 	while (*p != NULL) {
302 		parent = *p;
303 		pos = rb_entry(parent, struct machine, rb_node);
304 		if (pid < pos->pid)
305 			p = &(*p)->rb_left;
306 		else {
307 			p = &(*p)->rb_right;
308 			leftmost = false;
309 		}
310 	}
311 
312 	rb_link_node(&machine->rb_node, parent, p);
313 	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
314 
315 	machine->machines = machines;
316 
317 	return machine;
318 }
319 
320 void machines__set_comm_exec(struct machines *machines, bool comm_exec)
321 {
322 	struct rb_node *nd;
323 
324 	machines->host.comm_exec = comm_exec;
325 
326 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
327 		struct machine *machine = rb_entry(nd, struct machine, rb_node);
328 
329 		machine->comm_exec = comm_exec;
330 	}
331 }
332 
333 struct machine *machines__find(struct machines *machines, pid_t pid)
334 {
335 	struct rb_node **p = &machines->guests.rb_root.rb_node;
336 	struct rb_node *parent = NULL;
337 	struct machine *machine;
338 	struct machine *default_machine = NULL;
339 
340 	if (pid == HOST_KERNEL_ID)
341 		return &machines->host;
342 
343 	while (*p != NULL) {
344 		parent = *p;
345 		machine = rb_entry(parent, struct machine, rb_node);
346 		if (pid < machine->pid)
347 			p = &(*p)->rb_left;
348 		else if (pid > machine->pid)
349 			p = &(*p)->rb_right;
350 		else
351 			return machine;
352 		if (!machine->pid)
353 			default_machine = machine;
354 	}
355 
356 	return default_machine;
357 }
358 
359 struct machine *machines__findnew(struct machines *machines, pid_t pid)
360 {
361 	char path[PATH_MAX];
362 	const char *root_dir = "";
363 	struct machine *machine = machines__find(machines, pid);
364 
365 	if (machine && (machine->pid == pid))
366 		goto out;
367 
368 	if ((pid != HOST_KERNEL_ID) &&
369 	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
370 	    (symbol_conf.guestmount)) {
371 		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
372 		if (access(path, R_OK)) {
373 			static struct strlist *seen;
374 
375 			if (!seen)
376 				seen = strlist__new(NULL, NULL);
377 
378 			if (!strlist__has_entry(seen, path)) {
379 				pr_err("Can't access file %s\n", path);
380 				strlist__add(seen, path);
381 			}
382 			machine = NULL;
383 			goto out;
384 		}
385 		root_dir = path;
386 	}
387 
388 	machine = machines__add(machines, pid, root_dir);
389 out:
390 	return machine;
391 }
392 
393 struct machine *machines__find_guest(struct machines *machines, pid_t pid)
394 {
395 	struct machine *machine = machines__find(machines, pid);
396 
397 	if (!machine)
398 		machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
399 	return machine;
400 }
401 
402 /*
403  * A common case for KVM test programs is that the test program acts as the
404  * hypervisor, creating, running and destroying the virtual machine, and
405  * providing the guest object code from its own object code. In this case,
406  * the VM is not running an OS, but only the functions loaded into it by the
407  * hypervisor test program, and conveniently, loaded at the same virtual
408  * addresses.
409  *
410  * Normally to resolve addresses, MMAP events are needed to map addresses
411  * back to the object code and debug symbols for that object code.
412  *
413  * Currently, there is no way to get such mapping information from guests
414  * but, in the scenario described above, the guest has the same mappings
415  * as the hypervisor, so support for that scenario can be achieved.
416  *
417  * To support that, copy the host thread's maps to the guest thread's maps.
418  * Note, we do not discover the guest until we encounter a guest event,
419  * which works well because it is not until then that we know that the host
420  * thread's maps have been set up.
421  *
422  * This function returns the guest thread. Apart from keeping the data
423  * structures sane, using a thread belonging to the guest machine, instead
424  * of the host thread, allows it to have its own comm (refer
425  * of the host thread, allows it to have its own comm (see
426  */
427 static struct thread *findnew_guest_code(struct machine *machine,
428 					 struct machine *host_machine,
429 					 pid_t pid)
430 {
431 	struct thread *host_thread;
432 	struct thread *thread;
433 	int err;
434 
435 	if (!machine)
436 		return NULL;
437 
438 	thread = machine__findnew_thread(machine, -1, pid);
439 	if (!thread)
440 		return NULL;
441 
442 	/* Assume maps are set up if there are any */
443 	if (maps__nr_maps(thread__maps(thread)))
444 		return thread;
445 
446 	host_thread = machine__find_thread(host_machine, -1, pid);
447 	if (!host_thread)
448 		goto out_err;
449 
450 	thread__set_guest_comm(thread, pid);
451 
452 	/*
453 	 * Guest code can be found in hypervisor process at the same address
454 	 * so copy host maps.
455 	 */
456 	err = maps__copy_from(thread__maps(thread), thread__maps(host_thread));
457 	thread__put(host_thread);
458 	if (err)
459 		goto out_err;
460 
461 	return thread;
462 
463 out_err:
464 	thread__zput(thread);
465 	return NULL;
466 }
467 
468 struct thread *machines__findnew_guest_code(struct machines *machines, pid_t pid)
469 {
470 	struct machine *host_machine = machines__find(machines, HOST_KERNEL_ID);
471 	struct machine *machine = machines__findnew(machines, pid);
472 
473 	return findnew_guest_code(machine, host_machine, pid);
474 }
475 
476 struct thread *machine__findnew_guest_code(struct machine *machine, pid_t pid)
477 {
478 	struct machines *machines = machine->machines;
479 	struct machine *host_machine;
480 
481 	if (!machines)
482 		return NULL;
483 
484 	host_machine = machines__find(machines, HOST_KERNEL_ID);
485 
486 	return findnew_guest_code(machine, host_machine, pid);
487 }
488 
489 void machines__process_guests(struct machines *machines,
490 			      machine__process_t process, void *data)
491 {
492 	struct rb_node *nd;
493 
494 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
495 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
496 		process(pos, data);
497 	}
498 }
499 
500 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
501 {
502 	struct rb_node *node;
503 	struct machine *machine;
504 
505 	machines->host.id_hdr_size = id_hdr_size;
506 
507 	for (node = rb_first_cached(&machines->guests); node;
508 	     node = rb_next(node)) {
509 		machine = rb_entry(node, struct machine, rb_node);
510 		machine->id_hdr_size = id_hdr_size;
511 	}
512 
513 	return;
514 }
515 
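/*
 * Called when an event supplies a pid for a thread whose pid was unknown so
 * far: record it and, unless this thread is the group leader itself, drop any
 * maps it had and share the leader's maps instead.
 */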
516 static void machine__update_thread_pid(struct machine *machine,
517 				       struct thread *th, pid_t pid)
518 {
519 	struct thread *leader;
520 
521 	if (pid == thread__pid(th) || pid == -1 || thread__pid(th) != -1)
522 		return;
523 
524 	thread__set_pid(th, pid);
525 
526 	if (thread__pid(th) == thread__tid(th))
527 		return;
528 
529 	leader = __machine__findnew_thread(machine, thread__pid(th), thread__pid(th));
530 	if (!leader)
531 		goto out_err;
532 
533 	if (!thread__maps(leader))
534 		thread__set_maps(leader, maps__new(machine));
535 
536 	if (!thread__maps(leader))
537 		goto out_err;
538 
539 	if (thread__maps(th) == thread__maps(leader))
540 		goto out_put;
541 
542 	if (thread__maps(th)) {
543 		/*
544 		 * Maps are created from MMAP events which provide the pid and
545 		 * tid.  Consequently there never should be any maps on a thread
546 		 * with an unknown pid.  Just print an error if there are.
547 		 */
548 		if (!maps__empty(thread__maps(th)))
549 			pr_err("Discarding thread maps for %d:%d\n",
550 				thread__pid(th), thread__tid(th));
551 		maps__put(thread__maps(th));
552 	}
553 
554 	thread__set_maps(th, maps__get(thread__maps(leader)));
555 out_put:
556 	thread__put(leader);
557 	return;
558 out_err:
559 	pr_err("Failed to join map groups for %d:%d\n", thread__pid(th), thread__tid(th));
560 	goto out_put;
561 }
562 
563 /*
564  * Front-end cache - TID lookups come in blocks,
565  * so most of the time we don't have to look up
566  * the full rbtree:
567  */
568 static struct thread*
569 __threads__get_last_match(struct threads *threads, struct machine *machine,
570 			  int pid, int tid)
571 {
572 	struct thread *th;
573 
574 	th = threads->last_match;
575 	if (th != NULL) {
576 		if (thread__tid(th) == tid) {
577 			machine__update_thread_pid(machine, th, pid);
578 			return thread__get(th);
579 		}
580 		thread__put(threads->last_match);
581 		threads->last_match = NULL;
582 	}
583 
584 	return NULL;
585 }
586 
587 static struct thread*
588 threads__get_last_match(struct threads *threads, struct machine *machine,
589 			int pid, int tid)
590 {
591 	struct thread *th = NULL;
592 
593 	if (perf_singlethreaded)
594 		th = __threads__get_last_match(threads, machine, pid, tid);
595 
596 	return th;
597 }
598 
599 static void
600 __threads__set_last_match(struct threads *threads, struct thread *th)
601 {
602 	thread__put(threads->last_match);
603 	threads->last_match = thread__get(th);
604 }
605 
606 static void
607 threads__set_last_match(struct threads *threads, struct thread *th)
608 {
609 	if (perf_singlethreaded)
610 		__threads__set_last_match(threads, th);
611 }
612 
613 /*
614  * The caller must eventually drop the reference (thread->refcnt) returned
615  * by a successful lookup or a newly inserted thread.
616  */
617 static struct thread *____machine__findnew_thread(struct machine *machine,
618 						  struct threads *threads,
619 						  pid_t pid, pid_t tid,
620 						  bool create)
621 {
622 	struct rb_node **p = &threads->entries.rb_root.rb_node;
623 	struct rb_node *parent = NULL;
624 	struct thread *th;
625 	struct thread_rb_node *nd;
626 	bool leftmost = true;
627 
628 	th = threads__get_last_match(threads, machine, pid, tid);
629 	if (th)
630 		return th;
631 
632 	while (*p != NULL) {
633 		parent = *p;
634 		th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
635 
636 		if (thread__tid(th) == tid) {
637 			threads__set_last_match(threads, th);
638 			machine__update_thread_pid(machine, th, pid);
639 			return thread__get(th);
640 		}
641 
642 		if (tid < thread__tid(th))
643 			p = &(*p)->rb_left;
644 		else {
645 			p = &(*p)->rb_right;
646 			leftmost = false;
647 		}
648 	}
649 
650 	if (!create)
651 		return NULL;
652 
653 	th = thread__new(pid, tid);
654 	if (th == NULL)
655 		return NULL;
656 
657 	nd = malloc(sizeof(*nd));
658 	if (nd == NULL) {
659 		thread__put(th);
660 		return NULL;
661 	}
662 	nd->thread = th;
663 
664 	rb_link_node(&nd->rb_node, parent, p);
665 	rb_insert_color_cached(&nd->rb_node, &threads->entries, leftmost);
666 	/*
667 	 * We have to initialize maps separately after rb tree is updated.
668 	 *
669 	 * The reason is that we call machine__findnew_thread within
670 	 * thread__init_maps to find the thread leader and that would screw up
671 	 * the rb tree.
672 	 */
673 	if (thread__init_maps(th, machine)) {
674 		pr_err("Thread init failed thread %d\n", pid);
675 		rb_erase_cached(&nd->rb_node, &threads->entries);
676 		RB_CLEAR_NODE(&nd->rb_node);
677 		free(nd);
678 		thread__put(th);
679 		return NULL;
680 	}
681 	/*
682 	 * It is now in the rbtree, get a ref
683 	 */
684 	threads__set_last_match(threads, th);
685 	++threads->nr;
686 
687 	return thread__get(th);
688 }
689 
690 struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
691 {
692 	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
693 }
694 
695 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
696 				       pid_t tid)
697 {
698 	struct threads *threads = machine__threads(machine, tid);
699 	struct thread *th;
700 
701 	down_write(&threads->lock);
702 	th = __machine__findnew_thread(machine, pid, tid);
703 	up_write(&threads->lock);
704 	return th;
705 }
706 
707 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
708 				    pid_t tid)
709 {
710 	struct threads *threads = machine__threads(machine, tid);
711 	struct thread *th;
712 
713 	down_read(&threads->lock);
714 	th =  ____machine__findnew_thread(machine, threads, pid, tid, false);
715 	up_read(&threads->lock);
716 	return th;
717 }
718 
719 /*
720  * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
721  * So here a single thread is created for that, but actually there is a separate
722  * idle task per cpu, so there should be one 'struct thread' per cpu, but there
723  * is only 1. That causes problems for some tools, requiring workarounds. For
724  * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
725  */
726 struct thread *machine__idle_thread(struct machine *machine)
727 {
728 	struct thread *thread = machine__findnew_thread(machine, 0, 0);
729 
730 	if (!thread || thread__set_comm(thread, "swapper", 0) ||
731 	    thread__set_namespaces(thread, 0, NULL))
732 		pr_err("problem inserting idle task for machine pid %d\n", machine->pid);
733 
734 	return thread;
735 }
736 
737 struct comm *machine__thread_exec_comm(struct machine *machine,
738 				       struct thread *thread)
739 {
740 	if (machine->comm_exec)
741 		return thread__exec_comm(thread);
742 	else
743 		return thread__comm(thread);
744 }
745 
746 int machine__process_comm_event(struct machine *machine, union perf_event *event,
747 				struct perf_sample *sample)
748 {
749 	struct thread *thread = machine__findnew_thread(machine,
750 							event->comm.pid,
751 							event->comm.tid);
752 	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
753 	int err = 0;
754 
755 	if (exec)
756 		machine->comm_exec = true;
757 
758 	if (dump_trace)
759 		perf_event__fprintf_comm(event, stdout);
760 
761 	if (thread == NULL ||
762 	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
763 		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
764 		err = -1;
765 	}
766 
767 	thread__put(thread);
768 
769 	return err;
770 }
771 
772 int machine__process_namespaces_event(struct machine *machine __maybe_unused,
773 				      union perf_event *event,
774 				      struct perf_sample *sample __maybe_unused)
775 {
776 	struct thread *thread = machine__findnew_thread(machine,
777 							event->namespaces.pid,
778 							event->namespaces.tid);
779 	int err = 0;
780 
781 	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
782 		  "\nWARNING: kernel seems to support more namespaces than perf"
783 		  " tool.\nTry updating the perf tool..\n\n");
784 
785 	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
786 		  "\nWARNING: perf tool seems to support more namespaces than"
787 		  " the kernel.\nTry updating the kernel..\n\n");
788 
789 	if (dump_trace)
790 		perf_event__fprintf_namespaces(event, stdout);
791 
792 	if (thread == NULL ||
793 	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
794 		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
795 		err = -1;
796 	}
797 
798 	thread__put(thread);
799 
800 	return err;
801 }
802 
803 int machine__process_cgroup_event(struct machine *machine,
804 				  union perf_event *event,
805 				  struct perf_sample *sample __maybe_unused)
806 {
807 	struct cgroup *cgrp;
808 
809 	if (dump_trace)
810 		perf_event__fprintf_cgroup(event, stdout);
811 
812 	cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
813 	if (cgrp == NULL)
814 		return -ENOMEM;
815 
816 	return 0;
817 }
818 
819 int machine__process_lost_event(struct machine *machine __maybe_unused,
820 				union perf_event *event, struct perf_sample *sample __maybe_unused)
821 {
822 	dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
823 		    event->lost.id, event->lost.lost);
824 	return 0;
825 }
826 
827 int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
828 					union perf_event *event, struct perf_sample *sample)
829 {
830 	dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
831 		    sample->id, event->lost_samples.lost);
832 	return 0;
833 }
834 
835 static struct dso *machine__findnew_module_dso(struct machine *machine,
836 					       struct kmod_path *m,
837 					       const char *filename)
838 {
839 	struct dso *dso;
840 
841 	down_write(&machine->dsos.lock);
842 
843 	dso = __dsos__find(&machine->dsos, m->name, true);
844 	if (!dso) {
845 		dso = __dsos__addnew(&machine->dsos, m->name);
846 		if (dso == NULL)
847 			goto out_unlock;
848 
849 		dso__set_module_info(dso, m, machine);
850 		dso__set_long_name(dso, strdup(filename), true);
851 		dso->kernel = DSO_SPACE__KERNEL;
852 	}
853 
854 	dso__get(dso);
855 out_unlock:
856 	up_write(&machine->dsos.lock);
857 	return dso;
858 }
859 
860 int machine__process_aux_event(struct machine *machine __maybe_unused,
861 			       union perf_event *event)
862 {
863 	if (dump_trace)
864 		perf_event__fprintf_aux(event, stdout);
865 	return 0;
866 }
867 
868 int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
869 					union perf_event *event)
870 {
871 	if (dump_trace)
872 		perf_event__fprintf_itrace_start(event, stdout);
873 	return 0;
874 }
875 
876 int machine__process_aux_output_hw_id_event(struct machine *machine __maybe_unused,
877 					    union perf_event *event)
878 {
879 	if (dump_trace)
880 		perf_event__fprintf_aux_output_hw_id(event, stdout);
881 	return 0;
882 }
883 
884 int machine__process_switch_event(struct machine *machine __maybe_unused,
885 				  union perf_event *event)
886 {
887 	if (dump_trace)
888 		perf_event__fprintf_switch(event, stdout);
889 	return 0;
890 }
891 
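/*
 * PERF_RECORD_KSYMBOL (register): if no kernel map covers the symbol's
 * address, create a dso and a map for it (marking OOL and BPF image dsos as
 * such) and insert the map into the kernel maps; then add the symbol to the
 * dso.
 */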
892 static int machine__process_ksymbol_register(struct machine *machine,
893 					     union perf_event *event,
894 					     struct perf_sample *sample __maybe_unused)
895 {
896 	struct symbol *sym;
897 	struct dso *dso;
898 	struct map *map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
899 	bool put_map = false;
900 	int err = 0;
901 
902 	if (!map) {
903 		dso = dso__new(event->ksymbol.name);
904 
905 		if (!dso) {
906 			err = -ENOMEM;
907 			goto out;
908 		}
909 		dso->kernel = DSO_SPACE__KERNEL;
910 		map = map__new2(0, dso);
911 		dso__put(dso);
912 		if (!map) {
913 			err = -ENOMEM;
914 			goto out;
915 		}
916 		/*
917 		 * The inserted map has a get on it, we need to put to release
918 		 * the reference count here, but do it after all accesses are
919 		 * done.
920 		 */
921 		put_map = true;
922 		if (event->ksymbol.ksym_type == PERF_RECORD_KSYMBOL_TYPE_OOL) {
923 			dso->binary_type = DSO_BINARY_TYPE__OOL;
924 			dso->data.file_size = event->ksymbol.len;
925 			dso__set_loaded(dso);
926 		}
927 
928 		map__set_start(map, event->ksymbol.addr);
929 		map__set_end(map, map__start(map) + event->ksymbol.len);
930 		err = maps__insert(machine__kernel_maps(machine), map);
931 		if (err) {
932 			err = -ENOMEM;
933 			goto out;
934 		}
935 
936 		dso__set_loaded(dso);
937 
938 		if (is_bpf_image(event->ksymbol.name)) {
939 			dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
940 			dso__set_long_name(dso, "", false);
941 		}
942 	} else {
943 		dso = map__dso(map);
944 	}
945 
946 	sym = symbol__new(map__map_ip(map, map__start(map)),
947 			  event->ksymbol.len,
948 			  0, 0, event->ksymbol.name);
949 	if (!sym) {
950 		err = -ENOMEM;
951 		goto out;
952 	}
953 	dso__insert_symbol(dso, sym);
954 out:
955 	if (put_map)
956 		map__put(map);
957 	return err;
958 }
959 
960 static int machine__process_ksymbol_unregister(struct machine *machine,
961 					       union perf_event *event,
962 					       struct perf_sample *sample __maybe_unused)
963 {
964 	struct symbol *sym;
965 	struct map *map;
966 
967 	map = maps__find(machine__kernel_maps(machine), event->ksymbol.addr);
968 	if (!map)
969 		return 0;
970 
971 	if (!RC_CHK_EQUAL(map, machine->vmlinux_map))
972 		maps__remove(machine__kernel_maps(machine), map);
973 	else {
974 		struct dso *dso = map__dso(map);
975 
976 		sym = dso__find_symbol(dso, map__map_ip(map, map__start(map)));
977 		if (sym)
978 			dso__delete_symbol(dso, sym);
979 	}
980 
981 	return 0;
982 }
983 
984 int machine__process_ksymbol(struct machine *machine __maybe_unused,
985 			     union perf_event *event,
986 			     struct perf_sample *sample)
987 {
988 	if (dump_trace)
989 		perf_event__fprintf_ksymbol(event, stdout);
990 
991 	if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
992 		return machine__process_ksymbol_unregister(machine, event,
993 							   sample);
994 	return machine__process_ksymbol_register(machine, event, sample);
995 }
996 
997 int machine__process_text_poke(struct machine *machine, union perf_event *event,
998 			       struct perf_sample *sample __maybe_unused)
999 {
1000 	struct map *map = maps__find(machine__kernel_maps(machine), event->text_poke.addr);
1001 	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1002 	struct dso *dso = map ? map__dso(map) : NULL;
1003 
1004 	if (dump_trace)
1005 		perf_event__fprintf_text_poke(event, machine, stdout);
1006 
1007 	if (!event->text_poke.new_len)
1008 		return 0;
1009 
1010 	if (cpumode != PERF_RECORD_MISC_KERNEL) {
1011 		pr_debug("%s: unsupported cpumode - ignoring\n", __func__);
1012 		return 0;
1013 	}
1014 
1015 	if (dso) {
1016 		u8 *new_bytes = event->text_poke.bytes + event->text_poke.old_len;
1017 		int ret;
1018 
1019 		/*
1020 		 * Kernel maps might be changed when loading symbols so loading
1021 		 * must be done prior to using kernel maps.
1022 		 */
1023 		map__load(map);
1024 		ret = dso__data_write_cache_addr(dso, map, machine,
1025 						 event->text_poke.addr,
1026 						 new_bytes,
1027 						 event->text_poke.new_len);
1028 		if (ret != event->text_poke.new_len)
1029 			pr_debug("Failed to write kernel text poke at %#" PRI_lx64 "\n",
1030 				 event->text_poke.addr);
1031 	} else {
1032 		pr_debug("Failed to find kernel text poke address map for %#" PRI_lx64 "\n",
1033 			 event->text_poke.addr);
1034 	}
1035 
1036 	return 0;
1037 }
1038 
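/*
 * Create a map for the kernel module at 'start', backed by a dso named after
 * the module, insert it into the kernel maps and return it (NULL on failure).
 */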
1039 static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
1040 					      const char *filename)
1041 {
1042 	struct map *map = NULL;
1043 	struct kmod_path m;
1044 	struct dso *dso;
1045 	int err;
1046 
1047 	if (kmod_path__parse_name(&m, filename))
1048 		return NULL;
1049 
1050 	dso = machine__findnew_module_dso(machine, &m, filename);
1051 	if (dso == NULL)
1052 		goto out;
1053 
1054 	map = map__new2(start, dso);
1055 	if (map == NULL)
1056 		goto out;
1057 
1058 	err = maps__insert(machine__kernel_maps(machine), map);
1059 	/* If maps__insert failed, return NULL. */
1060 	if (err) {
1061 		map__put(map);
1062 		map = NULL;
1063 	}
1064 out:
1065 	/* Put the dso here, corresponding to machine__findnew_module_dso() */
1066 	dso__put(dso);
1067 	zfree(&m.name);
1068 	return map;
1069 }
1070 
1071 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
1072 {
1073 	struct rb_node *nd;
1074 	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
1075 
1076 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
1077 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
1078 		ret += __dsos__fprintf(&pos->dsos.head, fp);
1079 	}
1080 
1081 	return ret;
1082 }
1083 
1084 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
1085 				     bool (skip)(struct dso *dso, int parm), int parm)
1086 {
1087 	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
1088 }
1089 
1090 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
1091 				     bool (skip)(struct dso *dso, int parm), int parm)
1092 {
1093 	struct rb_node *nd;
1094 	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
1095 
1096 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
1097 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
1098 		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
1099 	}
1100 	return ret;
1101 }
1102 
1103 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
1104 {
1105 	int i;
1106 	size_t printed = 0;
1107 	struct dso *kdso = machine__kernel_dso(machine);
1108 
1109 	if (kdso->has_build_id) {
1110 		char filename[PATH_MAX];
1111 		if (dso__build_id_filename(kdso, filename, sizeof(filename),
1112 					   false))
1113 			printed += fprintf(fp, "[0] %s\n", filename);
1114 	}
1115 
1116 	for (i = 0; i < vmlinux_path__nr_entries; ++i)
1117 		printed += fprintf(fp, "[%d] %s\n",
1118 				   i + kdso->has_build_id, vmlinux_path[i]);
1119 
1120 	return printed;
1121 }
1122 
1123 size_t machine__fprintf(struct machine *machine, FILE *fp)
1124 {
1125 	struct rb_node *nd;
1126 	size_t ret;
1127 	int i;
1128 
1129 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
1130 		struct threads *threads = &machine->threads[i];
1131 
1132 		down_read(&threads->lock);
1133 
1134 		ret = fprintf(fp, "Threads: %u\n", threads->nr);
1135 
1136 		for (nd = rb_first_cached(&threads->entries); nd;
1137 		     nd = rb_next(nd)) {
1138 			struct thread *pos = rb_entry(nd, struct thread_rb_node, rb_node)->thread;
1139 
1140 			ret += thread__fprintf(pos, fp);
1141 		}
1142 
1143 		up_read(&threads->lock);
1144 	}
1145 	return ret;
1146 }
1147 
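/*
 * Find or create the dso representing this machine's kernel, honouring any
 * vmlinux name configured by the user, and read the running kernel's build-id
 * into it if it isn't known yet.
 */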
1148 static struct dso *machine__get_kernel(struct machine *machine)
1149 {
1150 	const char *vmlinux_name = machine->mmap_name;
1151 	struct dso *kernel;
1152 
1153 	if (machine__is_host(machine)) {
1154 		if (symbol_conf.vmlinux_name)
1155 			vmlinux_name = symbol_conf.vmlinux_name;
1156 
1157 		kernel = machine__findnew_kernel(machine, vmlinux_name,
1158 						 "[kernel]", DSO_SPACE__KERNEL);
1159 	} else {
1160 		if (symbol_conf.default_guest_vmlinux_name)
1161 			vmlinux_name = symbol_conf.default_guest_vmlinux_name;
1162 
1163 		kernel = machine__findnew_kernel(machine, vmlinux_name,
1164 						 "[guest.kernel]",
1165 						 DSO_SPACE__KERNEL_GUEST);
1166 	}
1167 
1168 	if (kernel != NULL && (!kernel->has_build_id))
1169 		dso__read_running_kernel_build_id(kernel, machine);
1170 
1171 	return kernel;
1172 }
1173 
1174 void machine__get_kallsyms_filename(struct machine *machine, char *buf,
1175 				    size_t bufsz)
1176 {
1177 	if (machine__is_default_guest(machine))
1178 		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
1179 	else
1180 		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
1181 }
1182 
1183 const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
1184 
1185 /* Figure out the start address of kernel map from /proc/kallsyms.
1186  * Returns the name of the start symbol in *symbol_name. Pass in NULL as
1187  * symbol_name if it's not that important.
1188  */
1189 static int machine__get_running_kernel_start(struct machine *machine,
1190 					     const char **symbol_name,
1191 					     u64 *start, u64 *end)
1192 {
1193 	char filename[PATH_MAX];
1194 	int i, err = -1;
1195 	const char *name;
1196 	u64 addr = 0;
1197 
1198 	machine__get_kallsyms_filename(machine, filename, PATH_MAX);
1199 
1200 	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1201 		return 0;
1202 
1203 	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
1204 		err = kallsyms__get_function_start(filename, name, &addr);
1205 		if (!err)
1206 			break;
1207 	}
1208 
1209 	if (err)
1210 		return -1;
1211 
1212 	if (symbol_name)
1213 		*symbol_name = name;
1214 
1215 	*start = addr;
1216 
1217 	err = kallsyms__get_symbol_start(filename, "_edata", &addr);
1218 	if (err)
1219 		err = kallsyms__get_function_start(filename, "_etext", &addr);
1220 	if (!err)
1221 		*end = addr;
1222 
1223 	return 0;
1224 }
1225 
1226 int machine__create_extra_kernel_map(struct machine *machine,
1227 				     struct dso *kernel,
1228 				     struct extra_kernel_map *xm)
1229 {
1230 	struct kmap *kmap;
1231 	struct map *map;
1232 	int err;
1233 
1234 	map = map__new2(xm->start, kernel);
1235 	if (!map)
1236 		return -ENOMEM;
1237 
1238 	map__set_end(map, xm->end);
1239 	map__set_pgoff(map, xm->pgoff);
1240 
1241 	kmap = map__kmap(map);
1242 
1243 	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1244 
1245 	err = maps__insert(machine__kernel_maps(machine), map);
1246 
1247 	if (!err) {
1248 		pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1249 			kmap->name, map__start(map), map__end(map));
1250 	}
1251 
1252 	map__put(map);
1253 
1254 	return err;
1255 }
1256 
1257 static u64 find_entry_trampoline(struct dso *dso)
1258 {
1259 	/* Duplicates are removed so lookup all aliases */
1260 	const char *syms[] = {
1261 		"_entry_trampoline",
1262 		"__entry_trampoline_start",
1263 		"entry_SYSCALL_64_trampoline",
1264 	};
1265 	struct symbol *sym = dso__first_symbol(dso);
1266 	unsigned int i;
1267 
1268 	for (; sym; sym = dso__next_symbol(sym)) {
1269 		if (sym->binding != STB_GLOBAL)
1270 			continue;
1271 		for (i = 0; i < ARRAY_SIZE(syms); i++) {
1272 			if (!strcmp(sym->name, syms[i]))
1273 				return sym->start;
1274 		}
1275 	}
1276 
1277 	return 0;
1278 }
1279 
1280 /*
1281  * These values can be used for kernels that do not have symbols for the entry
1282  * trampolines in kallsyms.
1283  */
1284 #define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
1285 #define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
1286 #define X86_64_ENTRY_TRAMPOLINE		0x6000
1287 
1288 struct machine__map_x86_64_entry_trampolines_args {
1289 	struct maps *kmaps;
1290 	bool found;
1291 };
1292 
1293 static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data)
1294 {
1295 	struct machine__map_x86_64_entry_trampolines_args *args = data;
1296 	struct map *dest_map;
1297 	struct kmap *kmap = __map__kmap(map);
1298 
1299 	if (!kmap || !is_entry_trampoline(kmap->name))
1300 		return 0;
1301 
1302 	dest_map = maps__find(args->kmaps, map__pgoff(map));
1303 	if (dest_map != map)
1304 		map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map)));
1305 
1306 	args->found = true;
1307 	return 0;
1308 }
1309 
1310 /* Map x86_64 PTI entry trampolines */
1311 int machine__map_x86_64_entry_trampolines(struct machine *machine,
1312 					  struct dso *kernel)
1313 {
1314 	struct machine__map_x86_64_entry_trampolines_args args = {
1315 		.kmaps = machine__kernel_maps(machine),
1316 		.found = false,
1317 	};
1318 	int nr_cpus_avail, cpu;
1319 	u64 pgoff;
1320 
1321 	/*
1322 	 * In the vmlinux case, pgoff is a virtual address which must now be
1323 	 * mapped to a vmlinux offset.
1324 	 */
1325 	maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args);
1326 
1327 	if (args.found || machine->trampolines_mapped)
1328 		return 0;
1329 
1330 	pgoff = find_entry_trampoline(kernel);
1331 	if (!pgoff)
1332 		return 0;
1333 
1334 	nr_cpus_avail = machine__nr_cpus_avail(machine);
1335 
1336 	/* Add a 1 page map for each CPU's entry trampoline */
1337 	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1338 		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1339 			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1340 			 X86_64_ENTRY_TRAMPOLINE;
1341 		struct extra_kernel_map xm = {
1342 			.start = va,
1343 			.end   = va + page_size,
1344 			.pgoff = pgoff,
1345 		};
1346 
1347 		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1348 
1349 		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1350 			return -1;
1351 	}
1352 
1353 	machine->trampolines_mapped = nr_cpus_avail;
1354 
1355 	return 0;
1356 }
1357 
1358 int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1359 					     struct dso *kernel __maybe_unused)
1360 {
1361 	return 0;
1362 }
1363 
1364 static int
1365 __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1366 {
1367 	/* In case of renewal the kernel map, destroy previous one */
1368 	/* In case of renewing the kernel map, destroy the previous one */
1369 
1370 	map__put(machine->vmlinux_map);
1371 	machine->vmlinux_map = map__new2(0, kernel);
1372 	if (machine->vmlinux_map == NULL)
1373 		return -ENOMEM;
1374 
1375 	map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY);
1376 	return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map);
1377 }
1378 
1379 void machine__destroy_kernel_maps(struct machine *machine)
1380 {
1381 	struct kmap *kmap;
1382 	struct map *map = machine__kernel_map(machine);
1383 
1384 	if (map == NULL)
1385 		return;
1386 
1387 	kmap = map__kmap(map);
1388 	maps__remove(machine__kernel_maps(machine), map);
1389 	if (kmap && kmap->ref_reloc_sym) {
1390 		zfree((char **)&kmap->ref_reloc_sym->name);
1391 		zfree(&kmap->ref_reloc_sym);
1392 	}
1393 
1394 	map__zput(machine->vmlinux_map);
1395 }
1396 
1397 int machines__create_guest_kernel_maps(struct machines *machines)
1398 {
1399 	int ret = 0;
1400 	struct dirent **namelist = NULL;
1401 	int i, items = 0;
1402 	char path[PATH_MAX];
1403 	pid_t pid;
1404 	char *endp;
1405 
1406 	if (symbol_conf.default_guest_vmlinux_name ||
1407 	    symbol_conf.default_guest_modules ||
1408 	    symbol_conf.default_guest_kallsyms) {
1409 		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1410 	}
1411 
1412 	if (symbol_conf.guestmount) {
1413 		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1414 		if (items <= 0)
1415 			return -ENOENT;
1416 		for (i = 0; i < items; i++) {
1417 			if (!isdigit(namelist[i]->d_name[0])) {
1418 				/* Filter out . and .. */
1419 				continue;
1420 			}
1421 			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1422 			if ((*endp != '\0') ||
1423 			    (endp == namelist[i]->d_name) ||
1424 			    (errno == ERANGE)) {
1425 				pr_debug("invalid directory (%s). Skipping.\n",
1426 					 namelist[i]->d_name);
1427 				continue;
1428 			}
1429 			sprintf(path, "%s/%s/proc/kallsyms",
1430 				symbol_conf.guestmount,
1431 				namelist[i]->d_name);
1432 			ret = access(path, R_OK);
1433 			if (ret) {
1434 				pr_debug("Can't access file %s\n", path);
1435 				goto failure;
1436 			}
1437 			machines__create_kernel_maps(machines, pid);
1438 		}
1439 failure:
1440 		free(namelist);
1441 	}
1442 
1443 	return ret;
1444 }
1445 
1446 void machines__destroy_kernel_maps(struct machines *machines)
1447 {
1448 	struct rb_node *next = rb_first_cached(&machines->guests);
1449 
1450 	machine__destroy_kernel_maps(&machines->host);
1451 
1452 	while (next) {
1453 		struct machine *pos = rb_entry(next, struct machine, rb_node);
1454 
1455 		next = rb_next(&pos->rb_node);
1456 		rb_erase_cached(&pos->rb_node, &machines->guests);
1457 		machine__delete(pos);
1458 	}
1459 }
1460 
1461 int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1462 {
1463 	struct machine *machine = machines__findnew(machines, pid);
1464 
1465 	if (machine == NULL)
1466 		return -1;
1467 
1468 	return machine__create_kernel_maps(machine);
1469 }
1470 
1471 int machine__load_kallsyms(struct machine *machine, const char *filename)
1472 {
1473 	struct map *map = machine__kernel_map(machine);
1474 	struct dso *dso = map__dso(map);
1475 	int ret = __dso__load_kallsyms(dso, filename, map, true);
1476 
1477 	if (ret > 0) {
1478 		dso__set_loaded(dso);
1479 		/*
1480 		 * Since /proc/kallsyms will have multiple sessions for the
1481 		 * Since /proc/kallsyms will have multiple sections for the
1482 		 * sections.
1483 		 */
1484 		maps__fixup_end(machine__kernel_maps(machine));
1485 	}
1486 
1487 	return ret;
1488 }
1489 
1490 int machine__load_vmlinux_path(struct machine *machine)
1491 {
1492 	struct map *map = machine__kernel_map(machine);
1493 	struct dso *dso = map__dso(map);
1494 	int ret = dso__load_vmlinux_path(dso, map);
1495 
1496 	if (ret > 0)
1497 		dso__set_loaded(dso);
1498 
1499 	return ret;
1500 }
1501 
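/*
 * Return a copy of the kernel release parsed from <root_dir>/proc/version,
 * i.e. the word that follows the "Linux version " prefix.
 */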
1502 static char *get_kernel_version(const char *root_dir)
1503 {
1504 	char version[PATH_MAX];
1505 	FILE *file;
1506 	char *name, *tmp;
1507 	const char *prefix = "Linux version ";
1508 
1509 	sprintf(version, "%s/proc/version", root_dir);
1510 	file = fopen(version, "r");
1511 	if (!file)
1512 		return NULL;
1513 
1514 	tmp = fgets(version, sizeof(version), file);
1515 	fclose(file);
1516 	if (!tmp)
1517 		return NULL;
1518 
1519 	name = strstr(version, prefix);
1520 	if (!name)
1521 		return NULL;
1522 	name += strlen(prefix);
1523 	tmp = strchr(name, ' ');
1524 	if (tmp)
1525 		*tmp = '\0';
1526 
1527 	return strdup(name);
1528 }
1529 
1530 static bool is_kmod_dso(struct dso *dso)
1531 {
1532 	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1533 	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1534 }
1535 
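/*
 * Point the dso of the module map named m->name at its file on disk: set the
 * dso's long name to 'path', fetch its build-id, and note compression in
 * symtab_type/comp when the file name says the module is compressed.
 */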
1536 static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
1537 {
1538 	char *long_name;
1539 	struct dso *dso;
1540 	struct map *map = maps__find_by_name(maps, m->name);
1541 
1542 	if (map == NULL)
1543 		return 0;
1544 
1545 	long_name = strdup(path);
1546 	if (long_name == NULL)
1547 		return -ENOMEM;
1548 
1549 	dso = map__dso(map);
1550 	dso__set_long_name(dso, long_name, true);
1551 	dso__kernel_module_get_build_id(dso, "");
1552 
1553 	/*
1554 	 * The full name can reveal kmod compression, so
1555 	 * update the symtab_type if needed.
1556 	 */
1557 	if (m->comp && is_kmod_dso(dso)) {
1558 		dso->symtab_type++;
1559 		dso->comp = m->comp;
1560 	}
1561 
1562 	return 0;
1563 }
1564 
1565 static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1566 {
1567 	struct dirent *dent;
1568 	DIR *dir = opendir(dir_name);
1569 	int ret = 0;
1570 
1571 	if (!dir) {
1572 		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1573 		return -1;
1574 	}
1575 
1576 	while ((dent = readdir(dir)) != NULL) {
1577 		char path[PATH_MAX];
1578 		struct stat st;
1579 
1580 		/* sshfs might return a bad dent->d_type, so we have to stat */
1581 		path__join(path, sizeof(path), dir_name, dent->d_name);
1582 		if (stat(path, &st))
1583 			continue;
1584 
1585 		if (S_ISDIR(st.st_mode)) {
1586 			if (!strcmp(dent->d_name, ".") ||
1587 			    !strcmp(dent->d_name, ".."))
1588 				continue;
1589 
1590 			/* Do not follow top-level source and build symlinks */
1591 			if (depth == 0) {
1592 				if (!strcmp(dent->d_name, "source") ||
1593 				    !strcmp(dent->d_name, "build"))
1594 					continue;
1595 			}
1596 
1597 			ret = maps__set_modules_path_dir(maps, path, depth + 1);
1598 			if (ret < 0)
1599 				goto out;
1600 		} else {
1601 			struct kmod_path m;
1602 
1603 			ret = kmod_path__parse_name(&m, dent->d_name);
1604 			if (ret)
1605 				goto out;
1606 
1607 			if (m.kmod)
1608 				ret = maps__set_module_path(maps, path, &m);
1609 
1610 			zfree(&m.name);
1611 
1612 			if (ret)
1613 				goto out;
1614 		}
1615 	}
1616 
1617 out:
1618 	closedir(dir);
1619 	return ret;
1620 }
1621 
1622 static int machine__set_modules_path(struct machine *machine)
1623 {
1624 	char *version;
1625 	char modules_path[PATH_MAX];
1626 
1627 	version = get_kernel_version(machine->root_dir);
1628 	if (!version)
1629 		return -1;
1630 
1631 	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1632 		 machine->root_dir, version);
1633 	free(version);
1634 
1635 	return maps__set_modules_path_dir(machine__kernel_maps(machine), modules_path, 0);
1636 }
1637 int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1638 				u64 *size __maybe_unused,
1639 				const char *name __maybe_unused)
1640 {
1641 	return 0;
1642 }
1643 
1644 static int machine__create_module(void *arg, const char *name, u64 start,
1645 				  u64 size)
1646 {
1647 	struct machine *machine = arg;
1648 	struct map *map;
1649 
1650 	if (arch__fix_module_text_start(&start, &size, name) < 0)
1651 		return -1;
1652 
1653 	map = machine__addnew_module_map(machine, start, name);
1654 	if (map == NULL)
1655 		return -1;
1656 	map__set_end(map, start + size);
1657 
1658 	dso__kernel_module_get_build_id(map__dso(map), machine->root_dir);
1659 	map__put(map);
1660 	return 0;
1661 }
1662 
1663 static int machine__create_modules(struct machine *machine)
1664 {
1665 	const char *modules;
1666 	char path[PATH_MAX];
1667 
1668 	if (machine__is_default_guest(machine)) {
1669 		modules = symbol_conf.default_guest_modules;
1670 	} else {
1671 		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1672 		modules = path;
1673 	}
1674 
1675 	if (symbol__restricted_filename(modules, "/proc/modules"))
1676 		return -1;
1677 
1678 	if (modules__parse(modules, machine, machine__create_module))
1679 		return -1;
1680 
1681 	if (!machine__set_modules_path(machine))
1682 		return 0;
1683 
1684 	pr_debug("Problems setting modules path maps, continuing anyway...\n");
1685 
1686 	return 0;
1687 }
1688 
1689 static void machine__set_kernel_mmap(struct machine *machine,
1690 				     u64 start, u64 end)
1691 {
1692 	map__set_start(machine->vmlinux_map, start);
1693 	map__set_end(machine->vmlinux_map, end);
1694 	/*
1695 	 * Be a bit paranoid here: some perf.data files come with
1696 	 * a zero-sized synthesized MMAP event for the kernel.
1697 	 */
1698 	if (start == 0 && end == 0)
1699 		map__set_end(machine->vmlinux_map, ~0ULL);
1700 }
1701 
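/*
 * The kernel map is changing its address range and the kernel maps are kept
 * sorted by address, so take it out of the kernel maps and re-insert it with
 * the new range instead of updating it in place.
 */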
1702 static int machine__update_kernel_mmap(struct machine *machine,
1703 				     u64 start, u64 end)
1704 {
1705 	struct map *orig, *updated;
1706 	int err;
1707 
1708 	orig = machine->vmlinux_map;
1709 	updated = map__get(orig);
1710 
1711 	machine->vmlinux_map = updated;
1712 	machine__set_kernel_mmap(machine, start, end);
1713 	maps__remove(machine__kernel_maps(machine), orig);
1714 	err = maps__insert(machine__kernel_maps(machine), updated);
1715 	map__put(orig);
1716 
1717 	return err;
1718 }
1719 
1720 int machine__create_kernel_maps(struct machine *machine)
1721 {
1722 	struct dso *kernel = machine__get_kernel(machine);
1723 	const char *name = NULL;
1724 	u64 start = 0, end = ~0ULL;
1725 	int ret;
1726 
1727 	if (kernel == NULL)
1728 		return -1;
1729 
1730 	ret = __machine__create_kernel_maps(machine, kernel);
1731 	if (ret < 0)
1732 		goto out_put;
1733 
1734 	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1735 		if (machine__is_host(machine))
1736 			pr_debug("Problems creating module maps, "
1737 				 "continuing anyway...\n");
1738 		else
1739 			pr_debug("Problems creating module maps for guest %d, "
1740 				 "continuing anyway...\n", machine->pid);
1741 	}
1742 
1743 	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1744 		if (name &&
1745 		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1746 			machine__destroy_kernel_maps(machine);
1747 			ret = -1;
1748 			goto out_put;
1749 		}
1750 
1751 		/*
1752 		 * We have a real start address now, so re-order the kmaps;
1753 		 * assume it's the last in the kmaps.
1754 		 */
1755 		ret = machine__update_kernel_mmap(machine, start, end);
1756 		if (ret < 0)
1757 			goto out_put;
1758 	}
1759 
1760 	if (machine__create_extra_kernel_maps(machine, kernel))
1761 		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1762 
1763 	if (end == ~0ULL) {
1764 		/* update end address of the kernel map using adjacent module address */
1765 		struct map *next = maps__find_next_entry(machine__kernel_maps(machine),
1766 							 machine__kernel_map(machine));
1767 
1768 		if (next)
1769 			machine__set_kernel_mmap(machine, start, map__start(next));
1770 	}
1771 
1772 out_put:
1773 	dso__put(kernel);
1774 	return ret;
1775 }
1776 
1777 static bool machine__uses_kcore(struct machine *machine)
1778 {
1779 	struct dso *dso;
1780 
1781 	list_for_each_entry(dso, &machine->dsos.head, node) {
1782 		if (dso__is_kcore(dso))
1783 			return true;
1784 	}
1785 
1786 	return false;
1787 }
1788 
1789 static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1790 					     struct extra_kernel_map *xm)
1791 {
1792 	return machine__is(machine, "x86_64") &&
1793 	       is_entry_trampoline(xm->name);
1794 }
1795 
1796 static int machine__process_extra_kernel_map(struct machine *machine,
1797 					     struct extra_kernel_map *xm)
1798 {
1799 	struct dso *kernel = machine__kernel_dso(machine);
1800 
1801 	if (kernel == NULL)
1802 		return -1;
1803 
1804 	return machine__create_extra_kernel_map(machine, kernel, xm);
1805 }
1806 
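/*
 * A kernel-space MMAP event either describes a module (a path, or a bracketed
 * name other than the kernel mmap name), the kernel image itself, or, on
 * x86_64, an entry trampoline; create or update the corresponding kernel
 * maps.  Machines whose maps come from kcore are left alone.
 */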
1807 static int machine__process_kernel_mmap_event(struct machine *machine,
1808 					      struct extra_kernel_map *xm,
1809 					      struct build_id *bid)
1810 {
1811 	enum dso_space_type dso_space;
1812 	bool is_kernel_mmap;
1813 	const char *mmap_name = machine->mmap_name;
1814 
1815 	/* If we have maps from kcore then we do not need or want any others */
1816 	if (machine__uses_kcore(machine))
1817 		return 0;
1818 
1819 	if (machine__is_host(machine))
1820 		dso_space = DSO_SPACE__KERNEL;
1821 	else
1822 		dso_space = DSO_SPACE__KERNEL_GUEST;
1823 
1824 	is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1825 	if (!is_kernel_mmap && !machine__is_host(machine)) {
1826 		/*
1827 		 * If the event was recorded inside the guest and injected into
1828 		 * the host perf.data file, then it will match a host mmap_name,
1829 		 * so try that - see machine__set_mmap_name().
1830 		 */
1831 		mmap_name = "[kernel.kallsyms]";
1832 		is_kernel_mmap = memcmp(xm->name, mmap_name, strlen(mmap_name) - 1) == 0;
1833 	}
1834 	if (xm->name[0] == '/' ||
1835 	    (!is_kernel_mmap && xm->name[0] == '[')) {
1836 		struct map *map = machine__addnew_module_map(machine, xm->start, xm->name);
1837 
1838 		if (map == NULL)
1839 			goto out_problem;
1840 
1841 		map__set_end(map, map__start(map) + xm->end - xm->start);
1842 
1843 		if (build_id__is_defined(bid))
1844 			dso__set_build_id(map__dso(map), bid);
1845 
1846 		map__put(map);
1847 	} else if (is_kernel_mmap) {
1848 		const char *symbol_name = xm->name + strlen(mmap_name);
1849 		/*
1850 		 * Should be there already, from the build-id table in
1851 		 * the header.
1852 		 */
1853 		struct dso *kernel = NULL;
1854 		struct dso *dso;
1855 
1856 		down_read(&machine->dsos.lock);
1857 
1858 		list_for_each_entry(dso, &machine->dsos.head, node) {
1859 
1860 			/*
1861 			 * The cpumode passed to is_kernel_module is not the
1862 			 * cpumode of *this* event. If we insist on passing
1863 			 * the correct cpumode to is_kernel_module(), we should
1864 			 * record the cpumode when we add this dso to the
1865 			 * linked list.
1866 			 *
1867 			 * However, we don't really need to pass the correct
1868 			 * cpumode.  We know the correct cpumode must be kernel
1869 			 * mode (if not, we should not link it onto kernel_dsos
1870 			 * list).
1871 			 *
1872 			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
1873 			 * is_kernel_module() treats it as a kernel cpumode.
1874 			 */
1875 
1876 			if (!dso->kernel ||
1877 			    is_kernel_module(dso->long_name,
1878 					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1879 				continue;
1880 
1881 
1882 			kernel = dso__get(dso);
1883 			break;
1884 		}
1885 
1886 		up_read(&machine->dsos.lock);
1887 
1888 		if (kernel == NULL)
1889 			kernel = machine__findnew_dso(machine, machine->mmap_name);
1890 		if (kernel == NULL)
1891 			goto out_problem;
1892 
1893 		kernel->kernel = dso_space;
1894 		if (__machine__create_kernel_maps(machine, kernel) < 0) {
1895 			dso__put(kernel);
1896 			goto out_problem;
1897 		}
1898 
1899 		if (strstr(kernel->long_name, "vmlinux"))
1900 			dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1901 
1902 		if (machine__update_kernel_mmap(machine, xm->start, xm->end) < 0) {
1903 			dso__put(kernel);
1904 			goto out_problem;
1905 		}
1906 
1907 		if (build_id__is_defined(bid))
1908 			dso__set_build_id(kernel, bid);
1909 
1910 		/*
1911 		 * Avoid using a zero address (kptr_restrict) for the ref reloc
1912 		 * symbol. Effectively having zero here means that at record
1913 		 * time /proc/sys/kernel/kptr_restrict was non-zero.
1914 		 */
1915 		if (xm->pgoff != 0) {
1916 			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1917 							symbol_name,
1918 							xm->pgoff);
1919 		}
1920 
1921 		if (machine__is_default_guest(machine)) {
1922 			/*
1923 			 * preload dso of guest kernel and modules
1924 			 */
1925 			dso__load(kernel, machine__kernel_map(machine));
1926 		}
1927 		dso__put(kernel);
1928 	} else if (perf_event__is_extra_kernel_mmap(machine, xm)) {
1929 		return machine__process_extra_kernel_map(machine, xm);
1930 	}
1931 	return 0;
1932 out_problem:
1933 	return -1;
1934 }
1935 
1936 int machine__process_mmap2_event(struct machine *machine,
1937 				 union perf_event *event,
1938 				 struct perf_sample *sample)
1939 {
1940 	struct thread *thread;
1941 	struct map *map;
1942 	struct dso_id dso_id = {
1943 		.maj = event->mmap2.maj,
1944 		.min = event->mmap2.min,
1945 		.ino = event->mmap2.ino,
1946 		.ino_generation = event->mmap2.ino_generation,
1947 	};
1948 	struct build_id __bid, *bid = NULL;
1949 	int ret = 0;
1950 
1951 	if (dump_trace)
1952 		perf_event__fprintf_mmap2(event, stdout);
1953 
1954 	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
1955 		bid = &__bid;
1956 		build_id__init(bid, event->mmap2.build_id, event->mmap2.build_id_size);
1957 	}
1958 
1959 	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1960 	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1961 		struct extra_kernel_map xm = {
1962 			.start = event->mmap2.start,
1963 			.end   = event->mmap2.start + event->mmap2.len,
1964 			.pgoff = event->mmap2.pgoff,
1965 		};
1966 
1967 		strlcpy(xm.name, event->mmap2.filename, KMAP_NAME_LEN);
1968 		ret = machine__process_kernel_mmap_event(machine, &xm, bid);
1969 		if (ret < 0)
1970 			goto out_problem;
1971 		return 0;
1972 	}
1973 
1974 	thread = machine__findnew_thread(machine, event->mmap2.pid,
1975 					event->mmap2.tid);
1976 	if (thread == NULL)
1977 		goto out_problem;
1978 
1979 	map = map__new(machine, event->mmap2.start,
1980 			event->mmap2.len, event->mmap2.pgoff,
1981 			&dso_id, event->mmap2.prot,
1982 			event->mmap2.flags, bid,
1983 			event->mmap2.filename, thread);
1984 
1985 	if (map == NULL)
1986 		goto out_problem_map;
1987 
1988 	ret = thread__insert_map(thread, map);
1989 	if (ret)
1990 		goto out_problem_insert;
1991 
1992 	thread__put(thread);
1993 	map__put(map);
1994 	return 0;
1995 
1996 out_problem_insert:
1997 	map__put(map);
1998 out_problem_map:
1999 	thread__put(thread);
2000 out_problem:
2001 	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
2002 	return 0;
2003 }
2004 
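/*
 * Handle a PERF_RECORD_MMAP event. Like machine__process_mmap2_event()
 * but without dso_id or build-id information; the mapping is treated as
 * executable unless PERF_RECORD_MISC_MMAP_DATA is set.
 */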
2005 int machine__process_mmap_event(struct machine *machine, union perf_event *event,
2006 				struct perf_sample *sample)
2007 {
2008 	struct thread *thread;
2009 	struct map *map;
2010 	u32 prot = 0;
2011 	int ret = 0;
2012 
2013 	if (dump_trace)
2014 		perf_event__fprintf_mmap(event, stdout);
2015 
2016 	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
2017 	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
2018 		struct extra_kernel_map xm = {
2019 			.start = event->mmap.start,
2020 			.end   = event->mmap.start + event->mmap.len,
2021 			.pgoff = event->mmap.pgoff,
2022 		};
2023 
2024 		strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
2025 		ret = machine__process_kernel_mmap_event(machine, &xm, NULL);
2026 		if (ret < 0)
2027 			goto out_problem;
2028 		return 0;
2029 	}
2030 
2031 	thread = machine__findnew_thread(machine, event->mmap.pid,
2032 					 event->mmap.tid);
2033 	if (thread == NULL)
2034 		goto out_problem;
2035 
2036 	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
2037 		prot = PROT_EXEC;
2038 
2039 	map = map__new(machine, event->mmap.start,
2040 			event->mmap.len, event->mmap.pgoff,
2041 			NULL, prot, 0, NULL, event->mmap.filename, thread);
2042 
2043 	if (map == NULL)
2044 		goto out_problem_map;
2045 
2046 	ret = thread__insert_map(thread, map);
2047 	if (ret)
2048 		goto out_problem_insert;
2049 
2050 	thread__put(thread);
2051 	map__put(map);
2052 	return 0;
2053 
2054 out_problem_insert:
2055 	map__put(map);
2056 out_problem_map:
2057 	thread__put(thread);
2058 out_problem:
2059 	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
2060 	return 0;
2061 }
2062 
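/*
 * Unlink @th from the machine's thread rb-tree and drop the tree's
 * reference on it. @nd may be NULL, in which case the rb-node is looked
 * up by tid; the threads lock is taken only when @lock is true.
 */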
2063 static void __machine__remove_thread(struct machine *machine, struct thread_rb_node *nd,
2064 				     struct thread *th, bool lock)
2065 {
2066 	struct threads *threads = machine__threads(machine, thread__tid(th));
2067 
2068 	if (!nd)
2069 		nd = thread_rb_node__find(th, &threads->entries.rb_root);
2070 
2071 	if (threads->last_match && RC_CHK_EQUAL(threads->last_match, th))
2072 		threads__set_last_match(threads, NULL);
2073 
2074 	if (lock)
2075 		down_write(&threads->lock);
2076 
2077 	BUG_ON(refcount_read(thread__refcnt(th)) == 0);
2078 
2079 	thread__put(nd->thread);
2080 	rb_erase_cached(&nd->rb_node, &threads->entries);
2081 	RB_CLEAR_NODE(&nd->rb_node);
2082 	--threads->nr;
2083 
2084 	free(nd);
2085 
2086 	if (lock)
2087 		up_write(&threads->lock);
2088 }
2089 
2090 void machine__remove_thread(struct machine *machine, struct thread *th)
2091 {
2092 	return __machine__remove_thread(machine, NULL, th, true);
2093 }
2094 
2095 int machine__process_fork_event(struct machine *machine, union perf_event *event,
2096 				struct perf_sample *sample)
2097 {
2098 	struct thread *thread = machine__find_thread(machine,
2099 						     event->fork.pid,
2100 						     event->fork.tid);
2101 	struct thread *parent = machine__findnew_thread(machine,
2102 							event->fork.ppid,
2103 							event->fork.ptid);
2104 	bool do_maps_clone = true;
2105 	int err = 0;
2106 
2107 	if (dump_trace)
2108 		perf_event__fprintf_task(event, stdout);
2109 
2110 	/*
2111 	 * There may be an existing thread that is not actually the parent,
2112 	 * either because we are processing events out of order, or because the
2113 	 * (fork) event that would have removed the thread was lost. Assume the
2114 	 * latter case and continue on as best we can.
2115 	 */
2116 	if (thread__pid(parent) != (pid_t)event->fork.ppid) {
2117 		dump_printf("removing erroneous parent thread %d/%d\n",
2118 			    thread__pid(parent), thread__tid(parent));
2119 		machine__remove_thread(machine, parent);
2120 		thread__put(parent);
2121 		parent = machine__findnew_thread(machine, event->fork.ppid,
2122 						 event->fork.ptid);
2123 	}
2124 
2125 	/* if a thread currently exists for the thread id remove it */
2126 	if (thread != NULL) {
2127 		machine__remove_thread(machine, thread);
2128 		thread__put(thread);
2129 	}
2130 
2131 	thread = machine__findnew_thread(machine, event->fork.pid,
2132 					 event->fork.tid);
2133 	/*
2134 	 * When synthesizing FORK events, we are trying to create thread
2135 	 * objects for the already running tasks on the machine.
2136 	 *
2137 	 * Normally, for a kernel FORK event, we want to clone the parent's
2138 	 * maps because that is what the kernel just did.
2139 	 *
2140 	 * But when synthesizing, this should not be done.  If we do, we end up
2141 	 * with overlapping maps as we process the synthesized MMAP2 events that
2142 	 * get delivered shortly thereafter.
2143 	 *
2144 	 * Use the FORK event misc flags in an internal way to signal this
2145 	 * situation, so we can elide the map clone when appropriate.
2146 	 */
2147 	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
2148 		do_maps_clone = false;
2149 
2150 	if (thread == NULL || parent == NULL ||
2151 	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
2152 		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
2153 		err = -1;
2154 	}
2155 	thread__put(thread);
2156 	thread__put(parent);
2157 
2158 	return err;
2159 }
2160 
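/*
 * Handle a PERF_RECORD_EXIT event: mark the thread as exited when
 * symbol_conf.keep_exited_threads is set, otherwise remove it from the
 * machine.
 */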
2161 int machine__process_exit_event(struct machine *machine, union perf_event *event,
2162 				struct perf_sample *sample __maybe_unused)
2163 {
2164 	struct thread *thread = machine__find_thread(machine,
2165 						     event->fork.pid,
2166 						     event->fork.tid);
2167 
2168 	if (dump_trace)
2169 		perf_event__fprintf_task(event, stdout);
2170 
2171 	if (thread != NULL) {
2172 		if (symbol_conf.keep_exited_threads)
2173 			thread__set_exited(thread, /*exited=*/true);
2174 		else
2175 			machine__remove_thread(machine, thread);
2176 	}
2177 	thread__put(thread);
2178 	return 0;
2179 }
2180 
2181 int machine__process_event(struct machine *machine, union perf_event *event,
2182 			   struct perf_sample *sample)
2183 {
2184 	int ret;
2185 
2186 	switch (event->header.type) {
2187 	case PERF_RECORD_COMM:
2188 		ret = machine__process_comm_event(machine, event, sample); break;
2189 	case PERF_RECORD_MMAP:
2190 		ret = machine__process_mmap_event(machine, event, sample); break;
2191 	case PERF_RECORD_NAMESPACES:
2192 		ret = machine__process_namespaces_event(machine, event, sample); break;
2193 	case PERF_RECORD_CGROUP:
2194 		ret = machine__process_cgroup_event(machine, event, sample); break;
2195 	case PERF_RECORD_MMAP2:
2196 		ret = machine__process_mmap2_event(machine, event, sample); break;
2197 	case PERF_RECORD_FORK:
2198 		ret = machine__process_fork_event(machine, event, sample); break;
2199 	case PERF_RECORD_EXIT:
2200 		ret = machine__process_exit_event(machine, event, sample); break;
2201 	case PERF_RECORD_LOST:
2202 		ret = machine__process_lost_event(machine, event, sample); break;
2203 	case PERF_RECORD_AUX:
2204 		ret = machine__process_aux_event(machine, event); break;
2205 	case PERF_RECORD_ITRACE_START:
2206 		ret = machine__process_itrace_start_event(machine, event); break;
2207 	case PERF_RECORD_LOST_SAMPLES:
2208 		ret = machine__process_lost_samples_event(machine, event, sample); break;
2209 	case PERF_RECORD_SWITCH:
2210 	case PERF_RECORD_SWITCH_CPU_WIDE:
2211 		ret = machine__process_switch_event(machine, event); break;
2212 	case PERF_RECORD_KSYMBOL:
2213 		ret = machine__process_ksymbol(machine, event, sample); break;
2214 	case PERF_RECORD_BPF_EVENT:
2215 		ret = machine__process_bpf(machine, event, sample); break;
2216 	case PERF_RECORD_TEXT_POKE:
2217 		ret = machine__process_text_poke(machine, event, sample); break;
2218 	case PERF_RECORD_AUX_OUTPUT_HW_ID:
2219 		ret = machine__process_aux_output_hw_id_event(machine, event); break;
2220 	default:
2221 		ret = -1;
2222 		break;
2223 	}
2224 
2225 	return ret;
2226 }
2227 
2228 static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
2229 {
2230 	return regexec(regex, sym->name, 0, NULL, 0) == 0;
2231 }
2232 
2233 static void ip__resolve_ams(struct thread *thread,
2234 			    struct addr_map_symbol *ams,
2235 			    u64 ip)
2236 {
2237 	struct addr_location al;
2238 
2239 	addr_location__init(&al);
2240 	/*
2241 	 * We cannot use the header.misc hint to determine whether a
2242 	 * branch stack address is user, kernel, guest, hypervisor.
2243 	 * Branches may straddle the kernel/user/hypervisor boundaries.
2244 	 * Thus, we have to try each cpumode consecutively until we find a
2245 	 * match, or else the symbol is unknown.
2246 	 */
2247 	thread__find_cpumode_addr_location(thread, ip, &al);
2248 
2249 	ams->addr = ip;
2250 	ams->al_addr = al.addr;
2251 	ams->al_level = al.level;
2252 	ams->ms.maps = maps__get(al.maps);
2253 	ams->ms.sym = al.sym;
2254 	ams->ms.map = map__get(al.map);
2255 	ams->phys_addr = 0;
2256 	ams->data_page_size = 0;
2257 	addr_location__exit(&al);
2258 }
2259 
2260 static void ip__resolve_data(struct thread *thread,
2261 			     u8 m, struct addr_map_symbol *ams,
2262 			     u64 addr, u64 phys_addr, u64 daddr_page_size)
2263 {
2264 	struct addr_location al;
2265 
2266 	addr_location__init(&al);
2267 
2268 	thread__find_symbol(thread, m, addr, &al);
2269 
2270 	ams->addr = addr;
2271 	ams->al_addr = al.addr;
2272 	ams->al_level = al.level;
2273 	ams->ms.maps = maps__get(al.maps);
2274 	ams->ms.sym = al.sym;
2275 	ams->ms.map = map__get(al.map);
2276 	ams->phys_addr = phys_addr;
2277 	ams->data_page_size = daddr_page_size;
2278 	addr_location__exit(&al);
2279 }
2280 
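/*
 * Resolve the instruction and data addresses of a memory access sample
 * into a newly allocated mem_info, including the physical address and
 * data page size when the sample provides them.
 */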
2281 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2282 				     struct addr_location *al)
2283 {
2284 	struct mem_info *mi = mem_info__new();
2285 
2286 	if (!mi)
2287 		return NULL;
2288 
2289 	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
2290 	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
2291 			 sample->addr, sample->phys_addr,
2292 			 sample->data_page_size);
2293 	mi->data_src.val = sample->data_src;
2294 
2295 	return mi;
2296 }
2297 
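/*
 * Look up the source line for @ip, caching the result in the dso's
 * srclines tree. Returns NULL when there is no map or when callchains
 * are keyed by function only.
 */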
2298 static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2299 {
2300 	struct map *map = ms->map;
2301 	char *srcline = NULL;
2302 	struct dso *dso;
2303 
2304 	if (!map || callchain_param.key == CCKEY_FUNCTION)
2305 		return srcline;
2306 
2307 	dso = map__dso(map);
2308 	srcline = srcline__tree_find(&dso->srclines, ip);
2309 	if (!srcline) {
2310 		bool show_sym = false;
2311 		bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2312 
2313 		srcline = get_srcline(dso, map__rip_2objdump(map, ip),
2314 				      ms->sym, show_sym, show_addr, ip);
2315 		srcline__tree_insert(&dso->srclines, ip, srcline);
2316 	}
2317 
2318 	return srcline;
2319 }
2320 
2321 struct iterations {
2322 	int nr_loop_iter;
2323 	u64 cycles;
2324 };
2325 
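/*
 * Resolve one callchain entry and append it to @cursor. PERF_CONTEXT_*
 * marker values do not produce an entry; they switch *cpumode for the
 * entries that follow, and an unknown context value resets the cursor,
 * discarding the whole callchain.
 */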
2326 static int add_callchain_ip(struct thread *thread,
2327 			    struct callchain_cursor *cursor,
2328 			    struct symbol **parent,
2329 			    struct addr_location *root_al,
2330 			    u8 *cpumode,
2331 			    u64 ip,
2332 			    bool branch,
2333 			    struct branch_flags *flags,
2334 			    struct iterations *iter,
2335 			    u64 branch_from)
2336 {
2337 	struct map_symbol ms = {};
2338 	struct addr_location al;
2339 	int nr_loop_iter = 0, err = 0;
2340 	u64 iter_cycles = 0;
2341 	const char *srcline = NULL;
2342 
2343 	addr_location__init(&al);
2344 	al.filtered = 0;
2345 	al.sym = NULL;
2346 	al.srcline = NULL;
2347 	if (!cpumode) {
2348 		thread__find_cpumode_addr_location(thread, ip, &al);
2349 	} else {
2350 		if (ip >= PERF_CONTEXT_MAX) {
2351 			switch (ip) {
2352 			case PERF_CONTEXT_HV:
2353 				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
2354 				break;
2355 			case PERF_CONTEXT_KERNEL:
2356 				*cpumode = PERF_RECORD_MISC_KERNEL;
2357 				break;
2358 			case PERF_CONTEXT_USER:
2359 				*cpumode = PERF_RECORD_MISC_USER;
2360 				break;
2361 			default:
2362 				pr_debug("invalid callchain context: "
2363 					 "%"PRId64"\n", (s64) ip);
2364 				/*
2365 				 * It seems the callchain is corrupted.
2366 				 * Discard all.
2367 				 */
2368 				callchain_cursor_reset(cursor);
2369 				err = 1;
2370 				goto out;
2371 			}
2372 			goto out;
2373 		}
2374 		thread__find_symbol(thread, *cpumode, ip, &al);
2375 	}
2376 
2377 	if (al.sym != NULL) {
2378 		if (perf_hpp_list.parent && !*parent &&
2379 		    symbol__match_regex(al.sym, &parent_regex))
2380 			*parent = al.sym;
2381 		else if (have_ignore_callees && root_al &&
2382 		  symbol__match_regex(al.sym, &ignore_callees_regex)) {
2383 			/* Treat this symbol as the root,
2384 			   forgetting its callees. */
2385 			addr_location__copy(root_al, &al);
2386 			callchain_cursor_reset(cursor);
2387 		}
2388 	}
2389 
2390 	if (symbol_conf.hide_unresolved && al.sym == NULL)
2391 		goto out;
2392 
2393 	if (iter) {
2394 		nr_loop_iter = iter->nr_loop_iter;
2395 		iter_cycles = iter->cycles;
2396 	}
2397 
2398 	ms.maps = maps__get(al.maps);
2399 	ms.map = map__get(al.map);
2400 	ms.sym = al.sym;
2401 	srcline = callchain_srcline(&ms, al.addr);
2402 	err = callchain_cursor_append(cursor, ip, &ms,
2403 				      branch, flags, nr_loop_iter,
2404 				      iter_cycles, branch_from, srcline);
2405 out:
2406 	addr_location__exit(&al);
2407 	map_symbol__exit(&ms);
2408 	return err;
2409 }
2410 
2411 struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2412 					   struct addr_location *al)
2413 {
2414 	unsigned int i;
2415 	const struct branch_stack *bs = sample->branch_stack;
2416 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2417 	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2418 
2419 	if (!bi)
2420 		return NULL;
2421 
2422 	for (i = 0; i < bs->nr; i++) {
2423 		ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2424 		ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2425 		bi[i].flags = entries[i].flags;
2426 	}
2427 	return bi;
2428 }
2429 
2430 static void save_iterations(struct iterations *iter,
2431 			    struct branch_entry *be, int nr)
2432 {
2433 	int i;
2434 
2435 	iter->nr_loop_iter++;
2436 	iter->cycles = 0;
2437 
2438 	for (i = 0; i < nr; i++)
2439 		iter->cycles += be[i].flags.cycles;
2440 }
2441 
2442 #define CHASHSZ 127
2443 #define CHASHBITS 7
2444 #define NO_ENTRY 0xff
2445 
2446 #define PERF_MAX_BRANCH_DEPTH 127
2447 
2448 /*
 * Remove loops: collapse repeated runs of branch entries (successive loop
 * iterations), recording the iteration counts and cycles in @iter.
 * Returns the remaining number of entries.
 */
2449 static int remove_loops(struct branch_entry *l, int nr,
2450 			struct iterations *iter)
2451 {
2452 	int i, j, off;
2453 	unsigned char chash[CHASHSZ];
2454 
2455 	memset(chash, NO_ENTRY, sizeof(chash));
2456 
2457 	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2458 
2459 	for (i = 0; i < nr; i++) {
2460 		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2461 
2462 		/* no collision handling for now */
2463 		if (chash[h] == NO_ENTRY) {
2464 			chash[h] = i;
2465 		} else if (l[chash[h]].from == l[i].from) {
2466 			bool is_loop = true;
2467 			/* check if it is a real loop */
2468 			off = 0;
2469 			for (j = chash[h]; j < i && i + off < nr; j++, off++)
2470 				if (l[j].from != l[i + off].from) {
2471 					is_loop = false;
2472 					break;
2473 				}
2474 			if (is_loop) {
2475 				j = nr - (i + off);
2476 				if (j > 0) {
2477 					save_iterations(iter + i + off,
2478 						l + i, off);
2479 
2480 					memmove(iter + i, iter + i + off,
2481 						j * sizeof(*iter));
2482 
2483 					memmove(l + i, l + i + off,
2484 						j * sizeof(*l));
2485 				}
2486 
2487 				nr -= off;
2488 			}
2489 		}
2490 	}
2491 	return nr;
2492 }
2493 
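/*
 * Add the kernel part of the sample callchain (chain->ips[0..end]) to
 * the cursor, walking forwards for callee order and backwards for
 * caller order.
 */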
2494 static int lbr_callchain_add_kernel_ip(struct thread *thread,
2495 				       struct callchain_cursor *cursor,
2496 				       struct perf_sample *sample,
2497 				       struct symbol **parent,
2498 				       struct addr_location *root_al,
2499 				       u64 branch_from,
2500 				       bool callee, int end)
2501 {
2502 	struct ip_callchain *chain = sample->callchain;
2503 	u8 cpumode = PERF_RECORD_MISC_USER;
2504 	int err, i;
2505 
2506 	if (callee) {
2507 		for (i = 0; i < end + 1; i++) {
2508 			err = add_callchain_ip(thread, cursor, parent,
2509 					       root_al, &cpumode, chain->ips[i],
2510 					       false, NULL, NULL, branch_from);
2511 			if (err)
2512 				return err;
2513 		}
2514 		return 0;
2515 	}
2516 
2517 	for (i = end; i >= 0; i--) {
2518 		err = add_callchain_ip(thread, cursor, parent,
2519 				       root_al, &cpumode, chain->ips[i],
2520 				       false, NULL, NULL, branch_from);
2521 		if (err)
2522 			return err;
2523 	}
2524 
2525 	return 0;
2526 }
2527 
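/*
 * Remember the cursor node that was appended for LBR entry @idx in
 * prev_lbr_cursor[], so that a later sample can stitch it back in. The
 * slot is marked invalid when no node was appended for this entry.
 */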
2528 static void save_lbr_cursor_node(struct thread *thread,
2529 				 struct callchain_cursor *cursor,
2530 				 int idx)
2531 {
2532 	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2533 
2534 	if (!lbr_stitch)
2535 		return;
2536 
2537 	if (cursor->pos == cursor->nr) {
2538 		lbr_stitch->prev_lbr_cursor[idx].valid = false;
2539 		return;
2540 	}
2541 
2542 	if (!cursor->curr)
2543 		cursor->curr = cursor->first;
2544 	else
2545 		cursor->curr = cursor->curr->next;
2546 	memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2547 	       sizeof(struct callchain_cursor_node));
2548 
2549 	lbr_stitch->prev_lbr_cursor[idx].valid = true;
2550 	cursor->pos++;
2551 }
2552 
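/*
 * Add the LBR entries of @sample to the cursor: the 'to' address of the
 * most recent entry plus the 'from' address of every entry, in callee
 * or caller order. When LBR stitching is enabled, the nodes appended
 * for the 'from' addresses are saved via save_lbr_cursor_node() so the
 * next sample can reuse them.
 */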
2553 static int lbr_callchain_add_lbr_ip(struct thread *thread,
2554 				    struct callchain_cursor *cursor,
2555 				    struct perf_sample *sample,
2556 				    struct symbol **parent,
2557 				    struct addr_location *root_al,
2558 				    u64 *branch_from,
2559 				    bool callee)
2560 {
2561 	struct branch_stack *lbr_stack = sample->branch_stack;
2562 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2563 	u8 cpumode = PERF_RECORD_MISC_USER;
2564 	int lbr_nr = lbr_stack->nr;
2565 	struct branch_flags *flags;
2566 	int err, i;
2567 	u64 ip;
2568 
2569 	/*
2570 	 * curr and pos are not used in the writing session. They are cleared
2571 	 * in callchain_cursor_commit() when the writing session is closed.
2572 	 * Use curr and pos here to track the current cursor node.
2573 	 */
2574 	if (thread__lbr_stitch(thread)) {
2575 		cursor->curr = NULL;
2576 		cursor->pos = cursor->nr;
2577 		if (cursor->nr) {
2578 			cursor->curr = cursor->first;
2579 			for (i = 0; i < (int)(cursor->nr - 1); i++)
2580 				cursor->curr = cursor->curr->next;
2581 		}
2582 	}
2583 
2584 	if (callee) {
2585 		/* Add LBR ip from first entries.to */
2586 		ip = entries[0].to;
2587 		flags = &entries[0].flags;
2588 		*branch_from = entries[0].from;
2589 		err = add_callchain_ip(thread, cursor, parent,
2590 				       root_al, &cpumode, ip,
2591 				       true, flags, NULL,
2592 				       *branch_from);
2593 		if (err)
2594 			return err;
2595 
2596 		/*
2597 		 * The number of cursor nodes has increased, so move the
2598 		 * current cursor node along. There is no need to save the
2599 		 * cursor node for entry 0, since it's impossible to stitch
2600 		 * the whole LBR stack of the previous sample.
2601 		 */
2602 		if (thread__lbr_stitch(thread) && (cursor->pos != cursor->nr)) {
2603 			if (!cursor->curr)
2604 				cursor->curr = cursor->first;
2605 			else
2606 				cursor->curr = cursor->curr->next;
2607 			cursor->pos++;
2608 		}
2609 
2610 		/* Add LBR ip from entries.from one by one. */
2611 		for (i = 0; i < lbr_nr; i++) {
2612 			ip = entries[i].from;
2613 			flags = &entries[i].flags;
2614 			err = add_callchain_ip(thread, cursor, parent,
2615 					       root_al, &cpumode, ip,
2616 					       true, flags, NULL,
2617 					       *branch_from);
2618 			if (err)
2619 				return err;
2620 			save_lbr_cursor_node(thread, cursor, i);
2621 		}
2622 		return 0;
2623 	}
2624 
2625 	/* Add LBR ip from entries.from one by one. */
2626 	for (i = lbr_nr - 1; i >= 0; i--) {
2627 		ip = entries[i].from;
2628 		flags = &entries[i].flags;
2629 		err = add_callchain_ip(thread, cursor, parent,
2630 				       root_al, &cpumode, ip,
2631 				       true, flags, NULL,
2632 				       *branch_from);
2633 		if (err)
2634 			return err;
2635 		save_lbr_cursor_node(thread, cursor, i);
2636 	}
2637 
2638 	if (lbr_nr > 0) {
2639 		/* Add LBR ip from first entries.to */
2640 		ip = entries[0].to;
2641 		flags = &entries[0].flags;
2642 		*branch_from = entries[0].from;
2643 		err = add_callchain_ip(thread, cursor, parent,
2644 				root_al, &cpumode, ip,
2645 				true, flags, NULL,
2646 				*branch_from);
2647 		if (err)
2648 			return err;
2649 	}
2650 
2651 	return 0;
2652 }
2653 
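/*
 * Append the cursor nodes saved from the previous sample (collected by
 * has_stitched_lbr()) to the current callchain cursor.
 */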
2654 static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2655 					     struct callchain_cursor *cursor)
2656 {
2657 	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2658 	struct callchain_cursor_node *cnode;
2659 	struct stitch_list *stitch_node;
2660 	int err;
2661 
2662 	list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2663 		cnode = &stitch_node->cursor;
2664 
2665 		err = callchain_cursor_append(cursor, cnode->ip,
2666 					      &cnode->ms,
2667 					      cnode->branch,
2668 					      &cnode->branch_flags,
2669 					      cnode->nr_loop_iter,
2670 					      cnode->iter_cycles,
2671 					      cnode->branch_from,
2672 					      cnode->srcline);
2673 		if (err)
2674 			return err;
2675 	}
2676 	return 0;
2677 }
2678 
2679 static struct stitch_list *get_stitch_node(struct thread *thread)
2680 {
2681 	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2682 	struct stitch_list *stitch_node;
2683 
2684 	if (!list_empty(&lbr_stitch->free_lists)) {
2685 		stitch_node = list_first_entry(&lbr_stitch->free_lists,
2686 					       struct stitch_list, node);
2687 		list_del(&stitch_node->node);
2688 
2689 		return stitch_node;
2690 	}
2691 
2692 	return malloc(sizeof(struct stitch_list));
2693 }
2694 
2695 static bool has_stitched_lbr(struct thread *thread,
2696 			     struct perf_sample *cur,
2697 			     struct perf_sample *prev,
2698 			     unsigned int max_lbr,
2699 			     bool callee)
2700 {
2701 	struct branch_stack *cur_stack = cur->branch_stack;
2702 	struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2703 	struct branch_stack *prev_stack = prev->branch_stack;
2704 	struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2705 	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
2706 	int i, j, nr_identical_branches = 0;
2707 	struct stitch_list *stitch_node;
2708 	u64 cur_base, distance;
2709 
2710 	if (!cur_stack || !prev_stack)
2711 		return false;
2712 
2713 	/* Find the physical index of the base-of-stack for current sample. */
2714 	cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2715 
2716 	distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2717 						     (max_lbr + prev_stack->hw_idx - cur_base);
2718 	/* Previous sample has shorter stack. Nothing can be stitched. */
2719 	if (distance + 1 > prev_stack->nr)
2720 		return false;
2721 
2722 	/*
2723 	 * Check if there are identical LBRs between two samples.
2724 	 * Identical LBRs must have same from, to and flags values. Also,
2725 	 * they have to be saved in the same LBR registers (same physical
2726 	 * index).
2727 	 *
2728 	 * Starts from the base-of-stack of current sample.
2729 	 * Start from the base-of-stack of the current sample.
2730 	for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2731 		if ((prev_entries[i].from != cur_entries[j].from) ||
2732 		    (prev_entries[i].to != cur_entries[j].to) ||
2733 		    (prev_entries[i].flags.value != cur_entries[j].flags.value))
2734 			break;
2735 		nr_identical_branches++;
2736 	}
2737 
2738 	if (!nr_identical_branches)
2739 		return false;
2740 
2741 	/*
2742 	 * Save the LBRs between the base-of-stack of previous sample
2743 	 * and the base-of-stack of current sample into lbr_stitch->lists.
2744 	 * These LBRs will be stitched later.
2745 	 */
2746 	for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2747 
2748 		if (!lbr_stitch->prev_lbr_cursor[i].valid)
2749 			continue;
2750 
2751 		stitch_node = get_stitch_node(thread);
2752 		if (!stitch_node)
2753 			return false;
2754 
2755 		memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2756 		       sizeof(struct callchain_cursor_node));
2757 
2758 		if (callee)
2759 			list_add(&stitch_node->node, &lbr_stitch->lists);
2760 		else
2761 			list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2762 	}
2763 
2764 	return true;
2765 }
2766 
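/*
 * Lazily allocate the per-thread LBR stitching state, including a
 * prev_lbr_cursor array of @max_lbr + 1 entries. On allocation failure,
 * LBR stitching is disabled for this thread.
 */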
2767 static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2768 {
2769 	if (thread__lbr_stitch(thread))
2770 		return true;
2771 
2772 	thread__set_lbr_stitch(thread, zalloc(sizeof(struct lbr_stitch)));
2773 	if (!thread__lbr_stitch(thread))
2774 		goto err;
2775 
2776 	thread__lbr_stitch(thread)->prev_lbr_cursor =
2777 		calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2778 	if (!thread__lbr_stitch(thread)->prev_lbr_cursor)
2779 		goto free_lbr_stitch;
2780 
2781 	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->lists);
2782 	INIT_LIST_HEAD(&thread__lbr_stitch(thread)->free_lists);
2783 
2784 	return true;
2785 
2786 free_lbr_stitch:
2787 	free(thread__lbr_stitch(thread));
2788 	thread__set_lbr_stitch(thread, NULL);
2789 err:
2790 	pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
2791 	thread__set_lbr_stitch_enable(thread, false);
2792 	return false;
2793 }
2794 
2795 /*
2796  * Resolve an LBR callstack chain sample.
2797  * Return:
2798  * 1 on success, when LBR callchain information was obtained
2799  * 0 when no LBR callchain information is available; the caller should try fp
2800  * negative error code on other errors.
2801  */
2802 static int resolve_lbr_callchain_sample(struct thread *thread,
2803 					struct callchain_cursor *cursor,
2804 					struct perf_sample *sample,
2805 					struct symbol **parent,
2806 					struct addr_location *root_al,
2807 					int max_stack,
2808 					unsigned int max_lbr)
2809 {
2810 	bool callee = (callchain_param.order == ORDER_CALLEE);
2811 	struct ip_callchain *chain = sample->callchain;
2812 	int chain_nr = min(max_stack, (int)chain->nr), i;
2813 	struct lbr_stitch *lbr_stitch;
2814 	bool stitched_lbr = false;
2815 	u64 branch_from = 0;
2816 	int err;
2817 
2818 	for (i = 0; i < chain_nr; i++) {
2819 		if (chain->ips[i] == PERF_CONTEXT_USER)
2820 			break;
2821 	}
2822 
2823 	/* LBR only affects the user callchain */
2824 	if (i == chain_nr)
2825 		return 0;
2826 
2827 	if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
2828 	    (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2829 		lbr_stitch = thread__lbr_stitch(thread);
2830 
2831 		stitched_lbr = has_stitched_lbr(thread, sample,
2832 						&lbr_stitch->prev_sample,
2833 						max_lbr, callee);
2834 
2835 		if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2836 			list_replace_init(&lbr_stitch->lists,
2837 					  &lbr_stitch->free_lists);
2838 		}
2839 		memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2840 	}
2841 
2842 	if (callee) {
2843 		/* Add kernel ip */
2844 		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2845 						  parent, root_al, branch_from,
2846 						  true, i);
2847 		if (err)
2848 			goto error;
2849 
2850 		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2851 					       root_al, &branch_from, true);
2852 		if (err)
2853 			goto error;
2854 
2855 		if (stitched_lbr) {
2856 			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2857 			if (err)
2858 				goto error;
2859 		}
2860 
2861 	} else {
2862 		if (stitched_lbr) {
2863 			err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2864 			if (err)
2865 				goto error;
2866 		}
2867 		err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2868 					       root_al, &branch_from, false);
2869 		if (err)
2870 			goto error;
2871 
2872 		/* Add kernel ip */
2873 		err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2874 						  parent, root_al, branch_from,
2875 						  false, i);
2876 		if (err)
2877 			goto error;
2878 	}
2879 	return 1;
2880 
2881 error:
2882 	return (err < 0) ? err : 0;
2883 }
2884 
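/*
 * Scan backwards from @ent for the closest PERF_CONTEXT_* marker and
 * feed it to add_callchain_ip(), so that *cpumode reflects the context
 * of the entries that are about to be added in caller order.
 */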
2885 static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2886 			     struct callchain_cursor *cursor,
2887 			     struct symbol **parent,
2888 			     struct addr_location *root_al,
2889 			     u8 *cpumode, int ent)
2890 {
2891 	int err = 0;
2892 
2893 	while (--ent >= 0) {
2894 		u64 ip = chain->ips[ent];
2895 
2896 		if (ip >= PERF_CONTEXT_MAX) {
2897 			err = add_callchain_ip(thread, cursor, parent,
2898 					       root_al, cpumode, ip,
2899 					       false, NULL, NULL, 0);
2900 			break;
2901 		}
2902 	}
2903 	return err;
2904 }
2905 
2906 static u64 get_leaf_frame_caller(struct perf_sample *sample,
2907 		struct thread *thread, int usr_idx)
2908 {
2909 	if (machine__normalized_is(maps__machine(thread__maps(thread)), "arm64"))
2910 		return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
2911 	else
2912 		return 0;
2913 }
2914 
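/*
 * Resolve the callchain of @sample into @cursor: use the LBR call stack
 * when the evsel has one, optionally mix in branch stack entries for
 * extra context, then walk the regular ip callchain while honouring
 * PERF_CONTEXT_* boundaries and the architecture's skip index.
 */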
2915 static int thread__resolve_callchain_sample(struct thread *thread,
2916 					    struct callchain_cursor *cursor,
2917 					    struct evsel *evsel,
2918 					    struct perf_sample *sample,
2919 					    struct symbol **parent,
2920 					    struct addr_location *root_al,
2921 					    int max_stack)
2922 {
2923 	struct branch_stack *branch = sample->branch_stack;
2924 	struct branch_entry *entries = perf_sample__branch_entries(sample);
2925 	struct ip_callchain *chain = sample->callchain;
2926 	int chain_nr = 0;
2927 	u8 cpumode = PERF_RECORD_MISC_USER;
2928 	int i, j, err, nr_entries, usr_idx;
2929 	int skip_idx = -1;
2930 	int first_call = 0;
2931 	u64 leaf_frame_caller;
2932 
2933 	if (chain)
2934 		chain_nr = chain->nr;
2935 
2936 	if (evsel__has_branch_callstack(evsel)) {
2937 		struct perf_env *env = evsel__env(evsel);
2938 
2939 		err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2940 						   root_al, max_stack,
2941 						   !env ? 0 : env->max_branches);
2942 		if (err)
2943 			return (err < 0) ? err : 0;
2944 	}
2945 
2946 	/*
2947 	 * Based on DWARF debug information, some architectures skip
2948 	 * a callchain entry saved by the kernel.
2949 	 */
2950 	skip_idx = arch_skip_callchain_idx(thread, chain);
2951 
2952 	/*
2953 	 * Add branches to call stack for easier browsing. This gives
2954 	 * more context for a sample than just the callers.
2955 	 *
2956 	 * This uses individual histograms of paths compared to the
2957 	 * aggregated histograms the normal LBR mode uses.
2958 	 *
2959 	 * Limitations for now:
2960 	 * - No extra filters
2961 	 * - No annotations (should annotate somehow)
2962 	 */
2963 
2964 	if (branch && callchain_param.branch_callstack) {
2965 		int nr = min(max_stack, (int)branch->nr);
2966 		struct branch_entry be[nr];
2967 		struct iterations iter[nr];
2968 
2969 		if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2970 			pr_warning("corrupted branch chain. skipping...\n");
2971 			goto check_calls;
2972 		}
2973 
2974 		for (i = 0; i < nr; i++) {
2975 			if (callchain_param.order == ORDER_CALLEE) {
2976 				be[i] = entries[i];
2977 
2978 				if (chain == NULL)
2979 					continue;
2980 
2981 				/*
2982 				 * Check for overlap into the callchain.
2983 				 * The return address is one off compared to
2984 				 * the branch entry. To adjust for this
2985 				 * assume the calling instruction is not longer
2986 				 * than 8 bytes.
2987 				 */
2988 				if (i == skip_idx ||
2989 				    chain->ips[first_call] >= PERF_CONTEXT_MAX)
2990 					first_call++;
2991 				else if (be[i].from < chain->ips[first_call] &&
2992 				    be[i].from >= chain->ips[first_call] - 8)
2993 					first_call++;
2994 			} else
2995 				be[i] = entries[branch->nr - i - 1];
2996 		}
2997 
2998 		memset(iter, 0, sizeof(struct iterations) * nr);
2999 		nr = remove_loops(be, nr, iter);
3000 
3001 		for (i = 0; i < nr; i++) {
3002 			err = add_callchain_ip(thread, cursor, parent,
3003 					       root_al,
3004 					       NULL, be[i].to,
3005 					       true, &be[i].flags,
3006 					       NULL, be[i].from);
3007 
3008 			if (!err)
3009 				err = add_callchain_ip(thread, cursor, parent, root_al,
3010 						       NULL, be[i].from,
3011 						       true, &be[i].flags,
3012 						       &iter[i], 0);
3013 			if (err == -EINVAL)
3014 				break;
3015 			if (err)
3016 				return err;
3017 		}
3018 
3019 		if (chain_nr == 0)
3020 			return 0;
3021 
3022 		chain_nr -= nr;
3023 	}
3024 
3025 check_calls:
3026 	if (chain && callchain_param.order != ORDER_CALLEE) {
3027 		err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
3028 					&cpumode, chain->nr - first_call);
3029 		if (err)
3030 			return (err < 0) ? err : 0;
3031 	}
3032 	for (i = first_call, nr_entries = 0;
3033 	     i < chain_nr && nr_entries < max_stack; i++) {
3034 		u64 ip;
3035 
3036 		if (callchain_param.order == ORDER_CALLEE)
3037 			j = i;
3038 		else
3039 			j = chain->nr - i - 1;
3040 
3041 #ifdef HAVE_SKIP_CALLCHAIN_IDX
3042 		if (j == skip_idx)
3043 			continue;
3044 #endif
3045 		ip = chain->ips[j];
3046 		if (ip < PERF_CONTEXT_MAX)
3047 			++nr_entries;
3048 		else if (callchain_param.order != ORDER_CALLEE) {
3049 			err = find_prev_cpumode(chain, thread, cursor, parent,
3050 						root_al, &cpumode, j);
3051 			if (err)
3052 				return (err < 0) ? err : 0;
3053 			continue;
3054 		}
3055 
3056 		/*
3057 		 * PERF_CONTEXT_USER allows us to locate where the user stack ends.
3058 		 * Depending on callchain_param.order and the position of PERF_CONTEXT_USER,
3059 		 * the index will be different in order to add the missing frame
3060 		 * at the right place.
3061 		 */
3062 
3063 		usr_idx = callchain_param.order == ORDER_CALLEE ? j-2 : j-1;
3064 
3065 		if (usr_idx >= 0 && chain->ips[usr_idx] == PERF_CONTEXT_USER) {
3066 
3067 			leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);
3068 
3069 			/*
3070 			 * Check that leaf_frame_caller != ip so we don't add the
3071 			 * same value twice.
3072 			 */
3073 
3074 			if (leaf_frame_caller && leaf_frame_caller != ip) {
3075 
3076 				err = add_callchain_ip(thread, cursor, parent,
3077 					       root_al, &cpumode, leaf_frame_caller,
3078 					       false, NULL, NULL, 0);
3079 				if (err)
3080 					return (err < 0) ? err : 0;
3081 			}
3082 		}
3083 
3084 		err = add_callchain_ip(thread, cursor, parent,
3085 				       root_al, &cpumode, ip,
3086 				       false, NULL, NULL, 0);
3087 
3088 		if (err)
3089 			return (err < 0) ? err : 0;
3090 	}
3091 
3092 	return 0;
3093 }
3094 
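/*
 * Expand inlined frames for @ip: parse (and cache in the dso's
 * inlined_nodes tree) the inline chain at that address and append one
 * cursor entry per inlined function. Returns 0 when entries were
 * appended, non-zero otherwise.
 */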
3095 static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
3096 {
3097 	struct symbol *sym = ms->sym;
3098 	struct map *map = ms->map;
3099 	struct inline_node *inline_node;
3100 	struct inline_list *ilist;
3101 	struct dso *dso;
3102 	u64 addr;
3103 	int ret = 1;
3104 	struct map_symbol ilist_ms;
3105 
3106 	if (!symbol_conf.inline_name || !map || !sym)
3107 		return ret;
3108 
3109 	addr = map__dso_map_ip(map, ip);
3110 	addr = map__rip_2objdump(map, addr);
3111 	dso = map__dso(map);
3112 
3113 	inline_node = inlines__tree_find(&dso->inlined_nodes, addr);
3114 	if (!inline_node) {
3115 		inline_node = dso__parse_addr_inlines(dso, addr, sym);
3116 		if (!inline_node)
3117 			return ret;
3118 		inlines__tree_insert(&dso->inlined_nodes, inline_node);
3119 	}
3120 
3121 	ilist_ms = (struct map_symbol) {
3122 		.maps = maps__get(ms->maps),
3123 		.map = map__get(map),
3124 	};
3125 	list_for_each_entry(ilist, &inline_node->val, list) {
3126 		ilist_ms.sym = ilist->symbol;
3127 		ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
3128 					      NULL, 0, 0, 0, ilist->srcline);
3129 
3130 		if (ret != 0)
3131 			return ret;
3132 	}
3133 	map_symbol__exit(&ilist_ms);
3134 
3135 	return ret;
3136 }
3137 
3138 static int unwind_entry(struct unwind_entry *entry, void *arg)
3139 {
3140 	struct callchain_cursor *cursor = arg;
3141 	const char *srcline = NULL;
3142 	u64 addr = entry->ip;
3143 
3144 	if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
3145 		return 0;
3146 
3147 	if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
3148 		return 0;
3149 
3150 	/*
3151 	 * Convert entry->ip from a virtual address to an offset in
3152 	 * its corresponding binary.
3153 	 */
3154 	if (entry->ms.map)
3155 		addr = map__dso_map_ip(entry->ms.map, entry->ip);
3156 
3157 	srcline = callchain_srcline(&entry->ms, addr);
3158 	return callchain_cursor_append(cursor, entry->ip, &entry->ms,
3159 				       false, NULL, 0, 0, 0, srcline);
3160 }
3161 
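/*
 * DWARF post-unwind: feed the sampled user registers and stack to the
 * unwinder, appending each resolved frame via unwind_entry(). Returns 0
 * without adding entries when registers or stack were not captured.
 */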
3162 static int thread__resolve_callchain_unwind(struct thread *thread,
3163 					    struct callchain_cursor *cursor,
3164 					    struct evsel *evsel,
3165 					    struct perf_sample *sample,
3166 					    int max_stack)
3167 {
3168 	/* Can we do DWARF post unwind? */
3169 	if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
3170 	      (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
3171 		return 0;
3172 
3173 	/* Bail out if nothing was captured. */
3174 	if ((!sample->user_regs.regs) ||
3175 	    (!sample->user_stack.size))
3176 		return 0;
3177 
3178 	return unwind__get_entries(unwind_entry, cursor,
3179 				   thread, sample, max_stack, false);
3180 }
3181 
3182 int thread__resolve_callchain(struct thread *thread,
3183 			      struct callchain_cursor *cursor,
3184 			      struct evsel *evsel,
3185 			      struct perf_sample *sample,
3186 			      struct symbol **parent,
3187 			      struct addr_location *root_al,
3188 			      int max_stack)
3189 {
3190 	int ret = 0;
3191 
3192 	if (cursor == NULL)
3193 		return -ENOMEM;
3194 
3195 	callchain_cursor_reset(cursor);
3196 
3197 	if (callchain_param.order == ORDER_CALLEE) {
3198 		ret = thread__resolve_callchain_sample(thread, cursor,
3199 						       evsel, sample,
3200 						       parent, root_al,
3201 						       max_stack);
3202 		if (ret)
3203 			return ret;
3204 		ret = thread__resolve_callchain_unwind(thread, cursor,
3205 						       evsel, sample,
3206 						       max_stack);
3207 	} else {
3208 		ret = thread__resolve_callchain_unwind(thread, cursor,
3209 						       evsel, sample,
3210 						       max_stack);
3211 		if (ret)
3212 			return ret;
3213 		ret = thread__resolve_callchain_sample(thread, cursor,
3214 						       evsel, sample,
3215 						       parent, root_al,
3216 						       max_stack);
3217 	}
3218 
3219 	return ret;
3220 }
3221 
3222 int machine__for_each_thread(struct machine *machine,
3223 			     int (*fn)(struct thread *thread, void *p),
3224 			     void *priv)
3225 {
3226 	struct threads *threads;
3227 	struct rb_node *nd;
3228 	int rc = 0;
3229 	int i;
3230 
3231 	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
3232 		threads = &machine->threads[i];
3233 		for (nd = rb_first_cached(&threads->entries); nd;
3234 		     nd = rb_next(nd)) {
3235 			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
3236 
3237 			rc = fn(trb->thread, priv);
3238 			if (rc != 0)
3239 				return rc;
3240 		}
3241 	}
3242 	return rc;
3243 }
3244 
3245 int machines__for_each_thread(struct machines *machines,
3246 			      int (*fn)(struct thread *thread, void *p),
3247 			      void *priv)
3248 {
3249 	struct rb_node *nd;
3250 	int rc = 0;
3251 
3252 	rc = machine__for_each_thread(&machines->host, fn, priv);
3253 	if (rc != 0)
3254 		return rc;
3255 
3256 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
3257 		struct machine *machine = rb_entry(nd, struct machine, rb_node);
3258 
3259 		rc = machine__for_each_thread(machine, fn, priv);
3260 		if (rc != 0)
3261 			return rc;
3262 	}
3263 	return rc;
3264 }
3265 
3266 pid_t machine__get_current_tid(struct machine *machine, int cpu)
3267 {
3268 	if (cpu < 0 || (size_t)cpu >= machine->current_tid_sz)
3269 		return -1;
3270 
3271 	return machine->current_tid[cpu];
3272 }
3273 
3274 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
3275 			     pid_t tid)
3276 {
3277 	struct thread *thread;
3278 	const pid_t init_val = -1;
3279 
3280 	if (cpu < 0)
3281 		return -EINVAL;
3282 
3283 	if (realloc_array_as_needed(machine->current_tid,
3284 				    machine->current_tid_sz,
3285 				    (unsigned int)cpu,
3286 				    &init_val))
3287 		return -ENOMEM;
3288 
3289 	machine->current_tid[cpu] = tid;
3290 
3291 	thread = machine__findnew_thread(machine, pid, tid);
3292 	if (!thread)
3293 		return -ENOMEM;
3294 
3295 	thread__set_cpu(thread, cpu);
3296 	thread__put(thread);
3297 
3298 	return 0;
3299 }
3300 
3301 /*
3302  * Compares the raw arch string. N.B. see instead perf_env__arch() or
3303  * machine__normalized_is() if a normalized arch is needed.
3304  */
3305 bool machine__is(struct machine *machine, const char *arch)
3306 {
3307 	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
3308 }
3309 
3310 bool machine__normalized_is(struct machine *machine, const char *arch)
3311 {
3312 	return machine && !strcmp(perf_env__arch(machine->env), arch);
3313 }
3314 
3315 int machine__nr_cpus_avail(struct machine *machine)
3316 {
3317 	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
3318 }
3319 
3320 int machine__get_kernel_start(struct machine *machine)
3321 {
3322 	struct map *map = machine__kernel_map(machine);
3323 	int err = 0;
3324 
3325 	/*
3326 	 * The only addresses above 2^63 are kernel addresses of a 64-bit
3327 	 * kernel.  Note that addresses are unsigned so that on a 32-bit system
3328 	 * all addresses including kernel addresses are less than 2^32.  In
3329 	 * that case (32-bit system), if the kernel mapping is unknown, all
3330 	 * addresses will be assumed to be in user space - see
3331 	 * machine__kernel_ip().
3332 	 */
3333 	machine->kernel_start = 1ULL << 63;
3334 	if (map) {
3335 		err = map__load(map);
3336 		/*
3337 		 * On x86_64, PTI entry trampolines are less than the
3338 		 * start of kernel text, but still above 2^63. So leave
3339 		 * kernel_start = 1ULL << 63 for x86_64.
3340 		 */
3341 		if (!err && !machine__is(machine, "x86_64"))
3342 			machine->kernel_start = map__start(map);
3343 	}
3344 	return err;
3345 }
3346 
3347 u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3348 {
3349 	u8 addr_cpumode = cpumode;
3350 	bool kernel_ip;
3351 
3352 	if (!machine->single_address_space)
3353 		goto out;
3354 
3355 	kernel_ip = machine__kernel_ip(machine, addr);
3356 	switch (cpumode) {
3357 	case PERF_RECORD_MISC_KERNEL:
3358 	case PERF_RECORD_MISC_USER:
3359 		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3360 					   PERF_RECORD_MISC_USER;
3361 		break;
3362 	case PERF_RECORD_MISC_GUEST_KERNEL:
3363 	case PERF_RECORD_MISC_GUEST_USER:
3364 		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3365 					   PERF_RECORD_MISC_GUEST_USER;
3366 		break;
3367 	default:
3368 		break;
3369 	}
3370 out:
3371 	return addr_cpumode;
3372 }
3373 
3374 struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
3375 {
3376 	return dsos__findnew_id(&machine->dsos, filename, id);
3377 }
3378 
3379 struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3380 {
3381 	return machine__findnew_dso_id(machine, filename, NULL);
3382 }
3383 
3384 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3385 {
3386 	struct machine *machine = vmachine;
3387 	struct map *map;
3388 	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
3389 
3390 	if (sym == NULL)
3391 		return NULL;
3392 
3393 	*modp = __map__is_kmodule(map) ? (char *)map__dso(map)->short_name : NULL;
3394 	*addrp = map__unmap_ip(map, sym->start);
3395 	return sym->name;
3396 }
3397 
3398 int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv)
3399 {
3400 	struct dso *pos;
3401 	int err = 0;
3402 
3403 	list_for_each_entry(pos, &machine->dsos.head, node) {
3404 		if (fn(pos, machine, priv))
3405 			err = -1;
3406 	}
3407 	return err;
3408 }
3409 
3410 int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv)
3411 {
3412 	struct maps *maps = machine__kernel_maps(machine);
3413 
3414 	return maps__for_each_map(maps, fn, priv);
3415 }
3416 
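/*
 * Report whether @addr falls within the kernel's scheduler or locking
 * text sections. The section boundaries are resolved from kernel
 * symbols on first use and cached in the machine; a failed lookup is
 * remembered so it isn't retried.
 */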
3417 bool machine__is_lock_function(struct machine *machine, u64 addr)
3418 {
3419 	if (!machine->sched.text_start) {
3420 		struct map *kmap;
3421 		struct symbol *sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_start", &kmap);
3422 
3423 		if (!sym) {
3424 			/* to avoid retry */
3425 			machine->sched.text_start = 1;
3426 			return false;
3427 		}
3428 
3429 		machine->sched.text_start = map__unmap_ip(kmap, sym->start);
3430 
3431 		/* should not fail from here */
3432 		sym = machine__find_kernel_symbol_by_name(machine, "__sched_text_end", &kmap);
3433 		machine->sched.text_end = map__unmap_ip(kmap, sym->start);
3434 
3435 		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_start", &kmap);
3436 		machine->lock.text_start = map__unmap_ip(kmap, sym->start);
3437 
3438 		sym = machine__find_kernel_symbol_by_name(machine, "__lock_text_end", &kmap);
3439 		machine->lock.text_end = map__unmap_ip(kmap, sym->start);
3440 	}
3441 
3442 	/* failed to get kernel symbols */
3443 	if (machine->sched.text_start == 1)
3444 		return false;
3445 
3446 	/* mutex and rwsem functions are in sched text section */
3447 	if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
3448 		return true;
3449 
3450 	/* spinlock functions are in lock text section */
3451 	if (machine->lock.text_start <= addr && addr < machine->lock.text_end)
3452 		return true;
3453 
3454 	return false;
3455 }
3456