xref: /linux/tools/perf/util/event.c (revision e9a83bd2322035ed9d7dcf35753d3f984d76c6a5)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <fcntl.h>
5 #include <inttypes.h>
6 #include <linux/kernel.h>
7 #include <linux/types.h>
8 #include <sys/types.h>
9 #include <sys/stat.h>
10 #include <unistd.h>
11 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
12 #include <api/fs/fs.h>
13 #include <linux/perf_event.h>
14 #include "event.h"
15 #include "debug.h"
16 #include "hist.h"
17 #include "machine.h"
18 #include "sort.h"
19 #include "string2.h"
20 #include "strlist.h"
21 #include "thread.h"
22 #include "thread_map.h"
23 #include <linux/ctype.h>
24 #include "map.h"
25 #include "symbol.h"
26 #include "symbol/kallsyms.h"
27 #include "asm/bug.h"
28 #include "stat.h"
29 #include "session.h"
30 #include "bpf-event.h"
31 
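/*
 * Default time, in milliseconds, allowed for parsing one /proc/<pid>/maps
 * file before the synthesized mmap events for that thread are truncated,
 * see the rdclock() timeout check in perf_event__synthesize_mmap_events().
 */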
32 #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
33 
34 static const char *perf_event__names[] = {
35 	[0]					= "TOTAL",
36 	[PERF_RECORD_MMAP]			= "MMAP",
37 	[PERF_RECORD_MMAP2]			= "MMAP2",
38 	[PERF_RECORD_LOST]			= "LOST",
39 	[PERF_RECORD_COMM]			= "COMM",
40 	[PERF_RECORD_EXIT]			= "EXIT",
41 	[PERF_RECORD_THROTTLE]			= "THROTTLE",
42 	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
43 	[PERF_RECORD_FORK]			= "FORK",
44 	[PERF_RECORD_READ]			= "READ",
45 	[PERF_RECORD_SAMPLE]			= "SAMPLE",
46 	[PERF_RECORD_AUX]			= "AUX",
47 	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
48 	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
49 	[PERF_RECORD_SWITCH]			= "SWITCH",
50 	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
51 	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
52 	[PERF_RECORD_KSYMBOL]			= "KSYMBOL",
53 	[PERF_RECORD_BPF_EVENT]			= "BPF_EVENT",
54 	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
55 	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
56 	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
57 	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
58 	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
59 	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
60 	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
61 	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
62 	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
63 	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
64 	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
65 	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
66 	[PERF_RECORD_STAT]			= "STAT",
67 	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
68 	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
69 	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
70 	[PERF_RECORD_HEADER_FEATURE]		= "FEATURE",
71 	[PERF_RECORD_COMPRESSED]		= "COMPRESSED",
72 };
73 
74 static const char *perf_ns__names[] = {
75 	[NET_NS_INDEX]		= "net",
76 	[UTS_NS_INDEX]		= "uts",
77 	[IPC_NS_INDEX]		= "ipc",
78 	[PID_NS_INDEX]		= "pid",
79 	[USER_NS_INDEX]		= "user",
80 	[MNT_NS_INDEX]		= "mnt",
81 	[CGROUP_NS_INDEX]	= "cgroup",
82 };
83 
84 unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
85 
86 const char *perf_event__name(unsigned int id)
87 {
88 	if (id >= ARRAY_SIZE(perf_event__names))
89 		return "INVALID";
90 	if (!perf_event__names[id])
91 		return "UNKNOWN";
92 	return perf_event__names[id];
93 }
94 
95 static const char *perf_ns__name(unsigned int id)
96 {
97 	if (id >= ARRAY_SIZE(perf_ns__names))
98 		return "UNKNOWN";
99 	return perf_ns__names[id];
100 }
101 
102 int perf_tool__process_synth_event(struct perf_tool *tool,
103 				   union perf_event *event,
104 				   struct machine *machine,
105 				   perf_event__handler_t process)
106 {
107 	struct perf_sample synth_sample = {
108 	.pid	   = -1,
109 	.tid	   = -1,
110 	.time	   = -1,
111 	.stream_id = -1,
112 	.cpu	   = -1,
113 	.period	   = 1,
114 	.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
115 	};
116 
117 	return process(tool, event, &synth_sample, machine);
118 }
119 
120 /*
121  * Assumes that the first 4095 bytes of /proc/pid/status contain
122  * the comm, tgid and ppid.
123  */
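/*
 * The fields of interest in /proc/<pid>/status look like this (illustrative
 * values):
 *
 *	Name:	bash
 *	Tgid:	1234
 *	PPid:	1000
 */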
124 static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
125 				    pid_t *tgid, pid_t *ppid)
126 {
127 	char filename[PATH_MAX];
128 	char bf[4096];
129 	int fd;
130 	size_t size = 0;
131 	ssize_t n;
132 	char *name, *tgids, *ppids;
133 
134 	*tgid = -1;
135 	*ppid = -1;
136 
137 	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
138 
139 	fd = open(filename, O_RDONLY);
140 	if (fd < 0) {
141 		pr_debug("couldn't open %s\n", filename);
142 		return -1;
143 	}
144 
145 	n = read(fd, bf, sizeof(bf) - 1);
146 	close(fd);
147 	if (n <= 0) {
148 		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
149 			   pid);
150 		return -1;
151 	}
152 	bf[n] = '\0';
153 
154 	name = strstr(bf, "Name:");
155 	tgids = strstr(bf, "Tgid:");
156 	ppids = strstr(bf, "PPid:");
157 
158 	if (name) {
159 		char *nl;
160 
161 		name = skip_spaces(name + 5);  /* strlen("Name:") */
162 		nl = strchr(name, '\n');
163 		if (nl)
164 			*nl = '\0';
165 
166 		size = strlen(name);
167 		if (size >= len)
168 			size = len - 1;
169 		memcpy(comm, name, size);
170 		comm[size] = '\0';
171 	} else {
172 		pr_debug("Name: string not found for pid %d\n", pid);
173 	}
174 
175 	if (tgids) {
176 		tgids += 5;  /* strlen("Tgid:") */
177 		*tgid = atoi(tgids);
178 	} else {
179 		pr_debug("Tgid: string not found for pid %d\n", pid);
180 	}
181 
182 	if (ppids) {
183 		ppids += 5;  /* strlen("PPid:") */
184 		*ppid = atoi(ppids);
185 	} else {
186 		pr_debug("PPid: string not found for pid %d\n", pid);
187 	}
188 
189 	return 0;
190 }
191 
192 static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
193 				    struct machine *machine,
194 				    pid_t *tgid, pid_t *ppid)
195 {
196 	size_t size;
197 
198 	*ppid = -1;
199 
200 	memset(&event->comm, 0, sizeof(event->comm));
201 
202 	if (machine__is_host(machine)) {
203 		if (perf_event__get_comm_ids(pid, event->comm.comm,
204 					     sizeof(event->comm.comm),
205 					     tgid, ppid) != 0) {
206 			return -1;
207 		}
208 	} else {
209 		*tgid = machine->pid;
210 	}
211 
212 	if (*tgid < 0)
213 		return -1;
214 
215 	event->comm.pid = *tgid;
216 	event->comm.header.type = PERF_RECORD_COMM;
217 
218 	size = strlen(event->comm.comm) + 1;
219 	size = PERF_ALIGN(size, sizeof(u64));
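	/*
	 * header.size covers the comm event with the unused tail of the
	 * fixed-size comm[] buffer trimmed off, plus machine->id_hdr_size
	 * bytes for the sample id header, which are zeroed just below.
	 */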
220 	memset(event->comm.comm + size, 0, machine->id_hdr_size);
221 	event->comm.header.size = (sizeof(event->comm) -
222 				(sizeof(event->comm.comm) - size) +
223 				machine->id_hdr_size);
224 	event->comm.tid = pid;
225 
226 	return 0;
227 }
228 
229 pid_t perf_event__synthesize_comm(struct perf_tool *tool,
230 					 union perf_event *event, pid_t pid,
231 					 perf_event__handler_t process,
232 					 struct machine *machine)
233 {
234 	pid_t tgid, ppid;
235 
236 	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
237 		return -1;
238 
239 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
240 		return -1;
241 
242 	return tgid;
243 }
244 
245 static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
246 					 struct perf_ns_link_info *ns_link_info)
247 {
248 	struct stat64 st;
249 	char proc_ns[128];
250 
251 	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
252 	if (stat64(proc_ns, &st) == 0) {
253 		ns_link_info->dev = st.st_dev;
254 		ns_link_info->ino = st.st_ino;
255 	}
256 }
257 
258 int perf_event__synthesize_namespaces(struct perf_tool *tool,
259 				      union perf_event *event,
260 				      pid_t pid, pid_t tgid,
261 				      perf_event__handler_t process,
262 				      struct machine *machine)
263 {
264 	u32 idx;
265 	struct perf_ns_link_info *ns_link_info;
266 
267 	if (!tool || !tool->namespace_events)
268 		return 0;
269 
270 	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
271 	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
272 	       machine->id_hdr_size));
273 
274 	event->namespaces.pid = tgid;
275 	event->namespaces.tid = pid;
276 
277 	event->namespaces.nr_namespaces = NR_NAMESPACES;
278 
279 	ns_link_info = event->namespaces.link_info;
280 
281 	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
282 		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
283 					     &ns_link_info[idx]);
284 
285 	event->namespaces.header.type = PERF_RECORD_NAMESPACES;
286 
287 	event->namespaces.header.size = (sizeof(event->namespaces) +
288 			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
289 			machine->id_hdr_size);
290 
291 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
292 		return -1;
293 
294 	return 0;
295 }
296 
297 static int perf_event__synthesize_fork(struct perf_tool *tool,
298 				       union perf_event *event,
299 				       pid_t pid, pid_t tgid, pid_t ppid,
300 				       perf_event__handler_t process,
301 				       struct machine *machine)
302 {
303 	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
304 
305 	/*
306 	 * For the main thread, set the parent to the ppid from the status
307 	 * file. For other threads, set the parent pid to the main thread,
308 	 * i.e. assume the main thread spawns all threads in a process.
309 	 */
310 	if (tgid == pid) {
311 		event->fork.ppid = ppid;
312 		event->fork.ptid = ppid;
313 	} else {
314 		event->fork.ppid = tgid;
315 		event->fork.ptid = tgid;
316 	}
317 	event->fork.pid  = tgid;
318 	event->fork.tid  = pid;
319 	event->fork.header.type = PERF_RECORD_FORK;
320 	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
321 
322 	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
323 
324 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
325 		return -1;
326 
327 	return 0;
328 }
329 
330 int perf_event__synthesize_mmap_events(struct perf_tool *tool,
331 				       union perf_event *event,
332 				       pid_t pid, pid_t tgid,
333 				       perf_event__handler_t process,
334 				       struct machine *machine,
335 				       bool mmap_data)
336 {
337 	char filename[PATH_MAX];
338 	FILE *fp;
339 	unsigned long long t;
340 	bool truncation = false;
341 	unsigned long long timeout = proc_map_timeout * 1000000ULL;
342 	int rc = 0;
343 	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
344 	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
345 
346 	if (machine__is_default_guest(machine))
347 		return 0;
348 
349 	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
350 		 machine->root_dir, pid, pid);
351 
352 	fp = fopen(filename, "r");
353 	if (fp == NULL) {
354 		/*
355 		 * We raced with a task exiting - just return:
356 		 */
357 		pr_debug("couldn't open %s\n", filename);
358 		return -1;
359 	}
360 
361 	event->header.type = PERF_RECORD_MMAP2;
362 	t = rdclock();
363 
364 	while (1) {
365 		char bf[BUFSIZ];
366 		char prot[5];
367 		char execname[PATH_MAX];
368 		char anonstr[] = "//anon";
369 		unsigned int ino;
370 		size_t size;
371 		ssize_t n;
372 
373 		if (fgets(bf, sizeof(bf), fp) == NULL)
374 			break;
375 
376 		if ((rdclock() - t) > timeout) {
377 			pr_warning("Reading %s timed out. "
378 				   "You may want to increase "
379 				   "the time limit with --proc-map-timeout\n",
380 				   filename);
381 			truncation = true;
382 			goto out;
383 		}
384 
385 		/* ensure null termination since stack will be reused. */
386 		strcpy(execname, "");
387 
388 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
389 		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
390 		       &event->mmap2.start, &event->mmap2.len, prot,
391 		       &event->mmap2.pgoff, &event->mmap2.maj,
392 		       &event->mmap2.min,
393 		       &ino, execname);
394 
395 		/*
396 		 * Anon maps don't have the execname, so require only 7 fields.
397 		 */
398 		if (n < 7)
399 			continue;
400 
401 		event->mmap2.ino = (u64)ino;
402 
403 		/*
404 		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
405 		 */
406 		if (machine__is_host(machine))
407 			event->header.misc = PERF_RECORD_MISC_USER;
408 		else
409 			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
410 
411 		/* map protection and flags bits */
412 		event->mmap2.prot = 0;
413 		event->mmap2.flags = 0;
414 		if (prot[0] == 'r')
415 			event->mmap2.prot |= PROT_READ;
416 		if (prot[1] == 'w')
417 			event->mmap2.prot |= PROT_WRITE;
418 		if (prot[2] == 'x')
419 			event->mmap2.prot |= PROT_EXEC;
420 
421 		if (prot[3] == 's')
422 			event->mmap2.flags |= MAP_SHARED;
423 		else
424 			event->mmap2.flags |= MAP_PRIVATE;
425 
426 		if (prot[2] != 'x') {
427 			if (!mmap_data || prot[0] != 'r')
428 				continue;
429 
430 			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
431 		}
432 
433 out:
434 		if (truncation)
435 			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
436 
437 		if (!strcmp(execname, ""))
438 			strcpy(execname, anonstr);
439 
440 		if (hugetlbfs_mnt_len &&
441 		    !strncmp(execname, hugetlbfs_mnt, hugetlbfs_mnt_len)) {
442 			strcpy(execname, anonstr);
443 			event->mmap2.flags |= MAP_HUGETLB;
444 		}
445 
446 		size = strlen(execname) + 1;
447 		memcpy(event->mmap2.filename, execname, size);
448 		size = PERF_ALIGN(size, sizeof(u64));
449 		event->mmap2.len -= event->mmap2.start;
450 		event->mmap2.header.size = (sizeof(event->mmap2) -
451 					(sizeof(event->mmap2.filename) - size));
452 		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
453 		event->mmap2.header.size += machine->id_hdr_size;
454 		event->mmap2.pid = tgid;
455 		event->mmap2.tid = pid;
456 
457 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
458 			rc = -1;
459 			break;
460 		}
461 
462 		if (truncation)
463 			break;
464 	}
465 
466 	fclose(fp);
467 	return rc;
468 }
469 
470 int perf_event__synthesize_modules(struct perf_tool *tool,
471 				   perf_event__handler_t process,
472 				   struct machine *machine)
473 {
474 	int rc = 0;
475 	struct map *pos;
476 	struct maps *maps = machine__kernel_maps(machine);
477 	union perf_event *event = zalloc((sizeof(event->mmap) +
478 					  machine->id_hdr_size));
479 	if (event == NULL) {
480 		pr_debug("Not enough memory synthesizing mmap event "
481 			 "for kernel modules\n");
482 		return -1;
483 	}
484 
485 	event->header.type = PERF_RECORD_MMAP;
486 
487 	/*
488 	 * kernel uses 0 for user space maps, see kernel/perf_event.c
489 	 * __perf_event_mmap
490 	 */
491 	if (machine__is_host(machine))
492 		event->header.misc = PERF_RECORD_MISC_KERNEL;
493 	else
494 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
495 
496 	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
497 		size_t size;
498 
499 		if (!__map__is_kmodule(pos))
500 			continue;
501 
502 		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
503 		event->mmap.header.type = PERF_RECORD_MMAP;
504 		event->mmap.header.size = (sizeof(event->mmap) -
505 				        (sizeof(event->mmap.filename) - size));
506 		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
507 		event->mmap.header.size += machine->id_hdr_size;
508 		event->mmap.start = pos->start;
509 		event->mmap.len   = pos->end - pos->start;
510 		event->mmap.pid   = machine->pid;
511 
512 		memcpy(event->mmap.filename, pos->dso->long_name,
513 		       pos->dso->long_name_len + 1);
514 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
515 			rc = -1;
516 			break;
517 		}
518 	}
519 
520 	free(event);
521 	return rc;
522 }
523 
524 static int __event__synthesize_thread(union perf_event *comm_event,
525 				      union perf_event *mmap_event,
526 				      union perf_event *fork_event,
527 				      union perf_event *namespaces_event,
528 				      pid_t pid, int full,
529 				      perf_event__handler_t process,
530 				      struct perf_tool *tool,
531 				      struct machine *machine,
532 				      bool mmap_data)
533 {
534 	char filename[PATH_MAX];
535 	DIR *tasks;
536 	struct dirent *dirent;
537 	pid_t tgid, ppid;
538 	int rc = 0;
539 
540 	/* special case: only send one comm event using the passed-in pid */
541 	if (!full) {
542 		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
543 						   process, machine);
544 
545 		if (tgid == -1)
546 			return -1;
547 
548 		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
549 						      tgid, process, machine) < 0)
550 			return -1;
551 
552 		/*
553 		 * send mmap only for thread group leader
554 		 * see thread__init_map_groups
555 		 */
556 		if (pid == tgid &&
557 		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
558 						       process, machine, mmap_data))
559 			return -1;
560 
561 		return 0;
562 	}
563 
564 	if (machine__is_default_guest(machine))
565 		return 0;
566 
567 	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
568 		 machine->root_dir, pid);
569 
570 	tasks = opendir(filename);
571 	if (tasks == NULL) {
572 		pr_debug("couldn't open %s\n", filename);
573 		return 0;
574 	}
575 
576 	while ((dirent = readdir(tasks)) != NULL) {
577 		char *end;
578 		pid_t _pid;
579 
580 		_pid = strtol(dirent->d_name, &end, 10);
581 		if (*end)
582 			continue;
583 
584 		rc = -1;
585 		if (perf_event__prepare_comm(comm_event, _pid, machine,
586 					     &tgid, &ppid) != 0)
587 			break;
588 
589 		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
590 						ppid, process, machine) < 0)
591 			break;
592 
593 		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
594 						      tgid, process, machine) < 0)
595 			break;
596 
597 		/*
598 		 * Send the prepared comm event
599 		 */
600 		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
601 			break;
602 
603 		rc = 0;
604 		if (_pid == pid) {
605 			/* process the parent's maps too */
606 			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
607 						process, machine, mmap_data);
608 			if (rc)
609 				break;
610 		}
611 	}
612 
613 	closedir(tasks);
614 	return rc;
615 }
616 
617 int perf_event__synthesize_thread_map(struct perf_tool *tool,
618 				      struct thread_map *threads,
619 				      perf_event__handler_t process,
620 				      struct machine *machine,
621 				      bool mmap_data)
622 {
623 	union perf_event *comm_event, *mmap_event, *fork_event;
624 	union perf_event *namespaces_event;
625 	int err = -1, thread, j;
626 
627 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
628 	if (comm_event == NULL)
629 		goto out;
630 
631 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
632 	if (mmap_event == NULL)
633 		goto out_free_comm;
634 
635 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
636 	if (fork_event == NULL)
637 		goto out_free_mmap;
638 
639 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
640 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
641 				  machine->id_hdr_size);
642 	if (namespaces_event == NULL)
643 		goto out_free_fork;
644 
645 	err = 0;
646 	for (thread = 0; thread < threads->nr; ++thread) {
647 		if (__event__synthesize_thread(comm_event, mmap_event,
648 					       fork_event, namespaces_event,
649 					       thread_map__pid(threads, thread), 0,
650 					       process, tool, machine,
651 					       mmap_data)) {
652 			err = -1;
653 			break;
654 		}
655 
656 		/*
657 		 * comm.pid is set to thread group id by
658 		 * perf_event__synthesize_comm
659 		 */
660 		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
661 			bool need_leader = true;
662 
663 			/* is thread group leader in thread_map? */
664 			for (j = 0; j < threads->nr; ++j) {
665 				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
666 					need_leader = false;
667 					break;
668 				}
669 			}
670 
671 			/* if not, generate events for it */
672 			if (need_leader &&
673 			    __event__synthesize_thread(comm_event, mmap_event,
674 						       fork_event, namespaces_event,
675 						       comm_event->comm.pid, 0,
676 						       process, tool, machine,
677 						       mmap_data)) {
678 				err = -1;
679 				break;
680 			}
681 		}
682 	}
683 	free(namespaces_event);
684 out_free_fork:
685 	free(fork_event);
686 out_free_mmap:
687 	free(mmap_event);
688 out_free_comm:
689 	free(comm_event);
690 out:
691 	return err;
692 }
693 
694 static int __perf_event__synthesize_threads(struct perf_tool *tool,
695 					    perf_event__handler_t process,
696 					    struct machine *machine,
697 					    bool mmap_data,
698 					    struct dirent **dirent,
699 					    int start,
700 					    int num)
701 {
702 	union perf_event *comm_event, *mmap_event, *fork_event;
703 	union perf_event *namespaces_event;
704 	int err = -1;
705 	char *end;
706 	pid_t pid;
707 	int i;
708 
709 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
710 	if (comm_event == NULL)
711 		goto out;
712 
713 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
714 	if (mmap_event == NULL)
715 		goto out_free_comm;
716 
717 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
718 	if (fork_event == NULL)
719 		goto out_free_mmap;
720 
721 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
722 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
723 				  machine->id_hdr_size);
724 	if (namespaces_event == NULL)
725 		goto out_free_fork;
726 
727 	for (i = start; i < start + num; i++) {
728 		if (!isdigit(dirent[i]->d_name[0]))
729 			continue;
730 
731 		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
732 		/* only interested in proper numerical dirents */
733 		if (*end)
734 			continue;
735 		/*
736 		 * We may race with an exiting thread, so don't stop just because
737 		 * one thread couldn't be synthesized.
738 		 */
739 		__event__synthesize_thread(comm_event, mmap_event, fork_event,
740 					   namespaces_event, pid, 1, process,
741 					   tool, machine, mmap_data);
742 	}
743 	err = 0;
744 
745 	free(namespaces_event);
746 out_free_fork:
747 	free(fork_event);
748 out_free_mmap:
749 	free(mmap_event);
750 out_free_comm:
751 	free(comm_event);
752 out:
753 	return err;
754 }
755 
756 struct synthesize_threads_arg {
757 	struct perf_tool *tool;
758 	perf_event__handler_t process;
759 	struct machine *machine;
760 	bool mmap_data;
761 	struct dirent **dirent;
762 	int num;
763 	int start;
764 };
765 
766 static void *synthesize_threads_worker(void *arg)
767 {
768 	struct synthesize_threads_arg *args = arg;
769 
770 	__perf_event__synthesize_threads(args->tool, args->process,
771 					 args->machine, args->mmap_data,
772 					 args->dirent,
773 					 args->start, args->num);
774 	return NULL;
775 }
776 
777 int perf_event__synthesize_threads(struct perf_tool *tool,
778 				   perf_event__handler_t process,
779 				   struct machine *machine,
780 				   bool mmap_data,
781 				   unsigned int nr_threads_synthesize)
782 {
783 	struct synthesize_threads_arg *args = NULL;
784 	pthread_t *synthesize_threads = NULL;
785 	char proc_path[PATH_MAX];
786 	struct dirent **dirent;
787 	int num_per_thread;
788 	int m, n, i, j;
789 	int thread_nr;
790 	int base = 0;
791 	int err = -1;
792 
793 
794 	if (machine__is_default_guest(machine))
795 		return 0;
796 
797 	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
798 	n = scandir(proc_path, &dirent, 0, alphasort);
799 	if (n < 0)
800 		return err;
801 
802 	if (nr_threads_synthesize == UINT_MAX)
803 		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
804 	else
805 		thread_nr = nr_threads_synthesize;
806 
807 	if (thread_nr <= 1) {
808 		err = __perf_event__synthesize_threads(tool, process,
809 						       machine, mmap_data,
810 						       dirent, base, n);
811 		goto free_dirent;
812 	}
813 	if (thread_nr > n)
814 		thread_nr = n;
815 
816 	synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
817 	if (synthesize_threads == NULL)
818 		goto free_dirent;
819 
820 	args = calloc(sizeof(*args), thread_nr);
821 	if (args == NULL)
822 		goto free_threads;
823 
824 	num_per_thread = n / thread_nr;
825 	m = n % thread_nr;
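	/*
	 * Split the n scanned /proc entries across thread_nr workers: the
	 * first m workers get num_per_thread + 1 entries each, the rest get
	 * num_per_thread. E.g. (illustrative) n = 10, thread_nr = 3 gives the
	 * ranges [0,3], [4,6] and [7,9].
	 */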
826 	for (i = 0; i < thread_nr; i++) {
827 		args[i].tool = tool;
828 		args[i].process = process;
829 		args[i].machine = machine;
830 		args[i].mmap_data = mmap_data;
831 		args[i].dirent = dirent;
832 	}
833 	for (i = 0; i < m; i++) {
834 		args[i].num = num_per_thread + 1;
835 		args[i].start = i * args[i].num;
836 	}
837 	if (i != 0)
838 		base = args[i-1].start + args[i-1].num;
839 	for (j = i; j < thread_nr; j++) {
840 		args[j].num = num_per_thread;
841 		args[j].start = base + (j - i) * args[i].num;
842 	}
843 
844 	for (i = 0; i < thread_nr; i++) {
845 		if (pthread_create(&synthesize_threads[i], NULL,
846 				   synthesize_threads_worker, &args[i]))
847 			goto out_join;
848 	}
849 	err = 0;
850 out_join:
851 	for (i = 0; i < thread_nr; i++)
852 		pthread_join(synthesize_threads[i], NULL);
853 	free(args);
854 free_threads:
855 	free(synthesize_threads);
856 free_dirent:
857 	for (i = 0; i < n; i++)
858 		free(dirent[i]);
859 	free(dirent);
860 
861 	return err;
862 }
863 
864 struct process_symbol_args {
865 	const char *name;
866 	u64	   start;
867 };
868 
869 static int find_symbol_cb(void *arg, const char *name, char type,
870 			  u64 start)
871 {
872 	struct process_symbol_args *args = arg;
873 
874 	/*
875 	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
876 	 * an 'A' to the same address as "_stext".
877 	 */
878 	if (!(kallsyms__is_function(type) ||
879 	      type == 'A') || strcmp(name, args->name))
880 		return 0;
881 
882 	args->start = start;
883 	return 1;
884 }
885 
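/*
 * Example (illustrative): find where the running kernel's "_text" symbol
 * starts, e.g. from the default /proc/kallsyms:
 *
 *	u64 addr;
 *
 *	if (kallsyms__get_function_start("/proc/kallsyms", "_text", &addr) < 0)
 *		return -1;
 */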
886 int kallsyms__get_function_start(const char *kallsyms_filename,
887 				 const char *symbol_name, u64 *addr)
888 {
889 	struct process_symbol_args args = { .name = symbol_name, };
890 
891 	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
892 		return -1;
893 
894 	*addr = args.start;
895 	return 0;
896 }
897 
898 int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
899 					      perf_event__handler_t process __maybe_unused,
900 					      struct machine *machine __maybe_unused)
901 {
902 	return 0;
903 }
904 
905 static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
906 						perf_event__handler_t process,
907 						struct machine *machine)
908 {
909 	size_t size;
910 	struct map *map = machine__kernel_map(machine);
911 	struct kmap *kmap;
912 	int err;
913 	union perf_event *event;
914 
915 	if (symbol_conf.kptr_restrict)
916 		return -1;
917 	if (map == NULL)
918 		return -1;
919 
920 	/*
921 	 * We should get this from /sys/kernel/sections/.text, but until that is
922 	 * available use this, and after it becomes available keep this as a
923 	 * fallback for older kernels.
924 	 */
925 	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
926 	if (event == NULL) {
927 		pr_debug("Not enough memory synthesizing mmap event "
928 			 "for the kernel map\n");
929 		return -1;
930 	}
931 
932 	if (machine__is_host(machine)) {
933 		/*
934 		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
935 		 * see kernel/perf_event.c __perf_event_mmap
936 		 */
937 		event->header.misc = PERF_RECORD_MISC_KERNEL;
938 	} else {
939 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
940 	}
941 
942 	kmap = map__kmap(map);
943 	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
944 			"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
945 	size = PERF_ALIGN(size, sizeof(u64));
946 	event->mmap.header.type = PERF_RECORD_MMAP;
947 	event->mmap.header.size = (sizeof(event->mmap) -
948 			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
949 	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
950 	event->mmap.start = map->start;
951 	event->mmap.len   = map->end - event->mmap.start;
952 	event->mmap.pid   = machine->pid;
953 
954 	err = perf_tool__process_synth_event(tool, event, machine, process);
955 	free(event);
956 
957 	return err;
958 }
959 
960 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
961 				       perf_event__handler_t process,
962 				       struct machine *machine)
963 {
964 	int err;
965 
966 	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
967 	if (err < 0)
968 		return err;
969 
970 	return perf_event__synthesize_extra_kmaps(tool, process, machine);
971 }
972 
973 int perf_event__synthesize_thread_map2(struct perf_tool *tool,
974 				      struct thread_map *threads,
975 				      perf_event__handler_t process,
976 				      struct machine *machine)
977 {
978 	union perf_event *event;
979 	int i, err, size;
980 
981 	size  = sizeof(event->thread_map);
982 	size +=	threads->nr * sizeof(event->thread_map.entries[0]);
983 
984 	event = zalloc(size);
985 	if (!event)
986 		return -ENOMEM;
987 
988 	event->header.type = PERF_RECORD_THREAD_MAP;
989 	event->header.size = size;
990 	event->thread_map.nr = threads->nr;
991 
992 	for (i = 0; i < threads->nr; i++) {
993 		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
994 		char *comm = thread_map__comm(threads, i);
995 
996 		if (!comm)
997 			comm = (char *) "";
998 
999 		entry->pid = thread_map__pid(threads, i);
1000 		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
1001 	}
1002 
1003 	err = process(tool, event, NULL, machine);
1004 
1005 	free(event);
1006 	return err;
1007 }
1008 
1009 static void synthesize_cpus(struct cpu_map_entries *cpus,
1010 			    struct cpu_map *map)
1011 {
1012 	int i;
1013 
1014 	cpus->nr = map->nr;
1015 
1016 	for (i = 0; i < map->nr; i++)
1017 		cpus->cpu[i] = map->map[i];
1018 }
1019 
1020 static void synthesize_mask(struct cpu_map_mask *mask,
1021 			    struct cpu_map *map, int max)
1022 {
1023 	int i;
1024 
1025 	mask->nr = BITS_TO_LONGS(max);
1026 	mask->long_size = sizeof(long);
1027 
1028 	for (i = 0; i < map->nr; i++)
1029 		set_bit(map->map[i], mask->mask);
1030 }
1031 
1032 static size_t cpus_size(struct cpu_map *map)
1033 {
1034 	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
1035 }
1036 
1037 static size_t mask_size(struct cpu_map *map, int *max)
1038 {
1039 	int i;
1040 
1041 	*max = 0;
1042 
1043 	for (i = 0; i < map->nr; i++) {
1044 		/* bit position of the cpu is + 1 */
1045 		int bit = map->map[i] + 1;
1046 
1047 		if (bit > *max)
1048 			*max = bit;
1049 	}
1050 
1051 	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
1052 }
1053 
1054 void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
1055 {
1056 	size_t size_cpus, size_mask;
1057 	bool is_dummy = cpu_map__empty(map);
1058 
1059 	/*
1060 	 * Both array and mask data have variable size based
1061 	 * on the number of cpus and their actual values.
1062 	 * The size of the 'struct cpu_map_data' is:
1063 	 *
1064 	 *   array = size of 'struct cpu_map_entries' +
1065 	 *           number of cpus * sizeof(u16)
1066 	 *
1067 	 *   mask  = size of 'struct cpu_map_mask' +
1068 	 *           maximum cpu bit converted to size of longs
1069 	 *
1070 	 * and finally + the size of 'struct cpu_map_data'.
1071 	 */
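	/*
	 * For example (illustrative): a map holding cpus {0, 3} needs
	 * cpus_size() = sizeof(struct cpu_map_entries) + 2 * sizeof(u16) bytes
	 * in array form, or mask_size() = sizeof(struct cpu_map_mask) +
	 * BITS_TO_LONGS(4) * sizeof(long) bytes in mask form (max being the
	 * highest cpu + 1 = 4); the smaller of the two is picked below.
	 */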
1072 	size_cpus = cpus_size(map);
1073 	size_mask = mask_size(map, max);
1074 
1075 	if (is_dummy || (size_cpus < size_mask)) {
1076 		*size += size_cpus;
1077 		*type  = PERF_CPU_MAP__CPUS;
1078 	} else {
1079 		*size += size_mask;
1080 		*type  = PERF_CPU_MAP__MASK;
1081 	}
1082 
1083 	*size += sizeof(struct cpu_map_data);
1084 	*size = PERF_ALIGN(*size, sizeof(u64));
1085 	return zalloc(*size);
1086 }
1087 
1088 void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
1089 			      u16 type, int max)
1090 {
1091 	data->type = type;
1092 
1093 	switch (type) {
1094 	case PERF_CPU_MAP__CPUS:
1095 		synthesize_cpus((struct cpu_map_entries *) data->data, map);
1096 		break;
1097 	case PERF_CPU_MAP__MASK:
1098 		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
1099 	default:
1100 		break;
1101 	}
1102 }
1103 
1104 static struct cpu_map_event* cpu_map_event__new(struct cpu_map *map)
1105 {
1106 	size_t size = sizeof(struct cpu_map_event);
1107 	struct cpu_map_event *event;
1108 	int max;
1109 	u16 type;
1110 
1111 	event = cpu_map_data__alloc(map, &size, &type, &max);
1112 	if (!event)
1113 		return NULL;
1114 
1115 	event->header.type = PERF_RECORD_CPU_MAP;
1116 	event->header.size = size;
1117 	event->data.type   = type;
1118 
1119 	cpu_map_data__synthesize(&event->data, map, type, max);
1120 	return event;
1121 }
1122 
1123 int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1124 				   struct cpu_map *map,
1125 				   perf_event__handler_t process,
1126 				   struct machine *machine)
1127 {
1128 	struct cpu_map_event *event;
1129 	int err;
1130 
1131 	event = cpu_map_event__new(map);
1132 	if (!event)
1133 		return -ENOMEM;
1134 
1135 	err = process(tool, (union perf_event *) event, NULL, machine);
1136 
1137 	free(event);
1138 	return err;
1139 }
1140 
1141 int perf_event__synthesize_stat_config(struct perf_tool *tool,
1142 				       struct perf_stat_config *config,
1143 				       perf_event__handler_t process,
1144 				       struct machine *machine)
1145 {
1146 	struct stat_config_event *event;
1147 	int size, i = 0, err;
1148 
1149 	size  = sizeof(*event);
1150 	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1151 
1152 	event = zalloc(size);
1153 	if (!event)
1154 		return -ENOMEM;
1155 
1156 	event->header.type = PERF_RECORD_STAT_CONFIG;
1157 	event->header.size = size;
1158 	event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1159 
1160 #define ADD(__term, __val)					\
1161 	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
1162 	event->data[i].val = __val;				\
1163 	i++;
1164 
1165 	ADD(AGGR_MODE,	config->aggr_mode)
1166 	ADD(INTERVAL,	config->interval)
1167 	ADD(SCALE,	config->scale)
1168 
1169 	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1170 		  "stat config terms unbalanced\n");
1171 #undef ADD
1172 
1173 	err = process(tool, (union perf_event *) event, NULL, machine);
1174 
1175 	free(event);
1176 	return err;
1177 }
1178 
1179 int perf_event__synthesize_stat(struct perf_tool *tool,
1180 				u32 cpu, u32 thread, u64 id,
1181 				struct perf_counts_values *count,
1182 				perf_event__handler_t process,
1183 				struct machine *machine)
1184 {
1185 	struct stat_event event;
1186 
1187 	event.header.type = PERF_RECORD_STAT;
1188 	event.header.size = sizeof(event);
1189 	event.header.misc = 0;
1190 
1191 	event.id        = id;
1192 	event.cpu       = cpu;
1193 	event.thread    = thread;
1194 	event.val       = count->val;
1195 	event.ena       = count->ena;
1196 	event.run       = count->run;
1197 
1198 	return process(tool, (union perf_event *) &event, NULL, machine);
1199 }
1200 
1201 int perf_event__synthesize_stat_round(struct perf_tool *tool,
1202 				      u64 evtime, u64 type,
1203 				      perf_event__handler_t process,
1204 				      struct machine *machine)
1205 {
1206 	struct stat_round_event event;
1207 
1208 	event.header.type = PERF_RECORD_STAT_ROUND;
1209 	event.header.size = sizeof(event);
1210 	event.header.misc = 0;
1211 
1212 	event.time = evtime;
1213 	event.type = type;
1214 
1215 	return process(tool, (union perf_event *) &event, NULL, machine);
1216 }
1217 
1218 void perf_event__read_stat_config(struct perf_stat_config *config,
1219 				  struct stat_config_event *event)
1220 {
1221 	unsigned i;
1222 
1223 	for (i = 0; i < event->nr; i++) {
1224 
1225 		switch (event->data[i].tag) {
1226 #define CASE(__term, __val)					\
1227 		case PERF_STAT_CONFIG_TERM__##__term:		\
1228 			config->__val = event->data[i].val;	\
1229 			break;
1230 
1231 		CASE(AGGR_MODE, aggr_mode)
1232 		CASE(SCALE,     scale)
1233 		CASE(INTERVAL,  interval)
1234 #undef CASE
1235 		default:
1236 			pr_warning("unknown stat config term %" PRIu64 "\n",
1237 				   event->data[i].tag);
1238 		}
1239 	}
1240 }
1241 
1242 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
1243 {
1244 	const char *s;
1245 
1246 	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
1247 		s = " exec";
1248 	else
1249 		s = "";
1250 
1251 	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
1252 }
1253 
1254 size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
1255 {
1256 	size_t ret = 0;
1257 	struct perf_ns_link_info *ns_link_info;
1258 	u32 nr_namespaces, idx;
1259 
1260 	ns_link_info = event->namespaces.link_info;
1261 	nr_namespaces = event->namespaces.nr_namespaces;
1262 
1263 	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
1264 		       event->namespaces.pid,
1265 		       event->namespaces.tid,
1266 		       nr_namespaces);
1267 
1268 	for (idx = 0; idx < nr_namespaces; idx++) {
1269 		if (idx && (idx % 4 == 0))
1270 			ret += fprintf(fp, "\n\t\t ");
1271 
1272 		ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
1273 				perf_ns__name(idx), (u64)ns_link_info[idx].dev,
1274 				(u64)ns_link_info[idx].ino,
1275 				((idx + 1) != nr_namespaces) ? ", " : "]\n");
1276 	}
1277 
1278 	return ret;
1279 }
1280 
1281 int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
1282 			     union perf_event *event,
1283 			     struct perf_sample *sample,
1284 			     struct machine *machine)
1285 {
1286 	return machine__process_comm_event(machine, event, sample);
1287 }
1288 
1289 int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
1290 				   union perf_event *event,
1291 				   struct perf_sample *sample,
1292 				   struct machine *machine)
1293 {
1294 	return machine__process_namespaces_event(machine, event, sample);
1295 }
1296 
1297 int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
1298 			     union perf_event *event,
1299 			     struct perf_sample *sample,
1300 			     struct machine *machine)
1301 {
1302 	return machine__process_lost_event(machine, event, sample);
1303 }
1304 
1305 int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
1306 			    union perf_event *event,
1307 			    struct perf_sample *sample __maybe_unused,
1308 			    struct machine *machine)
1309 {
1310 	return machine__process_aux_event(machine, event);
1311 }
1312 
1313 int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
1314 				     union perf_event *event,
1315 				     struct perf_sample *sample __maybe_unused,
1316 				     struct machine *machine)
1317 {
1318 	return machine__process_itrace_start_event(machine, event);
1319 }
1320 
1321 int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
1322 				     union perf_event *event,
1323 				     struct perf_sample *sample,
1324 				     struct machine *machine)
1325 {
1326 	return machine__process_lost_samples_event(machine, event, sample);
1327 }
1328 
1329 int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
1330 			       union perf_event *event,
1331 			       struct perf_sample *sample __maybe_unused,
1332 			       struct machine *machine)
1333 {
1334 	return machine__process_switch_event(machine, event);
1335 }
1336 
1337 int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
1338 				union perf_event *event,
1339 				struct perf_sample *sample __maybe_unused,
1340 				struct machine *machine)
1341 {
1342 	return machine__process_ksymbol(machine, event, sample);
1343 }
1344 
1345 int perf_event__process_bpf_event(struct perf_tool *tool __maybe_unused,
1346 				  union perf_event *event,
1347 				  struct perf_sample *sample __maybe_unused,
1348 				  struct machine *machine)
1349 {
1350 	return machine__process_bpf_event(machine, event, sample);
1351 }
1352 
1353 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
1354 {
1355 	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
1356 		       event->mmap.pid, event->mmap.tid, event->mmap.start,
1357 		       event->mmap.len, event->mmap.pgoff,
1358 		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
1359 		       event->mmap.filename);
1360 }
1361 
1362 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
1363 {
1364 	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
1365 			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
1366 		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
1367 		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
1368 		       event->mmap2.min, event->mmap2.ino,
1369 		       event->mmap2.ino_generation,
1370 		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
1371 		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
1372 		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
1373 		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
1374 		       event->mmap2.filename);
1375 }
1376 
1377 size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
1378 {
1379 	struct thread_map *threads = thread_map__new_event(&event->thread_map);
1380 	size_t ret;
1381 
1382 	ret = fprintf(fp, " nr: ");
1383 
1384 	if (threads)
1385 		ret += thread_map__fprintf(threads, fp);
1386 	else
1387 		ret += fprintf(fp, "failed to get threads from event\n");
1388 
1389 	thread_map__put(threads);
1390 	return ret;
1391 }
1392 
1393 size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
1394 {
1395 	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
1396 	size_t ret;
1397 
1398 	ret = fprintf(fp, ": ");
1399 
1400 	if (cpus)
1401 		ret += cpu_map__fprintf(cpus, fp);
1402 	else
1403 		ret += fprintf(fp, "failed to get cpumap from event\n");
1404 
1405 	cpu_map__put(cpus);
1406 	return ret;
1407 }
1408 
1409 int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
1410 			     union perf_event *event,
1411 			     struct perf_sample *sample,
1412 			     struct machine *machine)
1413 {
1414 	return machine__process_mmap_event(machine, event, sample);
1415 }
1416 
1417 int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
1418 			     union perf_event *event,
1419 			     struct perf_sample *sample,
1420 			     struct machine *machine)
1421 {
1422 	return machine__process_mmap2_event(machine, event, sample);
1423 }
1424 
1425 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
1426 {
1427 	return fprintf(fp, "(%d:%d):(%d:%d)\n",
1428 		       event->fork.pid, event->fork.tid,
1429 		       event->fork.ppid, event->fork.ptid);
1430 }
1431 
1432 int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
1433 			     union perf_event *event,
1434 			     struct perf_sample *sample,
1435 			     struct machine *machine)
1436 {
1437 	return machine__process_fork_event(machine, event, sample);
1438 }
1439 
1440 int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
1441 			     union perf_event *event,
1442 			     struct perf_sample *sample,
1443 			     struct machine *machine)
1444 {
1445 	return machine__process_exit_event(machine, event, sample);
1446 }
1447 
1448 size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
1449 {
1450 	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s%s]\n",
1451 		       event->aux.aux_offset, event->aux.aux_size,
1452 		       event->aux.flags,
1453 		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
1454 		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
1455 		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
1456 }
1457 
1458 size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
1459 {
1460 	return fprintf(fp, " pid: %u tid: %u\n",
1461 		       event->itrace_start.pid, event->itrace_start.tid);
1462 }
1463 
1464 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
1465 {
1466 	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1467 	const char *in_out = !out ? "IN         " :
1468 		!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
1469 				    "OUT        " : "OUT preempt";
1470 
1471 	if (event->header.type == PERF_RECORD_SWITCH)
1472 		return fprintf(fp, " %s\n", in_out);
1473 
1474 	return fprintf(fp, " %s  %s pid/tid: %5u/%-5u\n",
1475 		       in_out, out ? "next" : "prev",
1476 		       event->context_switch.next_prev_pid,
1477 		       event->context_switch.next_prev_tid);
1478 }
1479 
1480 static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
1481 {
1482 	return fprintf(fp, " lost %" PRIu64 "\n", event->lost.lost);
1483 }
1484 
1485 size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
1486 {
1487 	return fprintf(fp, " addr %" PRIx64 " len %u type %u flags 0x%x name %s\n",
1488 		       event->ksymbol_event.addr, event->ksymbol_event.len,
1489 		       event->ksymbol_event.ksym_type,
1490 		       event->ksymbol_event.flags, event->ksymbol_event.name);
1491 }
1492 
1493 size_t perf_event__fprintf_bpf_event(union perf_event *event, FILE *fp)
1494 {
1495 	return fprintf(fp, " type %u, flags %u, id %u\n",
1496 		       event->bpf_event.type, event->bpf_event.flags,
1497 		       event->bpf_event.id);
1498 }
1499 
1500 size_t perf_event__fprintf(union perf_event *event, FILE *fp)
1501 {
1502 	size_t ret = fprintf(fp, "PERF_RECORD_%s",
1503 			     perf_event__name(event->header.type));
1504 
1505 	switch (event->header.type) {
1506 	case PERF_RECORD_COMM:
1507 		ret += perf_event__fprintf_comm(event, fp);
1508 		break;
1509 	case PERF_RECORD_FORK:
1510 	case PERF_RECORD_EXIT:
1511 		ret += perf_event__fprintf_task(event, fp);
1512 		break;
1513 	case PERF_RECORD_MMAP:
1514 		ret += perf_event__fprintf_mmap(event, fp);
1515 		break;
1516 	case PERF_RECORD_NAMESPACES:
1517 		ret += perf_event__fprintf_namespaces(event, fp);
1518 		break;
1519 	case PERF_RECORD_MMAP2:
1520 		ret += perf_event__fprintf_mmap2(event, fp);
1521 		break;
1522 	case PERF_RECORD_AUX:
1523 		ret += perf_event__fprintf_aux(event, fp);
1524 		break;
1525 	case PERF_RECORD_ITRACE_START:
1526 		ret += perf_event__fprintf_itrace_start(event, fp);
1527 		break;
1528 	case PERF_RECORD_SWITCH:
1529 	case PERF_RECORD_SWITCH_CPU_WIDE:
1530 		ret += perf_event__fprintf_switch(event, fp);
1531 		break;
1532 	case PERF_RECORD_LOST:
1533 		ret += perf_event__fprintf_lost(event, fp);
1534 		break;
1535 	case PERF_RECORD_KSYMBOL:
1536 		ret += perf_event__fprintf_ksymbol(event, fp);
1537 		break;
1538 	case PERF_RECORD_BPF_EVENT:
1539 		ret += perf_event__fprintf_bpf_event(event, fp);
1540 		break;
1541 	default:
1542 		ret += fprintf(fp, "\n");
1543 	}
1544 
1545 	return ret;
1546 }
1547 
1548 int perf_event__process(struct perf_tool *tool __maybe_unused,
1549 			union perf_event *event,
1550 			struct perf_sample *sample,
1551 			struct machine *machine)
1552 {
1553 	return machine__process_event(machine, event, sample);
1554 }
1555 
1556 struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
1557 			     struct addr_location *al)
1558 {
1559 	struct map_groups *mg = thread->mg;
1560 	struct machine *machine = mg->machine;
1561 	bool load_map = false;
1562 
1563 	al->machine = machine;
1564 	al->thread = thread;
1565 	al->addr = addr;
1566 	al->cpumode = cpumode;
1567 	al->filtered = 0;
1568 
1569 	if (machine == NULL) {
1570 		al->map = NULL;
1571 		return NULL;
1572 	}
1573 
1574 	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
1575 		al->level = 'k';
1576 		mg = &machine->kmaps;
1577 		load_map = true;
1578 	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
1579 		al->level = '.';
1580 	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
1581 		al->level = 'g';
1582 		mg = &machine->kmaps;
1583 		load_map = true;
1584 	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
1585 		al->level = 'u';
1586 	} else {
1587 		al->level = 'H';
1588 		al->map = NULL;
1589 
1590 		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
1591 			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
1592 			!perf_guest)
1593 			al->filtered |= (1 << HIST_FILTER__GUEST);
1594 		if ((cpumode == PERF_RECORD_MISC_USER ||
1595 			cpumode == PERF_RECORD_MISC_KERNEL) &&
1596 			!perf_host)
1597 			al->filtered |= (1 << HIST_FILTER__HOST);
1598 
1599 		return NULL;
1600 	}
1601 
1602 	al->map = map_groups__find(mg, al->addr);
1603 	if (al->map != NULL) {
1604 		/*
1605 		 * Kernel maps might be changed when loading symbols so loading
1606 		 * must be done prior to using kernel maps.
1607 		 */
1608 		if (load_map)
1609 			map__load(al->map);
1610 		al->addr = al->map->map_ip(al->map, al->addr);
1611 	}
1612 
1613 	return al->map;
1614 }
1615 
1616 /*
1617  * For branch stacks or branch samples, the sample cpumode might not be correct
1618  * because it applies only to the sample 'ip' and not necessarily to 'addr' or
1619  * branch stack addresses. If possible, use a fallback to deal with those cases.
1620  */
1621 struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
1622 				struct addr_location *al)
1623 {
1624 	struct map *map = thread__find_map(thread, cpumode, addr, al);
1625 	struct machine *machine = thread->mg->machine;
1626 	u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
1627 
1628 	if (map || addr_cpumode == cpumode)
1629 		return map;
1630 
1631 	return thread__find_map(thread, addr_cpumode, addr, al);
1632 }
1633 
1634 struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
1635 				   u64 addr, struct addr_location *al)
1636 {
1637 	al->sym = NULL;
1638 	if (thread__find_map(thread, cpumode, addr, al))
1639 		al->sym = map__find_symbol(al->map, al->addr);
1640 	return al->sym;
1641 }
1642 
1643 struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
1644 				      u64 addr, struct addr_location *al)
1645 {
1646 	al->sym = NULL;
1647 	if (thread__find_map_fb(thread, cpumode, addr, al))
1648 		al->sym = map__find_symbol(al->map, al->addr);
1649 	return al->sym;
1650 }
1651 
1652 /*
1653  * Callers need to drop the reference to al->thread, obtained in
1654  * machine__findnew_thread()
1655  */
1656 int machine__resolve(struct machine *machine, struct addr_location *al,
1657 		     struct perf_sample *sample)
1658 {
1659 	struct thread *thread = machine__findnew_thread(machine, sample->pid,
1660 							sample->tid);
1661 
1662 	if (thread == NULL)
1663 		return -1;
1664 
1665 	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
1666 	thread__find_map(thread, sample->cpumode, sample->ip, al);
1667 	dump_printf(" ...... dso: %s\n",
1668 		    al->map ? al->map->dso->long_name :
1669 			al->level == 'H' ? "[hypervisor]" : "<not found>");
1670 
1671 	if (thread__is_filtered(thread))
1672 		al->filtered |= (1 << HIST_FILTER__THREAD);
1673 
1674 	al->sym = NULL;
1675 	al->cpu = sample->cpu;
1676 	al->socket = -1;
1677 	al->srcline = NULL;
1678 
1679 	if (al->cpu >= 0) {
1680 		struct perf_env *env = machine->env;
1681 
1682 		if (env && env->cpu)
1683 			al->socket = env->cpu[al->cpu].socket_id;
1684 	}
1685 
1686 	if (al->map) {
1687 		struct dso *dso = al->map->dso;
1688 
1689 		if (symbol_conf.dso_list &&
1690 		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
1691 						  dso->short_name) ||
1692 			       (dso->short_name != dso->long_name &&
1693 				strlist__has_entry(symbol_conf.dso_list,
1694 						   dso->long_name))))) {
1695 			al->filtered |= (1 << HIST_FILTER__DSO);
1696 		}
1697 
1698 		al->sym = map__find_symbol(al->map, al->addr);
1699 	}
1700 
1701 	if (symbol_conf.sym_list &&
1702 		(!al->sym || !strlist__has_entry(symbol_conf.sym_list,
1703 						al->sym->name))) {
1704 		al->filtered |= (1 << HIST_FILTER__SYMBOL);
1705 	}
1706 
1707 	return 0;
1708 }
1709 
1710 /*
1711  * The preprocess_sample method returns with reference counts held on the
1712  * entries in the addr_location (the thread, for instance); when done using
1713  * them (and perhaps after taking refs to keep a pointer to one of those
1714  * entries), pair it with addr_location__put() so the refcounts are dropped.
1715  */
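/*
 * Typical pairing (illustrative):
 *
 *	struct addr_location al;
 *
 *	if (machine__resolve(machine, &al, sample) < 0)
 *		return -1;
 *	... use al.thread, al.map, al.sym ...
 *	addr_location__put(&al);
 */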
1716 void addr_location__put(struct addr_location *al)
1717 {
1718 	thread__zput(al->thread);
1719 }
1720 
1721 bool is_bts_event(struct perf_event_attr *attr)
1722 {
1723 	return attr->type == PERF_TYPE_HARDWARE &&
1724 	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1725 	       attr->sample_period == 1;
1726 }
1727 
1728 bool sample_addr_correlates_sym(struct perf_event_attr *attr)
1729 {
1730 	if (attr->type == PERF_TYPE_SOFTWARE &&
1731 	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
1732 	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
1733 	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
1734 		return true;
1735 
1736 	if (is_bts_event(attr))
1737 		return true;
1738 
1739 	return false;
1740 }
1741 
1742 void thread__resolve(struct thread *thread, struct addr_location *al,
1743 		     struct perf_sample *sample)
1744 {
1745 	thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
1746 
1747 	al->cpu = sample->cpu;
1748 	al->sym = NULL;
1749 
1750 	if (al->map)
1751 		al->sym = map__find_symbol(al->map, al->addr);
1752 }
1753