xref: /linux/tools/perf/util/synthetic-events.c (revision f4f346c3465949ebba80c6cc52cd8d2eeaa545fd)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #include "util/cgroup.h"
4 #include "util/data.h"
5 #include "util/debug.h"
6 #include "util/dso.h"
7 #include "util/event.h"
8 #include "util/evlist.h"
9 #include "util/machine.h"
10 #include "util/map.h"
11 #include "util/map_symbol.h"
12 #include "util/branch.h"
13 #include "util/memswap.h"
14 #include "util/namespaces.h"
15 #include "util/session.h"
16 #include "util/stat.h"
17 #include "util/symbol.h"
18 #include "util/synthetic-events.h"
19 #include "util/target.h"
20 #include "util/time-utils.h"
21 #include <linux/bitops.h>
22 #include <linux/kernel.h>
23 #include <linux/string.h>
24 #include <linux/zalloc.h>
25 #include <linux/perf_event.h>
26 #include <asm/bug.h>
27 #include <perf/evsel.h>
28 #include <perf/cpumap.h>
29 #include <internal/lib.h> // page_size
30 #include <internal/threadmap.h>
31 #include <perf/threadmap.h>
32 #include <symbol/kallsyms.h>
33 #include <dirent.h>
34 #include <errno.h>
35 #include <inttypes.h>
36 #include <stdio.h>
37 #include <string.h>
38 #include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
39 #include <api/fs/fs.h>
40 #include <api/io.h>
41 #include <api/io_dir.h>
42 #include <sys/types.h>
43 #include <sys/stat.h>
44 #include <fcntl.h>
45 #include <unistd.h>
46 
47 #define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500
48 
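/*
 * Time limit, in milliseconds, for parsing one task's /proc/<pid>/maps;
 * it can be raised on the command line with --proc-map-timeout.
 */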
49 unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
50 
51 int perf_tool__process_synth_event(const struct perf_tool *tool,
52 				   union perf_event *event,
53 				   struct machine *machine,
54 				   perf_event__handler_t process)
55 {
56 	struct perf_sample synth_sample = {
57 		.pid	   = -1,
58 		.tid	   = -1,
59 		.time	   = -1,
60 		.stream_id = -1,
61 		.cpu	   = -1,
62 		.period	   = 1,
63 		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
64 	};
65 
66 	return process(tool, event, &synth_sample, machine);
67 }
68 
69 /*
70  * Assumes that the first 4095 bytes of /proc/pid/status contain
71  * the comm, tgid and ppid.
72  */
73 static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
74 				    pid_t *tgid, pid_t *ppid, bool *kernel)
75 {
76 	char bf[4096];
77 	int fd;
78 	size_t size = 0;
79 	ssize_t n;
80 	char *name, *tgids, *ppids, *vmpeak, *threads;
81 
82 	*tgid = -1;
83 	*ppid = -1;
84 
85 	if (pid)
86 		snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
87 	else
88 		snprintf(bf, sizeof(bf), "/proc/%d/status", tid);
89 
90 	fd = open(bf, O_RDONLY);
91 	if (fd < 0) {
92 		pr_debug("couldn't open %s\n", bf);
93 		return -1;
94 	}
95 
96 	n = read(fd, bf, sizeof(bf) - 1);
97 	close(fd);
98 	if (n <= 0) {
99 		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
100 			   tid);
101 		return -1;
102 	}
103 	bf[n] = '\0';
104 
105 	name = strstr(bf, "Name:");
106 	tgids = strstr(name ?: bf, "Tgid:");
107 	ppids = strstr(tgids ?: bf, "PPid:");
108 	vmpeak = strstr(ppids ?: bf, "VmPeak:");
109 
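	/*
	 * /proc/<pid>/status of a normal user task contains lines such as:
	 *
	 *   Name:   cat
	 *   Tgid:   1234
	 *   PPid:   1000
	 *   VmPeak:     8456 kB
	 *   Threads:        1
	 *
	 * Kernel threads have no "VmPeak:" line but do have a "Threads:"
	 * line, so a task with Threads: but without VmPeak: is flagged as
	 * a kernel thread below.
	 */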
110 	if (vmpeak)
111 		threads = NULL;
112 	else
113 		threads = strstr(ppids ?: bf, "Threads:");
114 
115 	if (name) {
116 		char *nl;
117 
118 		name = skip_spaces(name + 5);  /* strlen("Name:") */
119 		nl = strchr(name, '\n');
120 		if (nl)
121 			*nl = '\0';
122 
123 		size = strlen(name);
124 		if (size >= len)
125 			size = len - 1;
126 		memcpy(comm, name, size);
127 		comm[size] = '\0';
128 	} else {
129 		pr_debug("Name: string not found for pid %d\n", tid);
130 	}
131 
132 	if (tgids) {
133 		tgids += 5;  /* strlen("Tgid:") */
134 		*tgid = atoi(tgids);
135 	} else {
136 		pr_debug("Tgid: string not found for pid %d\n", tid);
137 	}
138 
139 	if (ppids) {
140 		ppids += 5;  /* strlen("PPid:") */
141 		*ppid = atoi(ppids);
142 	} else {
143 		pr_debug("PPid: string not found for pid %d\n", tid);
144 	}
145 
146 	if (!vmpeak && threads)
147 		*kernel = true;
148 	else
149 		*kernel = false;
150 
151 	return 0;
152 }
153 
154 static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
155 				    struct machine *machine,
156 				    pid_t *tgid, pid_t *ppid, bool *kernel)
157 {
158 	size_t size;
159 
160 	*ppid = -1;
161 
162 	memset(&event->comm, 0, sizeof(event->comm));
163 
164 	if (machine__is_host(machine)) {
165 		if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
166 					     sizeof(event->comm.comm),
167 					     tgid, ppid, kernel) != 0) {
168 			return -1;
169 		}
170 	} else {
171 		*tgid = machine->pid;
172 	}
173 
174 	if (*tgid < 0)
175 		return -1;
176 
177 	event->comm.pid = *tgid;
178 	event->comm.header.type = PERF_RECORD_COMM;
179 
180 	size = strlen(event->comm.comm) + 1;
181 	size = PERF_ALIGN(size, sizeof(u64));
182 	memset(event->comm.comm + size, 0, machine->id_hdr_size);
183 	event->comm.header.size = (sizeof(event->comm) -
184 				(sizeof(event->comm.comm) - size) +
185 				machine->id_hdr_size);
186 	event->comm.tid = tid;
187 
188 	return 0;
189 }
190 
191 pid_t perf_event__synthesize_comm(const struct perf_tool *tool,
192 					 union perf_event *event, pid_t pid,
193 					 perf_event__handler_t process,
194 					 struct machine *machine)
195 {
196 	pid_t tgid, ppid;
197 	bool kernel_thread;
198 
199 	if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
200 				     &kernel_thread) != 0)
201 		return -1;
202 
203 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
204 		return -1;
205 
206 	return tgid;
207 }
208 
209 static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
210 					 struct perf_ns_link_info *ns_link_info)
211 {
212 	struct stat64 st;
213 	char proc_ns[128];
214 
215 	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
216 	if (stat64(proc_ns, &st) == 0) {
217 		ns_link_info->dev = st.st_dev;
218 		ns_link_info->ino = st.st_ino;
219 	}
220 }
221 
222 int perf_event__synthesize_namespaces(const struct perf_tool *tool,
223 				      union perf_event *event,
224 				      pid_t pid, pid_t tgid,
225 				      perf_event__handler_t process,
226 				      struct machine *machine)
227 {
228 	u32 idx;
229 	struct perf_ns_link_info *ns_link_info;
230 
231 	if (!tool || !tool->namespace_events)
232 		return 0;
233 
234 	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
235 	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
236 	       machine->id_hdr_size));
237 
238 	event->namespaces.pid = tgid;
239 	event->namespaces.tid = pid;
240 
241 	event->namespaces.nr_namespaces = NR_NAMESPACES;
242 
243 	ns_link_info = event->namespaces.link_info;
244 
245 	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
246 		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
247 					     &ns_link_info[idx]);
248 
249 	event->namespaces.header.type = PERF_RECORD_NAMESPACES;
250 
251 	event->namespaces.header.size = (sizeof(event->namespaces) +
252 			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
253 			machine->id_hdr_size);
254 
255 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
256 		return -1;
257 
258 	return 0;
259 }
260 
261 static int perf_event__synthesize_fork(const struct perf_tool *tool,
262 				       union perf_event *event,
263 				       pid_t pid, pid_t tgid, pid_t ppid,
264 				       perf_event__handler_t process,
265 				       struct machine *machine)
266 {
267 	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
268 
269 	/*
270 	 * For the main thread, set the parent to the ppid from the status
271 	 * file. For other threads, set the parent pid to the main thread,
272 	 * i.e. assume the main thread spawns all threads in a process.
273 	 */
274 	if (tgid == pid) {
275 		event->fork.ppid = ppid;
276 		event->fork.ptid = ppid;
277 	} else {
278 		event->fork.ppid = tgid;
279 		event->fork.ptid = tgid;
280 	}
281 	event->fork.pid  = tgid;
282 	event->fork.tid  = pid;
283 	event->fork.header.type = PERF_RECORD_FORK;
284 	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
285 
286 	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
287 
288 	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
289 		return -1;
290 
291 	return 0;
292 }
293 
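/*
 * Parse a single line of /proc/<pid>/maps, e.g.:
 *
 *   00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat
 *
 * into start/end addresses, protection and mapping flags, file offset,
 * device major:minor numbers, inode and (possibly empty) pathname.
 * Returns false if the line cannot be parsed.
 */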
294 static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
295 				u32 *prot, u32 *flags, __u64 *offset,
296 				u32 *maj, u32 *min,
297 				__u64 *inode,
298 				ssize_t pathname_size, char *pathname)
299 {
300 	__u64 temp;
301 	int ch;
302 	char *start_pathname = pathname;
303 
304 	if (io__get_hex(io, start) != '-')
305 		return false;
306 	if (io__get_hex(io, end) != ' ')
307 		return false;
308 
309 	/* map protection and flags bits */
310 	*prot = 0;
311 	ch = io__get_char(io);
312 	if (ch == 'r')
313 		*prot |= PROT_READ;
314 	else if (ch != '-')
315 		return false;
316 	ch = io__get_char(io);
317 	if (ch == 'w')
318 		*prot |= PROT_WRITE;
319 	else if (ch != '-')
320 		return false;
321 	ch = io__get_char(io);
322 	if (ch == 'x')
323 		*prot |= PROT_EXEC;
324 	else if (ch != '-')
325 		return false;
326 	ch = io__get_char(io);
327 	if (ch == 's')
328 		*flags = MAP_SHARED;
329 	else if (ch == 'p')
330 		*flags = MAP_PRIVATE;
331 	else
332 		return false;
333 	if (io__get_char(io) != ' ')
334 		return false;
335 
336 	if (io__get_hex(io, offset) != ' ')
337 		return false;
338 
339 	if (io__get_hex(io, &temp) != ':')
340 		return false;
341 	*maj = temp;
342 	if (io__get_hex(io, &temp) != ' ')
343 		return false;
344 	*min = temp;
345 
346 	ch = io__get_dec(io, inode);
347 	if (ch != ' ') {
348 		*pathname = '\0';
349 		return ch == '\n';
350 	}
351 	do {
352 		ch = io__get_char(io);
353 	} while (ch == ' ');
354 	while (true) {
355 		if (ch < 0)
356 			return false;
357 		if (ch == '\0' || ch == '\n' ||
358 		    (pathname + 1 - start_pathname) >= pathname_size) {
359 			*pathname = '\0';
360 			return true;
361 		}
362 		*pathname++ = ch;
363 		ch = io__get_char(io);
364 	}
365 }
366 
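/*
 * Fill in the build ID carried by an MMAP2 record: kernel maps read it
 * from /sys/kernel/notes, other maps look up the DSO (reusing a cached
 * build ID when present) and fall back to reading the build ID from the
 * mapped file inside the task's mount namespace.  On success the record
 * is flagged with PERF_RECORD_MISC_MMAP_BUILD_ID.
 */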
367 static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
368 					     struct machine *machine,
369 					     bool is_kernel)
370 {
371 	struct build_id bid = { .size = 0, };
372 	struct nsinfo *nsi;
373 	struct nscookie nc;
374 	struct dso *dso = NULL;
375 	struct dso_id dso_id = dso_id_empty;
376 	int rc;
377 
378 	if (is_kernel) {
379 		rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
380 		goto out;
381 	}
382 
383 	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
384 		build_id__init(&dso_id.build_id, event->build_id, event->build_id_size);
385 	} else {
386 		dso_id.maj = event->maj;
387 		dso_id.min = event->min;
388 		dso_id.ino = event->ino;
389 		dso_id.ino_generation = event->ino_generation;
390 		dso_id.mmap2_valid = true;
391 		dso_id.mmap2_ino_generation_valid = true;
392 	}
393 
394 	dso = dsos__findnew_id(&machine->dsos, event->filename, &dso_id);
395 	if (dso && dso__has_build_id(dso)) {
396 		bid = *dso__bid(dso);
397 		rc = 0;
398 		goto out;
399 	}
400 
401 	nsi = nsinfo__new(event->pid);
402 	nsinfo__mountns_enter(nsi, &nc);
403 
404 	rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;
405 
406 	nsinfo__mountns_exit(&nc);
407 	nsinfo__put(nsi);
408 
409 out:
410 	if (rc == 0) {
411 		memcpy(event->build_id, bid.data, sizeof(bid.data));
412 		event->build_id_size = (u8) bid.size;
413 		event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
414 		event->__reserved_1 = 0;
415 		event->__reserved_2 = 0;
416 
417 		if (dso && !dso__has_build_id(dso))
418 			dso__set_build_id(dso, &bid);
419 	} else {
420 		if (event->filename[0] == '/') {
421 			pr_debug2("Failed to read build ID for %s\n",
422 				  event->filename);
423 		}
424 	}
425 	dso__put(dso);
426 }
427 
428 int perf_event__synthesize_mmap_events(const struct perf_tool *tool,
429 				       union perf_event *event,
430 				       pid_t pid, pid_t tgid,
431 				       perf_event__handler_t process,
432 				       struct machine *machine,
433 				       bool mmap_data)
434 {
435 	unsigned long long t;
436 	char bf[BUFSIZ];
437 	struct io io;
438 	bool truncation = false;
439 	unsigned long long timeout = proc_map_timeout * 1000000ULL;
440 	int rc = 0;
441 	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
442 	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;
443 
444 	if (machine__is_default_guest(machine))
445 		return 0;
446 
447 	snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
448 		machine->root_dir, pid, pid);
449 
450 	io.fd = open(bf, O_RDONLY, 0);
451 	if (io.fd < 0) {
452 		/*
453 		 * We raced with a task exiting - just return:
454 		 */
455 		pr_debug("couldn't open %s\n", bf);
456 		return -1;
457 	}
458 	io__init(&io, io.fd, bf, sizeof(bf));
459 
460 	event->header.type = PERF_RECORD_MMAP2;
461 	t = rdclock();
462 
463 	while (!io.eof) {
464 		static const char anonstr[] = "//anon";
465 		size_t size, aligned_size;
466 
467 		/* ensure null termination since stack will be reused. */
468 		event->mmap2.filename[0] = '\0';
469 
470 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
471 		if (!read_proc_maps_line(&io,
472 					&event->mmap2.start,
473 					&event->mmap2.len,
474 					&event->mmap2.prot,
475 					&event->mmap2.flags,
476 					&event->mmap2.pgoff,
477 					&event->mmap2.maj,
478 					&event->mmap2.min,
479 					&event->mmap2.ino,
480 					sizeof(event->mmap2.filename),
481 					event->mmap2.filename))
482 			continue;
483 
484 		if ((rdclock() - t) > timeout) {
485 			pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
486 				   "You may want to increase "
487 				   "the time limit with --proc-map-timeout\n",
488 				   machine->root_dir, pid, pid);
489 			truncation = true;
490 			goto out;
491 		}
492 
493 		event->mmap2.ino_generation = 0;
494 
495 		/*
496 		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
497 		 */
498 		if (machine__is_host(machine))
499 			event->header.misc = PERF_RECORD_MISC_USER;
500 		else
501 			event->header.misc = PERF_RECORD_MISC_GUEST_USER;
502 
503 		if ((event->mmap2.prot & PROT_EXEC) == 0) {
504 			if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
505 				continue;
506 
507 			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
508 		}
509 
510 out:
511 		if (truncation)
512 			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
513 
514 		if (!strcmp(event->mmap2.filename, ""))
515 			strcpy(event->mmap2.filename, anonstr);
516 
517 		if (hugetlbfs_mnt_len &&
518 		    !strncmp(event->mmap2.filename, hugetlbfs_mnt,
519 			     hugetlbfs_mnt_len)) {
520 			strcpy(event->mmap2.filename, anonstr);
521 			event->mmap2.flags |= MAP_HUGETLB;
522 		}
523 
524 		size = strlen(event->mmap2.filename) + 1;
525 		aligned_size = PERF_ALIGN(size, sizeof(u64));
526 		event->mmap2.len -= event->mmap2.start;
527 		event->mmap2.header.size = (sizeof(event->mmap2) -
528 					(sizeof(event->mmap2.filename) - aligned_size));
529 		memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
530 			(aligned_size - size));
531 		event->mmap2.header.size += machine->id_hdr_size;
532 		event->mmap2.pid = tgid;
533 		event->mmap2.tid = pid;
534 
535 		if (!symbol_conf.no_buildid_mmap2)
536 			perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
537 
538 		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
539 			rc = -1;
540 			break;
541 		}
542 
543 		if (truncation)
544 			break;
545 	}
546 
547 	close(io.fd);
548 	return rc;
549 }
550 
551 #ifdef HAVE_FILE_HANDLE
552 static int perf_event__synthesize_cgroup(const struct perf_tool *tool,
553 					 union perf_event *event,
554 					 char *path, size_t mount_len,
555 					 perf_event__handler_t process,
556 					 struct machine *machine)
557 {
558 	size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
559 	size_t path_len = strlen(path) - mount_len + 1;
560 	struct {
561 		struct file_handle fh;
562 		uint64_t cgroup_id;
563 	} handle;
564 	int mount_id;
565 
566 	while (path_len % sizeof(u64))
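	/* Pad the path with NUL bytes so its length is a multiple of u64. */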
567 		path[mount_len + path_len++] = '\0';
568 
569 	memset(&event->cgroup, 0, event_size);
570 
571 	event->cgroup.header.type = PERF_RECORD_CGROUP;
572 	event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;
573 
574 	handle.fh.handle_bytes = sizeof(handle.cgroup_id);
575 	if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
576 		pr_debug("stat failed: %s\n", path);
577 		return -1;
578 	}
579 
580 	event->cgroup.id = handle.cgroup_id;
581 	strncpy(event->cgroup.path, path + mount_len, path_len);
582 	memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);
583 
584 	if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
585 		pr_debug("process synth event failed\n");
586 		return -1;
587 	}
588 
589 	return 0;
590 }
591 
592 static int perf_event__walk_cgroup_tree(const struct perf_tool *tool,
593 					union perf_event *event,
594 					char *path, size_t mount_len,
595 					perf_event__handler_t process,
596 					struct machine *machine)
597 {
598 	size_t pos = strlen(path);
599 	DIR *d;
600 	struct dirent *dent;
601 	int ret = 0;
602 
603 	if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
604 					  process, machine) < 0)
605 		return -1;
606 
607 	d = opendir(path);
608 	if (d == NULL) {
609 		pr_debug("failed to open directory: %s\n", path);
610 		return -1;
611 	}
612 
613 	while ((dent = readdir(d)) != NULL) {
614 		if (dent->d_type != DT_DIR)
615 			continue;
616 		if (!strcmp(dent->d_name, ".") ||
617 		    !strcmp(dent->d_name, ".."))
618 			continue;
619 
620 		/* any sane path should be less than PATH_MAX */
621 		if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
622 			continue;
623 
624 		if (path[pos - 1] != '/')
625 			strcat(path, "/");
626 		strcat(path, dent->d_name);
627 
628 		ret = perf_event__walk_cgroup_tree(tool, event, path,
629 						   mount_len, process, machine);
630 		if (ret < 0)
631 			break;
632 
633 		path[pos] = '\0';
634 	}
635 
636 	closedir(d);
637 	return ret;
638 }
639 
640 int perf_event__synthesize_cgroups(const struct perf_tool *tool,
641 				   perf_event__handler_t process,
642 				   struct machine *machine)
643 {
644 	union perf_event event;
645 	char cgrp_root[PATH_MAX];
646 	size_t mount_len;  /* length of mount point in the path */
647 
648 	if (!tool || !tool->cgroup_events)
649 		return 0;
650 
651 	if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
652 		pr_debug("cannot find cgroup mount point\n");
653 		return -1;
654 	}
655 
656 	mount_len = strlen(cgrp_root);
657 	/* make sure the path starts with a slash (after mount point) */
658 	strcat(cgrp_root, "/");
659 
660 	if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
661 					 process, machine) < 0)
662 		return -1;
663 
664 	return 0;
665 }
666 #else
667 int perf_event__synthesize_cgroups(const struct perf_tool *tool __maybe_unused,
668 				   perf_event__handler_t process __maybe_unused,
669 				   struct machine *machine __maybe_unused)
670 {
671 	return -1;
672 }
673 #endif
674 
675 struct perf_event__synthesize_modules_maps_cb_args {
676 	const struct perf_tool *tool;
677 	perf_event__handler_t process;
678 	struct machine *machine;
679 	union perf_event *event;
680 };
681 
682 static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data)
683 {
684 	struct perf_event__synthesize_modules_maps_cb_args *args = data;
685 	union perf_event *event = args->event;
686 	struct dso *dso;
687 	size_t size;
688 
689 	if (!__map__is_kmodule(map))
690 		return 0;
691 
692 	dso = map__dso(map);
693 	if (!symbol_conf.no_buildid_mmap2) {
694 		size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64));
695 		event->mmap2.header.type = PERF_RECORD_MMAP2;
696 		event->mmap2.header.size = (sizeof(event->mmap2) -
697 					(sizeof(event->mmap2.filename) - size));
698 		memset(event->mmap2.filename + size, 0, args->machine->id_hdr_size);
699 		event->mmap2.header.size += args->machine->id_hdr_size;
700 		event->mmap2.start = map__start(map);
701 		event->mmap2.len   = map__size(map);
702 		event->mmap2.pid   = args->machine->pid;
703 
704 		memcpy(event->mmap2.filename, dso__long_name(dso), dso__long_name_len(dso) + 1);
705 
706 		perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false);
707 	} else {
708 		size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64));
709 		event->mmap.header.type = PERF_RECORD_MMAP;
710 		event->mmap.header.size = (sizeof(event->mmap) -
711 					(sizeof(event->mmap.filename) - size));
712 		memset(event->mmap.filename + size, 0, args->machine->id_hdr_size);
713 		event->mmap.header.size += args->machine->id_hdr_size;
714 		event->mmap.start = map__start(map);
715 		event->mmap.len   = map__size(map);
716 		event->mmap.pid   = args->machine->pid;
717 
718 		memcpy(event->mmap.filename, dso__long_name(dso), dso__long_name_len(dso) + 1);
719 	}
720 
721 	if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0)
722 		return -1;
723 
724 	return 0;
725 }
726 
727 int perf_event__synthesize_modules(const struct perf_tool *tool, perf_event__handler_t process,
728 				   struct machine *machine)
729 {
730 	int rc;
731 	struct maps *maps = machine__kernel_maps(machine);
732 	struct perf_event__synthesize_modules_maps_cb_args args = {
733 		.tool = tool,
734 		.process = process,
735 		.machine = machine,
736 	};
737 	size_t size = symbol_conf.no_buildid_mmap2
738 		? sizeof(args.event->mmap)
739 		: sizeof(args.event->mmap2);
740 
741 	args.event = zalloc(size + machine->id_hdr_size);
742 	if (args.event == NULL) {
743 		pr_debug("Not enough memory synthesizing mmap event "
744 			 "for kernel modules\n");
745 		return -1;
746 	}
747 
748 	/*
749 	 * kernel uses 0 for user space maps, see kernel/perf_event.c
750 	 * __perf_event_mmap
751 	 */
752 	if (machine__is_host(machine))
753 		args.event->header.misc = PERF_RECORD_MISC_KERNEL;
754 	else
755 		args.event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
756 
757 	rc = maps__for_each_map(maps, perf_event__synthesize_modules_maps_cb, &args);
758 
759 	free(args.event);
760 	return rc;
761 }
762 
763 static int filter_task(const struct dirent *dirent)
764 {
765 	return isdigit(dirent->d_name[0]);
766 }
767 
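/*
 * Synthesize events for a single task.  With full == 0 only a COMM event
 * (plus NAMESPACES, and MMAP events for the thread group leader) is sent
 * for @pid; with full != 0 every thread under /proc/<pid>/task gets FORK,
 * NAMESPACES and COMM events, and the group leader additionally gets its
 * MMAP events.
 */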
768 static int __event__synthesize_thread(union perf_event *comm_event,
769 				      union perf_event *mmap_event,
770 				      union perf_event *fork_event,
771 				      union perf_event *namespaces_event,
772 				      pid_t pid, int full, perf_event__handler_t process,
773 				      const struct perf_tool *tool, struct machine *machine,
774 				      bool needs_mmap, bool mmap_data)
775 {
776 	char filename[PATH_MAX];
777 	struct io_dir iod;
778 	struct io_dirent64 *dent;
779 	pid_t tgid, ppid;
780 	int rc = 0;
781 
782 	/* special case: only send one comm event using passed in pid */
783 	if (!full) {
784 		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
785 						   process, machine);
786 
787 		if (tgid == -1)
788 			return -1;
789 
790 		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
791 						      tgid, process, machine) < 0)
792 			return -1;
793 
794 		/*
795 		 * send mmap only for thread group leader
796 		 * see thread__init_maps()
797 		 */
798 		if (pid == tgid && needs_mmap &&
799 		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
800 						       process, machine, mmap_data))
801 			return -1;
802 
803 		return 0;
804 	}
805 
806 	if (machine__is_default_guest(machine))
807 		return 0;
808 
809 	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
810 		 machine->root_dir, pid);
811 
812 	io_dir__init(&iod, open(filename, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
813 	if (iod.dirfd < 0)
814 		return -1;
815 
816 	while ((dent = io_dir__readdir(&iod)) != NULL) {
817 		char *end;
818 		pid_t _pid;
819 		bool kernel_thread = false;
820 
821 		if (!isdigit(dent->d_name[0]))
822 			continue;
823 
824 		_pid = strtol(dent->d_name, &end, 10);
825 		if (*end)
826 			continue;
827 
828 		/* Some threads may exit just after the scan; ignore them. */
829 		if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
830 					     &tgid, &ppid, &kernel_thread) != 0)
831 			continue;
832 
833 		rc = -1;
834 		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
835 						ppid, process, machine) < 0)
836 			break;
837 
838 		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
839 						      tgid, process, machine) < 0)
840 			break;
841 
842 		/*
843 		 * Send the prepared comm event
844 		 */
845 		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
846 			break;
847 
848 		rc = 0;
849 		if (_pid == pid && !kernel_thread && needs_mmap) {
850 			/* process the parent's maps too */
851 			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
852 						process, machine, mmap_data);
853 			if (rc)
854 				break;
855 		}
856 	}
857 
858 	close(iod.dirfd);
859 
860 	return rc;
861 }
862 
863 int perf_event__synthesize_thread_map(const struct perf_tool *tool,
864 				      struct perf_thread_map *threads,
865 				      perf_event__handler_t process,
866 				      struct machine *machine,
867 				      bool needs_mmap, bool mmap_data)
868 {
869 	union perf_event *comm_event, *mmap_event, *fork_event;
870 	union perf_event *namespaces_event;
871 	int err = -1, thread, j;
872 
873 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
874 	if (comm_event == NULL)
875 		goto out;
876 
877 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
878 	if (mmap_event == NULL)
879 		goto out_free_comm;
880 
881 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
882 	if (fork_event == NULL)
883 		goto out_free_mmap;
884 
885 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
886 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
887 				  machine->id_hdr_size);
888 	if (namespaces_event == NULL)
889 		goto out_free_fork;
890 
891 	err = 0;
892 	for (thread = 0; thread < threads->nr; ++thread) {
893 		if (__event__synthesize_thread(comm_event, mmap_event,
894 					       fork_event, namespaces_event,
895 					       perf_thread_map__pid(threads, thread), 0,
896 					       process, tool, machine,
897 					       needs_mmap, mmap_data)) {
898 			err = -1;
899 			break;
900 		}
901 
902 		/*
903 		 * comm.pid is set to thread group id by
904 		 * perf_event__synthesize_comm
905 		 */
906 		if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
907 			bool need_leader = true;
908 
909 			/* is thread group leader in thread_map? */
910 			for (j = 0; j < threads->nr; ++j) {
911 				if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
912 					need_leader = false;
913 					break;
914 				}
915 			}
916 
917 			/* if not, generate events for it */
918 			if (need_leader &&
919 			    __event__synthesize_thread(comm_event, mmap_event,
920 						       fork_event, namespaces_event,
921 						       comm_event->comm.pid, 0,
922 						       process, tool, machine,
923 						       needs_mmap, mmap_data)) {
924 				err = -1;
925 				break;
926 			}
927 		}
928 	}
929 	free(namespaces_event);
930 out_free_fork:
931 	free(fork_event);
932 out_free_mmap:
933 	free(mmap_event);
934 out_free_comm:
935 	free(comm_event);
936 out:
937 	return err;
938 }
939 
940 static int __perf_event__synthesize_threads(const struct perf_tool *tool,
941 					    perf_event__handler_t process,
942 					    struct machine *machine,
943 					    bool needs_mmap,
944 					    bool mmap_data,
945 					    struct dirent **dirent,
946 					    int start,
947 					    int num)
948 {
949 	union perf_event *comm_event, *mmap_event, *fork_event;
950 	union perf_event *namespaces_event;
951 	int err = -1;
952 	char *end;
953 	pid_t pid;
954 	int i;
955 
956 	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
957 	if (comm_event == NULL)
958 		goto out;
959 
960 	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
961 	if (mmap_event == NULL)
962 		goto out_free_comm;
963 
964 	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
965 	if (fork_event == NULL)
966 		goto out_free_mmap;
967 
968 	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
969 				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
970 				  machine->id_hdr_size);
971 	if (namespaces_event == NULL)
972 		goto out_free_fork;
973 
974 	for (i = start; i < start + num; i++) {
975 		if (!isdigit(dirent[i]->d_name[0]))
976 			continue;
977 
978 		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
979 		/* only interested in proper numerical dirents */
980 		if (*end)
981 			continue;
982 		/*
983 		 * We may race with an exiting thread, so don't stop just because
984 		 * one thread couldn't be synthesized.
985 		 */
986 		__event__synthesize_thread(comm_event, mmap_event, fork_event,
987 					   namespaces_event, pid, 1, process,
988 					   tool, machine, needs_mmap, mmap_data);
989 	}
990 	err = 0;
991 
992 	free(namespaces_event);
993 out_free_fork:
994 	free(fork_event);
995 out_free_mmap:
996 	free(mmap_event);
997 out_free_comm:
998 	free(comm_event);
999 out:
1000 	return err;
1001 }
1002 
1003 struct synthesize_threads_arg {
1004 	const struct perf_tool *tool;
1005 	perf_event__handler_t process;
1006 	struct machine *machine;
1007 	bool needs_mmap;
1008 	bool mmap_data;
1009 	struct dirent **dirent;
1010 	int num;
1011 	int start;
1012 };
1013 
1014 static void *synthesize_threads_worker(void *arg)
1015 {
1016 	struct synthesize_threads_arg *args = arg;
1017 
1018 	__perf_event__synthesize_threads(args->tool, args->process,
1019 					 args->machine,
1020 					 args->needs_mmap, args->mmap_data,
1021 					 args->dirent,
1022 					 args->start, args->num);
1023 	return NULL;
1024 }
1025 
1026 int perf_event__synthesize_threads(const struct perf_tool *tool,
1027 				   perf_event__handler_t process,
1028 				   struct machine *machine,
1029 				   bool needs_mmap, bool mmap_data,
1030 				   unsigned int nr_threads_synthesize)
1031 {
1032 	struct synthesize_threads_arg *args = NULL;
1033 	pthread_t *synthesize_threads = NULL;
1034 	char proc_path[PATH_MAX];
1035 	struct dirent **dirent;
1036 	int num_per_thread;
1037 	int m, n, i, j;
1038 	int thread_nr;
1039 	int base = 0;
1040 	int err = -1;
1041 
1042 
1043 	if (machine__is_default_guest(machine))
1044 		return 0;
1045 
1046 	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
1047 	n = scandir(proc_path, &dirent, filter_task, NULL);
1048 	if (n < 0)
1049 		return err;
1050 
1051 	if (nr_threads_synthesize == UINT_MAX)
1052 		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
1053 	else
1054 		thread_nr = nr_threads_synthesize;
1055 
1056 	if (thread_nr <= 1) {
1057 		err = __perf_event__synthesize_threads(tool, process,
1058 						       machine,
1059 						       needs_mmap, mmap_data,
1060 						       dirent, base, n);
1061 		goto free_dirent;
1062 	}
1063 	if (thread_nr > n)
1064 		thread_nr = n;
1065 
1066 	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
1067 	if (synthesize_threads == NULL)
1068 		goto free_dirent;
1069 
1070 	args = calloc(thread_nr, sizeof(*args));
1071 	if (args == NULL)
1072 		goto free_threads;
1073 
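	/*
	 * Split the /proc entries evenly across the worker threads: the
	 * first (n % thread_nr) workers each take one extra entry.
	 */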
1074 	num_per_thread = n / thread_nr;
1075 	m = n % thread_nr;
1076 	for (i = 0; i < thread_nr; i++) {
1077 		args[i].tool = tool;
1078 		args[i].process = process;
1079 		args[i].machine = machine;
1080 		args[i].needs_mmap = needs_mmap;
1081 		args[i].mmap_data = mmap_data;
1082 		args[i].dirent = dirent;
1083 	}
1084 	for (i = 0; i < m; i++) {
1085 		args[i].num = num_per_thread + 1;
1086 		args[i].start = i * args[i].num;
1087 	}
1088 	if (i != 0)
1089 		base = args[i-1].start + args[i-1].num;
1090 	for (j = i; j < thread_nr; j++) {
1091 		args[j].num = num_per_thread;
1092 		args[j].start = base + (j - i) * args[i].num;
1093 	}
1094 
1095 	for (i = 0; i < thread_nr; i++) {
1096 		if (pthread_create(&synthesize_threads[i], NULL,
1097 				   synthesize_threads_worker, &args[i]))
1098 			goto out_join;
1099 	}
1100 	err = 0;
1101 out_join:
1102 	for (i = 0; i < thread_nr; i++)
1103 		pthread_join(synthesize_threads[i], NULL);
1104 	free(args);
1105 free_threads:
1106 	free(synthesize_threads);
1107 free_dirent:
1108 	for (i = 0; i < n; i++)
1109 		zfree(&dirent[i]);
1110 	free(dirent);
1111 
1112 	return err;
1113 }
1114 
1115 int __weak perf_event__synthesize_extra_kmaps(const struct perf_tool *tool __maybe_unused,
1116 					      perf_event__handler_t process __maybe_unused,
1117 					      struct machine *machine __maybe_unused)
1118 {
1119 	return 0;
1120 }
1121 
1122 static int __perf_event__synthesize_kernel_mmap(const struct perf_tool *tool,
1123 						perf_event__handler_t process,
1124 						struct machine *machine)
1125 {
1126 	union perf_event *event;
1127 	size_t size = symbol_conf.no_buildid_mmap2 ?
1128 			sizeof(event->mmap) : sizeof(event->mmap2);
1129 	struct map *map = machine__kernel_map(machine);
1130 	struct kmap *kmap;
1131 	int err;
1132 
1133 	if (map == NULL)
1134 		return -1;
1135 
1136 	kmap = map__kmap(map);
1137 	if (!kmap->ref_reloc_sym)
1138 		return -1;
1139 
1140 	/*
1141 	 * We should get this from /sys/kernel/sections/.text, but until that
1142 	 * is available use this approach, and afterwards keep it as a fallback
1143 	 * for older kernels.
1144 	 */
1145 	event = zalloc(size + machine->id_hdr_size);
1146 	if (event == NULL) {
1147 		pr_debug("Not enough memory synthesizing mmap event "
1148 			 "for the kernel map\n");
1149 		return -1;
1150 	}
1151 
1152 	if (machine__is_host(machine)) {
1153 		/*
1154 		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
1155 		 * see kernel/perf_event.c __perf_event_mmap
1156 		 */
1157 		event->header.misc = PERF_RECORD_MISC_KERNEL;
1158 	} else {
1159 		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
1160 	}
1161 
1162 	if (!symbol_conf.no_buildid_mmap2) {
1163 		size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
1164 				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1165 		size = PERF_ALIGN(size, sizeof(u64));
1166 		event->mmap2.header.type = PERF_RECORD_MMAP2;
1167 		event->mmap2.header.size = (sizeof(event->mmap2) -
1168 				(sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
1169 		event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
1170 		event->mmap2.start = map__start(map);
1171 		event->mmap2.len   = map__end(map) - event->mmap2.start;
1172 		event->mmap2.pid   = machine->pid;
1173 
1174 		perf_record_mmap2__read_build_id(&event->mmap2, machine, true);
1175 	} else {
1176 		size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
1177 				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
1178 		size = PERF_ALIGN(size, sizeof(u64));
1179 		event->mmap.header.type = PERF_RECORD_MMAP;
1180 		event->mmap.header.size = (sizeof(event->mmap) -
1181 				(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
1182 		event->mmap.pgoff = kmap->ref_reloc_sym->addr;
1183 		event->mmap.start = map__start(map);
1184 		event->mmap.len   = map__end(map) - event->mmap.start;
1185 		event->mmap.pid   = machine->pid;
1186 	}
1187 
1188 	err = perf_tool__process_synth_event(tool, event, machine, process);
1189 	free(event);
1190 
1191 	return err;
1192 }
1193 
1194 int perf_event__synthesize_kernel_mmap(const struct perf_tool *tool,
1195 				       perf_event__handler_t process,
1196 				       struct machine *machine)
1197 {
1198 	int err;
1199 
1200 	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
1201 	if (err < 0)
1202 		return err;
1203 
1204 	return perf_event__synthesize_extra_kmaps(tool, process, machine);
1205 }
1206 
1207 int perf_event__synthesize_thread_map2(const struct perf_tool *tool,
1208 				      struct perf_thread_map *threads,
1209 				      perf_event__handler_t process,
1210 				      struct machine *machine)
1211 {
1212 	union perf_event *event;
1213 	int i, err, size;
1214 
1215 	size  = sizeof(event->thread_map);
1216 	size +=	threads->nr * sizeof(event->thread_map.entries[0]);
1217 
1218 	event = zalloc(size);
1219 	if (!event)
1220 		return -ENOMEM;
1221 
1222 	event->header.type = PERF_RECORD_THREAD_MAP;
1223 	event->header.size = size;
1224 	event->thread_map.nr = threads->nr;
1225 
1226 	for (i = 0; i < threads->nr; i++) {
1227 		struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
1228 		char *comm = perf_thread_map__comm(threads, i);
1229 
1230 		if (!comm)
1231 			comm = (char *) "";
1232 
1233 		entry->pid = perf_thread_map__pid(threads, i);
1234 		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
1235 	}
1236 
1237 	err = process(tool, event, NULL, machine);
1238 
1239 	free(event);
1240 	return err;
1241 }
1242 
1243 struct synthesize_cpu_map_data {
1244 	const struct perf_cpu_map *map;
1245 	int nr;
1246 	int min_cpu;
1247 	int max_cpu;
1248 	int has_any_cpu;
1249 	int type;
1250 	size_t size;
1251 	struct perf_record_cpu_map_data *data;
1252 };
1253 
1254 static void synthesize_cpus(struct synthesize_cpu_map_data *data)
1255 {
1256 	data->data->type = PERF_CPU_MAP__CPUS;
1257 	data->data->cpus_data.nr = data->nr;
1258 	for (int i = 0; i < data->nr; i++)
1259 		data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu;
1260 }
1261 
1262 static void synthesize_mask(struct synthesize_cpu_map_data *data)
1263 {
1264 	int idx;
1265 	struct perf_cpu cpu;
1266 
1267 	/* Due to padding, the 4-byte-per-entry mask variant is always smaller. */
1268 	data->data->type = PERF_CPU_MAP__MASK;
1269 	data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu);
1270 	data->data->mask32_data.long_size = 4;
1271 
1272 	perf_cpu_map__for_each_cpu(cpu, idx, data->map) {
1273 		int bit_word = cpu.cpu / 32;
1274 		u32 bit_mask = 1U << (cpu.cpu & 31);
1275 
1276 		data->data->mask32_data.mask[bit_word] |= bit_mask;
1277 	}
1278 }
1279 
1280 static void synthesize_range_cpus(struct synthesize_cpu_map_data *data)
1281 {
1282 	data->data->type = PERF_CPU_MAP__RANGE_CPUS;
1283 	data->data->range_cpu_data.any_cpu = data->has_any_cpu;
1284 	data->data->range_cpu_data.start_cpu = data->min_cpu;
1285 	data->data->range_cpu_data.end_cpu = data->max_cpu;
1286 }
1287 
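/*
 * Pick the most compact encoding for the CPU map and allocate the event:
 * a [start, end] range when the CPUs are consecutive, otherwise the
 * explicit CPU list when it is smaller (or when the map contains the
 * "any CPU" -1 entry), else the 32-bit mask encoding.
 */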
1288 static void *cpu_map_data__alloc(struct synthesize_cpu_map_data *syn_data,
1289 				 size_t header_size)
1290 {
1291 	size_t size_cpus, size_mask;
1292 
1293 	syn_data->nr = perf_cpu_map__nr(syn_data->map);
1294 	syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0;
1295 
1296 	syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu;
1297 	syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu;
1298 	if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) {
1299 		/* A consecutive range of CPUs can be encoded using a range. */
1300 		assert(sizeof(u16) + sizeof(struct perf_record_range_cpu_map) == sizeof(u64));
1301 		syn_data->type = PERF_CPU_MAP__RANGE_CPUS;
1302 		syn_data->size = header_size + sizeof(u64);
1303 		return zalloc(syn_data->size);
1304 	}
1305 
1306 	size_cpus = sizeof(u16) + sizeof(struct cpu_map_entries) + syn_data->nr * sizeof(u16);
1307 	/* Due to padding, the 4-byte-per-entry mask variant is always smaller. */
1308 	size_mask = sizeof(u16) + sizeof(struct perf_record_mask_cpu_map32) +
1309 		BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32);
1310 	if (syn_data->has_any_cpu || size_cpus < size_mask) {
1311 		/* Follow the CPU map encoding. */
1312 		syn_data->type = PERF_CPU_MAP__CPUS;
1313 		syn_data->size = header_size + PERF_ALIGN(size_cpus, sizeof(u64));
1314 		return zalloc(syn_data->size);
1315 	}
1316 	/* Encode using a bitmask. */
1317 	syn_data->type = PERF_CPU_MAP__MASK;
1318 	syn_data->size = header_size + PERF_ALIGN(size_mask, sizeof(u64));
1319 	return zalloc(syn_data->size);
1320 }
1321 
1322 static void cpu_map_data__synthesize(struct synthesize_cpu_map_data *data)
1323 {
1324 	switch (data->type) {
1325 	case PERF_CPU_MAP__CPUS:
1326 		synthesize_cpus(data);
1327 		break;
1328 	case PERF_CPU_MAP__MASK:
1329 		synthesize_mask(data);
1330 		break;
1331 	case PERF_CPU_MAP__RANGE_CPUS:
1332 		synthesize_range_cpus(data);
1333 		break;
1334 	default:
1335 		break;
1336 	}
1337 }
1338 
1339 static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
1340 {
1341 	struct synthesize_cpu_map_data syn_data = { .map = map };
1342 	struct perf_record_cpu_map *event;
1343 
1344 
1345 	event = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header));
1346 	if (!event)
1347 		return NULL;
1348 
1349 	syn_data.data = &event->data;
1350 	event->header.type = PERF_RECORD_CPU_MAP;
1351 	event->header.size = syn_data.size;
1352 	cpu_map_data__synthesize(&syn_data);
1353 	return event;
1354 }
1355 
1356 
1357 int perf_event__synthesize_cpu_map(const struct perf_tool *tool,
1358 				   const struct perf_cpu_map *map,
1359 				   perf_event__handler_t process,
1360 				   struct machine *machine)
1361 {
1362 	struct perf_record_cpu_map *event;
1363 	int err;
1364 
1365 	event = cpu_map_event__new(map);
1366 	if (!event)
1367 		return -ENOMEM;
1368 
1369 	err = process(tool, (union perf_event *) event, NULL, machine);
1370 
1371 	free(event);
1372 	return err;
1373 }
1374 
1375 int perf_event__synthesize_stat_config(const struct perf_tool *tool,
1376 				       struct perf_stat_config *config,
1377 				       perf_event__handler_t process,
1378 				       struct machine *machine)
1379 {
1380 	struct perf_record_stat_config *event;
1381 	int size, i = 0, err;
1382 
1383 	size  = sizeof(*event);
1384 	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1385 
1386 	event = zalloc(size);
1387 	if (!event)
1388 		return -ENOMEM;
1389 
1390 	event->header.type = PERF_RECORD_STAT_CONFIG;
1391 	event->header.size = size;
1392 	event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1393 
1394 #define ADD(__term, __val)					\
1395 	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
1396 	event->data[i].val = __val;				\
1397 	i++;
1398 
1399 	ADD(AGGR_MODE,	config->aggr_mode)
1400 	ADD(INTERVAL,	config->interval)
1401 	ADD(SCALE,	config->scale)
1402 	ADD(AGGR_LEVEL,	config->aggr_level)
1403 
1404 	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1405 		  "stat config terms unbalanced\n");
1406 #undef ADD
1407 
1408 	err = process(tool, (union perf_event *) event, NULL, machine);
1409 
1410 	free(event);
1411 	return err;
1412 }
1413 
1414 int perf_event__synthesize_stat(const struct perf_tool *tool,
1415 				struct perf_cpu cpu, u32 thread, u64 id,
1416 				struct perf_counts_values *count,
1417 				perf_event__handler_t process,
1418 				struct machine *machine)
1419 {
1420 	struct perf_record_stat event;
1421 
1422 	event.header.type = PERF_RECORD_STAT;
1423 	event.header.size = sizeof(event);
1424 	event.header.misc = 0;
1425 
1426 	event.id        = id;
1427 	event.cpu       = cpu.cpu;
1428 	event.thread    = thread;
1429 	event.val       = count->val;
1430 	event.ena       = count->ena;
1431 	event.run       = count->run;
1432 
1433 	return process(tool, (union perf_event *) &event, NULL, machine);
1434 }
1435 
1436 int perf_event__synthesize_stat_round(const struct perf_tool *tool,
1437 				      u64 evtime, u64 type,
1438 				      perf_event__handler_t process,
1439 				      struct machine *machine)
1440 {
1441 	struct perf_record_stat_round event;
1442 
1443 	event.header.type = PERF_RECORD_STAT_ROUND;
1444 	event.header.size = sizeof(event);
1445 	event.header.misc = 0;
1446 
1447 	event.time = evtime;
1448 	event.type = type;
1449 
1450 	return process(tool, (union perf_event *) &event, NULL, machine);
1451 }
1452 
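/*
 * Compute the size in bytes of a PERF_RECORD_SAMPLE carrying @sample for
 * the given sample @type and @read_format; the layout mirrors what
 * perf_event__synthesize_sample() writes below.
 */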
1453 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
1454 {
1455 	size_t sz, result = sizeof(struct perf_record_sample);
1456 
1457 	if (type & PERF_SAMPLE_IDENTIFIER)
1458 		result += sizeof(u64);
1459 
1460 	if (type & PERF_SAMPLE_IP)
1461 		result += sizeof(u64);
1462 
1463 	if (type & PERF_SAMPLE_TID)
1464 		result += sizeof(u64);
1465 
1466 	if (type & PERF_SAMPLE_TIME)
1467 		result += sizeof(u64);
1468 
1469 	if (type & PERF_SAMPLE_ADDR)
1470 		result += sizeof(u64);
1471 
1472 	if (type & PERF_SAMPLE_ID)
1473 		result += sizeof(u64);
1474 
1475 	if (type & PERF_SAMPLE_STREAM_ID)
1476 		result += sizeof(u64);
1477 
1478 	if (type & PERF_SAMPLE_CPU)
1479 		result += sizeof(u64);
1480 
1481 	if (type & PERF_SAMPLE_PERIOD)
1482 		result += sizeof(u64);
1483 
1484 	if (type & PERF_SAMPLE_READ) {
1485 		result += sizeof(u64);
1486 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1487 			result += sizeof(u64);
1488 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1489 			result += sizeof(u64);
1490 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1491 		if (read_format & PERF_FORMAT_GROUP) {
1492 			sz = sample_read_value_size(read_format);
1493 			result += sz * sample->read.group.nr;
1494 		} else {
1495 			result += sizeof(u64);
1496 			if (read_format & PERF_FORMAT_LOST)
1497 				result += sizeof(u64);
1498 		}
1499 	}
1500 
1501 	if (type & PERF_SAMPLE_CALLCHAIN) {
1502 		sz = (sample->callchain->nr + 1) * sizeof(u64);
1503 		result += sz;
1504 	}
1505 
1506 	if (type & PERF_SAMPLE_RAW) {
1507 		result += sizeof(u32);
1508 		result += sample->raw_size;
1509 	}
1510 
1511 	if (type & PERF_SAMPLE_BRANCH_STACK) {
1512 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1513 		/* nr, hw_idx */
1514 		sz += 2 * sizeof(u64);
1515 		result += sz;
1516 	}
1517 
1518 	if (type & PERF_SAMPLE_REGS_USER) {
1519 		if (sample->user_regs && sample->user_regs->abi) {
1520 			result += sizeof(u64);
1521 			sz = hweight64(sample->user_regs->mask) * sizeof(u64);
1522 			result += sz;
1523 		} else {
1524 			result += sizeof(u64);
1525 		}
1526 	}
1527 
1528 	if (type & PERF_SAMPLE_STACK_USER) {
1529 		sz = sample->user_stack.size;
1530 		result += sizeof(u64);
1531 		if (sz) {
1532 			result += sz;
1533 			result += sizeof(u64);
1534 		}
1535 	}
1536 
1537 	if (type & PERF_SAMPLE_WEIGHT_TYPE)
1538 		result += sizeof(u64);
1539 
1540 	if (type & PERF_SAMPLE_DATA_SRC)
1541 		result += sizeof(u64);
1542 
1543 	if (type & PERF_SAMPLE_TRANSACTION)
1544 		result += sizeof(u64);
1545 
1546 	if (type & PERF_SAMPLE_REGS_INTR) {
1547 		if (sample->intr_regs && sample->intr_regs->abi) {
1548 			result += sizeof(u64);
1549 			sz = hweight64(sample->intr_regs->mask) * sizeof(u64);
1550 			result += sz;
1551 		} else {
1552 			result += sizeof(u64);
1553 		}
1554 	}
1555 
1556 	if (type & PERF_SAMPLE_PHYS_ADDR)
1557 		result += sizeof(u64);
1558 
1559 	if (type & PERF_SAMPLE_CGROUP)
1560 		result += sizeof(u64);
1561 
1562 	if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
1563 		result += sizeof(u64);
1564 
1565 	if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
1566 		result += sizeof(u64);
1567 
1568 	if (type & PERF_SAMPLE_AUX) {
1569 		result += sizeof(u64);
1570 		result += sample->aux_sample.size;
1571 	}
1572 
1573 	return result;
1574 }
1575 
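/*
 * With PERF_SAMPLE_WEIGHT_STRUCT the 64-bit weight word is packed as
 * bits 0-31 weight, bits 32-47 ins_lat and bits 48-63 weight3; plain
 * PERF_SAMPLE_WEIGHT uses the full 64 bits for the weight.
 */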
1576 static void perf_synthesize_sample_weight(const struct perf_sample *data,
1577 					       __u64 *array, u64 type __maybe_unused)
1578 {
1579 	*array = data->weight;
1580 
1581 	if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
1582 		*array &= 0xffffffff;
1583 		*array |= ((u64)data->ins_lat << 32);
1584 		*array |= ((u64)data->weight3 << 48);
1585 	}
1586 }
1587 
1588 static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
1589 				     const struct perf_sample *sample)
1590 {
1591 	size_t sz = sample_read_value_size(read_format);
1592 	struct sample_read_value *v = sample->read.group.values;
1593 
1594 	sample_read_group__for_each(v, sample->read.group.nr, read_format) {
1595 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1596 		memcpy(array, v, sz);
1597 		array = (void *)array + sz;
1598 	}
1599 	return array;
1600 }
1601 
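/*
 * Write @sample into the raw @event buffer using the same field order and
 * layout the kernel emits for PERF_RECORD_SAMPLE, as selected by @type
 * (the attr's sample_type) and @read_format.
 */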
1602 int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1603 				  const struct perf_sample *sample)
1604 {
1605 	__u64 *array;
1606 	size_t sz;
1607 	/*
1608 	 * used for cross-endian analysis. See git commit 65014ab3
1609 	 * for why this goofiness is needed.
1610 	 */
1611 	union u64_swap u;
1612 
1613 	array = event->sample.array;
1614 
1615 	if (type & PERF_SAMPLE_IDENTIFIER) {
1616 		*array = sample->id;
1617 		array++;
1618 	}
1619 
1620 	if (type & PERF_SAMPLE_IP) {
1621 		*array = sample->ip;
1622 		array++;
1623 	}
1624 
1625 	if (type & PERF_SAMPLE_TID) {
1626 		u.val32[0] = sample->pid;
1627 		u.val32[1] = sample->tid;
1628 		*array = u.val64;
1629 		array++;
1630 	}
1631 
1632 	if (type & PERF_SAMPLE_TIME) {
1633 		*array = sample->time;
1634 		array++;
1635 	}
1636 
1637 	if (type & PERF_SAMPLE_ADDR) {
1638 		*array = sample->addr;
1639 		array++;
1640 	}
1641 
1642 	if (type & PERF_SAMPLE_ID) {
1643 		*array = sample->id;
1644 		array++;
1645 	}
1646 
1647 	if (type & PERF_SAMPLE_STREAM_ID) {
1648 		*array = sample->stream_id;
1649 		array++;
1650 	}
1651 
1652 	if (type & PERF_SAMPLE_CPU) {
1653 		u.val32[0] = sample->cpu;
1654 		u.val32[1] = 0;
1655 		*array = u.val64;
1656 		array++;
1657 	}
1658 
1659 	if (type & PERF_SAMPLE_PERIOD) {
1660 		*array = sample->period;
1661 		array++;
1662 	}
1663 
1664 	if (type & PERF_SAMPLE_READ) {
1665 		if (read_format & PERF_FORMAT_GROUP)
1666 			*array = sample->read.group.nr;
1667 		else
1668 			*array = sample->read.one.value;
1669 		array++;
1670 
1671 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1672 			*array = sample->read.time_enabled;
1673 			array++;
1674 		}
1675 
1676 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1677 			*array = sample->read.time_running;
1678 			array++;
1679 		}
1680 
1681 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1682 		if (read_format & PERF_FORMAT_GROUP) {
1683 			array = copy_read_group_values(array, read_format,
1684 						       sample);
1685 		} else {
1686 			*array = sample->read.one.id;
1687 			array++;
1688 
1689 			if (read_format & PERF_FORMAT_LOST) {
1690 				*array = sample->read.one.lost;
1691 				array++;
1692 			}
1693 		}
1694 	}
1695 
1696 	if (type & PERF_SAMPLE_CALLCHAIN) {
1697 		sz = (sample->callchain->nr + 1) * sizeof(u64);
1698 		memcpy(array, sample->callchain, sz);
1699 		array = (void *)array + sz;
1700 	}
1701 
1702 	if (type & PERF_SAMPLE_RAW) {
1703 		u32 *array32 = (void *)array;
1704 
1705 		*array32 = sample->raw_size;
1706 		array32++;
1707 
1708 		memcpy(array32, sample->raw_data, sample->raw_size);
1709 		array = (void *)(array32 + (sample->raw_size / sizeof(u32)));
1710 
1711 		/* make sure the array is 64-bit aligned */
1712 		BUG_ON(((long)array) % sizeof(u64));
1713 	}
1714 
1715 	if (type & PERF_SAMPLE_BRANCH_STACK) {
1716 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1717 		/* nr, hw_idx */
1718 		sz += 2 * sizeof(u64);
1719 		memcpy(array, sample->branch_stack, sz);
1720 		array = (void *)array + sz;
1721 	}
1722 
1723 	if (type & PERF_SAMPLE_REGS_USER) {
1724 		if (sample->user_regs && sample->user_regs->abi) {
1725 			*array++ = sample->user_regs->abi;
1726 			sz = hweight64(sample->user_regs->mask) * sizeof(u64);
1727 			memcpy(array, sample->user_regs->regs, sz);
1728 			array = (void *)array + sz;
1729 		} else {
1730 			*array++ = 0;
1731 		}
1732 	}
1733 
1734 	if (type & PERF_SAMPLE_STACK_USER) {
1735 		sz = sample->user_stack.size;
1736 		*array++ = sz;
1737 		if (sz) {
1738 			memcpy(array, sample->user_stack.data, sz);
1739 			array = (void *)array + sz;
1740 			*array++ = sz;
1741 		}
1742 	}
1743 
1744 	if (type & PERF_SAMPLE_WEIGHT_TYPE) {
1745 		perf_synthesize_sample_weight(sample, array, type);
1746 		array++;
1747 	}
1748 
1749 	if (type & PERF_SAMPLE_DATA_SRC) {
1750 		*array = sample->data_src;
1751 		array++;
1752 	}
1753 
1754 	if (type & PERF_SAMPLE_TRANSACTION) {
1755 		*array = sample->transaction;
1756 		array++;
1757 	}
1758 
1759 	if (type & PERF_SAMPLE_REGS_INTR) {
1760 		if (sample->intr_regs && sample->intr_regs->abi) {
1761 			*array++ = sample->intr_regs->abi;
1762 			sz = hweight64(sample->intr_regs->mask) * sizeof(u64);
1763 			memcpy(array, sample->intr_regs->regs, sz);
1764 			array = (void *)array + sz;
1765 		} else {
1766 			*array++ = 0;
1767 		}
1768 	}
1769 
1770 	if (type & PERF_SAMPLE_PHYS_ADDR) {
1771 		*array = sample->phys_addr;
1772 		array++;
1773 	}
1774 
1775 	if (type & PERF_SAMPLE_CGROUP) {
1776 		*array = sample->cgroup;
1777 		array++;
1778 	}
1779 
1780 	if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
1781 		*array = sample->data_page_size;
1782 		array++;
1783 	}
1784 
1785 	if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
1786 		*array = sample->code_page_size;
1787 		array++;
1788 	}
1789 
1790 	if (type & PERF_SAMPLE_AUX) {
1791 		sz = sample->aux_sample.size;
1792 		*array++ = sz;
1793 		memcpy(array, sample->aux_sample.data, sz);
1794 		array = (void *)array + sz;
1795 	}
1796 
1797 	return 0;
1798 }
1799 
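/*
 * Write the trailing sample ID fields that follow non-sample events when
 * sample_id_all is set, in the order used by the kernel (TID, TIME, ID,
 * STREAM_ID, CPU, IDENTIFIER).  Returns the number of bytes written.
 */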
perf_event__synthesize_id_sample(__u64 * array,u64 type,const struct perf_sample * sample)1800 int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample)
1801 {
1802 	__u64 *start = array;
1803 
1804 	/*
1805 	 * used for cross-endian analysis. See git commit 65014ab3
1806 	 * for why this goofiness is needed.
1807 	 */
1808 	union u64_swap u;
1809 
1810 	if (type & PERF_SAMPLE_TID) {
1811 		u.val32[0] = sample->pid;
1812 		u.val32[1] = sample->tid;
1813 		*array = u.val64;
1814 		array++;
1815 	}
1816 
1817 	if (type & PERF_SAMPLE_TIME) {
1818 		*array = sample->time;
1819 		array++;
1820 	}
1821 
1822 	if (type & PERF_SAMPLE_ID) {
1823 		*array = sample->id;
1824 		array++;
1825 	}
1826 
1827 	if (type & PERF_SAMPLE_STREAM_ID) {
1828 		*array = sample->stream_id;
1829 		array++;
1830 	}
1831 
1832 	if (type & PERF_SAMPLE_CPU) {
1833 		u.val32[0] = sample->cpu;
1834 		u.val32[1] = 0;
1835 		*array = u.val64;
1836 		array++;
1837 	}
1838 
1839 	if (type & PERF_SAMPLE_IDENTIFIER) {
1840 		*array = sample->id;
1841 		array++;
1842 	}
1843 
1844 	return (void *)array - (void *)start;
1845 }
1846 
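/*
 * Synthesize PERF_RECORD_ID_INDEX event(s) mapping each sample id to its
 * evsel index, cpu and tid, plus machine_pid/vcpu entries when guest
 * machines are involved.  The index is split across multiple events when
 * the entries would not fit within the 16-bit header size.
 */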
__perf_event__synthesize_id_index(const struct perf_tool * tool,perf_event__handler_t process,struct evlist * evlist,struct machine * machine,size_t from)1847 int __perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process,
1848 				      struct evlist *evlist, struct machine *machine, size_t from)
1849 {
1850 	union perf_event *ev;
1851 	struct evsel *evsel;
1852 	size_t nr = 0, i = 0, sz, max_nr, n, pos;
1853 	size_t e1_sz = sizeof(struct id_index_entry);
1854 	size_t e2_sz = sizeof(struct id_index_entry_2);
1855 	size_t etot_sz = e1_sz + e2_sz;
1856 	bool e2_needed = false;
1857 	int err;
1858 
1859 	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz;
1860 
1861 	pos = 0;
1862 	evlist__for_each_entry(evlist, evsel) {
1863 		if (pos++ < from)
1864 			continue;
1865 		nr += evsel->core.ids;
1866 	}
1867 
1868 	if (!nr)
1869 		return 0;
1870 
1871 	pr_debug2("Synthesizing id index\n");
1872 
1873 	n = nr > max_nr ? max_nr : nr;
1874 	sz = sizeof(struct perf_record_id_index) + n * etot_sz;
1875 	ev = zalloc(sz);
1876 	if (!ev)
1877 		return -ENOMEM;
1878 
1879 	sz = sizeof(struct perf_record_id_index) + n * e1_sz;
1880 
1881 	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1882 	ev->id_index.nr = n;
1883 
1884 	pos = 0;
1885 	evlist__for_each_entry(evlist, evsel) {
1886 		u32 j;
1887 
1888 		if (pos++ < from)
1889 			continue;
1890 		for (j = 0; j < evsel->core.ids; j++, i++) {
1891 			struct id_index_entry *e;
1892 			struct id_index_entry_2 *e2;
1893 			struct perf_sample_id *sid;
1894 
1895 			if (i >= n) {
1896 				ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0);
1897 				err = process(tool, ev, NULL, machine);
1898 				if (err)
1899 					goto out_err;
1900 				nr -= n;
1901 				i = 0;
1902 				e2_needed = false;
1903 			}
1904 
1905 			e = &ev->id_index.entries[i];
1906 
1907 			e->id = evsel->core.id[j];
1908 
1909 			sid = evlist__id2sid(evlist, e->id);
1910 			if (!sid) {
1911 				free(ev);
1912 				return -ENOENT;
1913 			}
1914 
1915 			e->idx = sid->idx;
1916 			e->cpu = sid->cpu.cpu;
1917 			e->tid = sid->tid;
1918 
1919 			if (sid->machine_pid)
1920 				e2_needed = true;
1921 
1922 			e2 = (void *)ev + sz;
1923 			e2[i].machine_pid = sid->machine_pid;
1924 			e2[i].vcpu        = sid->vcpu.cpu;
1925 		}
1926 	}
1927 
1928 	sz = sizeof(struct perf_record_id_index) + nr * e1_sz;
1929 	ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0);
1930 	ev->id_index.nr = nr;
1931 
1932 	err = process(tool, ev, NULL, machine);
1933 out_err:
1934 	free(ev);
1935 
1936 	return err;
1937 }
1938 
perf_event__synthesize_id_index(const struct perf_tool * tool,perf_event__handler_t process,struct evlist * evlist,struct machine * machine)1939 int perf_event__synthesize_id_index(const struct perf_tool *tool, perf_event__handler_t process,
1940 				    struct evlist *evlist, struct machine *machine)
1941 {
1942 	return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0);
1943 }
1944 
__machine__synthesize_threads(struct machine * machine,const struct perf_tool * tool,struct target * target,struct perf_thread_map * threads,perf_event__handler_t process,bool needs_mmap,bool data_mmap,unsigned int nr_threads_synthesize)1945 int __machine__synthesize_threads(struct machine *machine, const struct perf_tool *tool,
1946 				  struct target *target, struct perf_thread_map *threads,
1947 				  perf_event__handler_t process, bool needs_mmap,
1948 				  bool data_mmap, unsigned int nr_threads_synthesize)
1949 {
1950 	/*
1951 	 * When perf runs in a non-root PID namespace and the namespace's proc FS
1952 	 * is not mounted, nsinfo__is_in_root_namespace() returns false.
1953 	 * In this case, the proc FS comes from the parent namespace, so the
1954 	 * perf tool would wrongly gather process info from its parent PID
1955 	 * namespace.
1956 	 *
1957 	 * To avoid the confusion of running in a child PID namespace while
1958 	 * synthesizing thread info from the parent PID namespace, return
1959 	 * failure with a warning.
1960 	 */
1961 	if (!nsinfo__is_in_root_namespace()) {
1962 		pr_err("Perf runs in a non-root PID namespace but tries to ");
1963 		pr_err("gather process info from its parent PID namespace.\n");
1964 		pr_err("Please mount the proc file system properly, e.g. ");
1965 		pr_err("add the option '--mount-proc' to the unshare command.\n");
1966 		return -EPERM;
1967 	}
1968 
1969 	if (target__has_task(target))
1970 		return perf_event__synthesize_thread_map(tool, threads, process, machine,
1971 							 needs_mmap, data_mmap);
1972 	else if (target__has_cpu(target))
1973 		return perf_event__synthesize_threads(tool, process, machine,
1974 						      needs_mmap, data_mmap,
1975 						      nr_threads_synthesize);
1976 	/* command specified */
1977 	return 0;
1978 }
1979 
machine__synthesize_threads(struct machine * machine,struct target * target,struct perf_thread_map * threads,bool needs_mmap,bool data_mmap,unsigned int nr_threads_synthesize)1980 int machine__synthesize_threads(struct machine *machine, struct target *target,
1981 				struct perf_thread_map *threads, bool needs_mmap,
1982 				bool data_mmap, unsigned int nr_threads_synthesize)
1983 {
1984 	return __machine__synthesize_threads(machine, NULL, target, threads,
1985 					     perf_event__process, needs_mmap,
1986 					     data_mmap, nr_threads_synthesize);
1987 }
1988 
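/*
 * Allocate a zeroed PERF_RECORD_EVENT_UPDATE event with room for 'size'
 * bytes of payload, rounded up to a multiple of u64.
 */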
event_update_event__new(size_t size,u64 type,u64 id)1989 static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1990 {
1991 	struct perf_record_event_update *ev;
1992 
1993 	size += sizeof(*ev);
1994 	size  = PERF_ALIGN(size, sizeof(u64));
1995 
1996 	ev = zalloc(size);
1997 	if (ev) {
1998 		ev->header.type = PERF_RECORD_EVENT_UPDATE;
1999 		ev->header.size = (u16)size;
2000 		ev->type	= type;
2001 		ev->id		= id;
2002 	}
2003 	return ev;
2004 }
2005 
perf_event__synthesize_event_update_unit(const struct perf_tool * tool,struct evsel * evsel,perf_event__handler_t process)2006 int perf_event__synthesize_event_update_unit(const struct perf_tool *tool, struct evsel *evsel,
2007 					     perf_event__handler_t process)
2008 {
2009 	size_t size = strlen(evsel->unit);
2010 	struct perf_record_event_update *ev;
2011 	int err;
2012 
2013 	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
2014 	if (ev == NULL)
2015 		return -ENOMEM;
2016 
2017 	strlcpy(ev->unit, evsel->unit, size + 1);
2018 	err = process(tool, (union perf_event *)ev, NULL, NULL);
2019 	free(ev);
2020 	return err;
2021 }
2022 
perf_event__synthesize_event_update_scale(const struct perf_tool * tool,struct evsel * evsel,perf_event__handler_t process)2023 int perf_event__synthesize_event_update_scale(const struct perf_tool *tool, struct evsel *evsel,
2024 					      perf_event__handler_t process)
2025 {
2026 	struct perf_record_event_update *ev;
2027 	struct perf_record_event_update_scale *ev_data;
2028 	int err;
2029 
2030 	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
2031 	if (ev == NULL)
2032 		return -ENOMEM;
2033 
2034 	ev->scale.scale = evsel->scale;
2035 	err = process(tool, (union perf_event *)ev, NULL, NULL);
2036 	free(ev);
2037 	return err;
2038 }
2039 
perf_event__synthesize_event_update_name(const struct perf_tool * tool,struct evsel * evsel,perf_event__handler_t process)2040 int perf_event__synthesize_event_update_name(const struct perf_tool *tool, struct evsel *evsel,
2041 					     perf_event__handler_t process)
2042 {
2043 	struct perf_record_event_update *ev;
2044 	size_t len = strlen(evsel__name(evsel));
2045 	int err;
2046 
2047 	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
2048 	if (ev == NULL)
2049 		return -ENOMEM;
2050 
2051 	strlcpy(ev->name, evsel->name, len + 1);
2052 	err = process(tool, (union perf_event *)ev, NULL, NULL);
2053 	free(ev);
2054 	return err;
2055 }
2056 
perf_event__synthesize_event_update_cpus(const struct perf_tool * tool,struct evsel * evsel,perf_event__handler_t process)2057 int perf_event__synthesize_event_update_cpus(const struct perf_tool *tool, struct evsel *evsel,
2058 					     perf_event__handler_t process)
2059 {
2060 	struct synthesize_cpu_map_data syn_data = { .map = evsel->core.pmu_cpus };
2061 	struct perf_record_event_update *ev;
2062 	int err;
2063 
2064 	ev = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header) + 2 * sizeof(u64));
2065 	if (!ev)
2066 		return -ENOMEM;
2067 
2068 	syn_data.data = &ev->cpus.cpus;
2069 	ev->header.type = PERF_RECORD_EVENT_UPDATE;
2070 	ev->header.size = (u16)syn_data.size;
2071 	ev->type	= PERF_EVENT_UPDATE__CPUS;
2072 	ev->id		= evsel->core.id[0];
2073 	cpu_map_data__synthesize(&syn_data);
2074 
2075 	err = process(tool, (union perf_event *)ev, NULL, NULL);
2076 	free(ev);
2077 	return err;
2078 }
2079 
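/*
 * Synthesize one PERF_RECORD_HEADER_ATTR event per evsel, carrying its
 * perf_event_attr and sample ids.
 */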
perf_event__synthesize_attrs(const struct perf_tool * tool,struct evlist * evlist,perf_event__handler_t process)2080 int perf_event__synthesize_attrs(const struct perf_tool *tool, struct evlist *evlist,
2081 				 perf_event__handler_t process)
2082 {
2083 	struct evsel *evsel;
2084 	int err = 0;
2085 
2086 	evlist__for_each_entry(evlist, evsel) {
2087 		err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
2088 						  evsel->core.id, process);
2089 		if (err) {
2090 			pr_debug("failed to create perf header attribute\n");
2091 			return err;
2092 		}
2093 	}
2094 
2095 	return err;
2096 }
2097 
has_unit(struct evsel * evsel)2098 static bool has_unit(struct evsel *evsel)
2099 {
2100 	return evsel->unit && *evsel->unit;
2101 }
2102 
has_scale(struct evsel * evsel)2103 static bool has_scale(struct evsel *evsel)
2104 {
2105 	return evsel->scale != 1;
2106 }
2107 
perf_event__synthesize_extra_attr(const struct perf_tool * tool,struct evlist * evsel_list,perf_event__handler_t process,bool is_pipe)2108 int perf_event__synthesize_extra_attr(const struct perf_tool *tool, struct evlist *evsel_list,
2109 				      perf_event__handler_t process, bool is_pipe)
2110 {
2111 	struct evsel *evsel;
2112 	int err;
2113 
2114 	/*
2115 	 * Synthesize other event details not carried within the
2116 	 * attr event: unit, scale, name.
2117 	 */
2118 	evlist__for_each_entry(evsel_list, evsel) {
2119 		if (!evsel->supported)
2120 			continue;
2121 
2122 		/*
2123 		 * Synthesize unit and scale only if they are defined.
2124 		 */
2125 		if (has_unit(evsel)) {
2126 			err = perf_event__synthesize_event_update_unit(tool, evsel, process);
2127 			if (err < 0) {
2128 				pr_err("Couldn't synthesize evsel unit.\n");
2129 				return err;
2130 			}
2131 		}
2132 
2133 		if (has_scale(evsel)) {
2134 			err = perf_event__synthesize_event_update_scale(tool, evsel, process);
2135 			if (err < 0) {
2136 				pr_err("Couldn't synthesize evsel scale.\n");
2137 				return err;
2138 			}
2139 		}
2140 
2141 		if (evsel->core.pmu_cpus) {
2142 			err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
2143 			if (err < 0) {
2144 				pr_err("Couldn't synthesize evsel cpus.\n");
2145 				return err;
2146 			}
2147 		}
2148 
2149 		/*
2150 		 * The name is needed only for pipe output;
2151 		 * perf.data already carries event names.
2152 		 */
2153 		if (is_pipe) {
2154 			err = perf_event__synthesize_event_update_name(tool, evsel, process);
2155 			if (err < 0) {
2156 				pr_err("Couldn't synthesize evsel name.\n");
2157 				return err;
2158 			}
2159 		}
2160 	}
2161 	return 0;
2162 }
2163 
perf_event__synthesize_attr(const struct perf_tool * tool,struct perf_event_attr * attr,u32 ids,u64 * id,perf_event__handler_t process)2164 int perf_event__synthesize_attr(const struct perf_tool *tool, struct perf_event_attr *attr,
2165 				u32 ids, u64 *id, perf_event__handler_t process)
2166 {
2167 	union perf_event *ev;
2168 	size_t size;
2169 	int err;
2170 
2171 	size = sizeof(struct perf_event_attr);
2172 	size = PERF_ALIGN(size, sizeof(u64));
2173 	size += sizeof(struct perf_event_header);
2174 	size += ids * sizeof(u64);
2175 
2176 	ev = zalloc(size);
2177 
2178 	if (ev == NULL)
2179 		return -ENOMEM;
2180 
2181 	ev->attr.attr = *attr;
2182 	memcpy(perf_record_header_attr_id(ev), id, ids * sizeof(u64));
2183 
2184 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
2185 	ev->attr.header.size = (u16)size;
2186 
2187 	if (ev->attr.header.size == size)
2188 		err = process(tool, ev, NULL, NULL);
2189 	else
2190 		err = -E2BIG;
2191 
2192 	free(ev);
2193 
2194 	return err;
2195 }
2196 
2197 #ifdef HAVE_LIBTRACEEVENT
perf_event__synthesize_tracing_data(const struct perf_tool * tool,int fd,struct evlist * evlist,perf_event__handler_t process)2198 int perf_event__synthesize_tracing_data(const struct perf_tool *tool, int fd, struct evlist *evlist,
2199 					perf_event__handler_t process)
2200 {
2201 	union perf_event ev;
2202 	struct tracing_data *tdata;
2203 	ssize_t size = 0, aligned_size = 0, padding;
2204 	struct feat_fd ff;
2205 
2206 	/*
2207 	 * We are going to store the size of the data followed
2208 	 * by the data contents. Since the file descriptor is a pipe,
2209 	 * we cannot seek back to store the size of the data once
2210 	 * we know it. Instead we:
2211 	 *
2212 	 * - write the tracing data to the temp file
2213 	 * - get/write the data size to pipe
2214 	 * - write the tracing data from the temp file
2215 	 *   to the pipe
2216 	 */
2217 	tdata = tracing_data_get(&evlist->core.entries, fd, true);
2218 	if (!tdata)
2219 		return -1;
2220 
2221 	memset(&ev, 0, sizeof(ev.tracing_data));
2222 
2223 	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
2224 	size = tdata->size;
2225 	aligned_size = PERF_ALIGN(size, sizeof(u64));
2226 	padding = aligned_size - size;
2227 	ev.tracing_data.header.size = sizeof(ev.tracing_data);
2228 	ev.tracing_data.size = aligned_size;
2229 
2230 	process(tool, &ev, NULL, NULL);
2231 
2232 	/*
2233 	 * The put function will copy all the tracing data
2234 	 * stored in the temp file to the pipe.
2235 	 */
2236 	tracing_data_put(tdata);
2237 
2238 	ff = (struct feat_fd){ .fd = fd };
2239 	if (write_padded(&ff, NULL, 0, padding))
2240 		return -1;
2241 
2242 	return aligned_size;
2243 }
2244 #endif
2245 
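/*
 * Synthesize a PERF_RECORD_HEADER_BUILD_ID event for 'filename'.  When an
 * evsel is given, an id sample is appended after the filename.
 */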
perf_event__synthesize_build_id(const struct perf_tool * tool,struct perf_sample * sample,struct machine * machine,perf_event__handler_t process,const struct evsel * evsel,__u16 misc,const struct build_id * bid,const char * filename)2246 int perf_event__synthesize_build_id(const struct perf_tool *tool,
2247 				    struct perf_sample *sample,
2248 				    struct machine *machine,
2249 				    perf_event__handler_t process,
2250 				    const struct evsel *evsel,
2251 				    __u16 misc,
2252 				    const struct build_id *bid,
2253 				    const char *filename)
2254 {
2255 	union perf_event ev;
2256 	size_t len;
2257 
2258 	len = sizeof(ev.build_id) + strlen(filename) + 1;
2259 	len = PERF_ALIGN(len, sizeof(u64));
2260 
2261 	memset(&ev, 0, len);
2262 
2263 	ev.build_id.size = bid->size;
2264 	if (ev.build_id.size > sizeof(ev.build_id.build_id))
2265 		ev.build_id.size = sizeof(ev.build_id.build_id);
2266 	memcpy(ev.build_id.build_id, bid->data, ev.build_id.size);
2267 	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
2268 	ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE;
2269 	ev.build_id.pid = machine->pid;
2270 	ev.build_id.header.size = len;
2271 	strcpy(ev.build_id.filename, filename);
2272 
2273 	if (evsel) {
2274 		void *array = &ev;
2275 		int ret;
2276 
2277 		array += ev.header.size;
2278 		ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
2279 		if (ret < 0)
2280 			return ret;
2281 
2282 		if (ret & 7) {
2283 			pr_err("Bad id sample size %d\n", ret);
2284 			return -EINVAL;
2285 		}
2286 
2287 		ev.header.size += ret;
2288 	}
2289 
2290 	return process(tool, &ev, sample, machine);
2291 }
2292 
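/*
 * Synthesize a PERF_RECORD_MMAP2 event that carries a build ID
 * (PERF_RECORD_MISC_MMAP_BUILD_ID) instead of device/inode numbers,
 * followed by an id sample.
 */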
perf_event__synthesize_mmap2_build_id(const struct perf_tool * tool,struct perf_sample * sample,struct machine * machine,perf_event__handler_t process,const struct evsel * evsel,__u16 misc,__u32 pid,__u32 tid,__u64 start,__u64 len,__u64 pgoff,const struct build_id * bid,__u32 prot,__u32 flags,const char * filename)2293 int perf_event__synthesize_mmap2_build_id(const struct perf_tool *tool,
2294 					  struct perf_sample *sample,
2295 					  struct machine *machine,
2296 					  perf_event__handler_t process,
2297 					  const struct evsel *evsel,
2298 					  __u16 misc,
2299 					  __u32 pid, __u32 tid,
2300 					  __u64 start, __u64 len, __u64 pgoff,
2301 					  const struct build_id *bid,
2302 					  __u32 prot, __u32 flags,
2303 					  const char *filename)
2304 {
2305 	union perf_event ev;
2306 	size_t ev_len;
2307 	void *array;
2308 	int ret;
2309 
2310 	ev_len = sizeof(ev.mmap2) - sizeof(ev.mmap2.filename) + strlen(filename) + 1;
2311 	ev_len = PERF_ALIGN(ev_len, sizeof(u64));
2312 
2313 	memset(&ev, 0, ev_len);
2314 
2315 	ev.mmap2.header.type = PERF_RECORD_MMAP2;
2316 	ev.mmap2.header.misc = misc | PERF_RECORD_MISC_MMAP_BUILD_ID;
2317 	ev.mmap2.header.size = ev_len;
2318 
2319 	ev.mmap2.pid = pid;
2320 	ev.mmap2.tid = tid;
2321 	ev.mmap2.start = start;
2322 	ev.mmap2.len = len;
2323 	ev.mmap2.pgoff = pgoff;
2324 
2325 	ev.mmap2.build_id_size = bid->size;
2326 	if (ev.mmap2.build_id_size > sizeof(ev.mmap2.build_id))
2327 		ev.mmap2.build_id_size = sizeof(ev.mmap2.build_id);
2328 	memcpy(ev.mmap2.build_id, bid->data, ev.mmap2.build_id_size);
2329 
2330 	ev.mmap2.prot = prot;
2331 	ev.mmap2.flags = flags;
2332 
2333 	memcpy(ev.mmap2.filename, filename, min(strlen(filename), sizeof(ev.mmap2.filename)));
2334 
2335 	array = &ev;
2336 	array += ev.header.size;
2337 	ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
2338 	if (ret < 0)
2339 		return ret;
2340 
2341 	if (ret & 7) {
2342 		pr_err("Bad id sample size %d\n", ret);
2343 		return -EINVAL;
2344 	}
2345 
2346 	ev.header.size += ret;
2347 
2348 	return process(tool, &ev, sample, machine);
2349 }
2350 
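/*
 * Synthesize the metadata events needed at the start of a stat session:
 * optionally the attrs, then extra attributes, thread map, cpu map and
 * stat config.
 */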
perf_event__synthesize_stat_events(struct perf_stat_config * config,const struct perf_tool * tool,struct evlist * evlist,perf_event__handler_t process,bool attrs)2351 int perf_event__synthesize_stat_events(struct perf_stat_config *config, const struct perf_tool *tool,
2352 				       struct evlist *evlist, perf_event__handler_t process, bool attrs)
2353 {
2354 	int err;
2355 
2356 	if (attrs) {
2357 		err = perf_event__synthesize_attrs(tool, evlist, process);
2358 		if (err < 0) {
2359 			pr_err("Couldn't synthesize attrs.\n");
2360 			return err;
2361 		}
2362 	}
2363 
2364 	err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
	if (err < 0) {
		pr_err("Couldn't synthesize extra attributes.\n");
		return err;
	}

2365 	err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
2366 	if (err < 0) {
2367 		pr_err("Couldn't synthesize thread map.\n");
2368 		return err;
2369 	}
2370 
2371 	err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
2372 	if (err < 0) {
2373 		pr_err("Couldn't synthesize cpu map.\n");
2374 		return err;
2375 	}
2376 
2377 	err = perf_event__synthesize_stat_config(tool, config, process, NULL);
2378 	if (err < 0) {
2379 		pr_err("Couldn't synthesize config.\n");
2380 		return err;
2381 	}
2382 
2383 	return 0;
2384 }
2385 
2386 extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
2387 
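/*
 * Synthesize one PERF_RECORD_HEADER_FEATURE event for each header feature
 * marked for synthesis, followed by a HEADER_LAST_FEATURE terminator.
 */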
perf_event__synthesize_features(const struct perf_tool * tool,struct perf_session * session,struct evlist * evlist,perf_event__handler_t process)2388 int perf_event__synthesize_features(const struct perf_tool *tool, struct perf_session *session,
2389 				    struct evlist *evlist, perf_event__handler_t process)
2390 {
2391 	struct perf_header *header = &session->header;
2392 	struct perf_record_header_feature *fe;
2393 	struct feat_fd ff;
2394 	size_t sz, sz_hdr;
2395 	int feat, ret;
2396 
2397 	sz_hdr = sizeof(fe->header);
2398 	sz = sizeof(union perf_event);
2399 	/* get a nice alignment */
2400 	sz = PERF_ALIGN(sz, page_size);
2401 
2402 	memset(&ff, 0, sizeof(ff));
2403 
2404 	ff.buf = malloc(sz);
2405 	if (!ff.buf)
2406 		return -ENOMEM;
2407 
2408 	ff.size = sz - sz_hdr;
2409 	ff.ph = &session->header;
2410 
2411 	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2412 		if (!feat_ops[feat].synthesize) {
2413 			pr_debug("No record header feature for header %d\n", feat);
2414 			continue;
2415 		}
2416 
2417 		ff.offset = sizeof(*fe);
2418 
2419 		ret = feat_ops[feat].write(&ff, evlist);
2420 		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
2421 			pr_debug("Error writing feature\n");
2422 			continue;
2423 		}
2424 		/* ff.buf may have changed due to realloc in do_write() */
2425 		fe = ff.buf;
2426 		memset(fe, 0, sizeof(*fe));
2427 
2428 		fe->feat_id = feat;
2429 		fe->header.type = PERF_RECORD_HEADER_FEATURE;
2430 		fe->header.size = ff.offset;
2431 
2432 		ret = process(tool, ff.buf, NULL, NULL);
2433 		if (ret) {
2434 			free(ff.buf);
2435 			return ret;
2436 		}
2437 	}
2438 
2439 	/* Send HEADER_LAST_FEATURE mark. */
2440 	fe = ff.buf;
2441 	fe->feat_id     = HEADER_LAST_FEATURE;
2442 	fe->header.type = PERF_RECORD_HEADER_FEATURE;
2443 	fe->header.size = sizeof(*fe);
2444 
2445 	ret = process(tool, ff.buf, NULL, NULL);
2446 
2447 	free(ff.buf);
2448 	return ret;
2449 }
2450 
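/*
 * Synthesize the header information (attrs, features and, when available,
 * tracing data) that cannot live in the perf.data header when the output
 * is a pipe.
 */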
perf_event__synthesize_for_pipe(const struct perf_tool * tool,struct perf_session * session,struct perf_data * data,perf_event__handler_t process)2451 int perf_event__synthesize_for_pipe(const struct perf_tool *tool,
2452 				    struct perf_session *session,
2453 				    struct perf_data *data,
2454 				    perf_event__handler_t process)
2455 {
2456 	int err;
2457 	int ret = 0;
2458 	struct evlist *evlist = session->evlist;
2459 
2460 	/*
2461 	 * We need to synthesize the attribute events first, because some
2462 	 * features work on top of them (on the report side).
2463 	 */
2464 	err = perf_event__synthesize_attrs(tool, evlist, process);
2465 	if (err < 0) {
2466 		pr_err("Couldn't synthesize attrs.\n");
2467 		return err;
2468 	}
2469 	ret += err;
2470 
2471 	err = perf_event__synthesize_features(tool, session, evlist, process);
2472 	if (err < 0) {
2473 		pr_err("Couldn't synthesize features.\n");
2474 		return err;
2475 	}
2476 	ret += err;
2477 
2478 #ifdef HAVE_LIBTRACEEVENT
2479 	if (have_tracepoints(&evlist->core.entries)) {
2480 		int fd = perf_data__fd(data);
2481 
2482 		/*
2483 		 * FIXME err <= 0 here actually means that
2484 		 * there were no tracepoints, so it's not really
2485 		 * an error, just that we don't need to
2486 		 * synthesize anything.  We really have to
2487 		 * return this more properly and also
2488 		 * propagate errors that currently call die().
2489 		 */
2490 		err = perf_event__synthesize_tracing_data(tool,	fd, evlist,
2491 							  process);
2492 		if (err <= 0) {
2493 			pr_err("Couldn't record tracing data.\n");
2494 			return err;
2495 		}
2496 		ret += err;
2497 	}
2498 #else
2499 	(void)data;
2500 #endif
2501 
2502 	return ret;
2503 }
2504 
parse_synth_opt(char * synth)2505 int parse_synth_opt(char *synth)
2506 {
2507 	char *p, *q;
2508 	int ret = 0;
2509 
2510 	if (synth == NULL)
2511 		return -1;
2512 
2513 	for (q = synth; (p = strsep(&q, ",")); p = q) {
2514 		if (!strcasecmp(p, "no") || !strcasecmp(p, "none"))
2515 			return 0;
2516 
2517 		if (!strcasecmp(p, "all"))
2518 			return PERF_SYNTH_ALL;
2519 
2520 		if (!strcasecmp(p, "task"))
2521 			ret |= PERF_SYNTH_TASK;
2522 		else if (!strcasecmp(p, "mmap"))
2523 			ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP;
2524 		else if (!strcasecmp(p, "cgroup"))
2525 			ret |= PERF_SYNTH_CGROUP;
2526 		else
2527 			return -1;
2528 	}
2529 
2530 	return ret;
2531 }
2532
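/*
 * Illustrative usage sketch (not part of the original file): how a
 * command-line handler might consume parse_synth_opt().  The callback
 * name and option wiring are hypothetical; the PERF_SYNTH_* flags come
 * from util/synthetic-events.h.
 */
#if 0
static int opt_parse_synth(const struct option *opt __maybe_unused,
			   const char *str, int unset __maybe_unused)
{
	/* parse_synth_opt() modifies the string via strsep(), so copy it */
	char *buf = strdup(str ?: "all");
	int flags;

	if (!buf)
		return -ENOMEM;
	flags = parse_synth_opt(buf);	/* e.g. "task,mmap" or "cgroup" */
	free(buf);
	if (flags < 0) {
		pr_err("Invalid synth option: %s\n", str);
		return -1;
	}
	/* flags now carries PERF_SYNTH_TASK/_MMAP/_CGROUP bits (or PERF_SYNTH_ALL) */
	return flags;
}
#endif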