/*
 * /linux/tools/perf/util/evlist.c (revision b43ab901d671e3e3cad425ea5e9a3c74e266dcdd)
 *
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

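/*
 * Accessors for the per-event fd and sample_id xyarrays: one entry per
 * (cpu, thread) pair for each evsel.
 */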
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

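/*
 * Apply the record options to every event in the list. With more than
 * one event, PERF_SAMPLE_ID is forced on so that samples read from the
 * shared mmap buffers can be routed back to the evsel they belong to.
 */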
void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

static void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
					  struct list_head *list,
					  int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

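/*
 * Add @nr_attrs events in one go: on any allocation failure the evsels
 * created so far are deleted and the evlist is left untouched.
 */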
int perf_evlist__add_attrs(struct perf_evlist *evlist,
			   struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

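/*
 * Resolve a tracepoint name to its numeric id by reading
 * <tracing_events_path>/<subsys>/<event>/id. A "subsys:event" name is
 * accepted too: the last ':' is rewritten to '/' before the open.
 */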
static int trace_event__id(const char *evname)
{
	char *filename, *colon;
	int err = -1, fd;

	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
		return -1;

	colon = strrchr(filename, ':');
	if (colon != NULL)
		*colon = '/';

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		ssize_t n = read(fd, id, sizeof(id) - 1);
		if (n > 0) {
			id[n] = '\0'; /* read() doesn't NUL terminate */
			err = atoi(id);
		}
		close(fd);
	}

	free(filename);
	return err;
}

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
				 const char *tracepoints[],
				 size_t nr_tracepoints)
{
	int err;
	size_t i;
	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

	if (attrs == NULL)
		return -1;

	for (i = 0; i < nr_tracepoints; i++) {
		err = trace_event__id(tracepoints[i]);

		if (err < 0)
			goto out_free_attrs;

		attrs[i].type	       = PERF_TYPE_TRACEPOINT;
		attrs[i].config	       = err;
		attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU);
		attrs[i].sample_period = 1;
	}

	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
	free(attrs);
	return err;
}

static struct perf_evsel *
	perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

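/*
 * Attach handler callbacks to tracepoint events already in the list.
 * Names that don't resolve to an event in the evlist are silently
 * skipped; attaching to an event that already has a handler fails with
 * -EEXIST.
 */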
int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
					  const struct perf_evsel_str_handler *assocs,
					  size_t nr_assocs)
{
	struct perf_evsel *evsel;
	int err;
	size_t i;

	for (i = 0; i < nr_assocs; i++) {
		err = trace_event__id(assocs[i].name);
		if (err < 0)
			goto out;

		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out;
		evsel->handler.func = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
		}
	}
}

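/* Worst case: one pollfd per cpu per thread per event. */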
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

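/*
 * The sample id -> evsel mapping is kept in a hash table so that
 * perf_evlist__id2evsel() can route samples without scanning the
 * whole list.
 */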
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

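/*
 * Map a sample id back to the evsel that produced it. With a single
 * event there is nothing to disambiguate, so the hash lookup is
 * skipped.
 */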
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

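/*
 * Pull the next event out of ring buffer @idx. In non-overwrite mode
 * the tail pointer is advanced afterwards so the kernel can reuse the
 * space; in overwrite mode a reader that has fallen too far behind is
 * resynced to the current head.
 */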
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

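/*
 * One ring buffer per cpu: the first fd opened on each cpu is mmap'ed,
 * and every other event on that cpu redirects its output into that
 * buffer via PERF_EVENT_IOC_SET_OUTPUT.
 */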
static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

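/* Like perf_evlist__mmap_per_cpu(), but with one ring buffer per thread. */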
static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

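/*
 * Build the cpu and thread maps from the target: following a specific
 * pid/tid gets a dummy cpu map (the counters travel with the threads),
 * while system wide or cpu-list targets get a real one.
 */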
int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (cpu_list == NULL && target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

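/*
 * The sample parsing code assumes one sample_type for the whole
 * session, so all events must agree on it (and likewise on
 * sample_id_all, checked further below).
 */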
bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

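/*
 * Open all events. With @group set, the first event in the list becomes
 * the group leader and the others are opened against its fds. On any
 * failure, everything opened so far is closed again.
 */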
int perf_evlist__open(struct perf_evlist *evlist, bool group)
{
	struct perf_evsel *evsel, *first;
	int err, ncpus, nthreads;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		struct xyarray *group_fd = NULL;

		if (group && evsel != first)
			group_fd = first->fd;

		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
				       group, group_fd);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	return err;
}

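/*
 * Fork the workload but keep it corked on a pipe, so the counters can
 * be set up on the child's pid before it exec()s. The usual sequence,
 * as in builtin-record.c, is: perf_evlist__prepare_workload(), open and
 * mmap the counters, then perf_evlist__start_workload() to uncork the
 * child and let it exec the real command.
 */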
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (!opts->system_wide && opts->target_tid == -1 && opts->target_pid == -1)
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}