/* xref: /linux/tools/perf/util/evlist.c (revision d97b46a64674a267bc41c9e16132ee2a98c3347d) */
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel, *first;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts, first);

		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_attrs(struct perf_evlist *evlist,
			   struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

static int trace_event__id(const char *evname)
{
	char *filename, *colon;
	int err = -1, fd;

	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
		return -1;

	colon = strrchr(filename, ':');
	if (colon != NULL)
		*colon = '/';

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}
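
/*
 * Illustrative note (not part of the original source): given an event name
 * such as "sched:sched_switch" (only an example), trace_event__id() above
 * builds the path "<tracing_events_path>/sched/sched_switch/id" (the ':' is
 * rewritten to '/'), reads the numeric id stored in that debugfs file and
 * returns it, so it can be used as attr.config for a PERF_TYPE_TRACEPOINT
 * event, as perf_evlist__add_tracepoints() does below.
 */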

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
				 const char *tracepoints[],
				 size_t nr_tracepoints)
{
	int err;
	size_t i;
	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

	if (attrs == NULL)
		return -1;

	for (i = 0; i < nr_tracepoints; i++) {
		err = trace_event__id(tracepoints[i]);

		if (err < 0)
			goto out_free_attrs;

		attrs[i].type	       = PERF_TYPE_TRACEPOINT;
		attrs[i].config	       = err;
		attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU);
		attrs[i].sample_period = 1;
	}

	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
	free(attrs);
	return err;
}

static struct perf_evsel *
	perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
					  const struct perf_evsel_str_handler *assocs,
					  size_t nr_assocs)
{
	struct perf_evsel *evsel;
	int err;
	size_t i;

	for (i = 0; i < nr_assocs; i++) {
		err = trace_event__id(assocs[i].name);
		if (err < 0)
			goto out;

		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out;
		evsel->handler.func = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

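/*
 * Illustrative note (not part of the original source): with PERF_FORMAT_ID
 * set and no PERF_FORMAT_GROUP, a read() on the event fd returns an array
 * of u64s laid out as
 *
 *	{ value,
 *	  time_enabled,	only if PERF_FORMAT_TOTAL_TIME_ENABLED is set
 *	  time_running,	only if PERF_FORMAT_TOTAL_TIME_RUNNING is set
 *	  id }
 *
 * which is why id_idx below starts at 1 and is bumped once for each of the
 * two optional time fields before read_data[id_idx] is used as the id.
 */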
static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
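
/*
 * Illustrative sketch (not part of the original source): a caller would
 * typically drain each mmap'ed buffer by looping on perf_evlist__mmap_read()
 * until it returns NULL, handing every record to its own processing routine.
 * process_event() is a hypothetical callback used only to show the calling
 * convention; the tail update for !overwrite buffers is already performed
 * inside perf_evlist__mmap_read().
 */
#if 0	/* example only, compiled out */
static void drain_all_mmaps(struct perf_evlist *evlist,
			    void (*process_event)(union perf_event *event))
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		union perf_event *event;

		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
			process_event(event);
	}
}
#endif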

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
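
/*
 * Illustrative sketch (not part of the original source): the expected call
 * order for the API above is roughly new -> create_maps -> add events ->
 * open -> mmap -> enable. "target" is assumed to have been filled in by the
 * caller (e.g. from command line options), error handling is trimmed for
 * brevity, and 128 pages is just an arbitrary power of two.
 */
#if 0	/* example only, compiled out */
static int record_like_setup(struct perf_target *target)
{
	struct perf_evlist *evlist = perf_evlist__new(NULL, NULL);

	if (evlist == NULL)
		return -ENOMEM;

	if (perf_evlist__create_maps(evlist, target) < 0 ||
	    perf_evlist__add_default(evlist) < 0 ||
	    perf_evlist__open(evlist, false) < 0 ||
	    perf_evlist__mmap(evlist, 128, false) < 0)
		return -1;

	perf_evlist__enable(evlist);
	return 0;
}
#endif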

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}

bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist, bool group)
{
	struct perf_evsel *evsel, *first;
	int err, ncpus, nthreads;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		struct xyarray *group_fd = NULL;

		if (group && evsel != first)
			group_fd = first->fd;

		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
				       group, group_fd);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(&opts->target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}
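
/*
 * Illustrative sketch (not part of the original source): how the workload
 * "cork" above is meant to be used. The forked child blocks in
 * read(go_pipe[0]) inside perf_evlist__prepare_workload() until
 * perf_evlist__start_workload() closes the write end, so the counters can be
 * opened, mmap'ed and enabled on the child's pid before it exec()s. "opts"
 * and "argv" are assumed to be set up by the caller; error handling is
 * trimmed for brevity.
 */
#if 0	/* example only, compiled out */
static int run_corked_workload(struct perf_evlist *evlist,
			       struct perf_record_opts *opts,
			       const char *argv[])
{
	if (perf_evlist__prepare_workload(evlist, opts, argv) < 0)
		return -1;

	/* open/mmap the events on evlist->workload.pid here ... */

	perf_evlist__enable(evlist);
	return perf_evlist__start_workload(evlist);
}
#endif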