xref: /linux/tools/perf/util/evlist.c (revision 90ab5ee94171b3e28de6bb42ee30b527014e0be7)
1 /*
2  * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
3  *
4  * Parts came from builtin-{top,stat,record}.c, see those files for further
5  * copyright notes.
6  *
7  * Released under the GPL v2. (and only v2, not any later version)
8  */
9 #include "util.h"
10 #include "debugfs.h"
11 #include <poll.h>
12 #include "cpumap.h"
13 #include "thread_map.h"
14 #include "evlist.h"
15 #include "evsel.h"
16 #include <unistd.h>
17 
18 #include "parse-events.h"
19 
20 #include <sys/mman.h>
21 
22 #include <linux/bitops.h>
23 #include <linux/hash.h>
24 
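/*
 * Convenience accessors: FD() yields the perf_event_open() fd and SID()
 * the struct perf_sample_id slot that an evsel keeps for a given
 * (cpu index, thread index) pair in its xyarray tables.
 */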
25 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
26 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
27 
28 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
29 		       struct thread_map *threads)
30 {
31 	int i;
32 
33 	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
34 		INIT_HLIST_HEAD(&evlist->heads[i]);
35 	INIT_LIST_HEAD(&evlist->entries);
36 	perf_evlist__set_maps(evlist, cpus, threads);
37 	evlist->workload.pid = -1;
38 }
39 
40 struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
41 				     struct thread_map *threads)
42 {
43 	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
44 
45 	if (evlist != NULL)
46 		perf_evlist__init(evlist, cpus, threads);
47 
48 	return evlist;
49 }
50 
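/*
 * Apply the record options to every event in the list.  With a dummy CPU
 * map (map[0] < 0, i.e. per-thread monitoring) inheritance is disabled,
 * and when more than one event is present PERF_SAMPLE_ID is added so that
 * samples can later be mapped back to their evsel, see
 * perf_evlist__id2evsel().
 */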
51 void perf_evlist__config_attrs(struct perf_evlist *evlist,
52 			       struct perf_record_opts *opts)
53 {
54 	struct perf_evsel *evsel;
55 
56 	if (evlist->cpus->map[0] < 0)
57 		opts->no_inherit = true;
58 
59 	list_for_each_entry(evsel, &evlist->entries, node) {
60 		perf_evsel__config(evsel, opts);
61 
62 		if (evlist->nr_entries > 1)
63 			evsel->attr.sample_type |= PERF_SAMPLE_ID;
64 	}
65 }
66 
67 static void perf_evlist__purge(struct perf_evlist *evlist)
68 {
69 	struct perf_evsel *pos, *n;
70 
71 	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
72 		list_del_init(&pos->node);
73 		perf_evsel__delete(pos);
74 	}
75 
76 	evlist->nr_entries = 0;
77 }
78 
79 void perf_evlist__exit(struct perf_evlist *evlist)
80 {
81 	free(evlist->mmap);
82 	free(evlist->pollfd);
83 	evlist->mmap = NULL;
84 	evlist->pollfd = NULL;
85 }
86 
87 void perf_evlist__delete(struct perf_evlist *evlist)
88 {
89 	perf_evlist__purge(evlist);
90 	perf_evlist__exit(evlist);
91 	free(evlist);
92 }
93 
94 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
95 {
96 	list_add_tail(&entry->node, &evlist->entries);
97 	++evlist->nr_entries;
98 }
99 
100 static void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
101 					  struct list_head *list,
102 					  int nr_entries)
103 {
104 	list_splice_tail(list, &evlist->entries);
105 	evlist->nr_entries += nr_entries;
106 }
107 
108 int perf_evlist__add_default(struct perf_evlist *evlist)
109 {
110 	struct perf_event_attr attr = {
111 		.type = PERF_TYPE_HARDWARE,
112 		.config = PERF_COUNT_HW_CPU_CYCLES,
113 	};
114 	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);
115 
116 	if (evsel == NULL)
117 		goto error;
118 
119 	/* use strdup() because free(evsel) assumes name is allocated */
120 	evsel->name = strdup("cycles");
121 	if (!evsel->name)
122 		goto error_free;
123 
124 	perf_evlist__add(evlist, evsel);
125 	return 0;
126 error_free:
127 	perf_evsel__delete(evsel);
128 error:
129 	return -ENOMEM;
130 }
131 
132 int perf_evlist__add_attrs(struct perf_evlist *evlist,
133 			   struct perf_event_attr *attrs, size_t nr_attrs)
134 {
135 	struct perf_evsel *evsel, *n;
136 	LIST_HEAD(head);
137 	size_t i;
138 
139 	for (i = 0; i < nr_attrs; i++) {
140 		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
141 		if (evsel == NULL)
142 			goto out_delete_partial_list;
143 		list_add_tail(&evsel->node, &head);
144 	}
145 
146 	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
147 
148 	return 0;
149 
150 out_delete_partial_list:
151 	list_for_each_entry_safe(evsel, n, &head, node)
152 		perf_evsel__delete(evsel);
153 	return -1;
154 }
155 
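/*
 * Look up a tracepoint's numeric id in debugfs.  A "sys:name" spec has its
 * last ':' turned into '/' so that, for example, "sched:sched_switch"
 * resolves to <tracing_events_path>/sched/sched_switch/id.  Returns the id
 * or -1 on error.
 */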
156 static int trace_event__id(const char *evname)
157 {
158 	char *filename, *colon;
159 	int err = -1, fd;
160 
161 	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
162 		return -1;
163 
164 	colon = strrchr(filename, ':');
165 	if (colon != NULL)
166 		*colon = '/';
167 
168 	fd = open(filename, O_RDONLY);
169 	if (fd >= 0) {
170 		char id[16];
171 		if (read(fd, id, sizeof(id)) > 0)
172 			err = atoi(id);
173 		close(fd);
174 	}
175 
176 	free(filename);
177 	return err;
178 }
179 
180 int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
181 				 const char *tracepoints[],
182 				 size_t nr_tracepoints)
183 {
184 	int err;
185 	size_t i;
186 	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));
187 
188 	if (attrs == NULL)
189 		return -1;
190 
191 	for (i = 0; i < nr_tracepoints; i++) {
192 		err = trace_event__id(tracepoints[i]);
193 
194 		if (err < 0)
195 			goto out_free_attrs;
196 
197 		attrs[i].type	       = PERF_TYPE_TRACEPOINT;
198 		attrs[i].config	       = err;
199 		attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
200 					  PERF_SAMPLE_CPU);
201 		attrs[i].sample_period = 1;
202 	}
203 
204 	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
205 out_free_attrs:
206 	free(attrs);
207 	return err;
208 }
209 
210 static struct perf_evsel *
211 	perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
212 {
213 	struct perf_evsel *evsel;
214 
215 	list_for_each_entry(evsel, &evlist->entries, node) {
216 		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
217 		    (int)evsel->attr.config == id)
218 			return evsel;
219 	}
220 
221 	return NULL;
222 }
223 
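/*
 * Associate handler callbacks with tracepoint events already added to the
 * evlist.  Names that do not match any evsel are silently skipped, and
 * -EEXIST is returned if an evsel already has a handler.
 *
 * Illustrative sketch only (process_sched_switch is a hypothetical
 * callback):
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch, },
 *	};
 *
 *	if (perf_evlist__set_tracepoints_handlers(evlist, handlers,
 *						   ARRAY_SIZE(handlers)) < 0)
 *		return -1;
 */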
224 int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
225 					  const struct perf_evsel_str_handler *assocs,
226 					  size_t nr_assocs)
227 {
228 	struct perf_evsel *evsel;
229 	int err;
230 	size_t i;
231 
232 	for (i = 0; i < nr_assocs; i++) {
233 		err = trace_event__id(assocs[i].name);
234 		if (err < 0)
235 			goto out;
236 
237 		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
238 		if (evsel == NULL)
239 			continue;
240 
241 		err = -EEXIST;
242 		if (evsel->handler.func != NULL)
243 			goto out;
244 		evsel->handler.func = assocs[i].handler;
245 	}
246 
247 	err = 0;
248 out:
249 	return err;
250 }
251 
252 void perf_evlist__disable(struct perf_evlist *evlist)
253 {
254 	int cpu, thread;
255 	struct perf_evsel *pos;
256 
257 	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
258 		list_for_each_entry(pos, &evlist->entries, node) {
259 			for (thread = 0; thread < evlist->threads->nr; thread++)
260 				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
261 		}
262 	}
263 }
264 
265 void perf_evlist__enable(struct perf_evlist *evlist)
266 {
267 	int cpu, thread;
268 	struct perf_evsel *pos;
269 
270 	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
271 		list_for_each_entry(pos, &evlist->entries, node) {
272 			for (thread = 0; thread < evlist->threads->nr; thread++)
273 				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
274 		}
275 	}
276 }
277 
278 static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
279 {
280 	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
281 	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
282 	return evlist->pollfd != NULL ? 0 : -ENOMEM;
283 }
284 
285 void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
286 {
287 	fcntl(fd, F_SETFL, O_NONBLOCK);
288 	evlist->pollfd[evlist->nr_fds].fd = fd;
289 	evlist->pollfd[evlist->nr_fds].events = POLLIN;
290 	evlist->nr_fds++;
291 }
292 
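/*
 * The sample id is hashed into evlist->heads so that
 * perf_evlist__id2evsel() can find the owning evsel when demultiplexing
 * samples coming from a shared mmap buffer.
 */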
293 static void perf_evlist__id_hash(struct perf_evlist *evlist,
294 				 struct perf_evsel *evsel,
295 				 int cpu, int thread, u64 id)
296 {
297 	int hash;
298 	struct perf_sample_id *sid = SID(evsel, cpu, thread);
299 
300 	sid->id = id;
301 	sid->evsel = evsel;
302 	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
303 	hlist_add_head(&sid->node, &evlist->heads[hash]);
304 }
305 
306 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
307 			 int cpu, int thread, u64 id)
308 {
309 	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
310 	evsel->id[evsel->ids++] = id;
311 }
312 
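/*
 * Obtain the kernel assigned event ID by read()ing the counter.  With
 * PERF_FORMAT_ID (and no PERF_FORMAT_GROUP) the read layout is:
 *
 *	{ value, [time_enabled], [time_running], id }
 *
 * so the index of the ID depends on which TOTAL_TIME flags are set.
 */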
313 static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
314 				  struct perf_evsel *evsel,
315 				  int cpu, int thread, int fd)
316 {
317 	u64 read_data[4] = { 0, };
318 	int id_idx = 1; /* The first entry is the counter value */
319 
320 	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
321 	    read(fd, &read_data, sizeof(read_data)) == -1)
322 		return -1;
323 
324 	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
325 		++id_idx;
326 	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
327 		++id_idx;
328 
329 	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
330 	return 0;
331 }
332 
333 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
334 {
335 	struct hlist_head *head;
336 	struct hlist_node *pos;
337 	struct perf_sample_id *sid;
338 	int hash;
339 
340 	if (evlist->nr_entries == 1)
341 		return list_entry(evlist->entries.next, struct perf_evsel, node);
342 
343 	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
344 	head = &evlist->heads[hash];
345 
346 	hlist_for_each_entry(sid, pos, head, node)
347 		if (sid->id == id)
348 			return sid->evsel;
349 	return NULL;
350 }
351 
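/*
 * Fetch the next event from mmap buffer 'idx'.  Events that wrap around
 * the end of the ring are reassembled into evlist->event_copy.  In
 * overwrite mode the reader may be overtaken by the kernel, in which case
 * reading restarts at the current head.  A minimal consumer loop
 * (process_event() is a hypothetical callback):
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL)
 *		process_event(event);
 */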
352 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
353 {
354 	/* XXX Move this to perf.c, making it generally available */
355 	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
356 	struct perf_mmap *md = &evlist->mmap[idx];
357 	unsigned int head = perf_mmap__read_head(md);
358 	unsigned int old = md->prev;
359 	unsigned char *data = md->base + page_size;
360 	union perf_event *event = NULL;
361 
362 	if (evlist->overwrite) {
363 		/*
364 		 * If we're further behind than half the buffer, there's a chance
365 		 * the writer will bite our tail and mess up the samples under us.
366 		 *
367 		 * If we somehow ended up ahead of the head, we got messed up.
368 		 *
369 		 * In either case, truncate and restart at head.
370 		 */
371 		int diff = head - old;
372 		if (diff > md->mask / 2 || diff < 0) {
373 			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
374 
375 			/*
376 			 * head points to a known good entry, start there.
377 			 */
378 			old = head;
379 		}
380 	}
381 
382 	if (old != head) {
383 		size_t size;
384 
385 		event = (union perf_event *)&data[old & md->mask];
386 		size = event->header.size;
387 
388 		/*
389 		 * Event straddles the mmap boundary -- header should always
390 		 * be inside due to u64 alignment of output.
391 		 */
392 		if ((old & md->mask) + size != ((old + size) & md->mask)) {
393 			unsigned int offset = old;
394 			unsigned int len = min(sizeof(*event), size), cpy;
395 			void *dst = &evlist->event_copy;
396 
397 			do {
398 				cpy = min(md->mask + 1 - (offset & md->mask), len);
399 				memcpy(dst, &data[offset & md->mask], cpy);
400 				offset += cpy;
401 				dst += cpy;
402 				len -= cpy;
403 			} while (len);
404 
405 			event = &evlist->event_copy;
406 		}
407 
408 		old += size;
409 	}
410 
411 	md->prev = old;
412 
413 	if (!evlist->overwrite)
414 		perf_mmap__write_tail(md, old);
415 
416 	return event;
417 }
418 
419 void perf_evlist__munmap(struct perf_evlist *evlist)
420 {
421 	int i;
422 
423 	for (i = 0; i < evlist->nr_mmaps; i++) {
424 		if (evlist->mmap[i].base != NULL) {
425 			munmap(evlist->mmap[i].base, evlist->mmap_len);
426 			evlist->mmap[i].base = NULL;
427 		}
428 	}
429 
430 	free(evlist->mmap);
431 	evlist->mmap = NULL;
432 }
433 
434 static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
435 {
436 	evlist->nr_mmaps = evlist->cpus->nr;
437 	if (evlist->cpus->map[0] == -1)
438 		evlist->nr_mmaps = evlist->threads->nr;
439 	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
440 	return evlist->mmap != NULL ? 0 : -ENOMEM;
441 }
442 
443 static int __perf_evlist__mmap(struct perf_evlist *evlist,
444 			       int idx, int prot, int mask, int fd)
445 {
446 	evlist->mmap[idx].prev = 0;
447 	evlist->mmap[idx].mask = mask;
448 	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
449 				      MAP_SHARED, fd, 0);
450 	if (evlist->mmap[idx].base == MAP_FAILED) {
451 		evlist->mmap[idx].base = NULL;
452 		return -1;
453 	}
454 
455 	perf_evlist__add_pollfd(evlist, fd);
456 	return 0;
457 }
458 
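/*
 * One ring buffer is created per CPU (or per thread, in the per-thread
 * variant below): the first fd opened for that CPU is mmap()ed and every
 * other fd is redirected into the same buffer with
 * PERF_EVENT_IOC_SET_OUTPUT, so all events on a CPU share one mmap area.
 */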
459 static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
460 {
461 	struct perf_evsel *evsel;
462 	int cpu, thread;
463 
464 	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
465 		int output = -1;
466 
467 		for (thread = 0; thread < evlist->threads->nr; thread++) {
468 			list_for_each_entry(evsel, &evlist->entries, node) {
469 				int fd = FD(evsel, cpu, thread);
470 
471 				if (output == -1) {
472 					output = fd;
473 					if (__perf_evlist__mmap(evlist, cpu,
474 								prot, mask, output) < 0)
475 						goto out_unmap;
476 				} else {
477 					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
478 						goto out_unmap;
479 				}
480 
481 				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
482 				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
483 					goto out_unmap;
484 			}
485 		}
486 	}
487 
488 	return 0;
489 
490 out_unmap:
491 	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
492 		if (evlist->mmap[cpu].base != NULL) {
493 			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
494 			evlist->mmap[cpu].base = NULL;
495 		}
496 	}
497 	return -1;
498 }
499 
500 static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
501 {
502 	struct perf_evsel *evsel;
503 	int thread;
504 
505 	for (thread = 0; thread < evlist->threads->nr; thread++) {
506 		int output = -1;
507 
508 		list_for_each_entry(evsel, &evlist->entries, node) {
509 			int fd = FD(evsel, 0, thread);
510 
511 			if (output == -1) {
512 				output = fd;
513 				if (__perf_evlist__mmap(evlist, thread,
514 							prot, mask, output) < 0)
515 					goto out_unmap;
516 			} else {
517 				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
518 					goto out_unmap;
519 			}
520 
521 			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
522 			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
523 				goto out_unmap;
524 		}
525 	}
526 
527 	return 0;
528 
529 out_unmap:
530 	for (thread = 0; thread < evlist->threads->nr; thread++) {
531 		if (evlist->mmap[thread].base != NULL) {
532 			munmap(evlist->mmap[thread].base, evlist->mmap_len);
533 			evlist->mmap[thread].base = NULL;
534 		}
535 	}
536 	return -1;
537 }
538 
539 /** perf_evlist__mmap - Create per-CPU maps to receive events
540  *
541  * @evlist: list of events
542  * @pages: map length in pages
543  * @overwrite: overwrite older events?
544  *
545  * If overwrite is false the user needs to signal event consumption using:
546  *
547  *	struct perf_mmap *m = &evlist->mmap[cpu];
548  *	unsigned int head = perf_mmap__read_head(m);
549  *
550  *	perf_mmap__write_tail(m, head)
551  *
552  * Using perf_evlist__mmap_read() does this automatically.
553  */
554 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
555 		      bool overwrite)
556 {
557 	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
558 	struct perf_evsel *evsel;
559 	const struct cpu_map *cpus = evlist->cpus;
560 	const struct thread_map *threads = evlist->threads;
561 	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
562 
563 	/* 512 kiB: default amount of unprivileged mlocked memory */
564 	if (pages == UINT_MAX)
565 		pages = (512 * 1024) / page_size;
566 	else if (!is_power_of_2(pages))
567 		return -EINVAL;
568 
569 	mask = pages * page_size - 1;
570 
571 	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
572 		return -ENOMEM;
573 
574 	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
575 		return -ENOMEM;
576 
577 	evlist->overwrite = overwrite;
578 	evlist->mmap_len = (pages + 1) * page_size;
579 
580 	list_for_each_entry(evsel, &evlist->entries, node) {
581 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
582 		    evsel->sample_id == NULL &&
583 		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
584 			return -ENOMEM;
585 	}
586 
587 	if (evlist->cpus->map[0] == -1)
588 		return perf_evlist__mmap_per_thread(evlist, prot, mask);
589 
590 	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
591 }
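/*
 * Illustrative only: how the mmap API above is typically driven in a
 * record-style tool, with error handling omitted and the target/pages
 * values standing in for real options:
 *
 *	perf_evlist__create_maps(evlist, target_pid, target_tid, cpu_list);
 *	perf_evlist__open(evlist, false);
 *	perf_evlist__mmap(evlist, pages, false);
 *	... poll(evlist->pollfd, evlist->nr_fds, -1), then drain each mmap
 *	    with perf_evlist__mmap_read() ...
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__delete_maps(evlist);
 */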
592 
593 int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
594 			     pid_t target_tid, const char *cpu_list)
595 {
596 	evlist->threads = thread_map__new(target_pid, target_tid);
597 
598 	if (evlist->threads == NULL)
599 		return -1;
600 
601 	if (cpu_list == NULL && target_tid != -1)
602 		evlist->cpus = cpu_map__dummy_new();
603 	else
604 		evlist->cpus = cpu_map__new(cpu_list);
605 
606 	if (evlist->cpus == NULL)
607 		goto out_delete_threads;
608 
609 	return 0;
610 
611 out_delete_threads:
612 	thread_map__delete(evlist->threads);
613 	return -1;
614 }
615 
616 void perf_evlist__delete_maps(struct perf_evlist *evlist)
617 {
618 	cpu_map__delete(evlist->cpus);
619 	thread_map__delete(evlist->threads);
620 	evlist->cpus	= NULL;
621 	evlist->threads = NULL;
622 }
623 
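/*
 * Push each evsel's filter string (if any) down to every open fd with
 * PERF_EVENT_IOC_SET_FILTER; the first ioctl() failure is returned.
 */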
624 int perf_evlist__set_filters(struct perf_evlist *evlist)
625 {
626 	const struct thread_map *threads = evlist->threads;
627 	const struct cpu_map *cpus = evlist->cpus;
628 	struct perf_evsel *evsel;
629 	char *filter;
630 	int thread;
631 	int cpu;
632 	int err;
633 	int fd;
634 
635 	list_for_each_entry(evsel, &evlist->entries, node) {
636 		filter = evsel->filter;
637 		if (!filter)
638 			continue;
639 		for (cpu = 0; cpu < cpus->nr; cpu++) {
640 			for (thread = 0; thread < threads->nr; thread++) {
641 				fd = FD(evsel, cpu, thread);
642 				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
643 				if (err)
644 					return err;
645 			}
646 		}
647 	}
648 
649 	return 0;
650 }
651 
652 bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
653 {
654 	struct perf_evsel *pos, *first;
655 
656 	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
657 
658 	list_for_each_entry_continue(pos, &evlist->entries, node) {
659 		if (first->attr.sample_type != pos->attr.sample_type)
660 			return false;
661 	}
662 
663 	return true;
664 }
665 
666 u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
667 {
668 	struct perf_evsel *first;
669 
670 	first = list_entry(evlist->entries.next, struct perf_evsel, node);
671 	return first->attr.sample_type;
672 }
673 
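/*
 * Size of the sample_id_all trailer the kernel appends to non-sample
 * events, derived from the first evsel's sample_type; zero when
 * sample_id_all is not set.
 */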
674 u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
675 {
676 	struct perf_evsel *first;
677 	struct perf_sample *data;
678 	u64 sample_type;
679 	u16 size = 0;
680 
681 	first = list_entry(evlist->entries.next, struct perf_evsel, node);
682 
683 	if (!first->attr.sample_id_all)
684 		goto out;
685 
686 	sample_type = first->attr.sample_type;
687 
688 	if (sample_type & PERF_SAMPLE_TID)
689 		size += sizeof(data->tid) * 2;
690 
691 	if (sample_type & PERF_SAMPLE_TIME)
692 		size += sizeof(data->time);
693 
694 	if (sample_type & PERF_SAMPLE_ID)
695 		size += sizeof(data->id);
696 
697 	if (sample_type & PERF_SAMPLE_STREAM_ID)
698 		size += sizeof(data->stream_id);
699 
700 	if (sample_type & PERF_SAMPLE_CPU)
701 		size += sizeof(data->cpu) * 2;
702 out:
703 	return size;
704 }
705 
706 bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
707 {
708 	struct perf_evsel *pos, *first;
709 
710 	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
711 
712 	list_for_each_entry_continue(pos, &evlist->entries, node) {
713 		if (first->attr.sample_id_all != pos->attr.sample_id_all)
714 			return false;
715 	}
716 
717 	return true;
718 }
719 
720 bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
721 {
722 	struct perf_evsel *first;
723 
724 	first = list_entry(evlist->entries.next, struct perf_evsel, node);
725 	return first->attr.sample_id_all;
726 }
727 
728 void perf_evlist__set_selected(struct perf_evlist *evlist,
729 			       struct perf_evsel *evsel)
730 {
731 	evlist->selected = evsel;
732 }
733 
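/*
 * Open every event in the list.  When 'group' is true the first evsel
 * acts as the group leader and its fd table is passed as group_fd to the
 * others; on failure everything opened so far is closed again in reverse
 * order.
 */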
734 int perf_evlist__open(struct perf_evlist *evlist, bool group)
735 {
736 	struct perf_evsel *evsel, *first;
737 	int err, ncpus, nthreads;
738 
739 	first = list_entry(evlist->entries.next, struct perf_evsel, node);
740 
741 	list_for_each_entry(evsel, &evlist->entries, node) {
742 		struct xyarray *group_fd = NULL;
743 
744 		if (group && evsel != first)
745 			group_fd = first->fd;
746 
747 		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
748 				       group, group_fd);
749 		if (err < 0)
750 			goto out_err;
751 	}
752 
753 	return 0;
754 out_err:
755 	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
756 	nthreads = evlist->threads ? evlist->threads->nr : 1;
757 
758 	list_for_each_entry_reverse(evsel, &evlist->entries, node)
759 		perf_evsel__close(evsel, ncpus, nthreads);
760 
761 	return err;
762 }
763 
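/*
 * Fork the workload but keep it "corked": the child closes its end of
 * child_ready_pipe to tell the parent it is ready, then blocks reading
 * go_pipe until perf_evlist__start_workload() closes the write end
 * (workload.cork_fd), at which point the read returns and the child
 * exec()s the real command.  go_pipe[0] is marked close-on-exec so it
 * does not leak into the workload.
 */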
764 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
765 				  struct perf_record_opts *opts,
766 				  const char *argv[])
767 {
768 	int child_ready_pipe[2], go_pipe[2];
769 	char bf;
770 
771 	if (pipe(child_ready_pipe) < 0) {
772 		perror("failed to create 'ready' pipe");
773 		return -1;
774 	}
775 
776 	if (pipe(go_pipe) < 0) {
777 		perror("failed to create 'go' pipe");
778 		goto out_close_ready_pipe;
779 	}
780 
781 	evlist->workload.pid = fork();
782 	if (evlist->workload.pid < 0) {
783 		perror("failed to fork");
784 		goto out_close_pipes;
785 	}
786 
787 	if (!evlist->workload.pid) {
788 		if (opts->pipe_output)
789 			dup2(2, 1);
790 
791 		close(child_ready_pipe[0]);
792 		close(go_pipe[1]);
793 		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
794 
795 		/*
796 		 * Do a dummy execvp to get the PLT entry resolved,
797 		 * so we avoid the resolver overhead on the real
798 		 * execvp call.
799 		 */
800 		execvp("", (char **)argv);
801 
802 		/*
803 		 * Tell the parent we're ready to go
804 		 */
805 		close(child_ready_pipe[1]);
806 
807 		/*
808 		 * Wait until the parent tells us to go.
809 		 */
810 		if (read(go_pipe[0], &bf, 1) == -1)
811 			perror("unable to read pipe");
812 
813 		execvp(argv[0], (char **)argv);
814 
815 		perror(argv[0]);
816 		kill(getppid(), SIGUSR1);
817 		exit(-1);
818 	}
819 
820 	if (!opts->system_wide && opts->target_tid == -1 && opts->target_pid == -1)
821 		evlist->threads->map[0] = evlist->workload.pid;
822 
823 	close(child_ready_pipe[1]);
824 	close(go_pipe[0]);
825 	/*
826 	 * wait for child to settle
827 	 */
828 	if (read(child_ready_pipe[0], &bf, 1) == -1) {
829 		perror("unable to read pipe");
830 		goto out_close_pipes;
831 	}
832 
833 	evlist->workload.cork_fd = go_pipe[1];
834 	close(child_ready_pipe[0]);
835 	return 0;
836 
837 out_close_pipes:
838 	close(go_pipe[0]);
839 	close(go_pipe[1]);
840 out_close_ready_pipe:
841 	close(child_ready_pipe[0]);
842 	close(child_ready_pipe[1]);
843 	return -1;
844 }
845 
846 int perf_evlist__start_workload(struct perf_evlist *evlist)
847 {
848 	if (evlist->workload.cork_fd > 0) {
849 		/*
850 		 * Remove the cork, let it rip!
851 		 */
852 		return close(evlist->workload.cork_fd);
853 	}
854 
855 	return 0;
856 }
857