xref: /linux/tools/lib/perf/evlist.c (revision 01abac26dccd77eddffec6b032e51f501714dee3)
// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
#include "internal.h"

void perf_evlist__init(struct perf_evlist *evlist)
{
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
	perf_evlist__reset_id_hash(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	if (evsel->system_wide) {
		/* System wide: set the cpu map of the evsel to all online CPUs. */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__new_online_cpus();
	} else if (evlist->has_user_cpus && evsel->is_pmu_core) {
		/*
		 * User requested CPUs on a core PMU, ensure the requested CPUs
		 * are valid by intersecting with those of the PMU.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus);

		/*
		 * Empty cpu lists would eventually get opened as "any" so remove
		 * genuinely empty ones before they're opened in the wrong place.
		 */
		if (perf_cpu_map__is_empty(evsel->cpus)) {
			struct perf_evsel *next = perf_evlist__next(evlist, evsel);

			perf_evlist__remove(evlist, evsel);
			/* Keep idx contiguous */
			if (next)
				list_for_each_entry_from(next, &evlist->entries, node)
					next->idx--;
		}
	} else if (!evsel->own_cpus || evlist->has_user_cpus ||
		(!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) {
		/*
		 * Use the user requested CPUs rather than the PMU ones when
		 * the PMU didn't specify a default cpu map, when the user
		 * requested CPUs, or when this isn't a core event and the
		 * user requested CPUs contain the "any CPU" (aka dummy) value.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		/*
		 * No user requested cpu map but the PMU cpu map doesn't match
		 * the evsel's. Reset it back to the PMU cpu map.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	if (evsel->system_wide) {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__new_dummy();
	} else {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__get(evlist->threads);
	}

	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel, *n;

	evlist->needs_map_propagation = true;

	list_for_each_entry_safe(evsel, n, &evlist->entries, node)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	evsel->idx = evlist->nr_entries;
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;

	if (evlist->needs_map_propagation)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}
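
/*
 * Typical lifecycle (a minimal, hedged sketch following the libperf
 * documentation examples; attr setup and error handling are elided):
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	perf_evlist__add(evlist, evsel);
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *	perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	// ... workload ...
 *	perf_evlist__disable(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 */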

struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* An empty list is noticed here, so there's no need to check on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}
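
/*
 * perf_evlist__next() underpins the public perf_evlist__for_each_evsel()
 * macro (perf/evlist.h). A hedged usage sketch of the raw function:
 *
 *	struct perf_evsel *evsel = NULL;
 *
 *	while ((evsel = perf_evlist__next(evlist, evsel)) != NULL)
 *		do_something(evsel);	// do_something() is hypothetical
 */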

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->user_requested_cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->user_requested_cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->user_requested_cpus) {
		perf_cpu_map__put(evlist->user_requested_cpus);
		evlist->user_requested_cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}
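
/*
 * A hedged sketch of handing maps to an evlist, matching the libperf
 * documentation examples: the evlist takes its own references via the
 * __get() calls above, so the caller drops theirs when done:
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3");
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *
 *	perf_evlist__set_maps(evlist, cpus, threads);
 *	// ... open, use and delete the evlist ...
 *	perf_thread_map__put(threads);
 *	perf_cpu_map__put(cpus);
 */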

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}
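
/*
 * Hedged sketch of the usual measurement window, as in the libperf
 * counting example: open once, then bracket the workload with
 * enable/disable before reading or closing:
 *
 *	err = perf_evlist__open(evlist);
 *	perf_evlist__enable(evlist);
 *	run_workload();			// hypothetical workload
 *	perf_evlist__disable(evlist);
 *	// ... read counts via perf_evsel__read() ...
 *	perf_evlist__close(evlist);
 */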

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu_map_idx, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu_map_idx, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu_map_idx, int thread, u64 id)
{
	if (!SID(evsel, cpu_map_idx, thread))
		return;

	perf_evlist__id_hash(evlist, evsel, cpu_map_idx, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu_map_idx, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	if (!SID(evsel, cpu_map_idx, thread))
		return -1;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu_map_idx, thread, id);
	return 0;
}
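
/*
 * Worked example of the id_idx arithmetic above: with read_format =
 * PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING, a read() on a non-group fd returns
 *
 *	{ value, time_enabled, time_running, id }
 *
 * so both "++id_idx" hits fire and the id lands at read_data[3]. With
 * just PERF_FORMAT_ID it is read_data[1], right after the counter value.
 */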

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}
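
/*
 * Sizing example for the loop above: with 4 CPUs and 3 threads, a
 * system-wide evsel contributes 4 fds and a per-thread one 4 * 3 = 12,
 * so an evlist with one of each needs room for 16 pollfd entries.
 */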

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
	sid->tid = perf_thread_map__pid(evsel->threads, thread);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, struct perf_cpu cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		enum fdarray_flags flgs;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output   = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output   = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->idx)
				ops->idx(evlist, evsel, mp, idx);

			/* Debug message used by test scripts */
			pr_debug("idx %d: mmapping fd %d\n", idx, *output);
			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			*nr_mmaps += 1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			/* Debug message used by test scripts */
			pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
		if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
		}
	}

	return 0;
}
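
/*
 * Sharing sketch for mmap_per_evsel() above: the first fd seen for an
 * index is mmapped and becomes *output; every later fd at that index is
 * redirected into the same ring buffer with
 *
 *	ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output);
 *
 * so several events on one CPU/thread share a single buffer, each
 * redirection taking the perf_mmap__get() reference noted in
 * perf_evlist__alloc_mmap().
 */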

static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
	int cpu, thread, idx = 0;
	int nr_mmaps = 0;

	pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
		 __func__, nr_cpus, nr_threads);

	/* per-thread mmaps */
	for (thread = 0; thread < nr_threads; thread++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	/* system-wide mmaps i.e. per-cpu */
	for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
	int nr_mmaps = 0;
	int cpu, thread;

	pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite, &nr_mmaps))
				goto out_unmap;
		}
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	/* One for each CPU */
	nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
	if (perf_cpu_map__has_any_cpu_or_is_empty(evlist->all_cpus)) {
		/* Plus one for each thread */
		nr_mmaps += perf_thread_map__nr(evlist->threads);
		/* Minus the per-thread CPU (-1) */
		nr_mmaps -= 1;
	}

	return nr_mmaps;
}
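
/*
 * Worked example for perf_evlist__nr_mmaps(): if all_cpus is just the
 * "any CPU" value {-1} and there are 3 threads, the count is
 * 1 + 3 - 1 = 3 mmaps (one per thread, none for the dummy CPU). With
 * all_cpus = {0-3} and no "any CPU" entry it is simply 4, one per CPU.
 */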

int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	const struct perf_cpu_map *cpus = evlist->all_cpus;
	struct perf_evsel *evsel;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}
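
/*
 * Size arithmetic for perf_evlist__mmap(): the kernel ring buffer is
 * "pages" data pages plus one header/control page, hence the
 * (pages + 1) * page_size length. With pages = 4 and 4 KiB pages,
 * mmap_len = 20 KiB and the mask computed in perf_evlist__mmap_ops()
 * is 4 * 4096 - 1 = 0x3fff, i.e. the data-area wrap mask.
 */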

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}
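
/*
 * Hedged read-loop sketch built on perf_evlist__next_mmap() via the
 * public perf_evlist__for_each_mmap() macro, following the libperf
 * sampling documentation (error handling elided):
 *
 *	struct perf_mmap *map;
 *	union perf_event *event;
 *
 *	perf_evlist__for_each_mmap(evlist, map, false) {
 *		if (perf_mmap__read_init(map) < 0)
 *			continue;
 *		while ((event = perf_mmap__read_event(map)) != NULL) {
 *			// ... process event->header.type ...
 *			perf_mmap__consume(map);
 *		}
 *		perf_mmap__read_done(map);
 *	}
 */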

void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
{
	struct perf_evsel *evsel;
	int n = 0;

	__perf_evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
		n++;
	}
	leader->nr_members = n;
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		struct perf_evsel *first = list_entry(evlist->entries.next,
						struct perf_evsel, node);

		__perf_evlist__set_leader(&evlist->entries, first);
	}
}
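
/*
 * Grouping sketch: after adding two evsels and calling
 * perf_evlist__set_leader(), both point at the first as their leader
 * and first->nr_members == 2, which is what perf_evlist__nr_groups()
 * below keys off (a lone evsel keeps the default nr_members of 1):
 *
 *	perf_evlist__add(evlist, cycles);	// hypothetical evsels
 *	perf_evlist__add(evlist, instructions);
 *	perf_evlist__set_leader(evlist);
 */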

int perf_evlist__nr_groups(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int nr_groups = 0;

	perf_evlist__for_each_evsel(evlist, evsel) {
		/*
		 * evsels by default have a nr_members of 1, and they are their
		 * own leader. If the nr_members is >1 then this is an
		 * indication of a group.
		 */
		if (evsel->leader == evsel && evsel->nr_members > 1)
			nr_groups++;
	}
	return nr_groups;
}

void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	if (!evsel->system_wide) {
		evsel->system_wide = true;
		if (evlist->needs_map_propagation)
			__perf_evlist__propagate_maps(evlist, evsel);
	}
}
770