xref: /linux/tools/perf/util/evlist.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}
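
/*
 * Illustrative sketch (hypothetical helper, error unwinding
 * abbreviated): typical creation and teardown of an evlist for a
 * per-thread session, using a dummy cpu map the way
 * perf_evlist__create_maps() does for thread targets.
 */
static int example_evlist_lifetime(pid_t pid)
{
	struct thread_map *threads = thread_map__new(pid, -1);
	struct cpu_map *cpus = cpu_map__dummy_new();
	struct perf_evlist *evlist;

	if (threads == NULL || cpus == NULL)
		return -1;

	evlist = perf_evlist__new(cpus, threads);
	if (evlist == NULL)
		return -ENOMEM;

	/* ... add events, open, mmap, consume ... */

	perf_evlist__delete_maps(evlist);	/* frees cpus and threads */
	perf_evlist__delete(evlist);		/* frees entries and evlist */
	return 0;
}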

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}
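
/*
 * Sketch (hypothetical helper): how a tool falls back to the default
 * "cycles" event when the user specified none, cf. the callers in
 * builtin-record.c and builtin-top.c.
 */
static int example_ensure_default(struct perf_evlist *evlist)
{
	if (evlist->nr_entries == 0 && perf_evlist__add_default(evlist) < 0) {
		fprintf(stderr, "Not enough memory for event selector list\n");
		return -ENOMEM;
	}
	return 0;
}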

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
		}
	}
}
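
/*
 * Sketch (hypothetical helper): counters created with attr.disabled
 * set can be toggled around just the region of interest.
 */
static void example_measure_region(struct perf_evlist *evlist)
{
	perf_evlist__enable(evlist);
	/* ... run the workload being measured ... */
	perf_evlist__disable(evlist);
}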

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
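
/*
 * Sketch (hypothetical helper): once the mmaps are set up
 * (perf_evlist__mmap() calls perf_evlist__add_pollfd() for each ring
 * buffer fd), a consumer can block until any buffer has data.
 */
static void example_wait_for_events(struct perf_evlist *evlist)
{
	poll(evlist->pollfd, evlist->nr_fds, -1);	/* -1: no timeout */
}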

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}
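
/*
 * Sketch (hypothetical helper): demultiplexing -- given the
 * PERF_SAMPLE_ID value parsed out of a sample record, find the evsel
 * that produced it.
 */
static void example_demux_sample(struct perf_evlist *evlist, u64 id)
{
	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, id);

	if (evsel != NULL)
		fprintf(stderr, "sample belongs to %s\n",
			evsel->name ? evsel->name : "unnamed event");
}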

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
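
/*
 * Sketch (hypothetical helper): draining every ring buffer after
 * poll() reports activity.  For !overwrite maps
 * perf_evlist__mmap_read() already advances the tail, so no extra
 * consumption signalling is needed here.
 */
static void example_drain_buffers(struct perf_evlist *evlist)
{
	union perf_event *event;
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			/* ... dispatch on event->header.type ... */
		}
	}
}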

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1) /* dummy cpu map: one mmap per thread */
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/**
 * perf_evlist__mmap - Create per cpu maps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1;
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
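
/*
 * Sketch (hypothetical helper): mapping with 128 data pages; note
 * that mmap_len above becomes pages + 1 pages because the kernel
 * prepends a header page.  pages should be a power of two so the
 * resulting mask arithmetic works.
 */
static int example_mmap_buffers(struct perf_evlist *evlist)
{
	return perf_evlist__mmap(evlist, 128, false);
}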

int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;

	if (cpu_list == NULL && target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}
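
/*
 * Sketch (hypothetical helper): resolving a "-p <pid>" style target.
 * Passing a NULL cpu_list with target_tid == -1 yields a real cpu
 * map, so each online cpu gets its own ring buffer.
 */
static int example_target_pid(struct perf_evlist *evlist, pid_t pid)
{
	return perf_evlist__create_maps(evlist, pid, -1, NULL);
}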

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}
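
/*
 * Sketch (hypothetical helper, error handling elided): filters are
 * stashed on each evsel and then pushed down with one
 * PERF_EVENT_IOC_SET_FILTER ioctl per open fd.
 */
static int example_filter_all(struct perf_evlist *evlist, const char *str)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		evsel->filter = strdup(str);	/* same filter for every event */

	return perf_evlist__set_filters(evlist);
}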

bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist, bool group)
{
	struct perf_evsel *evsel, *first;
	int err, ncpus, nthreads;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		struct xyarray *group_fd = NULL;

		if (group && evsel != first)
			group_fd = first->fd;

		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
				       group, group_fd);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	return err;
}
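
/*
 * Sketch (hypothetical helper): the whole record-style sequence, with
 * error unwinding abbreviated; "done" is the caller's termination
 * flag.
 */
static int example_session(struct perf_evlist *evlist, volatile int *done)
{
	if (perf_evlist__open(evlist, false) < 0)
		return -1;

	if (perf_evlist__mmap(evlist, 128, false) < 0)
		return -1;

	perf_evlist__enable(evlist);

	while (!*done) {
		poll(evlist->pollfd, evlist->nr_fds, -1);
		/* drain with perf_evlist__mmap_read(), see above */
	}

	perf_evlist__disable(evlist);
	perf_evlist__munmap(evlist);
	return 0;
}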