/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "evlist.h"
#include "evsel.h"
#include "util.h"

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
/* fd and sample_id lookups in an evsel's per (cpu, thread) xyarrays */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel = perf_evsel__new(&attr, 0);

	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	/* One pollfd per event per (cpu, thread) pair */
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

void perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel,
			  int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}
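
/*
 * Layout of the buffer that a read(2) on a counter fd returns when the
 * event is not part of a read group (see the perf_event_open(2) man page):
 *
 *	{ u64 value;
 *	  { u64 time_enabled; }	if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; }	if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 id;           }	if PERF_FORMAT_ID
 *	}
 *
 * id_idx below skips over the optional timing fields to find the id.
 */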
static int perf_evlist__id_hash_fd(struct perf_evlist *evlist,
				   struct perf_evsel *evsel,
				   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_hash(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	/* With a single event every sample belongs to it, no need to hash */
	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;
	return NULL;
}

union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[cpu];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
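
/*
 * Example consumer loop, a minimal sketch: it assumes the events were
 * already mmap'ed with overwrite == false, and "done" and "process_event"
 * are hypothetical caller-supplied state and handler:
 *
 *	union perf_event *event;
 *	int cpu;
 *
 *	while (!done) {
 *		poll(evlist->pollfd, evlist->nr_fds, -1);
 *		for (cpu = 0; cpu < evlist->cpus->nr; cpu++)
 *			while ((event = perf_evlist__read_on_cpu(evlist, cpu)))
 *				process_event(event);
 *	}
 */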

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int cpu;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
}

int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
			       int mask, int fd)
{
	evlist->mmap[cpu].prev = 0;
	evlist->mmap[cpu].mask = mask;
	evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[cpu].base == MAP_FAILED)
		return -1;

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head);
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	int mask = pages * page_size - 1, cpu;
	struct perf_evsel *first_evsel, *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	/* +1 for the metadata (control) page at the start of the mapping */
	evlist->mmap_len = (pages + 1) * page_size;
	first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				int fd = FD(evsel, cpu, thread);
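
				/*
				 * Only the first event gets a ring buffer
				 * per cpu; every other fd redirects its
				 * output into it via
				 * PERF_EVENT_IOC_SET_OUTPUT, so all events
				 * on a cpu share one mmap area.
				 */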
				if (evsel->idx || thread) {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
						  FD(first_evsel, cpu, 0)) != 0)
						goto out_unmap;
				} else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
					goto out_unmap;

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_hash_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}
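
/*
 * Example setup, a minimal sketch with error handling omitted; it assumes
 * each evsel was already opened with perf_evsel__open() (see evsel.h for
 * the exact signature in this tree):
 *
 *	struct perf_evlist *evlist = perf_evlist__new(cpus, threads);
 *
 *	perf_evlist__add_default(evlist);
 *	...open each evsel on evlist->cpus/evlist->threads...
 *	perf_evlist__mmap(evlist, 128, false);
 *
 * After this, events can be consumed with perf_evlist__read_on_cpu().
 */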

int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
			     pid_t target_tid, const char *cpu_list)
{
	evlist->threads = thread_map__new(target_pid, target_tid);

	if (evlist->threads == NULL)
		return -1;
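
	/*
	 * When monitoring a specific tid, the events follow the thread
	 * wherever it runs, so a dummy cpu map (a single -1 entry meaning
	 * "any cpu") suffices; otherwise map the requested cpu list, or
	 * all online cpus when cpu_list is NULL.
	 */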
	if (target_tid != -1)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus	= NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}
387