/* xref: /linux/tools/lib/perf/evsel.c (revision d786bdf2a70545a868cd0b06b5603cd5a5fec011) */
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <linux/list.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <asm/bug.h>

void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx)
{
	INIT_LIST_HEAD(&evsel->node);
	evsel->attr = *attr;
	evsel->idx  = idx;
	evsel->leader = evsel;
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, 0);

	return evsel;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}

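/*
 * Usage sketch (illustrative only, not part of the library): the attr
 * configuration below is an assumption for the example and error
 * handling is elided.  Counting a software event for the calling
 * process could look like:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_SOFTWARE,
 *		.config	= PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *	struct perf_counts_values counts;
 *
 *	perf_thread_map__set_pid(threads, 0, 0);	// pid 0: this process
 *	if (!perf_evsel__open(evsel, NULL, threads)) {
 *		perf_evsel__read(evsel, 0, 0, &counts);
 *		perf_evsel__close(evsel);
 *	}
 *	perf_evsel__delete(evsel);
 *	perf_thread_map__put(threads);
 */
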
/* Both matrices below are indexed as [cpu_map_idx][thread]. */
#define FD(_evsel, _cpu_map_idx, _thread)				\
	((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread))
#define MMAP(_evsel, _cpu_map_idx, _thread)				\
	(_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \
		      : NULL)

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int idx, thread;

		for (idx = 0; idx < ncpus; idx++) {
			for (thread = 0; thread < nthreads; thread++) {
				int *fd = FD(evsel, idx, thread);

				if (fd)
					*fd = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));

	return evsel->mmap != NULL ? 0 : -ENOMEM;
}

/* glibc provides no wrapper for perf_event_open(2), so call it directly. */
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, struct perf_cpu cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags);
}

static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd)
{
	struct perf_evsel *leader = evsel->leader;
	int *fd;

	if (evsel == leader) {
		*group_fd = -1;
		return 0;
	}

	/*
	 * The leader must already have been processed/opened;
	 * otherwise it's a bug.
	 */
	if (!leader->fd)
		return -ENOTCONN;

	fd = FD(leader, cpu_map_idx, thread);
	if (fd == NULL || *fd == -1)
		return -EBADF;

	*group_fd = *fd;

	return 0;
}

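/*
 * Group semantics, sketched (the assignments below are an illustrative
 * assumption using the internal leader field, not a public API): the
 * leader must be opened before its members so get_group_fd() can find
 * a valid leader fd to pass as group_fd:
 *
 *	member->leader = leader;		   // leader->leader == leader
 *	perf_evsel__open(leader, cpus, threads);   // opens with group_fd == -1
 *	perf_evsel__open(member, cpus, threads);   // group_fd == leader's fd
 */
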
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
		     struct perf_thread_map *threads)
{
	struct perf_cpu cpu;
	int idx, thread, err = 0;

	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = perf_thread_map__new_dummy();
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
		return -ENOMEM;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd, group_fd, *evsel_fd;

			evsel_fd = FD(evsel, idx, thread);
			if (evsel_fd == NULL)
				return -EINVAL;

			err = get_group_fd(evsel, idx, thread, &group_fd);
			if (err < 0)
				return err;

			fd = sys_perf_event_open(&evsel->attr,
						 threads->map[thread].pid,
						 cpu, group_fd, 0);

			if (fd < 0)
				return -errno;

			*evsel_fd = fd;
		}
	}

	return err;
}

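/*
 * Per-CPU counting, sketched (hedged example; error handling elided):
 * passing a real CPU map and a NULL thread map opens one fd per CPU
 * with pid == -1, i.e. system-wide per-CPU counters, one fd per
 * (cpu, thread) cell of the matrix:
 *
 *	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);  // all online CPUs
 *
 *	perf_evsel__open(evsel, cpus, NULL);
 */
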
static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
		int *fd = FD(evsel, cpu_map_idx, thread);

		if (fd && *fd >= 0) {
			close(*fd);
			*fd = -1;
		}
	}
}

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++)
		perf_evsel__close_fd_cpu(evsel, idx);
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu_map_idx);
}

void perf_evsel__munmap(struct perf_evsel *evsel)
{
	int idx, thread;

	if (evsel->fd == NULL || evsel->mmap == NULL)
		return;

	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, idx, thread);

			if (fd == NULL || *fd < 0)
				continue;

			perf_mmap__munmap(MMAP(evsel, idx, thread));
		}
	}

	xyarray__delete(evsel->mmap);
	evsel->mmap = NULL;
}

int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
{
	int ret, idx, thread;
	struct perf_mmap_param mp = {
		.prot = PROT_READ | PROT_WRITE,
		.mask = (pages * page_size) - 1,
	};

	if (evsel->fd == NULL || evsel->mmap)
		return -EINVAL;

	if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
		return -ENOMEM;

	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, idx, thread);
			struct perf_mmap *map;
			struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);

			if (fd == NULL || *fd < 0)
				continue;

			map = MMAP(evsel, idx, thread);
			perf_mmap__init(map, NULL, false, NULL);

			ret = perf_mmap__mmap(map, &mp, *fd, cpu);
			if (ret) {
				perf_evsel__munmap(evsel);
				return ret;
			}
		}
	}

	return 0;
}

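/*
 * Worked example of the mask arithmetic above (assuming 4 KiB pages):
 * pages should be a power of two, since .mask is used as "offset & mask"
 * when wrapping around the ring.  With pages == 8:
 *
 *	mask = 8 * 4096 - 1 = 0x7fff	// 32 KiB of data pages
 *
 * The perf mmap layout additionally has one leading control/header
 * page, so a hedged reading of the resulting mapping is 8 data pages
 * plus 1 header page.
 */
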
void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread)
{
	int *fd = FD(evsel, cpu_map_idx, thread);

	if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL)
		return NULL;

	return MMAP(evsel, cpu_map_idx, thread)->base;
}

int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64);
	}

	size += entry * nr;
	return size;
}

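/*
 * Worked example of the size computation above: with
 *
 *	read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 *		      PERF_FORMAT_TOTAL_TIME_RUNNING |
 *		      PERF_FORMAT_ID | PERF_FORMAT_GROUP
 *
 * and nr_members == 2, each entry is value + id = 16 bytes, the header
 * is nr + time_enabled + time_running = 24 bytes, so the read buffer
 * must hold 24 + 2 * 16 = 56 bytes.
 */
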
int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);
	int *fd = FD(evsel, cpu_map_idx, thread);

	memset(count, 0, sizeof(*count));

	if (fd == NULL || *fd < 0)
		return -EINVAL;

	if (MMAP(evsel, cpu_map_idx, thread) &&
	    !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
		return 0;

	if (readn(*fd, count->values, size) <= 0)
		return -errno;

	return 0;
}

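/*
 * Reading sketch (illustrative; assumes the evsel was opened with
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING):
 * the fast path above reads from the mmapped page via
 * perf_mmap__read_self() when perf_evsel__mmap() was called, and
 * otherwise falls back to read(2) on the event fd:
 *
 *	struct perf_counts_values counts;
 *
 *	if (!perf_evsel__read(evsel, 0, 0, &counts))
 *		use(counts.val, counts.ena, counts.run);  // use() is hypothetical
 */
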
static int perf_evsel__ioctl(struct perf_evsel *evsel, int ioc, void *arg,
			     int cpu_map_idx, int thread)
{
	int *fd = FD(evsel, cpu_map_idx, thread);

	if (fd == NULL || *fd < 0)
		return -1;

	return ioctl(*fd, ioc, arg);
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc, void *arg,
				 int cpu_map_idx)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
		int err = perf_evsel__ioctl(evsel, ioc, arg, cpu_map_idx, thread);

		if (err)
			return err;
	}

	return 0;
}

int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx);
}

int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
{
	struct perf_cpu cpu __maybe_unused;
	int idx;
	int err;

	perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
		err = perf_evsel__ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, idx, thread);
		if (err)
			return err;
	}

	return 0;
}

int perf_evsel__enable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
	return err;
}

int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
	return err;
}

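/*
 * Typical bracketing pattern (a sketch; assumes the event was opened
 * with attr.disabled = 1 so that it starts off):
 *
 *	perf_evsel__enable(evsel);
 *	run_workload();			// hypothetical workload under test
 *	perf_evsel__disable(evsel);
 *	perf_evsel__read(evsel, 0, 0, &counts);
 */
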
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	int err = 0, i;

	for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
		err = perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter, i);
	return err;
}

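/*
 * Filter sketch (hedged example): PERF_EVENT_IOC_SET_FILTER takes a
 * filter string, which the kernel accepts for tracepoint events, e.g.:
 *
 *	perf_evsel__apply_filter(evsel, "common_pid != 1234");
 */
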
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}

struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, __s8 *pscaled)
{
	s8 scaled = 0;

	if (scale) {
		if (count->run == 0) {
			scaled = -1;
			count->val = 0;
		} else if (count->run < count->ena) {
			scaled = 1;
			count->val = (u64)((double)count->val * count->ena / count->run);
		}
	}

	if (pscaled)
		*pscaled = scaled;
}
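
/*
 * Worked example of the multiplexing scale-up above: with val == 1000,
 * ena == 4000000 and run == 2000000 (the event was scheduled in for
 * half of the enabled time), the estimate becomes
 *
 *	val = 1000 * 4000000 / 2000000 = 2000,	*pscaled = 1
 *
 * and run == 0 reports val = 0 with *pscaled = -1 ("not counted").
 */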
477