xref: /linux/tools/lib/perf/evsel.c (revision 38fe0e0156c037c060f81fe4e36549fae760322d)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <linux/list.h>
#include <internal/evsel.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx)
{
	INIT_LIST_HEAD(&evsel->node);
	evsel->attr = *attr;
	evsel->idx  = idx;
}

struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, 0);

	return evsel;
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}

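/*
 * FD() and MMAP() index the per-event xyarray tables: x is the index
 * into the evsel's cpu map and y the index into its thread map.  An fd
 * of -1 marks a slot that has not been opened (or has been closed).
 */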
#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
#define MMAP(e, x, y) (e->mmap ? ((struct perf_mmap *) xyarray__entry(e->mmap, x, y)) : NULL)

int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		int cpu, thread;

		/* Mark every slot as "not opened yet". */
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));

	return evsel->mmap != NULL ? 0 : -ENOMEM;
}

/*
 * glibc provides no wrapper for perf_event_open(2), so invoke it
 * directly via syscall(2).
 */
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
		     struct perf_thread_map *threads)
{
	int cpu, thread, err = 0;

	/*
	 * A NULL cpu or thread map means "any": substitute a shared
	 * single-entry dummy map so the open loops below still run once.
	 */
	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__dummy_new();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = perf_thread_map__new_dummy();
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd;

			fd = sys_perf_event_open(&evsel->attr,
						 threads->map[thread].pid,
						 cpus->map[cpu], -1, 0);

			/*
			 * Note: fds opened in earlier iterations are left
			 * open on failure; the caller is expected to clean
			 * up with perf_evsel__close().
			 */
			if (fd < 0)
				return -errno;

			FD(evsel, cpu, thread) = fd;
		}
	}

	return err;
}
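
/*
 * A minimal counting sketch using this API (illustrative only, not part
 * of this file); error handling is elided and the attr setup is one of
 * many possibilities:
 *
 *	struct perf_event_attr attr = {
 *		.type   = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *	};
 *	struct perf_thread_map *threads = perf_thread_map__new_dummy();
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *	struct perf_counts_values counts;
 *
 *	perf_thread_map__set_pid(threads, 0, 0);	// current process
 *	perf_evsel__open(evsel, NULL, threads);
 *	perf_evsel__enable(evsel);
 *	// ... workload ...
 *	perf_evsel__disable(evsel);
 *	perf_evsel__read(evsel, 0, 0, &counts);
 *	perf_evsel__close(evsel);
 *	perf_evsel__delete(evsel);
 *	perf_thread_map__put(threads);
 */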

static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
{
	int thread;

	for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
		if (FD(evsel, cpu, thread) >= 0)
			close(FD(evsel, cpu, thread));
		FD(evsel, cpu, thread) = -1;
	}
}

void perf_evsel__close_fd(struct perf_evsel *evsel)
{
	int cpu;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
		perf_evsel__close_fd_cpu(evsel, cpu);
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu);
}

void perf_evsel__munmap(struct perf_evsel *evsel)
{
	int cpu, thread;

	if (evsel->fd == NULL || evsel->mmap == NULL)
		return;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int fd = FD(evsel, cpu, thread);
			struct perf_mmap *map = MMAP(evsel, cpu, thread);

			if (fd < 0)
				continue;

			perf_mmap__munmap(map);
		}
	}

	xyarray__delete(evsel->mmap);
	evsel->mmap = NULL;
}

int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
{
	int ret, cpu, thread;
	struct perf_mmap_param mp = {
		.prot = PROT_READ | PROT_WRITE,
		/* pages must be a power of two for this to be a valid mask. */
		.mask = (pages * page_size) - 1,
	};

	if (evsel->fd == NULL || evsel->mmap)
		return -EINVAL;

	if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
		return -ENOMEM;

	for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int fd = FD(evsel, cpu, thread);
			struct perf_mmap *map = MMAP(evsel, cpu, thread);

			if (fd < 0)
				continue;

			perf_mmap__init(map, NULL, false, NULL);

			ret = perf_mmap__mmap(map, &mp, fd, cpu);
			if (ret) {
				/* Unwind any mappings already set up. */
				perf_evsel__munmap(evsel);
				return ret;
			}
		}
	}

	return 0;
}
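
/*
 * Mapping the event enables the user-space fast path used by
 * perf_evsel__read() below: counters can then be read through the
 * mmap'ed control page (where supported) instead of a read(2) system
 * call.  A sketch, assuming the event was opened for the current
 * process:
 *
 *	perf_evsel__mmap(evsel, 1);	// one data page
 *	perf_evsel__read(evsel, 0, 0, &counts);
 */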

void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu, int thread)
{
	if (FD(evsel, cpu, thread) < 0 || MMAP(evsel, cpu, thread) == NULL)
		return NULL;

	return MMAP(evsel, cpu, thread)->base;
}

/*
 * Return the number of bytes a read(2) of this event will produce,
 * derived from attr.read_format.
 */
int perf_evsel__read_size(struct perf_evsel *evsel)
{
	u64 read_format = evsel->attr.read_format;
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (read_format & PERF_FORMAT_GROUP) {
		nr = evsel->nr_members;
		size += sizeof(u64); /* nr */
	}

	size += entry * nr;
	return size;
}
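
/*
 * The layout being sized above is the one documented in
 * perf_event_open(2):
 *
 *	struct read_format {
 *		u64 value;
 *		u64 time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *		u64 id;			// if PERF_FORMAT_ID
 *	};
 *
 * and, with PERF_FORMAT_GROUP set:
 *
 *	struct read_format {
 *		u64 nr;
 *		u64 time_enabled;	// if PERF_FORMAT_TOTAL_TIME_ENABLED
 *		u64 time_running;	// if PERF_FORMAT_TOTAL_TIME_RUNNING
 *		struct {
 *			u64 value;
 *			u64 id;		// if PERF_FORMAT_ID
 *		} values[nr];
 *	};
 */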

int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);

	memset(count, 0, sizeof(*count));

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	/* Try the mmap'ed user-space fast path first ... */
	if (MMAP(evsel, cpu, thread) &&
	    !perf_mmap__read_self(MMAP(evsel, cpu, thread), count))
		return 0;

	/* ... and fall back to read(2) on the event fd. */
	if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
		return -errno;

	return 0;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
				 int ioc, void *arg,
				 int cpu)
{
	int thread;

	/*
	 * Assumes every per-thread fd on this cpu is open: ioctl() on an
	 * unopened slot (-1) fails with EBADF and aborts the loop.
	 */
	for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
		int fd = FD(evsel, cpu, thread),
		    err = ioctl(fd, ioc, arg);

		if (err)
			return err;
	}

	return 0;
}

int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
}

int perf_evsel__enable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
	return err;
}

int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
}

int perf_evsel__disable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
	return err;
}

int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	int err = 0, i;

	/* Assumes evsel->cpus is set (a per-cpu event). */
	for (i = 0; i < evsel->cpus->nr && !err; i++)
		err = perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter, i);
	return err;
}

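/*
 * An illustrative use, assuming evsel is an opened tracepoint event;
 * the filter string uses the kernel's tracepoint filter language and
 * the field name here is only an example:
 *
 *	perf_evsel__apply_filter(evsel, "common_pid != 0");
 */
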
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}

struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	/* System-wide events are opened once per cpu, not per thread. */
	if (evsel->system_wide)
		nthreads = 1;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;
}
384