// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/string.h>

#include <sched.h>
#include <perf/mmap.h>
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "record.h"
#include "tests.h"
#include "util/mmap.h"
#include "util/sample.h"
#include "util/cpumap.h"

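/*
 * Get the first CPU in 'pid's affinity mask and clear every other bit in
 * 'maskp', leaving a mask the caller can use to pin the workload to a
 * single CPU. Returns the CPU number, or -1 on error.
 */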
static int sched__get_first_possible_cpu(pid_t pid, cpu_set_t *maskp)
{
	int i, cpu = -1;
	int nrcpus = cpu__max_cpu().cpu;
	size_t size = CPU_ALLOC_SIZE(nrcpus);

	CPU_ZERO_S(size, maskp);

	/*
	 * 'maskp' was allocated for all possible CPUs (cpu__max_cpu()), so
	 * there is no larger mask to retry with: treat any failure as fatal.
	 */
	if (sched_getaffinity(pid, size, maskp) == -1) {
		perror("sched_getaffinity");
		return -1;
	}

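	/* Keep only the first CPU we are allowed to run on, clear the rest. */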
	for (i = 0; i < nrcpus; i++) {
		if (CPU_ISSET_S(i, size, maskp)) {
			if (cpu == -1)
				cpu = i;
			else
				CPU_CLR_S(i, size, maskp);
		}
	}

	return cpu;
}

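/*
 * Fork a 'sleep 1' workload, pin it to one CPU and record all the
 * PERF_RECORD_* side-band events it generates, then check that timestamps
 * never go backwards, that pid/tid/cpu match the workload, and that the
 * expected COMM, MMAP and EXIT events were seen.
 */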
static int test__PERF_RECORD(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
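	/*
	 * Record options for the test: no event buffering and 256
	 * ring-buffer pages per mmap; the target is the workload forked
	 * below.
	 */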
	struct record_opts opts = {
		.target = {
			.uses_mmap = true,
		},
		.no_buffering = true,
		.mmap_pages   = 256,
	};
	int nrcpus = cpu__max_cpu().cpu;
	cpu_set_t *cpu_mask;
	size_t cpu_mask_size;
	struct evlist *evlist = evlist__new_dummy();
	struct evsel *evsel;
	struct perf_sample sample;
	const char *cmd = "sleep";
	const char *argv[] = { cmd, "1", NULL, };
	char *bname, *mmap_filename;
	u64 prev_time = 0;
	bool found_cmd_mmap = false,
	     found_coreutils_mmap = false,
	     found_libc_mmap = false,
	     found_vdso_mmap = false,
	     found_ld_mmap = false;
	int err = -1, errs = 0, i, wakeups = 0;
	u32 cpu;
	int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, };
	char sbuf[STRERR_BUFSIZE];

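	/*
	 * Initialize the sample up front so that every error path below can
	 * safely call perf_sample__exit().
	 */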
	perf_sample__init(&sample, /*all=*/false);

	cpu_mask = CPU_ALLOC(nrcpus);
	if (!cpu_mask) {
		pr_debug("failed to create cpumask\n");
		goto out;
	}

	cpu_mask_size = CPU_ALLOC_SIZE(nrcpus);
	CPU_ZERO_S(cpu_mask_size, cpu_mask);

	if (evlist == NULL) /* Fallback for kernels lacking PERF_COUNT_SW_DUMMY */
		evlist = evlist__new_default();

	if (evlist == NULL) {
		pr_debug("Not enough memory to create evlist\n");
		CPU_FREE(cpu_mask);
		goto out;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	err = evlist__create_maps(evlist, &opts.target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_delete_evlist;
	}

	/*
	 * Prepare the workload in argv[] to run: this forks the child and
	 * then waits for evlist__start_workload() to exec it. It is done
	 * this way so that we have time to open the evlist (calling
	 * sys_perf_event_open on all the fds) and then mmap them.
	 */
	err = evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	/*
	 * Configure the evsels, setting attr->comm on the first one, etc.
	 */
	evsel = evlist__first(evlist);
	evsel__set_sample_bit(evsel, CPU);
	evsel__set_sample_bit(evsel, TID);
	evsel__set_sample_bit(evsel, TIME);
	evlist__config(evlist, &opts, NULL);

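	/*
	 * Find the first CPU the workload may run on; we will pin the
	 * workload there so that every sample carries a predictable cpu.
	 */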
	err = sched__get_first_possible_cpu(evlist->workload.pid, cpu_mask);
	if (err < 0) {
		pr_debug("sched__get_first_possible_cpu: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		evlist__cancel_workload(evlist);
		goto out_delete_evlist;
	}

	cpu = err;

	/*
	 * Pin the workload to that CPU so that we can check perf_sample.cpu
	 * on all the samples.
	 */
	if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, cpu_mask) < 0) {
		pr_debug("sched_setaffinity: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		evlist__cancel_workload(evlist);
		goto out_delete_evlist;
	}

	/*
	 * Call sys_perf_event_open on all the fds on all the evsels,
	 * grouping them if asked to.
	 */
	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		evlist__cancel_workload(evlist);
		goto out_delete_evlist;
	}

	/*
	 * mmap the first fd on a given CPU and ask for events for the other
	 * fds in the same CPU to be injected in the same mmap ring buffer
	 * (using ioctl(PERF_EVENT_IOC_SET_OUTPUT)).
	 */
	err = evlist__mmap(evlist, opts.mmap_pages);
	if (err < 0) {
		pr_debug("evlist__mmap: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		evlist__cancel_workload(evlist);
		goto out_delete_evlist;
	}

	/*
	 * Now that all is properly set up, enable the events; they will
	 * count only on workload.pid, which is about to start...
	 */
	evlist__enable(evlist);

	/*
	 * Now!
	 */
	evlist__start_workload(evlist);

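	/*
	 * Drain all the mmap ring buffers, sanity checking every event,
	 * until the workload's PERF_RECORD_EXIT shows up (or we give up
	 * after a few wakeups).
	 */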
	while (1) {
		int before = total_events;

		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			union perf_event *event;
			struct mmap *md;

			md = &evlist->mmap[i];
			if (perf_mmap__read_init(&md->core) < 0)
				continue;

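			/* Process every event currently queued in this ring buffer. */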
			while ((event = perf_mmap__read_event(&md->core)) != NULL) {
				const u32 type = event->header.type;
				const char *name = perf_event__name(type);

				++total_events;
				if (type < PERF_RECORD_MAX)
					nr_events[type]++;

				err = evlist__parse_sample(evlist, event, &sample);
				if (err < 0) {
					if (verbose > 0)
						perf_event__fprintf(event, NULL, stderr);
					pr_debug("Couldn't parse sample\n");
					goto out_delete_evlist;
				}

				if (verbose > 0) {
					pr_info("%" PRIu64" %d ", sample.time, sample.cpu);
					perf_event__fprintf(event, NULL, stderr);
				}

				if (prev_time > sample.time) {
					pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n",
						 name, prev_time, sample.time);
					++errs;
				}

				prev_time = sample.time;

				if (sample.cpu != cpu) {
					pr_debug("%s with unexpected cpu, expected %d, got %d\n",
						 name, cpu, sample.cpu);
					++errs;
				}

				if ((pid_t)sample.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.pid);
					++errs;
				}

				if ((pid_t)sample.tid != evlist->workload.pid) {
					pr_debug("%s with unexpected tid, expected %d, got %d\n",
						 name, evlist->workload.pid, sample.tid);
					++errs;
				}

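				/*
				 * COMM, MMAP, MMAP2, FORK and EXIT all start
				 * with the pid of the affected task right
				 * after the header, so event->comm.pid is
				 * valid for every one of them; only the first
				 * three also carry the tid right after it.
				 */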
				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2 ||
				     type == PERF_RECORD_FORK ||
				     type == PERF_RECORD_EXIT) &&
				     (pid_t)event->comm.pid != evlist->workload.pid) {
					pr_debug("%s with unexpected pid/tid\n", name);
					++errs;
				}

				if ((type == PERF_RECORD_COMM ||
				     type == PERF_RECORD_MMAP ||
				     type == PERF_RECORD_MMAP2) &&
				     event->comm.pid != event->comm.tid) {
					pr_debug("%s with different pid/tid!\n", name);
					++errs;
				}

				switch (type) {
				case PERF_RECORD_COMM:
					if (strcmp(event->comm.comm, cmd)) {
						pr_debug("%s with unexpected comm!\n", name);
						++errs;
					}
					break;
				case PERF_RECORD_EXIT:
					goto found_exit;
				case PERF_RECORD_MMAP:
					mmap_filename = event->mmap.filename;
					goto check_bname;
				case PERF_RECORD_MMAP2:
					mmap_filename = event->mmap2.filename;
				check_bname:
					bname = strrchr(mmap_filename, '/');
					if (bname != NULL) {
						if (!found_cmd_mmap)
							found_cmd_mmap = !strcmp(bname + 1, cmd);
						if (!found_coreutils_mmap)
							found_coreutils_mmap = !strcmp(bname + 1, "coreutils");
						if (!found_libc_mmap)
							found_libc_mmap = !strncmp(bname + 1, "libc", 4);
						if (!found_ld_mmap)
							found_ld_mmap = !strncmp(bname + 1, "ld", 2);
					} else if (!found_vdso_mmap) {
						found_vdso_mmap = !strcmp(mmap_filename, "[vdso]");
					}
					break;

				case PERF_RECORD_SAMPLE:
					/* Just ignore samples for now */
					break;
				default:
					pr_debug("Unexpected perf_event->header.type %d!\n",
						 type);
					++errs;
				}

				perf_mmap__consume(&md->core);
			}
			perf_mmap__read_done(&md->core);
		}

		/*
		 * We don't use poll here because, at least as of kernel 3.1,
		 * the non-sample PERF_RECORD_* events don't honour
		 * perf_event_attr.wakeup_events, only PERF_RECORD_SAMPLE does.
		 */
		if (total_events == before && false)
			evlist__poll(evlist, -1);

		sleep(1);
		if (++wakeups > 5) {
			pr_debug("No PERF_RECORD_EXIT event!\n");
			break;
		}
	}

found_exit:
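	/*
	 * The workload exited (or we gave up waiting): check the aggregate
	 * counts. One PERF_RECORD_COMM is expected for the exec of 'sleep',
	 * plus one more if 'sleep' turned out to be the coreutils multi-call
	 * binary.
	 */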
	if (nr_events[PERF_RECORD_COMM] > 1 + !!found_coreutils_mmap) {
		pr_debug("Excessive number of PERF_RECORD_COMM events!\n");
		++errs;
	}

	if (nr_events[PERF_RECORD_COMM] == 0) {
		pr_debug("Missing PERF_RECORD_COMM for %s!\n", cmd);
		++errs;
	}

	if (!found_cmd_mmap && !found_coreutils_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", cmd);
		++errs;
	}

	if (!found_libc_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "libc");
		++errs;
	}

	if (!found_ld_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "ld");
		++errs;
	}

	if (!found_vdso_mmap) {
		pr_debug("PERF_RECORD_MMAP for %s missing!\n", "[vdso]");
		++errs;
	}
out_delete_evlist:
	CPU_FREE(cpu_mask);
	evlist__delete(evlist);
out:
	perf_sample__exit(&sample);
	if (err == -EACCES)
		return TEST_SKIP;
	if (err < 0 || errs != 0)
		return TEST_FAIL;
	return TEST_OK;
}

static struct test_case tests__PERF_RECORD[] = {
	TEST_CASE_REASON("PERF_RECORD_* events & perf_sample fields",
			 PERF_RECORD,
			 "permissions"),
	{ .name = NULL, }
};

struct test_suite suite__PERF_RECORD = {
	.desc = "PERF_RECORD_* events & perf_sample fields",
	.test_cases = tests__PERF_RECORD,
};