// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "map_symbol.h"
#include "branch.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"
#include "util/synthetic-events.h"

#include "tests.h"

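/*
 * Compare one member of two samples: COMP() checks scalar members for
 * equality, MCOMP() memcmp()s struct-valued members.  Both log the first
 * mismatching member and make samples_same() return false.
 */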
#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

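/*
 * Compare two parsed samples, but only the fields selected by @type
 * (PERF_SAMPLE_* bits) and, for PERF_SAMPLE_READ, by @read_format
 * (PERF_FORMAT_* bits).
 */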
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		COMP(phys_addr);

	return true;
}

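/*
 * Build a known sample, synthesize an event from it for the given
 * sample_type/read_format, parse the event back and check that the result
 * matches the original.  sample_regs is the register mask used for
 * PERF_SAMPLE_REGS_USER and PERF_SAMPLE_REGS_INTR.
 */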
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct evsel evsel = {
		.needs_swap = false,
		.core = {
			.attr = {
				.sample_type = sample_type,
				.read_format = read_format,
			},
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, 211, 212, 213},
	};
	u64 regs[64];
	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	struct perf_sample sample = {
		.ip		= 101,
		.pid		= 102,
		.tid		= 103,
		.time		= 104,
		.addr		= 105,
		.id		= 106,
		.stream_id	= 107,
		.period		= 108,
		.weight		= 109,
		.cpu		= 110,
		.raw_size	= sizeof(raw_data),
		.data_src	= 111,
		.transaction	= 112,
		.raw_data	= (void *)raw_data,
		.callchain	= &callchain.callchain,
		.branch_stack	= &branch_stack.branch_stack,
		.user_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.user_stack	= {
			.size	= sizeof(data),
			.data	= (void *)data,
		},
		.read		= {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.phys_addr	= 113,
	};
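	/* {value, id} pairs for a four-counter group (PERF_FORMAT_GROUP) */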
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.core.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.core.attr.sample_regs_intr = sample_regs;

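	/*
	 * Fill the register array with a recognizable pattern; i & 0xfe never
	 * produces 0xff, which the event size check below depends on.
	 */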
	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

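	/* Set up group or single-counter read values to match read_format */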
	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr     = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id    = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __perf_evsel__sample_size(sample_type);

	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample.  The test
 * checks sample format bits separately and together.  If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
{
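	/*
	 * read_format values to exercise: PERF_FORMAT_ID (4) is always set,
	 * optionally combined with PERF_FORMAT_TOTAL_TIME_ENABLED (1),
	 * PERF_FORMAT_TOTAL_TIME_RUNNING (2) and PERF_FORMAT_GROUP (8).
	 */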
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added.  Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_PHYS_ADDR << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}