// SPDX-License-Identifier: GPL-2.0
/*
 * Sample-parsing test: synthesize a PERF_RECORD_SAMPLE from a known
 * struct perf_sample, parse it back, and verify the round trip is
 * lossless for every sample format bit, both separately and combined.
 */
#include <stdbool.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "map_symbol.h"
#include "branch.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"
#include "util/synthetic-events.h"

#include "tests.h"

/* Compare scalar member 'm' of s1/s2; bail out of samples_same() on mismatch. */
#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

/* Compare member 'm' bytewise (structs/array elements); bail out on mismatch. */
#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

/*
 * Compare two parsed samples field by field.  Only the fields selected
 * by @type (PERF_SAMPLE_* bits) are compared; for PERF_SAMPLE_READ the
 * comparison additionally depends on @read_format (PERF_FORMAT_* bits).
 * On the first mismatch the differing field is named via pr_debug() and
 * false is returned; true means all selected fields matched.
 */
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		COMP(branch_stack->hw_idx);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		/* Register values are only meaningful when an ABI was recorded. */
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		/* Same ABI gating as user_regs above. */
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		COMP(phys_addr);

	if (type & PERF_SAMPLE_AUX) {
		COMP(aux_sample.size);
		if (memcmp(s1->aux_sample.data, s2->aux_sample.data,
			   s1->aux_sample.size)) {
			pr_debug("Samples differ at 'aux_sample'\n");
			return false;
		}
	}

	return true;
}

/*
 * do_test - round-trip one sample format combination.
 * @sample_type: PERF_SAMPLE_* bits to exercise
 * @sample_regs: register mask used for both user and intr regs sampling
 * @read_format: PERF_FORMAT_* bits used when PERF_SAMPLE_READ is set
 *
 * Builds a perf_sample filled with fixed, mutually distinct test values,
 * synthesizes a PERF_RECORD_SAMPLE from it, verifies the synthesized
 * size against perf_event__sample_event_size() (via a 0xff poison
 * pattern in the over-allocated buffer), parses the event back and
 * compares the result with samples_same().  Returns 0 on success, -1 on
 * failure.
 */
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct evsel evsel = {
		.needs_swap = false,
		.core = {
			.attr = {
				.sample_type = sample_type,
				.read_format = read_format,
			},
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, -1ULL, 211, 212, 213},
	};
	u64 regs[64];
	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	const u64 aux_data[] = {0xa55a, 0, 0xeeddee, 0x0282028202820282};
	struct perf_sample sample = {
		.ip		= 101,
		.pid		= 102,
		.tid		= 103,
		.time		= 104,
		.addr		= 105,
		.id		= 106,
		.stream_id	= 107,
		.period		= 108,
		.weight		= 109,
		.cpu		= 110,
		.raw_size	= sizeof(raw_data),
		.data_src	= 111,
		.transaction	= 112,
		.raw_data	= (void *)raw_data,
		.callchain	= &callchain.callchain,
		.no_hw_idx      = false,
		.branch_stack	= &branch_stack.branch_stack,
		.user_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.user_stack	= {
			.size	= sizeof(data),
			.data	= (void *)data,
		},
		.read		= {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.phys_addr	= 113,
		.aux_sample	= {
			.size	= sizeof(aux_data),
			.data	= (void *)aux_data,
		},
	};
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.core.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.core.attr.sample_regs_intr = sample_regs;

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		evsel.core.attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;

	/*
	 * Fill regs with a byte pattern that never contains 0xff, so the
	 * poison-based size check below stays valid.
	 */
	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr     = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id    = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __perf_evsel__sample_size(sample_type);

	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample.  The test
 * checks sample format bits separately and together.  If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	/*
	 * read_format combinations to exercise; each value includes the
	 * PERF_FORMAT_ID bit, which PERF_SAMPLE_READ requires.
	 */
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added.  Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_AUX << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}