// SPDX-License-Identifier: GPL-2.0
/*
 * Arm Statistical Profiling Extensions (SPE) support
 * Copyright (c) 2017-2018, Arm Ltd.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <time.h>

#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evsel.h"
#include "../../../util/evsel_config.h"
#include "../../../util/evlist.h"
#include "../../../util/session.h"
#include <internal/lib.h> // page_size
#include "../../../util/pmu.h"
#include "../../../util/debug.h"
#include "../../../util/auxtrace.h"
#include "../../../util/record.h"
#include "../../../util/header.h"
#include "../../../util/arm-spe.h"
#include <tools/libc_compat.h> // reallocarray

/* Magic value written into each per-CPU header record (see arm_spe_save_cpu_header()) */
#define ARM_SPE_CPU_MAGIC		0x1010101010101010ULL

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)

/*
 * Per-session recording state for Arm SPE, embedding the generic
 * auxtrace_record callback table as its first member so container_of()
 * can recover it from the itr pointer passed to every callback.
 */
struct arm_spe_recording {
	struct auxtrace_record	itr;		/* generic auxtrace callbacks; handed to perf record core */
	struct perf_pmu		*arm_spe_pmu;	/* the arm_spe_* PMU this session records with */
	struct evlist		*evlist;	/* event list, cached by arm_spe_recording_options() */
	int			wrapped_cnt;	/* number of entries in 'wrapped' (one per AUX mmap index) */
	bool			*wrapped;	/* per-mmap flag: has the AUX ring buffer wrapped yet? */
};

/* Iterate config list to detect if the "freq" parameter is set */
static bool arm_spe_is_set_freq(struct evsel *evsel)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_FREQ)
			return true;
	}

	return false;
}

/*
 * arm_spe_find_cpus() returns a new cpu map, and the caller should invoke
 * perf_cpu_map__put() to release the map after use.
 */
static struct perf_cpu_map *arm_spe_find_cpus(struct evlist *evlist)
{
	struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
	struct perf_cpu_map *intersect_cpus;

	/* cpu map is not "any" CPU, we have specific CPUs to work with */
	if (!perf_cpu_map__has_any_cpu(event_cpus)) {
		intersect_cpus = perf_cpu_map__intersect(event_cpus, online_cpus);
		perf_cpu_map__put(online_cpus);
	/* Event can be "any" CPU so count all online CPUs. */
	} else {
		intersect_cpus = online_cpus;
	}

	return intersect_cpus;
}

/*
 * Size, in bytes, of the PERF_RECORD_AUXTRACE_INFO private area: the fixed
 * header (ARM_SPE_AUXTRACE_PRIV_MAX u64s) plus one per-CPU parameter record
 * (ARM_SPE_CPU_PRIV_MAX u64s) for each CPU the session traces.
 * Returns 0 if the cpu map cannot be obtained.
 */
static size_t
arm_spe_info_priv_size(struct auxtrace_record *itr __maybe_unused,
		       struct evlist *evlist)
{
	struct perf_cpu_map *cpu_map = arm_spe_find_cpus(evlist);
	size_t size;

	if (!cpu_map)
		return 0;

	size = ARM_SPE_AUXTRACE_PRIV_MAX +
	       ARM_SPE_CPU_PRIV_MAX * perf_cpu_map__nr(cpu_map);
	size *= sizeof(u64);

	perf_cpu_map__put(cpu_map);
	return size;
}

/*
 * Fill one per-CPU record in the auxtrace info private area: magic, CPU
 * number, parameter count, MIDR, PMU type, and the min_interval /
 * event_filter capability values read from sysfs.
 *
 * Returns the number of u64 slots written (ARM_SPE_CPU_PRIV_MAX) on
 * success, or -ENOMEM if the cpuid string cannot be obtained.
 */
static int arm_spe_save_cpu_header(struct auxtrace_record *itr,
				   struct perf_cpu cpu, __u64 data[])
{
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);
	struct perf_pmu *pmu = NULL;
	char *cpuid = NULL;
	u64 val;

	/* Read CPU MIDR; the cpuid string is a hex MIDR value, freed below */
	cpuid = get_cpuid_allow_env_override(cpu);
	if (!cpuid)
		return -ENOMEM;
	val = strtol(cpuid, NULL, 16);

	data[ARM_SPE_MAGIC] = ARM_SPE_CPU_MAGIC;
	data[ARM_SPE_CPU] = cpu.cpu;
	/* Number of parameter words that follow the fixed per-CPU fields */
	data[ARM_SPE_CPU_NR_PARAMS] = ARM_SPE_CPU_PRIV_MAX - ARM_SPE_CPU_MIDR;
	data[ARM_SPE_CPU_MIDR] = val;

	/* Find the associate Arm SPE PMU for the CPU */
	if (perf_cpu_map__has(sper->arm_spe_pmu->cpus, cpu))
		pmu = sper->arm_spe_pmu;

	if (!pmu) {
		/* No Arm SPE PMU is found: mark the type invalid, zero the caps */
		data[ARM_SPE_CPU_PMU_TYPE] = ULLONG_MAX;
		data[ARM_SPE_CAP_MIN_IVAL] = 0;
		data[ARM_SPE_CAP_EVENT_FILTER] = 0;
	} else {
		data[ARM_SPE_CPU_PMU_TYPE] = pmu->type;

		/* Missing sysfs caps file is not an error: record 0 */
		if (perf_pmu__scan_file(pmu, "caps/min_interval", "%lu", &val) != 1)
			val = 0;
		data[ARM_SPE_CAP_MIN_IVAL] = val;

		if (perf_pmu__scan_file(pmu, "caps/event_filter", "%lx", &val) != 1)
			val = 0;
		data[ARM_SPE_CAP_EVENT_FILTER] = val;
	}

	free(cpuid);
	return ARM_SPE_CPU_PRIV_MAX;
}

/*
 * Populate the PERF_RECORD_AUXTRACE_INFO event written into perf.data:
 * header version/size, PMU type, CPU count, then one per-CPU record
 * (written by arm_spe_save_cpu_header()) for every traced CPU.
 */
static int arm_spe_info_fill(struct auxtrace_record *itr,
			     struct perf_session *session,
			     struct perf_record_auxtrace_info *auxtrace_info,
			     size_t priv_size)
{
	int i, ret;
	size_t offset;
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);
	struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
	struct perf_cpu_map *cpu_map;
	struct perf_cpu cpu;
	__u64 *data;

	/* Caller must have sized the buffer with arm_spe_info_priv_size() */
	if (priv_size != arm_spe_info_priv_size(itr, session->evlist))
		return -EINVAL;

	if (!session->evlist->core.nr_mmaps)
		return -EINVAL;

	cpu_map = arm_spe_find_cpus(session->evlist);
	if (!cpu_map)
		return -EINVAL;

	auxtrace_info->type = PERF_AUXTRACE_ARM_SPE;
	auxtrace_info->priv[ARM_SPE_HEADER_VERSION] = ARM_SPE_HEADER_CURRENT_VERSION;
	auxtrace_info->priv[ARM_SPE_HEADER_SIZE] =
		ARM_SPE_AUXTRACE_PRIV_MAX - ARM_SPE_HEADER_VERSION;
	auxtrace_info->priv[ARM_SPE_PMU_TYPE_V2] = arm_spe_pmu->type;
	auxtrace_info->priv[ARM_SPE_CPUS_NUM] = perf_cpu_map__nr(cpu_map);

	/* offset counts u64 slots into auxtrace_info->priv[] */
	offset = ARM_SPE_AUXTRACE_PRIV_MAX;
	perf_cpu_map__for_each_cpu(cpu, i, cpu_map) {
		/*
		 * NOTE(review): offset is in u64 slots while priv_size is in
		 * bytes, so this assert is looser than intended — confirm.
		 */
		assert(offset < priv_size);
		data = &auxtrace_info->priv[offset];
		ret = arm_spe_save_cpu_header(itr, cpu, data);
		if (ret < 0)
			goto out;
		offset += ret;
	}

	ret = 0;
out:
	perf_cpu_map__put(cpu_map);
	return ret;
}

static void
arm_spe_snapshot_resolve_auxtrace_defaults(struct record_opts *opts,
					   bool privileged)
{
	/*
	 * The default snapshot size is the auxtrace mmap size. If neither auxtrace mmap size nor
	 * snapshot size is specified, then the default is 4MiB for privileged users, 128KiB for
	 * unprivileged users.
	 *
	 * The default auxtrace mmap size is 4MiB/page_size for privileged users, 128KiB for
	 * unprivileged users. If an unprivileged user does not specify mmap pages, the mmap pages
	 * will be reduced from the default 512KiB/page_size to 256KiB/page_size, otherwise the
	 * user is likely to get an error as they exceed their mlock limit.
	 */

	/*
	 * No size were given to '-S' or '-m,', so go with the default
	 */
	if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	} else if (!opts->auxtrace_mmap_pages && !privileged && opts->mmap_pages == UINT_MAX) {
		opts->mmap_pages = KiB(256) / page_size;
	}

	/*
	 * '-m,xyz' was specified but no snapshot size, so make the snapshot size as big as the
	 * auxtrace mmap area.
	 */
	if (!opts->auxtrace_snapshot_size)
		opts->auxtrace_snapshot_size = opts->auxtrace_mmap_pages * (size_t)page_size;

	/*
	 * '-Sxyz' was specified but no auxtrace mmap area, so make the auxtrace mmap area big
	 * enough to fit the requested snapshot size.
	 */
	if (!opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_snapshot_size;

		/* AUX area size must be a power-of-two number of pages */
		sz = round_up(sz, page_size) / page_size;
		opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
	}
}

/*
 * Return the default SPE sample period: the minimum interval advertised by
 * the driver in sysfs, or 4096 if none is advertised. The result is cached
 * in a function-static so sysfs is only read once.
 * NOTE(review): the cache assumes single-threaded use during record setup —
 * confirm this is never called concurrently.
 */
static __u64 arm_spe_pmu__sample_period(const struct perf_pmu *arm_spe_pmu)
{
	static __u64 sample_period;

	if (sample_period)
		return sample_period;

	/*
	 * If kernel driver doesn't advertise a minimum,
	 * use max allowable by PMSIDR_EL1.INTERVAL
	 */
	if (perf_pmu__scan_file(arm_spe_pmu, "caps/min_interval", "%llu",
				&sample_period) != 1) {
		pr_debug("arm_spe driver doesn't advertise a min. interval. Using 4096\n");
		sample_period = 4096;
	}
	return sample_period;
}

/*
 * Configure one SPE aux event: force period mode, move it to the front of
 * the evlist, and set the sample bits / config terms the decoder relies on.
 */
static void arm_spe_setup_evsel(struct evsel *evsel, struct perf_cpu_map *cpus)
{
	u64 pa_enable_bit;

	/* SPE is period-based; frequency mode is rejected in recording_options */
	evsel->core.attr.freq = 0;
	evsel->core.attr.sample_period = arm_spe_pmu__sample_period(evsel->pmu);
	evsel->needs_auxtrace_mmap = true;

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace event
	 * must come first.
	 */
	evlist__to_front(evsel->evlist, evsel);

	/*
	 * In the case of per-cpu mmaps, sample CPU for AUX event;
	 * also enable the timestamp tracing for samples correlation.
	 */
	if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
		evsel__set_sample_bit(evsel, CPU);
		evsel__set_config_if_unset(evsel, "ts_enable", 1);
	}

	/*
	 * Set this only so that perf report knows that SPE generates memory info. It has no effect
	 * on the opening of the event or the SPE data produced.
	 */
	evsel__set_sample_bit(evsel, DATA_SRC);

	/*
	 * The PHYS_ADDR flag does not affect the driver behaviour, it is used to
	 * inform that the resulting output's SPE samples contain physical addresses
	 * where applicable.
	 */
	if (!evsel__get_config_val(evsel, "pa_enable", &pa_enable_bit))
		if (pa_enable_bit)
			evsel__set_sample_bit(evsel, PHYS_ADDR);
}

/*
 * Resolve and validate the AUX buffer sizing options (snapshot size and
 * auxtrace mmap pages). Returns 0 on success, -EINVAL on invalid sizes.
 */
static int arm_spe_setup_aux_buffer(struct record_opts *opts)
{
	bool privileged = perf_event_paranoid_check(-1);

	/*
	 * we are in snapshot mode.
	 */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * Command arguments '-Sxyz' and/or '-m,xyz' are missing, so fill those in with
		 * default values.
		 */
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages)
			arm_spe_snapshot_resolve_auxtrace_defaults(opts, privileged);

		/*
		 * Snapshot size can't be bigger than the auxtrace area.
		 */
		if (opts->auxtrace_snapshot_size > opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/*
		 * Something went wrong somewhere - this shouldn't happen.
		 */
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}

		/* NOTE(review): "%sx" prints e.g. "arm_spex" — possibly intended "%s"; confirm */
		pr_debug2("%sx snapshot size: %zu\n", ARM_SPE_PMU_NAME,
			  opts->auxtrace_snapshot_size);
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (!opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages: at least 8KiB and a power of 2 */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz = KiB(8);

		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for ARM SPE: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * Append a dummy:u tracking event so that sideband records (mmap, comm,
 * context switches) are captured alongside the SPE trace.
 */
static int arm_spe_setup_tracking_event(struct evlist *evlist,
					struct record_opts *opts)
{
	int err;
	struct evsel *tracking_evsel;
	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;

	/* Add dummy event to keep tracking */
	err = parse_event(evlist, "dummy:u");
	if (err)
		return err;

	tracking_evsel = evlist__last(evlist);
	evlist__set_tracking_event(evlist, tracking_evsel);

	tracking_evsel->core.attr.freq = 0;
	tracking_evsel->core.attr.sample_period = 1;

	/* In per-cpu case, always need the time of mmap events etc */
	if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
		evsel__set_sample_bit(tracking_evsel, TIME);
		evsel__set_sample_bit(tracking_evsel, CPU);

		/* also track task context switch */
		if (!record_opts__no_switch_events(opts))
			tracking_evsel->core.attr.context_switch = 1;
	}

	return 0;
}

/*
 * auxtrace_record->recording_options callback: validate the SPE events in
 * the evlist (reject frequency mode), configure each aux event, size the
 * AUX buffer and add the tracking event. When the "discard" config term is
 * set on an SPE event, buffer and tracking setup are skipped entirely.
 */
static int
arm_spe_recording_options(struct auxtrace_record *itr,
			  struct evlist *evlist,
			  struct record_opts *opts)
{
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);
	struct evsel *evsel, *tmp;
	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
	bool discard = false;
	int err;
	u64 discard_bit;

	sper->evlist = evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel)) {
			/* Only arm_spe* PMUs are handled by this driver */
			if (!strstarts(evsel->pmu->name, ARM_SPE_PMU_NAME)) {
				pr_err("Found unexpected auxtrace event: %s\n",
				       evsel->pmu->name);
				return -EINVAL;
			}
			opts->full_auxtrace = true;

			/* SPE only supports period mode, never frequency mode */
			if (opts->user_freq != UINT_MAX ||
			    arm_spe_is_set_freq(evsel)) {
				pr_err("Arm SPE: Frequency is not supported. "
				       "Set period with -c option or PMU parameter (-e %s/period=NUM/).\n",
				       evsel->pmu->name);
				return -EINVAL;
			}
		}
	}

	if (!opts->full_auxtrace)
		return 0;

	evlist__for_each_entry_safe(evlist, tmp, evsel) {
		if (evsel__is_aux_event(evsel)) {
			arm_spe_setup_evsel(evsel, cpus);
			if (!evsel__get_config_val(evsel, "discard", &discard_bit))
				discard = !!discard_bit;
		}
	}

	/* With discard mode there is no AUX data to buffer or correlate */
	if (discard)
		return 0;

	err = arm_spe_setup_aux_buffer(opts);
	if (err)
		return err;

	return arm_spe_setup_tracking_event(evlist, opts);
}

/*
 * Parse the '-S' snapshot option argument (an optional size). Returns 0 on
 * success, -1 on malformed input.
 */
static int arm_spe_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
					  struct record_opts *opts,
					  const char *str)
{
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		/*
		 * Reject trailing garbage. NOTE(review): on LP64 the
		 * SIZE_MAX comparison can never be true — confirm intent.
		 */
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	return 0;
}

/*
 * Pause tracing so a consistent snapshot of the AUX buffer can be taken.
 * Returns -EINVAL if no aux event exists in the evlist.
 */
static int arm_spe_snapshot_start(struct auxtrace_record *itr)
{
	struct arm_spe_recording *ptr =
			container_of(itr, struct arm_spe_recording, itr);
	struct evsel *evsel;
	int ret = -EINVAL;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel__is_aux_event(evsel)) {
			ret = evsel__disable(evsel);
			if (ret < 0)
				return ret;
		}
	}
	return ret;
}

/*
 * Resume tracing after a snapshot has been taken.
 * Returns -EINVAL if no aux event exists in the evlist.
 */
static int arm_spe_snapshot_finish(struct auxtrace_record *itr)
{
	struct arm_spe_recording *ptr =
			container_of(itr, struct arm_spe_recording, itr);
	struct evsel *evsel;
	int ret = -EINVAL;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel__is_aux_event(evsel)) {
			ret = evsel__enable(evsel);
			if (ret < 0)
				return ret;
		}
	}
	return ret;
}

/*
 * Grow ptr->wrapped so that index idx is valid, zero-initialising any new
 * entries. Returns 0 on success, -ENOMEM on allocation failure (in which
 * case the old array is left intact).
 */
static int arm_spe_alloc_wrapped_array(struct arm_spe_recording *ptr, int idx)
{
	bool *wrapped;
	int cnt = ptr->wrapped_cnt, new_cnt, i;

	/*
	 * No need to allocate, so return early.
	 */
	if (idx < cnt)
		return 0;

	/*
	 * Make ptr->wrapped as big as idx.
	 */
	new_cnt = idx + 1;

	/*
	 * Free'ed in arm_spe_recording_free().
	 */
	wrapped = reallocarray(ptr->wrapped, new_cnt, sizeof(bool));
	if (!wrapped)
		return -ENOMEM;

	/*
	 * init new allocated values.
	 */
	for (i = cnt; i < new_cnt; i++)
		wrapped[i] = false;

	ptr->wrapped_cnt = new_cnt;
	ptr->wrapped = wrapped;

	return 0;
}

/*
 * Heuristically detect whether the AUX ring buffer has wrapped by checking
 * for non-zero trace data at its very end (mmap'ed pages start zeroed).
 */
static bool arm_spe_buffer_has_wrapped(unsigned char *buffer,
				       size_t buffer_size, u64 head)
{
	u64 i, watermark;
	u64 *buf = (u64 *)buffer;
	size_t buf_size = buffer_size;

	/*
	 * Defensively handle the case where head might be continually increasing - if its value is
	 * equal or greater than the size of the ring buffer, then we can safely determine it has
	 * wrapped around. Otherwise, continue to detect if head might have wrapped.
	 */
	if (head >= buffer_size)
		return true;

	/*
	 * We want to look at the very last 512 bytes (chosen arbitrarily) in the ring buffer.
	 */
	watermark = buf_size - 512;

	/*
	 * The value of head is somewhere within the size of the ring buffer. This can be that there
	 * hasn't been enough data to fill the ring buffer yet or the trace time was so long that
	 * head has numerically wrapped around. To find we need to check if we have data at the
	 * very end of the ring buffer. We can reliably do this because mmap'ed pages are zeroed
	 * out and there is a fresh mapping with every new session.
	 */

	/*
	 * head is less than 512 byte from the end of the ring buffer.
	 */
	if (head > watermark)
		watermark = head;

	/*
	 * Speed things up by using 64 bit transactions (see "u64 *buf" above)
	 */
	watermark /= sizeof(u64);
	buf_size /= sizeof(u64);

	/*
	 * If we find trace data at the end of the ring buffer, head has been there and has
	 * numerically wrapped around at least once.
	 */
	for (i = watermark; i < buf_size; i++)
		if (buf[i])
			return true;

	return false;
}

/*
 * auxtrace_record->find_snapshot callback: decide how much of the AUX
 * buffer to capture by adjusting *head/*old depending on whether the
 * buffer has wrapped.
 */
static int arm_spe_find_snapshot(struct auxtrace_record *itr, int idx,
				 struct auxtrace_mmap *mm, unsigned char *data,
				 u64 *head, u64 *old)
{
	int err;
	bool wrapped;
	struct arm_spe_recording *ptr =
			container_of(itr, struct arm_spe_recording, itr);

	/*
	 * Allocate memory to keep track of wrapping if this is the first
	 * time we deal with this *mm.
	 */
	if (idx >= ptr->wrapped_cnt) {
		err = arm_spe_alloc_wrapped_array(ptr, idx);
		if (err)
			return err;
	}

	/*
	 * Check to see if *head has wrapped around. If it hasn't only the
	 * amount of data between *head and *old is snapshot'ed to avoid
	 * bloating the perf.data file with zeros. But as soon as *head has
	 * wrapped around the entire size of the AUX ring buffer it is taken.
	 */
	wrapped = ptr->wrapped[idx];
	if (!wrapped && arm_spe_buffer_has_wrapped(data, mm->len, *head)) {
		wrapped = true;
		ptr->wrapped[idx] = true;
	}

	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	/*
	 * No wrap has occurred, we can just use *head and *old.
	 */
	if (!wrapped)
		return 0;

	/*
	 * *head has wrapped around - adjust *head and *old to pickup the
	 * entire content of the AUX buffer.
	 */
	if (*head >= mm->len) {
		*old = *head - mm->len;
	} else {
		*head += mm->len;
		*old = *head - mm->len;
	}

	return 0;
}

/*
 * auxtrace_record->reference callback: produce a (best-effort unique)
 * reference value for correlating AUX records, derived from the monotonic
 * raw clock.
 */
static u64 arm_spe_reference(struct auxtrace_record *itr __maybe_unused)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);

	return ts.tv_sec ^ ts.tv_nsec;
}

/* Release the recording state allocated by arm_spe_recording_init() */
static void arm_spe_recording_free(struct auxtrace_record *itr)
{
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);

	zfree(&sper->wrapped);
	free(sper);
}

/*
 * Allocate and initialise the Arm SPE auxtrace_record with its callback
 * table. On failure returns NULL and stores a negative errno in *err;
 * ownership of the returned record passes to the caller (freed via the
 * ->free callback, arm_spe_recording_free()).
 */
struct auxtrace_record *arm_spe_recording_init(int *err,
					       struct perf_pmu *arm_spe_pmu)
{
	struct arm_spe_recording *sper;

	if (!arm_spe_pmu) {
		*err = -ENODEV;
		return NULL;
	}

	sper = zalloc(sizeof(struct arm_spe_recording));
	if (!sper) {
		*err = -ENOMEM;
		return NULL;
	}

	sper->arm_spe_pmu = arm_spe_pmu;
	sper->itr.snapshot_start = arm_spe_snapshot_start;
	sper->itr.snapshot_finish = arm_spe_snapshot_finish;
	sper->itr.find_snapshot = arm_spe_find_snapshot;
	sper->itr.parse_snapshot_options = arm_spe_parse_snapshot_options;
	sper->itr.recording_options = arm_spe_recording_options;
	sper->itr.info_priv_size = arm_spe_info_priv_size;
	sper->itr.info_fill = arm_spe_info_fill;
	sper->itr.free = arm_spe_recording_free;
	sper->itr.reference = arm_spe_reference;
	sper->itr.read_finish = auxtrace_record__read_finish;
	sper->itr.alignment = 0;

	*err = 0;
	return &sper->itr;
}

/* Default the event's sample period to the driver's advertised minimum interval */
void
arm_spe_pmu_default_config(const struct perf_pmu *arm_spe_pmu, struct perf_event_attr *attr)
{
	attr->sample_period = arm_spe_pmu__sample_period(arm_spe_pmu);
}