// SPDX-License-Identifier: GPL-2.0
/*
 * Arm Statistical Profiling Extensions (SPE) support
 * Copyright (c) 2017-2018, Arm Ltd.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <time.h>

#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evsel.h"
#include "../../../util/evsel_config.h"
#include "../../../util/evlist.h"
#include "../../../util/session.h"
#include <internal/lib.h> // page_size
#include "../../../util/pmu.h"
#include "../../../util/debug.h"
#include "../../../util/auxtrace.h"
#include "../../../util/record.h"
#include "../../../util/header.h"
#include "../../../util/arm-spe.h"
#include <tools/libc_compat.h> // reallocarray

#define ARM_SPE_CPU_MAGIC 0x1010101010101010ULL

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)

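/*
 * Per-session Arm SPE recording state: the auxtrace_record callbacks, the
 * SPE PMU being traced, the recorded evlist, and a per-AUX-mmap "wrapped"
 * flag array used by snapshot mode to remember whether each ring buffer has
 * already wrapped around.
 */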
struct arm_spe_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*arm_spe_pmu;
	struct evlist		*evlist;
	int			wrapped_cnt;
	bool			*wrapped;
};

/* Iterate the config list to detect if the "freq" parameter is set */
static bool arm_spe_is_set_freq(struct evsel *evsel)
{
	struct evsel_config_term *term;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type == EVSEL__CONFIG_TERM_FREQ)
			return true;
	}

	return false;
}

/*
 * arm_spe_find_cpus() returns a new cpu map, and the caller should invoke
 * perf_cpu_map__put() to release the map after use.
 */
static struct perf_cpu_map *arm_spe_find_cpus(struct evlist *evlist)
{
	struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus();
	struct perf_cpu_map *intersect_cpus;

	/* The cpu map is not "any" CPU, so we have specific CPUs to work with */
	if (!perf_cpu_map__has_any_cpu(event_cpus)) {
		intersect_cpus = perf_cpu_map__intersect(event_cpus, online_cpus);
		perf_cpu_map__put(online_cpus);
	/* Event can be "any" CPU so count all CPUs. */
	} else {
		intersect_cpus = online_cpus;
	}

	return intersect_cpus;
}

static size_t
arm_spe_info_priv_size(struct auxtrace_record *itr __maybe_unused,
		       struct evlist *evlist)
{
	struct perf_cpu_map *cpu_map = arm_spe_find_cpus(evlist);
	size_t size;

	if (!cpu_map)
		return 0;

	size = ARM_SPE_AUXTRACE_PRIV_MAX +
	       ARM_SPE_CPU_PRIV_MAX * perf_cpu_map__nr(cpu_map);
	size *= sizeof(u64);

	perf_cpu_map__put(cpu_map);
	return size;
}

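/*
 * Fill one per-CPU block of the auxtrace_info private data: a magic value,
 * the CPU number, the number of parameters that follow, the CPU's MIDR
 * (parsed from the cpuid string), and the SPE PMU type plus the
 * "min_interval" and "event_filter" capabilities read from sysfs (zero if
 * the PMU or a capability is not present). Returns the number of u64
 * entries written, i.e. ARM_SPE_CPU_PRIV_MAX.
 */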
static int arm_spe_save_cpu_header(struct auxtrace_record *itr,
				   struct perf_cpu cpu, __u64 data[])
{
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);
	struct perf_pmu *pmu = NULL;
	char *cpuid = NULL;
	u64 val;

	/* Read CPU MIDR */
	cpuid = get_cpuid_allow_env_override(cpu);
	if (!cpuid)
		return -ENOMEM;
	val = strtol(cpuid, NULL, 16);

	data[ARM_SPE_MAGIC] = ARM_SPE_CPU_MAGIC;
	data[ARM_SPE_CPU] = cpu.cpu;
	data[ARM_SPE_CPU_NR_PARAMS] = ARM_SPE_CPU_PRIV_MAX - ARM_SPE_CPU_MIDR;
	data[ARM_SPE_CPU_MIDR] = val;

	/* Find the associated Arm SPE PMU for the CPU */
	if (perf_cpu_map__has(sper->arm_spe_pmu->cpus, cpu))
		pmu = sper->arm_spe_pmu;

	if (!pmu) {
		/* No Arm SPE PMU is found */
		data[ARM_SPE_CPU_PMU_TYPE] = ULLONG_MAX;
		data[ARM_SPE_CAP_MIN_IVAL] = 0;
		data[ARM_SPE_CAP_EVENT_FILTER] = 0;
	} else {
		data[ARM_SPE_CPU_PMU_TYPE] = pmu->type;

		if (perf_pmu__scan_file(pmu, "caps/min_interval", "%lu", &val) != 1)
			val = 0;
		data[ARM_SPE_CAP_MIN_IVAL] = val;

		if (perf_pmu__scan_file(pmu, "caps/event_filter", "%lx", &val) != 1)
			val = 0;
		data[ARM_SPE_CAP_EVENT_FILTER] = val;
	}

	free(cpuid);
	return ARM_SPE_CPU_PRIV_MAX;
}

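/*
 * Lay out the PERF_RECORD_AUXTRACE_INFO private area for Arm SPE: a global
 * header (header version and size, SPE PMU type, number of traced CPUs)
 * occupying the first ARM_SPE_AUXTRACE_PRIV_MAX entries, followed by one
 * block of ARM_SPE_CPU_PRIV_MAX entries per traced CPU, filled in by
 * arm_spe_save_cpu_header().
 */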
static int arm_spe_info_fill(struct auxtrace_record *itr,
			     struct perf_session *session,
			     struct perf_record_auxtrace_info *auxtrace_info,
			     size_t priv_size)
{
	int i, ret;
	size_t offset;
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);
	struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
	struct perf_cpu_map *cpu_map;
	struct perf_cpu cpu;
	__u64 *data;

	if (priv_size != arm_spe_info_priv_size(itr, session->evlist))
		return -EINVAL;

	if (!session->evlist->core.nr_mmaps)
		return -EINVAL;

	cpu_map = arm_spe_find_cpus(session->evlist);
	if (!cpu_map)
		return -EINVAL;

	auxtrace_info->type = PERF_AUXTRACE_ARM_SPE;
	auxtrace_info->priv[ARM_SPE_HEADER_VERSION] = ARM_SPE_HEADER_CURRENT_VERSION;
	auxtrace_info->priv[ARM_SPE_HEADER_SIZE] =
		ARM_SPE_AUXTRACE_PRIV_MAX - ARM_SPE_HEADER_VERSION;
	auxtrace_info->priv[ARM_SPE_PMU_TYPE_V2] = arm_spe_pmu->type;
	auxtrace_info->priv[ARM_SPE_CPUS_NUM] = perf_cpu_map__nr(cpu_map);

	offset = ARM_SPE_AUXTRACE_PRIV_MAX;
	perf_cpu_map__for_each_cpu(cpu, i, cpu_map) {
		assert(offset < priv_size);
		data = &auxtrace_info->priv[offset];
		ret = arm_spe_save_cpu_header(itr, cpu, data);
		if (ret < 0)
			goto out;
		offset += ret;
	}

	ret = 0;
out:
	perf_cpu_map__put(cpu_map);
	return ret;
}

static void
arm_spe_snapshot_resolve_auxtrace_defaults(struct record_opts *opts,
					   bool privileged)
{
	/*
	 * The default snapshot size is the auxtrace mmap size. If neither auxtrace mmap size nor
	 * snapshot size is specified, then the default is 4MiB for privileged users, 128KiB for
	 * unprivileged users.
	 *
	 * The default auxtrace mmap size is 4MiB/page_size for privileged users, 128KiB for
	 * unprivileged users. If an unprivileged user does not specify mmap pages, the mmap pages
	 * will be reduced from the default 512KiB/page_size to 256KiB/page_size, otherwise the
	 * user is likely to get an error as they exceed their mlock limit.
	 */

	/*
	 * No size was given to '-S' or '-m,', so go with the default.
	 */
	if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	} else if (!opts->auxtrace_mmap_pages && !privileged && opts->mmap_pages == UINT_MAX) {
		opts->mmap_pages = KiB(256) / page_size;
	}

	/*
	 * '-m,xyz' was specified but no snapshot size, so make the snapshot size as big as the
	 * auxtrace mmap area.
	 */
	if (!opts->auxtrace_snapshot_size)
		opts->auxtrace_snapshot_size = opts->auxtrace_mmap_pages * (size_t)page_size;

	/*
	 * '-Sxyz' was specified but no auxtrace mmap area, so make the auxtrace mmap area big
	 * enough to fit the requested snapshot size.
	 */
	if (!opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_snapshot_size;

		sz = round_up(sz, page_size) / page_size;
		opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
	}
}

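/*
 * Default SPE sample period: prefer the minimum interval advertised by the
 * driver in caps/min_interval; the value is cached so the sysfs file is only
 * read once per perf invocation.
 */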
static __u64 arm_spe_pmu__sample_period(const struct perf_pmu *arm_spe_pmu)
{
	static __u64 sample_period;

	if (sample_period)
		return sample_period;

	/*
	 * If the kernel driver doesn't advertise a minimum, use the max
	 * allowed by PMSIDR_EL1.INTERVAL.
	 */
	if (perf_pmu__scan_file(arm_spe_pmu, "caps/min_interval", "%llu",
				&sample_period) != 1) {
		pr_debug("arm_spe driver doesn't advertise a min. interval. Using 4096\n");
		sample_period = 4096;
	}
	return sample_period;
}

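/*
 * Configure the SPE event itself: force a period (SPE does not support
 * frequency mode), move the event to the front of the evlist so the AUX
 * buffer file descriptor comes from it, and set the sample bits the decoder
 * and perf report rely on (CPU and ts_enable for per-cpu mmaps, DATA_SRC,
 * and PHYS_ADDR when pa_enable is set).
 */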
static void arm_spe_setup_evsel(struct evsel *evsel, struct perf_cpu_map *cpus)
{
	u64 bit;

	evsel->core.attr.freq = 0;
	evsel->core.attr.sample_period = arm_spe_pmu__sample_period(evsel->pmu);
	evsel->needs_auxtrace_mmap = true;

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace event
	 * must come first.
	 */
	evlist__to_front(evsel->evlist, evsel);

	/*
	 * In the case of per-cpu mmaps, sample the CPU for the AUX event;
	 * also enable timestamp tracing for sample correlation.
	 */
	if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
		evsel__set_sample_bit(evsel, CPU);
		evsel__set_config_if_unset(evsel->pmu, evsel, "ts_enable", 1);
	}

	/*
	 * Set this only so that perf report knows that SPE generates memory info. It has no effect
	 * on the opening of the event or the SPE data produced.
	 */
	evsel__set_sample_bit(evsel, DATA_SRC);

	/*
	 * The PHYS_ADDR flag does not affect the driver behaviour; it is used to
	 * indicate that the resulting output's SPE samples contain physical addresses
	 * where applicable.
	 */
	bit = perf_pmu__format_bits(evsel->pmu, "pa_enable");
	if (evsel->core.attr.config & bit)
		evsel__set_sample_bit(evsel, PHYS_ADDR);
}

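/*
 * Size and validate the AUX ring buffer, filling in snapshot and mmap
 * defaults where the user gave none; the buffer must be at least 8KiB and a
 * power of two in size.
 */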
static int arm_spe_setup_aux_buffer(struct record_opts *opts)
{
	bool privileged = perf_event_paranoid_check(-1);

	/*
	 * We are in snapshot mode.
	 */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * Command arguments '-Sxyz' and/or '-m,xyz' are missing, so fill those in with
		 * default values.
		 */
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages)
			arm_spe_snapshot_resolve_auxtrace_defaults(opts, privileged);

		/*
		 * Snapshot size can't be bigger than the auxtrace area.
		 */
		if (opts->auxtrace_snapshot_size > opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/*
		 * Something went wrong somewhere - this shouldn't happen.
		 */
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}

		pr_debug2("%sx snapshot size: %zu\n", ARM_SPE_PMU_NAME,
			  opts->auxtrace_snapshot_size);
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (!opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz = KiB(8);

		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for ARM SPE: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}

	return 0;
}

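/*
 * Add a software dummy event to the session so that side-band records
 * (mmap, comm, and optionally context switches) are still captured while
 * SPE data is being collected.
 */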
static int arm_spe_setup_tracking_event(struct evlist *evlist,
					struct record_opts *opts)
{
	int err;
	struct evsel *tracking_evsel;
	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;

	/* Add a dummy event to keep tracking */
	err = parse_event(evlist, "dummy:u");
	if (err)
		return err;

	tracking_evsel = evlist__last(evlist);
	evlist__set_tracking_event(evlist, tracking_evsel);

	tracking_evsel->core.attr.freq = 0;
	tracking_evsel->core.attr.sample_period = 1;

	/* In the per-cpu case, we always need the time of mmap events etc. */
	if (!perf_cpu_map__is_any_cpu_or_is_empty(cpus)) {
		evsel__set_sample_bit(tracking_evsel, TIME);
		evsel__set_sample_bit(tracking_evsel, CPU);

		/* Also track task context switches */
		if (!record_opts__no_switch_events(opts))
			tracking_evsel->core.attr.context_switch = 1;
	}

	return 0;
}

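/*
 * Main recording setup entry point: validate that every AUX event belongs to
 * an Arm SPE PMU, reject frequency-based sampling, configure each SPE event,
 * then size the AUX buffer and add the tracking event. When the "discard"
 * format bit is set on an SPE event, AUX buffer and tracking-event setup are
 * skipped.
 */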
static int arm_spe_recording_options(struct auxtrace_record *itr,
				     struct evlist *evlist,
				     struct record_opts *opts)
{
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);
	struct evsel *evsel, *tmp;
	struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
	bool discard = false;
	int err;

	sper->evlist = evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_aux_event(evsel)) {
			if (!strstarts(evsel->pmu->name, ARM_SPE_PMU_NAME)) {
				pr_err("Found unexpected auxtrace event: %s\n",
				       evsel->pmu->name);
				return -EINVAL;
			}
			opts->full_auxtrace = true;

			if (opts->user_freq != UINT_MAX ||
			    arm_spe_is_set_freq(evsel)) {
				pr_err("Arm SPE: Frequency is not supported. "
				       "Set period with -c option or PMU parameter (-e %s/period=NUM/).\n",
				       evsel->pmu->name);
				return -EINVAL;
			}
		}
	}

	if (!opts->full_auxtrace)
		return 0;

	evlist__for_each_entry_safe(evlist, tmp, evsel) {
		if (evsel__is_aux_event(evsel)) {
			arm_spe_setup_evsel(evsel, cpus);
			if (evsel->core.attr.config &
			    perf_pmu__format_bits(evsel->pmu, "discard"))
				discard = true;
		}
	}

	if (discard)
		return 0;

	err = arm_spe_setup_aux_buffer(opts);
	if (err)
		return err;

	return arm_spe_setup_tracking_event(evlist, opts);
}

static int arm_spe_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
					  struct record_opts *opts,
					  const char *str)
{
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	return 0;
}

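/*
 * In snapshot mode the AUX event is disabled while the buffer is read out
 * and re-enabled afterwards; arm_spe_snapshot_start() and
 * arm_spe_snapshot_finish() implement those two halves.
 */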
static int arm_spe_snapshot_start(struct auxtrace_record *itr)
{
	struct arm_spe_recording *ptr =
			container_of(itr, struct arm_spe_recording, itr);
	struct evsel *evsel;
	int ret = -EINVAL;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel__is_aux_event(evsel)) {
			ret = evsel__disable(evsel);
			if (ret < 0)
				return ret;
		}
	}
	return ret;
}

static int arm_spe_snapshot_finish(struct auxtrace_record *itr)
{
	struct arm_spe_recording *ptr =
			container_of(itr, struct arm_spe_recording, itr);
	struct evsel *evsel;
	int ret = -EINVAL;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel__is_aux_event(evsel)) {
			ret = evsel__enable(evsel);
			if (ret < 0)
				return ret;
		}
	}
	return ret;
}

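/*
 * Grow the per-mmap "wrapped" flag array so that index @idx is valid; newly
 * allocated entries start out as false.
 */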
static int arm_spe_alloc_wrapped_array(struct arm_spe_recording *ptr, int idx)
{
	bool *wrapped;
	int cnt = ptr->wrapped_cnt, new_cnt, i;

	/*
	 * No need to allocate, so return early.
	 */
	if (idx < cnt)
		return 0;

	/*
	 * Make ptr->wrapped as big as idx.
	 */
	new_cnt = idx + 1;

	/*
	 * Freed in arm_spe_recording_free().
	 */
	wrapped = reallocarray(ptr->wrapped, new_cnt, sizeof(bool));
	if (!wrapped)
		return -ENOMEM;

	/*
	 * Initialise the newly allocated values.
	 */
	for (i = cnt; i < new_cnt; i++)
		wrapped[i] = false;

	ptr->wrapped_cnt = new_cnt;
	ptr->wrapped = wrapped;

	return 0;
}

static bool arm_spe_buffer_has_wrapped(unsigned char *buffer,
				       size_t buffer_size, u64 head)
{
	u64 i, watermark;
	u64 *buf = (u64 *)buffer;
	size_t buf_size = buffer_size;

	/*
	 * Defensively handle the case where head might be continually increasing - if its value is
	 * equal or greater than the size of the ring buffer, then we can safely determine it has
	 * wrapped around. Otherwise, continue to detect if head might have wrapped.
	 */
	if (head >= buffer_size)
		return true;

	/*
	 * We want to look at the very last 512 bytes (chosen arbitrarily) of the ring buffer.
	 */
	watermark = buf_size - 512;

	/*
	 * The value of head is somewhere within the size of the ring buffer. This can mean that
	 * there hasn't been enough data to fill the ring buffer yet or the trace time was so long
	 * that head has numerically wrapped around. To find out, we need to check if we have data
	 * at the very end of the ring buffer. We can reliably do this because mmap'ed pages are
	 * zeroed out and there is a fresh mapping with every new session.
	 */

	/*
	 * head is less than 512 bytes from the end of the ring buffer.
	 */
	if (head > watermark)
		watermark = head;

	/*
	 * Speed things up by using 64 bit transactions (see "u64 *buf" above)
	 */
	watermark /= sizeof(u64);
	buf_size /= sizeof(u64);

	/*
	 * If we find trace data at the end of the ring buffer, head has been there and has
	 * numerically wrapped around at least once.
	 */
	for (i = watermark; i < buf_size; i++)
		if (buf[i])
			return true;

	return false;
}

static int arm_spe_find_snapshot(struct auxtrace_record *itr, int idx,
				 struct auxtrace_mmap *mm, unsigned char *data,
				 u64 *head, u64 *old)
{
	int err;
	bool wrapped;
	struct arm_spe_recording *ptr =
			container_of(itr, struct arm_spe_recording, itr);

	/*
	 * Allocate memory to keep track of wrapping if this is the first
	 * time we deal with this *mm.
	 */
	if (idx >= ptr->wrapped_cnt) {
		err = arm_spe_alloc_wrapped_array(ptr, idx);
		if (err)
			return err;
	}

	/*
	 * Check to see if *head has wrapped around. If it hasn't, only the
	 * amount of data between *head and *old is snapshot'ed to avoid
	 * bloating the perf.data file with zeros. But as soon as *head has
	 * wrapped around, the entire size of the AUX ring buffer is taken.
	 */
	wrapped = ptr->wrapped[idx];
	if (!wrapped && arm_spe_buffer_has_wrapped(data, mm->len, *head)) {
		wrapped = true;
		ptr->wrapped[idx] = true;
	}

	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	/*
	 * No wrap has occurred, we can just use *head and *old.
	 */
	if (!wrapped)
		return 0;

	/*
	 * *head has wrapped around - adjust *head and *old to pick up the
	 * entire content of the AUX buffer.
	 */
	if (*head >= mm->len) {
		*old = *head - mm->len;
	} else {
		*head += mm->len;
		*old = *head - mm->len;
	}

	return 0;
}

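/*
 * Produce a reference value for AUX trace records by mixing the seconds and
 * nanoseconds of a raw monotonic clock reading.
 */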
static u64 arm_spe_reference(struct auxtrace_record *itr __maybe_unused)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);

	return ts.tv_sec ^ ts.tv_nsec;
}

static void arm_spe_recording_free(struct auxtrace_record *itr)
{
	struct arm_spe_recording *sper =
			container_of(itr, struct arm_spe_recording, itr);

	zfree(&sper->wrapped);
	free(sper);
}

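/*
 * Allocate the Arm SPE recording state and wire up the auxtrace_record
 * callbacks used by perf record; returns NULL and sets *err on failure.
 */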
struct auxtrace_record *arm_spe_recording_init(int *err,
					       struct perf_pmu *arm_spe_pmu)
{
	struct arm_spe_recording *sper;

	if (!arm_spe_pmu) {
		*err = -ENODEV;
		return NULL;
	}

	sper = zalloc(sizeof(struct arm_spe_recording));
	if (!sper) {
		*err = -ENOMEM;
		return NULL;
	}

	sper->arm_spe_pmu = arm_spe_pmu;
	sper->itr.snapshot_start = arm_spe_snapshot_start;
	sper->itr.snapshot_finish = arm_spe_snapshot_finish;
	sper->itr.find_snapshot = arm_spe_find_snapshot;
	sper->itr.parse_snapshot_options = arm_spe_parse_snapshot_options;
	sper->itr.recording_options = arm_spe_recording_options;
	sper->itr.info_priv_size = arm_spe_info_priv_size;
	sper->itr.info_fill = arm_spe_info_fill;
	sper->itr.free = arm_spe_recording_free;
	sper->itr.reference = arm_spe_reference;
	sper->itr.read_finish = auxtrace_record__read_finish;
	sper->itr.alignment = 0;

	*err = 0;
	return &sper->itr;
}

void
arm_spe_pmu_default_config(const struct perf_pmu *arm_spe_pmu, struct perf_event_attr *attr)
{
	attr->sample_period = arm_spe_pmu__sample_period(arm_spe_pmu);
}