xref: /linux/tools/perf/util/arm-spe.c (revision af9e8d12b139c92e748eb2956bbef03315ea7516)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Arm Statistical Profiling Extensions (SPE) support
4  * Copyright (c) 2017-2018, Arm Ltd.
5  */
6 
7 #include <byteswap.h>
8 #include <endian.h>
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <linux/bitops.h>
12 #include <linux/kernel.h>
13 #include <linux/log2.h>
14 #include <linux/types.h>
15 #include <linux/zalloc.h>
16 #include <stdlib.h>
17 #include <unistd.h>
18 
19 #include "auxtrace.h"
20 #include "color.h"
21 #include "debug.h"
22 #include "evlist.h"
23 #include "evsel.h"
24 #include "machine.h"
25 #include "session.h"
26 #include "symbol.h"
27 #include "thread.h"
28 #include "thread-stack.h"
29 #include "tsc.h"
30 #include "tool.h"
31 #include "util/synthetic-events.h"
32 
33 #include "arm-spe.h"
34 #include "arm-spe-decoder/arm-spe-decoder.h"
35 #include "arm-spe-decoder/arm-spe-pkt-decoder.h"
36 
37 #include "../../arch/arm64/include/asm/cputype.h"
38 #define MAX_TIMESTAMP (~0ULL)
39 
40 #define is_ldst_op(op)		(!!((op) & ARM_SPE_OP_LDST))
41 
42 #define ARM_SPE_CACHE_EVENT(lvl) \
43 	(ARM_SPE_##lvl##_ACCESS | ARM_SPE_##lvl##_MISS)
44 
45 #define arm_spe_is_cache_level(type, lvl) \
46 	((type) & ARM_SPE_CACHE_EVENT(lvl))
47 
48 #define arm_spe_is_cache_hit(type, lvl) \
49 	(((type) & ARM_SPE_CACHE_EVENT(lvl)) == ARM_SPE_##lvl##_ACCESS)
50 
51 #define arm_spe_is_cache_miss(type, lvl) \
52 	((type) & ARM_SPE_##lvl##_MISS)
53 
54 struct arm_spe {
55 	struct auxtrace			auxtrace;
56 	struct auxtrace_queues		queues;
57 	struct auxtrace_heap		heap;
58 	struct itrace_synth_opts        synth_opts;
59 	u32				auxtrace_type;
60 	struct perf_session		*session;
61 	struct machine			*machine;
62 	u32				pmu_type;
63 
64 	struct perf_tsc_conversion	tc;
65 
66 	u8				timeless_decoding;
67 	u8				data_queued;
68 
69 	u64				sample_type;
70 	u8				sample_flc;
71 	u8				sample_llc;
72 	u8				sample_tlb;
73 	u8				sample_branch;
74 	u8				sample_remote_access;
75 	u8				sample_memory;
76 	u8				sample_instructions;
77 
78 	u64				l1d_miss_id;
79 	u64				l1d_access_id;
80 	u64				llc_miss_id;
81 	u64				llc_access_id;
82 	u64				tlb_miss_id;
83 	u64				tlb_access_id;
84 	u64				branch_id;
85 	u64				remote_access_id;
86 	u64				memory_id;
87 	u64				instructions_id;
88 
89 	u64				kernel_start;
90 
91 	unsigned long			num_events;
92 	u8				use_ctx_pkt_for_pid;
93 
94 	u64				**metadata;
95 	u64				metadata_ver;
96 	u64				metadata_nr_cpu;
97 	bool				is_homogeneous;
98 };
99 
100 struct arm_spe_queue {
101 	struct arm_spe			*spe;
102 	unsigned int			queue_nr;
103 	struct auxtrace_buffer		*buffer;
104 	struct auxtrace_buffer		*old_buffer;
105 	union perf_event		*event_buf;
106 	bool				on_heap;
107 	bool				done;
108 	pid_t				pid;
109 	pid_t				tid;
110 	int				cpu;
111 	struct arm_spe_decoder		*decoder;
112 	u64				time;
113 	u64				timestamp;
114 	struct thread			*thread;
115 	u64				sample_count;
116 	u32				flags;
117 	struct branch_stack		*last_branch;
118 };
119 
120 struct data_source_handle {
121 	const struct midr_range *midr_ranges;
122 	void (*ds_synth)(const struct arm_spe_record *record,
123 			 union perf_mem_data_src *data_src);
124 };
125 
126 #define DS(range, func)					\
127 	{						\
128 		.midr_ranges = range,			\
129 		.ds_synth = arm_spe__synth_##func,	\
130 	}
131 
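/*
 * Decode a raw SPE buffer packet by packet and pretty-print each one; when a
 * packet cannot be parsed, step forward a single byte and flag it as bad.
 */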
132 static void arm_spe_dump(struct arm_spe *spe __maybe_unused,
133 			 unsigned char *buf, size_t len)
134 {
135 	struct arm_spe_pkt packet;
136 	size_t pos = 0;
137 	int ret, pkt_len, i;
138 	char desc[ARM_SPE_PKT_DESC_MAX];
139 	const char *color = PERF_COLOR_BLUE;
140 
141 	color_fprintf(stdout, color,
142 		      ". ... ARM SPE data: size %#zx bytes\n",
143 		      len);
144 
145 	while (len) {
146 		ret = arm_spe_get_packet(buf, len, &packet);
147 		if (ret > 0)
148 			pkt_len = ret;
149 		else
150 			pkt_len = 1;
151 		printf(".");
152 		color_fprintf(stdout, color, "  %08zx: ", pos);
153 		for (i = 0; i < pkt_len; i++)
154 			color_fprintf(stdout, color, " %02x", buf[i]);
155 		for (; i < 16; i++)
156 			color_fprintf(stdout, color, "   ");
157 		if (ret > 0) {
158 			ret = arm_spe_pkt_desc(&packet, desc,
159 					       ARM_SPE_PKT_DESC_MAX);
160 			if (!ret)
161 				color_fprintf(stdout, color, " %s\n", desc);
162 		} else {
163 			color_fprintf(stdout, color, " Bad packet!\n");
164 		}
165 		pos += pkt_len;
166 		buf += pkt_len;
167 		len -= pkt_len;
168 	}
169 }
170 
171 static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf,
172 			       size_t len)
173 {
174 	printf(".\n");
175 	arm_spe_dump(spe, buf, len);
176 }
177 
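/*
 * Decoder callback used to fetch trace data: advance to the next auxtrace
 * buffer for this queue, load its data from the perf data file if needed, and
 * drop the previous buffer once it is no longer referenced.
 */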
178 static int arm_spe_get_trace(struct arm_spe_buffer *b, void *data)
179 {
180 	struct arm_spe_queue *speq = data;
181 	struct auxtrace_buffer *buffer = speq->buffer;
182 	struct auxtrace_buffer *old_buffer = speq->old_buffer;
183 	struct auxtrace_queue *queue;
184 
185 	queue = &speq->spe->queues.queue_array[speq->queue_nr];
186 
187 	buffer = auxtrace_buffer__next(queue, buffer);
188 	/* If no more data, drop the previous auxtrace_buffer and return */
189 	if (!buffer) {
190 		if (old_buffer)
191 			auxtrace_buffer__drop_data(old_buffer);
192 		b->len = 0;
193 		return 0;
194 	}
195 
196 	speq->buffer = buffer;
197 
198 	/* If the aux_buffer doesn't have data associated, try to load it */
199 	if (!buffer->data) {
200 		/* get the file desc associated with the perf data file */
201 		int fd = perf_data__fd(speq->spe->session->data);
202 
203 		buffer->data = auxtrace_buffer__get_data(buffer, fd);
204 		if (!buffer->data)
205 			return -ENOMEM;
206 	}
207 
208 	b->len = buffer->size;
209 	b->buf = buffer->data;
210 
211 	if (b->len) {
212 		if (old_buffer)
213 			auxtrace_buffer__drop_data(old_buffer);
214 		speq->old_buffer = buffer;
215 	} else {
216 		auxtrace_buffer__drop_data(buffer);
217 		return arm_spe_get_trace(b, data);
218 	}
219 
220 	return 0;
221 }
222 
223 static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe,
224 		unsigned int queue_nr)
225 {
226 	struct arm_spe_params params = { .get_trace = 0, };
227 	struct arm_spe_queue *speq;
228 
229 	speq = zalloc(sizeof(*speq));
230 	if (!speq)
231 		return NULL;
232 
233 	speq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
234 	if (!speq->event_buf)
235 		goto out_free;
236 
237 	speq->spe = spe;
238 	speq->queue_nr = queue_nr;
239 	speq->pid = -1;
240 	speq->tid = -1;
241 	speq->cpu = -1;
242 
243 	/* Set up the decoder parameters */
244 	params.get_trace = arm_spe_get_trace;
245 	params.data = speq;
246 
247 	if (spe->synth_opts.last_branch) {
248 		size_t sz = sizeof(struct branch_stack);
249 
250 		/* Allocate up to two entries for PBT + TGT */
251 		sz += sizeof(struct branch_entry) *
252 			min(spe->synth_opts.last_branch_sz, 2U);
253 		speq->last_branch = zalloc(sz);
254 		if (!speq->last_branch)
255 			goto out_free;
256 	}
257 
258 	/* create new decoder */
259 	speq->decoder = arm_spe_decoder_new(&params);
260 	if (!speq->decoder)
261 		goto out_free;
262 
263 	return speq;
264 
265 out_free:
266 	zfree(&speq->event_buf);
267 	zfree(&speq->last_branch);
268 	free(speq);
269 
270 	return NULL;
271 }
272 
273 static inline u8 arm_spe_cpumode(struct arm_spe *spe, u64 ip)
274 {
275 	return ip >= spe->kernel_start ?
276 		PERF_RECORD_MISC_KERNEL :
277 		PERF_RECORD_MISC_USER;
278 }
279 
280 static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe,
281 				    struct auxtrace_queue *queue)
282 {
283 	struct arm_spe_queue *speq = queue->priv;
284 	pid_t tid;
285 
286 	tid = machine__get_current_tid(spe->machine, speq->cpu);
287 	if (tid != -1) {
288 		speq->tid = tid;
289 		thread__zput(speq->thread);
290 	} else
291 		speq->tid = queue->tid;
292 
293 	if ((!speq->thread) && (speq->tid != -1)) {
294 		speq->thread = machine__find_thread(spe->machine, -1,
295 						    speq->tid);
296 	}
297 
298 	if (speq->thread) {
299 		speq->pid = thread__pid(speq->thread);
300 		if (queue->cpu == -1)
301 			speq->cpu = thread__cpu(speq->thread);
302 	}
303 }
304 
305 static int arm_spe_set_tid(struct arm_spe_queue *speq, pid_t tid)
306 {
307 	struct arm_spe *spe = speq->spe;
308 	int err = machine__set_current_tid(spe->machine, speq->cpu, -1, tid);
309 
310 	if (err)
311 		return err;
312 
313 	arm_spe_set_pid_tid_cpu(spe, &spe->queues.queue_array[speq->queue_nr]);
314 
315 	return 0;
316 }
317 
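/*
 * Find the per-CPU metadata recorded at capture time. A CPU ID of -1
 * (per-thread mode) can only be resolved on a homogeneous system, where
 * CPU0's metadata stands in for every CPU.
 */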
318 static u64 *arm_spe__get_metadata_by_cpu(struct arm_spe *spe, int cpu)
319 {
320 	u64 i;
321 
322 	if (!spe->metadata)
323 		return NULL;
324 
325 	/* CPU ID is -1 for per-thread mode */
326 	if (cpu < 0) {
327 		/*
328 		 * On a heterogeneous system, a CPU ID of -1 means we cannot
329 		 * confirm whether the data source packet is supported.
330 		 */
331 		if (!spe->is_homogeneous)
332 			return NULL;
333 
334 		/* On a homogeneous system, simply use CPU0's metadata */
335 		return spe->metadata[0];
336 	}
337 
338 	for (i = 0; i < spe->metadata_nr_cpu; i++)
339 		if (spe->metadata[i][ARM_SPE_CPU] == (u64)cpu)
340 			return spe->metadata[i];
341 
342 	return NULL;
343 }
344 
345 static struct simd_flags arm_spe__synth_simd_flags(const struct arm_spe_record *record)
346 {
347 	struct simd_flags simd_flags = {};
348 
349 	if ((record->op & ARM_SPE_OP_LDST) && (record->op & ARM_SPE_OP_SVE_LDST))
350 		simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
351 
352 	if ((record->op & ARM_SPE_OP_OTHER) && (record->op & ARM_SPE_OP_SVE_OTHER))
353 		simd_flags.arch |= SIMD_OP_FLAGS_ARCH_SVE;
354 
355 	if (record->type & ARM_SPE_SVE_PARTIAL_PRED)
356 		simd_flags.pred |= SIMD_OP_FLAGS_PRED_PARTIAL;
357 
358 	if (record->type & ARM_SPE_SVE_EMPTY_PRED)
359 		simd_flags.pred |= SIMD_OP_FLAGS_PRED_EMPTY;
360 
361 	return simd_flags;
362 }
363 
364 static void arm_spe_prep_sample(struct arm_spe *spe,
365 				struct arm_spe_queue *speq,
366 				union perf_event *event,
367 				struct perf_sample *sample)
368 {
369 	struct arm_spe_record *record = &speq->decoder->record;
370 
371 	if (!spe->timeless_decoding)
372 		sample->time = tsc_to_perf_time(record->timestamp, &spe->tc);
373 
374 	sample->ip = record->from_ip;
375 	sample->cpumode = arm_spe_cpumode(spe, sample->ip);
376 	sample->pid = speq->pid;
377 	sample->tid = speq->tid;
378 	sample->period = spe->synth_opts.period;
379 	sample->cpu = speq->cpu;
380 	sample->simd_flags = arm_spe__synth_simd_flags(record);
381 
382 	event->sample.header.type = PERF_RECORD_SAMPLE;
383 	event->sample.header.misc = sample->cpumode;
384 	event->sample.header.size = sizeof(struct perf_event_header);
385 }
386 
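/*
 * Build a synthetic branch stack with at most two entries: the sampled branch
 * itself (from/to addresses plus flags) and, when available, the previous
 * branch target reported by SPE.
 */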
387 static void arm_spe__prep_branch_stack(struct arm_spe_queue *speq)
388 {
389 	struct arm_spe *spe = speq->spe;
390 	struct arm_spe_record *record = &speq->decoder->record;
391 	struct branch_stack *bstack = speq->last_branch;
392 	struct branch_flags *bs_flags;
393 	unsigned int last_branch_sz = spe->synth_opts.last_branch_sz;
394 	bool have_tgt = !!(speq->flags & PERF_IP_FLAG_BRANCH);
395 	bool have_pbt = last_branch_sz >= (have_tgt + 1U) && record->prev_br_tgt;
396 	size_t sz = sizeof(struct branch_stack) +
397 		    sizeof(struct branch_entry) * min(last_branch_sz, 2U) /* PBT + TGT */;
398 	int i = 0;
399 
400 	/* Clean up branch stack */
401 	memset(bstack, 0x0, sz);
402 
403 	if (!have_tgt && !have_pbt)
404 		return;
405 
406 	if (have_tgt) {
407 		bstack->entries[i].from = record->from_ip;
408 		bstack->entries[i].to = record->to_ip;
409 
410 		bs_flags = &bstack->entries[i].flags;
411 		bs_flags->value = 0;
412 
413 		if (record->op & ARM_SPE_OP_BR_CR_BL) {
414 			if (record->op & ARM_SPE_OP_BR_COND)
415 				bs_flags->type |= PERF_BR_COND_CALL;
416 			else
417 				bs_flags->type |= PERF_BR_CALL;
418 		/*
419 		 * An indirect branch instruction without link (e.g. BR) is
420 		 * treated as a function return.
421 		 */
422 		} else if (record->op & ARM_SPE_OP_BR_CR_RET ||
423 			   record->op & ARM_SPE_OP_BR_INDIRECT) {
424 			if (record->op & ARM_SPE_OP_BR_COND)
425 				bs_flags->type |= PERF_BR_COND_RET;
426 			else
427 				bs_flags->type |= PERF_BR_RET;
428 		} else if (record->op & ARM_SPE_OP_BR_CR_NON_BL_RET) {
429 			if (record->op & ARM_SPE_OP_BR_COND)
430 				bs_flags->type |= PERF_BR_COND;
431 			else
432 				bs_flags->type |= PERF_BR_UNCOND;
433 		} else {
434 			if (record->op & ARM_SPE_OP_BR_COND)
435 				bs_flags->type |= PERF_BR_COND;
436 			else
437 				bs_flags->type |= PERF_BR_UNKNOWN;
438 		}
439 
440 		if (record->type & ARM_SPE_BRANCH_MISS) {
441 			bs_flags->mispred = 1;
442 			bs_flags->predicted = 0;
443 		} else {
444 			bs_flags->mispred = 0;
445 			bs_flags->predicted = 1;
446 		}
447 
448 		if (record->type & ARM_SPE_BRANCH_NOT_TAKEN)
449 			bs_flags->not_taken = 1;
450 
451 		if (record->type & ARM_SPE_IN_TXN)
452 			bs_flags->in_tx = 1;
453 
454 		bs_flags->cycles = min(record->latency, 0xFFFFU);
455 		i++;
456 	}
457 
458 	if (have_pbt) {
459 		bs_flags = &bstack->entries[i].flags;
460 		bs_flags->type |= PERF_BR_UNKNOWN;
461 		bstack->entries[i].to = record->prev_br_tgt;
462 		i++;
463 	}
464 
465 	bstack->nr = i;
466 	bstack->hw_idx = -1ULL;
467 }
468 
469 static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type)
470 {
471 	event->header.size = perf_event__sample_event_size(sample, type, 0);
472 	return perf_event__synthesize_sample(event, type, 0, sample);
473 }
474 
475 static inline int
476 arm_spe_deliver_synth_event(struct arm_spe *spe,
477 			    struct arm_spe_queue *speq __maybe_unused,
478 			    union perf_event *event,
479 			    struct perf_sample *sample)
480 {
481 	int ret;
482 
483 	if (spe->synth_opts.inject) {
484 		ret = arm_spe__inject_event(event, sample, spe->sample_type);
485 		if (ret)
486 			return ret;
487 	}
488 
489 	ret = perf_session__deliver_synth_event(spe->session, event, sample);
490 	if (ret)
491 		pr_err("ARM SPE: failed to deliver event, error %d\n", ret);
492 
493 	return ret;
494 }
495 
496 static int arm_spe__synth_mem_sample(struct arm_spe_queue *speq,
497 				     u64 spe_events_id,
498 				     union perf_mem_data_src data_src)
499 {
500 	struct arm_spe *spe = speq->spe;
501 	struct arm_spe_record *record = &speq->decoder->record;
502 	union perf_event *event = speq->event_buf;
503 	struct perf_sample sample;
504 	int ret;
505 
506 	perf_sample__init(&sample, /*all=*/true);
507 	arm_spe_prep_sample(spe, speq, event, &sample);
508 
509 	sample.id = spe_events_id;
510 	sample.stream_id = spe_events_id;
511 	sample.addr = record->virt_addr;
512 	sample.phys_addr = record->phys_addr;
513 	sample.data_src = data_src.val;
514 	sample.weight = record->latency;
515 
516 	ret = arm_spe_deliver_synth_event(spe, speq, event, &sample);
517 	perf_sample__exit(&sample);
518 	return ret;
519 }
520 
521 static int arm_spe__synth_branch_sample(struct arm_spe_queue *speq,
522 					u64 spe_events_id)
523 {
524 	struct arm_spe *spe = speq->spe;
525 	struct arm_spe_record *record = &speq->decoder->record;
526 	union perf_event *event = speq->event_buf;
527 	struct perf_sample sample;
528 	int ret;
529 
530 	perf_sample__init(&sample, /*all=*/true);
531 	arm_spe_prep_sample(spe, speq, event, &sample);
532 
533 	sample.id = spe_events_id;
534 	sample.stream_id = spe_events_id;
535 	sample.addr = record->to_ip;
536 	sample.weight = record->latency;
537 	sample.flags = speq->flags;
538 	sample.branch_stack = speq->last_branch;
539 
540 	ret = arm_spe_deliver_synth_event(spe, speq, event, &sample);
541 	perf_sample__exit(&sample);
542 	return ret;
543 }
544 
545 static int arm_spe__synth_instruction_sample(struct arm_spe_queue *speq,
546 					     u64 spe_events_id,
547 					     union perf_mem_data_src data_src)
548 {
549 	struct arm_spe *spe = speq->spe;
550 	struct arm_spe_record *record = &speq->decoder->record;
551 	union perf_event *event = speq->event_buf;
552 	struct perf_sample sample;
553 	int ret;
554 
555 	perf_sample__init(&sample, /*all=*/true);
556 	arm_spe_prep_sample(spe, speq, event, &sample);
557 
558 	sample.id = spe_events_id;
559 	sample.stream_id = spe_events_id;
560 	sample.addr = record->to_ip;
561 	sample.phys_addr = record->phys_addr;
562 	sample.data_src = data_src.val;
563 	sample.weight = record->latency;
564 	sample.flags = speq->flags;
565 	sample.branch_stack = speq->last_branch;
566 
567 	ret = arm_spe_deliver_synth_event(spe, speq, event, &sample);
568 	perf_sample__exit(&sample);
569 	return ret;
570 }
571 
572 static const struct midr_range common_ds_encoding_cpus[] = {
573 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
574 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A720AE),
575 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A725),
576 	MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
577 	MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
578 	MIDR_ALL_VERSIONS(MIDR_CORTEX_X925),
579 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
580 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
581 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
582 	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
583 	{},
584 };
585 
586 static const struct midr_range ampereone_ds_encoding_cpus[] = {
587 	MIDR_ALL_VERSIONS(MIDR_AMPERE1A),
588 	{},
589 };
590 
591 static const struct midr_range hisi_hip_ds_encoding_cpus[] = {
592 	MIDR_ALL_VERSIONS(MIDR_HISI_HIP12),
593 	{},
594 };
595 
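/*
 * Map the branch information of the current record onto the generic
 * PERF_IP_FLAG_* sample flags.
 */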
596 static void arm_spe__sample_flags(struct arm_spe_queue *speq)
597 {
598 	const struct arm_spe_record *record = &speq->decoder->record;
599 
600 	speq->flags = 0;
601 	if (record->op & ARM_SPE_OP_BRANCH_ERET) {
602 		speq->flags = PERF_IP_FLAG_BRANCH;
603 
604 		if (record->type & ARM_SPE_BRANCH_MISS)
605 			speq->flags |= PERF_IP_FLAG_BRANCH_MISS;
606 
607 		if (record->type & ARM_SPE_BRANCH_NOT_TAKEN)
608 			speq->flags |= PERF_IP_FLAG_NOT_TAKEN;
609 
610 		if (record->type & ARM_SPE_IN_TXN)
611 			speq->flags |= PERF_IP_FLAG_IN_TX;
612 
613 		if (record->op & ARM_SPE_OP_BR_COND)
614 			speq->flags |= PERF_IP_FLAG_CONDITIONAL;
615 
616 		if (record->op & ARM_SPE_OP_BR_CR_BL)
617 			speq->flags |= PERF_IP_FLAG_CALL;
618 		else if (record->op & ARM_SPE_OP_BR_CR_RET)
619 			speq->flags |= PERF_IP_FLAG_RETURN;
620 		/*
621 		 * An indirect branch instruction without link (e.g. BR) is
622 		 * treated as a function return.
623 		 */
624 		else if (record->op & ARM_SPE_OP_BR_INDIRECT)
625 			speq->flags |= PERF_IP_FLAG_RETURN;
626 	}
627 }
628 
629 static void arm_spe__synth_data_source_common(const struct arm_spe_record *record,
630 					      union perf_mem_data_src *data_src)
631 {
632 	/*
633 	 * Even though four levels of cache hierarchy are possible, no known
634 	 * production Neoverse systems currently include more than three levels,
635 	 * so for the time being we assume three exist. If a production system
636 	 * is built with four then this function would have to be changed to
637 	 * detect the number of levels for reporting.
638 	 */
639 
640 	/*
641 	 * We have no data on the hit level or data source for stores in the
642 	 * Neoverse SPE records.
643 	 */
644 	if (record->op & ARM_SPE_OP_ST) {
645 		data_src->mem_lvl = PERF_MEM_LVL_NA;
646 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
647 		data_src->mem_snoop = PERF_MEM_SNOOP_NA;
648 		return;
649 	}
650 
651 	switch (record->source) {
652 	case ARM_SPE_COMMON_DS_L1D:
653 		data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
654 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
655 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
656 		break;
657 	case ARM_SPE_COMMON_DS_L2:
658 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
659 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
660 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
661 		break;
662 	case ARM_SPE_COMMON_DS_PEER_CORE:
663 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
664 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
665 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
666 		break;
667 	/*
668 	 * We don't know if this is L1 or L2, but we do know it was a
669 	 * cache-to-cache transfer, so set SNOOPX_PEER.
670 	 */
671 	case ARM_SPE_COMMON_DS_LOCAL_CLUSTER:
672 	case ARM_SPE_COMMON_DS_PEER_CLUSTER:
673 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
674 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
675 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
676 		break;
677 	/*
678 	 * System cache is assumed to be L3
679 	 */
680 	case ARM_SPE_COMMON_DS_SYS_CACHE:
681 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
682 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
683 		data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
684 		break;
685 	/*
686 	 * We don't know what level it hit in, except it came from the other
687 	 * socket
688 	 */
689 	case ARM_SPE_COMMON_DS_REMOTE:
690 		data_src->mem_lvl = PERF_MEM_LVL_NA;
691 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
692 		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
693 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
694 		break;
695 	case ARM_SPE_COMMON_DS_DRAM:
696 		data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
697 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
698 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
699 		break;
700 	default:
701 		break;
702 	}
703 }
704 
705 /*
706  * Source is IMPDEF. Here we convert the encoding used on AmpereOne cores to
707  * the common (Neoverse, Cortex) encoding to avoid duplicating the decoding code.
708  */
709 static void arm_spe__synth_data_source_ampereone(const struct arm_spe_record *record,
710 						 union perf_mem_data_src *data_src)
711 {
712 	struct arm_spe_record common_record;
713 
714 	switch (record->source) {
715 	case ARM_SPE_AMPEREONE_LOCAL_CHIP_CACHE_OR_DEVICE:
716 		common_record.source = ARM_SPE_COMMON_DS_PEER_CORE;
717 		break;
718 	case ARM_SPE_AMPEREONE_SLC:
719 		common_record.source = ARM_SPE_COMMON_DS_SYS_CACHE;
720 		break;
721 	case ARM_SPE_AMPEREONE_REMOTE_CHIP_CACHE:
722 		common_record.source = ARM_SPE_COMMON_DS_REMOTE;
723 		break;
724 	case ARM_SPE_AMPEREONE_DDR:
725 		common_record.source = ARM_SPE_COMMON_DS_DRAM;
726 		break;
727 	case ARM_SPE_AMPEREONE_L1D:
728 		common_record.source = ARM_SPE_COMMON_DS_L1D;
729 		break;
730 	case ARM_SPE_AMPEREONE_L2D:
731 		common_record.source = ARM_SPE_COMMON_DS_L2;
732 		break;
733 	default:
734 		pr_warning_once("AmpereOne: Unknown data source (0x%x)\n",
735 				record->source);
736 		return;
737 	}
738 
739 	common_record.op = record->op;
740 	arm_spe__synth_data_source_common(&common_record, data_src);
741 }
742 
743 static void arm_spe__synth_data_source_hisi_hip(const struct arm_spe_record *record,
744 						union perf_mem_data_src *data_src)
745 {
746 	/* Use common synthesis method to handle store operations */
747 	if (record->op & ARM_SPE_OP_ST) {
748 		arm_spe__synth_data_source_common(record, data_src);
749 		return;
750 	}
751 
752 	switch (record->source) {
753 	case ARM_SPE_HISI_HIP_PEER_CPU:
754 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
755 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
756 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
757 		break;
758 	case ARM_SPE_HISI_HIP_PEER_CPU_HITM:
759 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
760 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
761 		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
762 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
763 		break;
764 	case ARM_SPE_HISI_HIP_L3:
765 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
766 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
767 		data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
768 		break;
769 	case ARM_SPE_HISI_HIP_L3_HITM:
770 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
771 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
772 		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
773 		break;
774 	case ARM_SPE_HISI_HIP_PEER_CLUSTER:
775 		data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1 | PERF_MEM_LVL_HIT;
776 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
777 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
778 		break;
779 	case ARM_SPE_HISI_HIP_PEER_CLUSTER_HITM:
780 		data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1 | PERF_MEM_LVL_HIT;
781 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
782 		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
783 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
784 		break;
785 	case ARM_SPE_HISI_HIP_REMOTE_SOCKET:
786 		data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2;
787 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
788 		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
789 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
790 		break;
791 	case ARM_SPE_HISI_HIP_REMOTE_SOCKET_HITM:
792 		data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2;
793 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_ANY_CACHE;
794 		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
795 		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
796 		data_src->mem_snoopx = PERF_MEM_SNOOPX_PEER;
797 		break;
798 	case ARM_SPE_HISI_HIP_LOCAL_MEM:
799 		data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
800 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
801 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
802 		break;
803 	case ARM_SPE_HISI_HIP_REMOTE_MEM:
804 		data_src->mem_lvl = PERF_MEM_LVL_REM_RAM1 | PERF_MEM_LVL_HIT;
805 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_RAM;
806 		data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
807 		break;
808 	case ARM_SPE_HISI_HIP_NC_DEV:
809 		data_src->mem_lvl = PERF_MEM_LVL_IO | PERF_MEM_LVL_HIT;
810 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_IO;
811 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
812 		break;
813 	case ARM_SPE_HISI_HIP_L2:
814 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
815 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
816 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
817 		break;
818 	case ARM_SPE_HISI_HIP_L2_HITM:
819 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
820 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
821 		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
822 		break;
823 	case ARM_SPE_HISI_HIP_L1:
824 		data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
825 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
826 		data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
827 		break;
828 	default:
829 		break;
830 	}
831 }
832 
833 static const struct data_source_handle data_source_handles[] = {
834 	DS(common_ds_encoding_cpus, data_source_common),
835 	DS(ampereone_ds_encoding_cpus, data_source_ampereone),
836 	DS(hisi_hip_ds_encoding_cpus, data_source_hisi_hip),
837 };
838 
839 static void arm_spe__synth_ld_memory_level(const struct arm_spe_record *record,
840 					   union perf_mem_data_src *data_src)
841 {
842 	/*
843 	 * To find a cache hit, search in ascending order from the lower level
844 	 * caches to the higher level caches. This reflects the best scenario
845 	 * for a cache hit.
846 	 */
847 	if (arm_spe_is_cache_hit(record->type, L1D)) {
848 		data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
849 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
850 	} else if (record->type & ARM_SPE_RECENTLY_FETCHED) {
851 		data_src->mem_lvl = PERF_MEM_LVL_LFB | PERF_MEM_LVL_HIT;
852 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_LFB;
853 	} else if (arm_spe_is_cache_hit(record->type, L2D)) {
854 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
855 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
856 	} else if (arm_spe_is_cache_hit(record->type, LLC)) {
857 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
858 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
859 	/*
860 	 * To find a cache miss, search in descending order from the higher
861 	 * level cache to the lower level cache. This represents the worst
862 	 * scenario for a cache miss.
863 	 */
864 	} else if (arm_spe_is_cache_miss(record->type, LLC)) {
865 		data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_MISS;
866 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
867 	} else if (arm_spe_is_cache_miss(record->type, L2D)) {
868 		data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_MISS;
869 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
870 	} else if (arm_spe_is_cache_miss(record->type, L1D)) {
871 		data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
872 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
873 	}
874 }
875 
876 static void arm_spe__synth_st_memory_level(const struct arm_spe_record *record,
877 					   union perf_mem_data_src *data_src)
878 {
879 	/* Record the highest cache level information for a store operation. */
880 	if (arm_spe_is_cache_level(record->type, LLC)) {
881 		data_src->mem_lvl = PERF_MEM_LVL_L3;
882 		data_src->mem_lvl |= arm_spe_is_cache_miss(record->type, LLC) ?
883 				     PERF_MEM_LVL_MISS : PERF_MEM_LVL_HIT;
884 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L3;
885 	} else if (arm_spe_is_cache_level(record->type, L2D)) {
886 		data_src->mem_lvl = PERF_MEM_LVL_L2;
887 		data_src->mem_lvl |= arm_spe_is_cache_miss(record->type, L2D) ?
888 				     PERF_MEM_LVL_MISS : PERF_MEM_LVL_HIT;
889 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L2;
890 	} else if (arm_spe_is_cache_level(record->type, L1D)) {
891 		data_src->mem_lvl = PERF_MEM_LVL_L1;
892 		data_src->mem_lvl |= arm_spe_is_cache_miss(record->type, L1D) ?
893 				     PERF_MEM_LVL_MISS : PERF_MEM_LVL_HIT;
894 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_L1;
895 	}
896 }
897 
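/*
 * Fill in the memory level, snoop and remote fields of the data source,
 * keeping any values already provided by the data source packet and falling
 * back to the per-level cache access/miss events otherwise.
 */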
898 static void arm_spe__synth_memory_level(struct arm_spe_queue *speq,
899 					const struct arm_spe_record *record,
900 					union perf_mem_data_src *data_src)
901 {
902 	struct arm_spe *spe = speq->spe;
903 
904 	/*
905 	 * The data source packet contains more detailed cache level info for
906 	 * peer snooping, so respect the memory level if it has already been
907 	 * set by data source parsing.
908 	 */
909 	if (!data_src->mem_lvl) {
910 		if (data_src->mem_op == PERF_MEM_OP_LOAD)
911 			arm_spe__synth_ld_memory_level(record, data_src);
912 		if (data_src->mem_op == PERF_MEM_OP_STORE)
913 			arm_spe__synth_st_memory_level(record, data_src);
914 	}
915 
916 	if (!data_src->mem_lvl) {
917 		data_src->mem_lvl = PERF_MEM_LVL_NA;
918 		data_src->mem_lvl_num = PERF_MEM_LVLNUM_NA;
919 	}
920 
921 	/*
922 	 * If 'mem_snoop' has already been set by the data source packet,
923 	 * don't set it again here.
924 	 */
925 	if (!data_src->mem_snoop) {
926 		if (record->type & ARM_SPE_DATA_SNOOPED) {
927 			if (record->type & ARM_SPE_HITM)
928 				data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
929 			else
930 				data_src->mem_snoop = PERF_MEM_SNOOP_HIT;
931 		} else {
932 			u64 *metadata =
933 				arm_spe__get_metadata_by_cpu(spe, speq->cpu);
934 
935 			/*
936 			 * Set NA ("Not available") mode if there is no metadata
937 			 * or the SNOOPED event is not supported.
938 			 */
939 			if (!metadata ||
940 			    !(metadata[ARM_SPE_CAP_EVENT_FILTER] & ARM_SPE_DATA_SNOOPED))
941 				data_src->mem_snoop = PERF_MEM_SNOOP_NA;
942 			else
943 				data_src->mem_snoop = PERF_MEM_SNOOP_NONE;
944 		}
945 	}
946 
947 	if (!data_src->mem_remote) {
948 		if (record->type & ARM_SPE_REMOTE_ACCESS)
949 			data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
950 	}
951 }
952 
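/*
 * Select the data source decoder matching this CPU's MIDR and use it to
 * synthesize the data source fields for the record.
 */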
953 static void arm_spe__synth_ds(struct arm_spe_queue *speq,
954 			      const struct arm_spe_record *record,
955 			      union perf_mem_data_src *data_src)
956 {
957 	struct arm_spe *spe = speq->spe;
958 	u64 *metadata = NULL;
959 	u64 midr;
960 	unsigned int i;
961 
962 	/* Metadata version 1 assumes all CPUs are the same (old behavior) */
963 	if (spe->metadata_ver == 1) {
964 		const char *cpuid;
965 
966 		pr_warning_once("Old SPE metadata, re-record to improve decode accuracy\n");
967 		cpuid = perf_env__cpuid(perf_session__env(spe->session));
968 		midr = strtol(cpuid, NULL, 16);
969 	} else {
970 		metadata = arm_spe__get_metadata_by_cpu(spe, speq->cpu);
971 		if (!metadata)
972 			return;
973 
974 		midr = metadata[ARM_SPE_CPU_MIDR];
975 	}
976 
977 	for (i = 0; i < ARRAY_SIZE(data_source_handles); i++) {
978 		if (is_midr_in_range_list(midr, data_source_handles[i].midr_ranges)) {
979 			return data_source_handles[i].ds_synth(record, data_src);
980 		}
981 	}
982 
983 	return;
984 }
985 
986 static union perf_mem_data_src
987 arm_spe__synth_data_source(struct arm_spe_queue *speq,
988 			   const struct arm_spe_record *record)
989 {
990 	union perf_mem_data_src	data_src = {};
991 
992 	/* Only synthesize data source for LDST operations */
993 	if (!is_ldst_op(record->op))
994 		return data_src;
995 
996 	if (record->op & ARM_SPE_OP_LD)
997 		data_src.mem_op = PERF_MEM_OP_LOAD;
998 	else if (record->op & ARM_SPE_OP_ST)
999 		data_src.mem_op = PERF_MEM_OP_STORE;
1000 	else
1001 		return data_src;
1002 
1003 	arm_spe__synth_ds(speq, record, &data_src);
1004 	arm_spe__synth_memory_level(speq, record, &data_src);
1005 
1006 	if (record->type & (ARM_SPE_TLB_ACCESS | ARM_SPE_TLB_MISS)) {
1007 		data_src.mem_dtlb = PERF_MEM_TLB_WK;
1008 
1009 		if (record->type & ARM_SPE_TLB_MISS)
1010 			data_src.mem_dtlb |= PERF_MEM_TLB_MISS;
1011 		else
1012 			data_src.mem_dtlb |= PERF_MEM_TLB_HIT;
1013 	}
1014 
1015 	return data_src;
1016 }
1017 
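/*
 * Synthesize perf samples for the current record, honouring the synthesis
 * period and emitting one sample per enabled event type (cache, TLB, branch,
 * remote access, memory, instructions).
 */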
1018 static int arm_spe_sample(struct arm_spe_queue *speq)
1019 {
1020 	const struct arm_spe_record *record = &speq->decoder->record;
1021 	struct arm_spe *spe = speq->spe;
1022 	union perf_mem_data_src data_src;
1023 	int err;
1024 
1025 	/*
1026 	 * Discard all samples until period is reached
1027 	 */
1028 	speq->sample_count++;
1029 	if (speq->sample_count < spe->synth_opts.period)
1030 		return 0;
1031 	speq->sample_count = 0;
1032 
1033 	arm_spe__sample_flags(speq);
1034 	data_src = arm_spe__synth_data_source(speq, record);
1035 
1036 	if (spe->sample_flc) {
1037 		if (record->type & ARM_SPE_L1D_MISS) {
1038 			err = arm_spe__synth_mem_sample(speq, spe->l1d_miss_id,
1039 							data_src);
1040 			if (err)
1041 				return err;
1042 		}
1043 
1044 		if (record->type & ARM_SPE_L1D_ACCESS) {
1045 			err = arm_spe__synth_mem_sample(speq, spe->l1d_access_id,
1046 							data_src);
1047 			if (err)
1048 				return err;
1049 		}
1050 	}
1051 
1052 	if (spe->sample_llc) {
1053 		if (record->type & ARM_SPE_LLC_MISS) {
1054 			err = arm_spe__synth_mem_sample(speq, spe->llc_miss_id,
1055 							data_src);
1056 			if (err)
1057 				return err;
1058 		}
1059 
1060 		if (record->type & ARM_SPE_LLC_ACCESS) {
1061 			err = arm_spe__synth_mem_sample(speq, spe->llc_access_id,
1062 							data_src);
1063 			if (err)
1064 				return err;
1065 		}
1066 	}
1067 
1068 	if (spe->sample_tlb) {
1069 		if (record->type & ARM_SPE_TLB_MISS) {
1070 			err = arm_spe__synth_mem_sample(speq, spe->tlb_miss_id,
1071 							data_src);
1072 			if (err)
1073 				return err;
1074 		}
1075 
1076 		if (record->type & ARM_SPE_TLB_ACCESS) {
1077 			err = arm_spe__synth_mem_sample(speq, spe->tlb_access_id,
1078 							data_src);
1079 			if (err)
1080 				return err;
1081 		}
1082 	}
1083 
1084 	if (spe->synth_opts.last_branch &&
1085 	    (spe->sample_branch || spe->sample_instructions))
1086 		arm_spe__prep_branch_stack(speq);
1087 
1088 	if (spe->sample_branch && (record->op & ARM_SPE_OP_BRANCH_ERET)) {
1089 		err = arm_spe__synth_branch_sample(speq, spe->branch_id);
1090 		if (err)
1091 			return err;
1092 	}
1093 
1094 	if (spe->sample_remote_access &&
1095 	    (record->type & ARM_SPE_REMOTE_ACCESS)) {
1096 		err = arm_spe__synth_mem_sample(speq, spe->remote_access_id,
1097 						data_src);
1098 		if (err)
1099 			return err;
1100 	}
1101 
1102 	/*
1103 	 * When data_src is zero the record is not a memory operation, so
1104 	 * skip synthesizing a memory sample in this case.
1105 	 */
1106 	if (spe->sample_memory && is_ldst_op(record->op)) {
1107 		err = arm_spe__synth_mem_sample(speq, spe->memory_id, data_src);
1108 		if (err)
1109 			return err;
1110 	}
1111 
1112 	if (spe->sample_instructions) {
1113 		err = arm_spe__synth_instruction_sample(speq, spe->instructions_id, data_src);
1114 		if (err)
1115 			return err;
1116 	}
1117 
1118 	return 0;
1119 }
1120 
1121 static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp)
1122 {
1123 	struct arm_spe *spe = speq->spe;
1124 	struct arm_spe_record *record;
1125 	int ret;
1126 
1127 	if (!spe->kernel_start)
1128 		spe->kernel_start = machine__kernel_start(spe->machine);
1129 
1130 	while (1) {
1131 		/*
1132 		 * The usual logic is first to decode the packets and then, based
1133 		 * on the record, synthesize a sample; but here the flow is
1134 		 * reversed: arm_spe_sample() is called to synthesize samples
1135 		 * prior to arm_spe_decode().
1136 		 *
1137 		 * There are two reasons for this ordering:
1138 		 * 1. When the queue is set up in arm_spe__setup_queue(), trace
1139 		 * data has already been decoded and a record generated, but no
1140 		 * sample has been synthesized for it yet; so it is correct to
1141 		 * synthesize a sample here for that leftover record.
1142 		 * 2. After decoding trace data, the record timestamp must be
1143 		 * compared with the timestamp of the coming perf event; if the
1144 		 * record is later, this function bails out and pushes the record
1145 		 * onto the auxtrace heap, deferring sample synthesis for it
1146 		 * until the next call here; this correlates samples between Arm
1147 		 * SPE trace data and other perf events with correct time
1148 		 * ordering.
1149 		 */
1150 
1151 		/*
1152 		 * Update pid/tid info.
1153 		 */
1154 		record = &speq->decoder->record;
1155 		if (!spe->timeless_decoding && record->context_id != (u64)-1) {
1156 			ret = arm_spe_set_tid(speq, record->context_id);
1157 			if (ret)
1158 				return ret;
1159 
1160 			spe->use_ctx_pkt_for_pid = true;
1161 		}
1162 
1163 		ret = arm_spe_sample(speq);
1164 		if (ret)
1165 			return ret;
1166 
1167 		ret = arm_spe_decode(speq->decoder);
1168 		if (!ret) {
1169 			pr_debug("No data or all data has been processed.\n");
1170 			return 1;
1171 		}
1172 
1173 		/*
1174 		 * If an error is detected while decoding SPE trace data,
1175 		 * continue with the next trace data to find more records.
1176 		 */
1177 		if (ret < 0)
1178 			continue;
1179 
1180 		record = &speq->decoder->record;
1181 
1182 		/* Update timestamp for the last record */
1183 		if (record->timestamp > speq->timestamp)
1184 			speq->timestamp = record->timestamp;
1185 
1186 		/*
1187 		 * If the timestamp of the queue is later than the timestamp of
1188 		 * the coming perf event, bail out so that the perf event can
1189 		 * be processed first.
1190 		 */
1191 		if (!spe->timeless_decoding && speq->timestamp >= *timestamp) {
1192 			*timestamp = speq->timestamp;
1193 			return 0;
1194 		}
1195 	}
1196 
1197 	return 0;
1198 }
1199 
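/*
 * Allocate the per-queue decoding state on first use. For timed decoding,
 * decode up to the first valid record so the queue can be added to the
 * auxtrace heap ordered by timestamp.
 */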
1200 static int arm_spe__setup_queue(struct arm_spe *spe,
1201 			       struct auxtrace_queue *queue,
1202 			       unsigned int queue_nr)
1203 {
1204 	struct arm_spe_queue *speq = queue->priv;
1205 	struct arm_spe_record *record;
1206 
1207 	if (list_empty(&queue->head) || speq)
1208 		return 0;
1209 
1210 	speq = arm_spe__alloc_queue(spe, queue_nr);
1211 
1212 	if (!speq)
1213 		return -ENOMEM;
1214 
1215 	queue->priv = speq;
1216 
1217 	if (queue->cpu != -1)
1218 		speq->cpu = queue->cpu;
1219 
1220 	if (!speq->on_heap) {
1221 		int ret;
1222 
1223 		if (spe->timeless_decoding)
1224 			return 0;
1225 
1226 retry:
1227 		ret = arm_spe_decode(speq->decoder);
1228 
1229 		if (!ret)
1230 			return 0;
1231 
1232 		if (ret < 0)
1233 			goto retry;
1234 
1235 		record = &speq->decoder->record;
1236 
1237 		speq->timestamp = record->timestamp;
1238 		ret = auxtrace_heap__add(&spe->heap, queue_nr, speq->timestamp);
1239 		if (ret)
1240 			return ret;
1241 		speq->on_heap = true;
1242 	}
1243 
1244 	return 0;
1245 }
1246 
1247 static int arm_spe__setup_queues(struct arm_spe *spe)
1248 {
1249 	unsigned int i;
1250 	int ret;
1251 
1252 	for (i = 0; i < spe->queues.nr_queues; i++) {
1253 		ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i);
1254 		if (ret)
1255 			return ret;
1256 	}
1257 
1258 	return 0;
1259 }
1260 
1261 static int arm_spe__update_queues(struct arm_spe *spe)
1262 {
1263 	if (spe->queues.new_data) {
1264 		spe->queues.new_data = false;
1265 		return arm_spe__setup_queues(spe);
1266 	}
1267 
1268 	return 0;
1269 }
1270 
1271 static bool arm_spe__is_timeless_decoding(struct arm_spe *spe)
1272 {
1273 	struct evsel *evsel;
1274 	struct evlist *evlist = spe->session->evlist;
1275 	bool timeless_decoding = true;
1276 
1277 	/*
1278 	 * Loop through the list of events; if any of them has the time bit
1279 	 * set, decoding cannot be timeless.
1280 	 */
1281 	evlist__for_each_entry(evlist, evsel) {
1282 		if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
1283 			timeless_decoding = false;
1284 	}
1285 
1286 	return timeless_decoding;
1287 }
1288 
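/*
 * Process queues in timestamp order: pop the queue with the earliest record
 * from the auxtrace heap, run its decoder up to the given timestamp, and push
 * it back onto the heap while it still has data.
 */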
1289 static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp)
1290 {
1291 	unsigned int queue_nr;
1292 	u64 ts;
1293 	int ret;
1294 
1295 	while (1) {
1296 		struct auxtrace_queue *queue;
1297 		struct arm_spe_queue *speq;
1298 
1299 		if (!spe->heap.heap_cnt)
1300 			return 0;
1301 
1302 		if (spe->heap.heap_array[0].ordinal >= timestamp)
1303 			return 0;
1304 
1305 		queue_nr = spe->heap.heap_array[0].queue_nr;
1306 		queue = &spe->queues.queue_array[queue_nr];
1307 		speq = queue->priv;
1308 
1309 		auxtrace_heap__pop(&spe->heap);
1310 
1311 		if (spe->heap.heap_cnt) {
1312 			ts = spe->heap.heap_array[0].ordinal + 1;
1313 			if (ts > timestamp)
1314 				ts = timestamp;
1315 		} else {
1316 			ts = timestamp;
1317 		}
1318 
1319 		/*
1320 		 * A previous context-switch event has set pid/tid in the machine's context, so
1321 		 * here we need to update the pid/tid in the thread and SPE queue.
1322 		 */
1323 		if (!spe->use_ctx_pkt_for_pid)
1324 			arm_spe_set_pid_tid_cpu(spe, queue);
1325 
1326 		ret = arm_spe_run_decoder(speq, &ts);
1327 		if (ret < 0) {
1328 			auxtrace_heap__add(&spe->heap, queue_nr, ts);
1329 			return ret;
1330 		}
1331 
1332 		if (!ret) {
1333 			ret = auxtrace_heap__add(&spe->heap, queue_nr, ts);
1334 			if (ret < 0)
1335 				return ret;
1336 		} else {
1337 			speq->on_heap = false;
1338 		}
1339 	}
1340 
1341 	return 0;
1342 }
1343 
1344 static int arm_spe_process_timeless_queues(struct arm_spe *spe, pid_t tid,
1345 					    u64 time_)
1346 {
1347 	struct auxtrace_queues *queues = &spe->queues;
1348 	unsigned int i;
1349 	u64 ts = 0;
1350 
1351 	for (i = 0; i < queues->nr_queues; i++) {
1352 		struct auxtrace_queue *queue = &spe->queues.queue_array[i];
1353 		struct arm_spe_queue *speq = queue->priv;
1354 
1355 		if (speq && (tid == -1 || speq->tid == tid)) {
1356 			speq->time = time_;
1357 			arm_spe_set_pid_tid_cpu(spe, queue);
1358 			arm_spe_run_decoder(speq, &ts);
1359 		}
1360 	}
1361 	return 0;
1362 }
1363 
1364 static int arm_spe_context_switch(struct arm_spe *spe, union perf_event *event,
1365 				  struct perf_sample *sample)
1366 {
1367 	pid_t pid, tid;
1368 	int cpu;
1369 
1370 	if (!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT))
1371 		return 0;
1372 
1373 	pid = event->context_switch.next_prev_pid;
1374 	tid = event->context_switch.next_prev_tid;
1375 	cpu = sample->cpu;
1376 
1377 	if (tid == -1)
1378 		pr_warning("context_switch event has no tid\n");
1379 
1380 	return machine__set_current_tid(spe->machine, cpu, pid, tid);
1381 }
1382 
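/*
 * Main per-event hook: convert the event time to the SPE clock domain, then
 * drive either timeless queue processing (on task exit) or timestamp-ordered
 * processing, tracking context switches when CONTEXT packets are unavailable.
 */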
1383 static int arm_spe_process_event(struct perf_session *session,
1384 				 union perf_event *event,
1385 				 struct perf_sample *sample,
1386 				 const struct perf_tool *tool)
1387 {
1388 	int err = 0;
1389 	u64 timestamp;
1390 	struct arm_spe *spe = container_of(session->auxtrace,
1391 			struct arm_spe, auxtrace);
1392 
1393 	if (dump_trace)
1394 		return 0;
1395 
1396 	if (!tool->ordered_events) {
1397 		pr_err("SPE trace requires ordered events\n");
1398 		return -EINVAL;
1399 	}
1400 
1401 	if (sample->time && (sample->time != (u64) -1))
1402 		timestamp = perf_time_to_tsc(sample->time, &spe->tc);
1403 	else
1404 		timestamp = 0;
1405 
1406 	if (timestamp || spe->timeless_decoding) {
1407 		err = arm_spe__update_queues(spe);
1408 		if (err)
1409 			return err;
1410 	}
1411 
1412 	if (spe->timeless_decoding) {
1413 		if (event->header.type == PERF_RECORD_EXIT) {
1414 			err = arm_spe_process_timeless_queues(spe,
1415 					event->fork.tid,
1416 					sample->time);
1417 		}
1418 	} else if (timestamp) {
1419 		err = arm_spe_process_queues(spe, timestamp);
1420 		if (err)
1421 			return err;
1422 
1423 		if (!spe->use_ctx_pkt_for_pid &&
1424 		    (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE ||
1425 		    event->header.type == PERF_RECORD_SWITCH))
1426 			err = arm_spe_context_switch(spe, event, sample);
1427 	}
1428 
1429 	return err;
1430 }
1431 
1432 static int arm_spe_process_auxtrace_event(struct perf_session *session,
1433 					  union perf_event *event,
1434 					  const struct perf_tool *tool __maybe_unused)
1435 {
1436 	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
1437 					     auxtrace);
1438 
1439 	if (!spe->data_queued) {
1440 		struct auxtrace_buffer *buffer;
1441 		off_t data_offset;
1442 		int fd = perf_data__fd(session->data);
1443 		int err;
1444 
1445 		if (perf_data__is_pipe(session->data)) {
1446 			data_offset = 0;
1447 		} else {
1448 			data_offset = lseek(fd, 0, SEEK_CUR);
1449 			if (data_offset == -1)
1450 				return -errno;
1451 		}
1452 
1453 		err = auxtrace_queues__add_event(&spe->queues, session, event,
1454 				data_offset, &buffer);
1455 		if (err)
1456 			return err;
1457 
1458 		/* Dump here now that we have copied a piped trace out of the pipe */
1459 		if (dump_trace) {
1460 			if (auxtrace_buffer__get_data(buffer, fd)) {
1461 				arm_spe_dump_event(spe, buffer->data,
1462 						buffer->size);
1463 				auxtrace_buffer__put_data(buffer);
1464 			}
1465 		}
1466 	}
1467 
1468 	return 0;
1469 }
1470 
1471 static int arm_spe_flush(struct perf_session *session __maybe_unused,
1472 			 const struct perf_tool *tool __maybe_unused)
1473 {
1474 	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
1475 			auxtrace);
1476 	int ret;
1477 
1478 	if (dump_trace)
1479 		return 0;
1480 
1481 	if (!tool->ordered_events)
1482 		return -EINVAL;
1483 
1484 	ret = arm_spe__update_queues(spe);
1485 	if (ret < 0)
1486 		return ret;
1487 
1488 	if (spe->timeless_decoding)
1489 		return arm_spe_process_timeless_queues(spe, -1,
1490 				MAX_TIMESTAMP - 1);
1491 
1492 	ret = arm_spe_process_queues(spe, MAX_TIMESTAMP);
1493 	if (ret)
1494 		return ret;
1495 
1496 	if (!spe->use_ctx_pkt_for_pid)
1497 		ui__warning("Arm SPE CONTEXT packets not found in the traces.\n"
1498 			    "Matching of TIDs to SPE events could be inaccurate.\n");
1499 
1500 	return 0;
1501 }
1502 
1503 static u64 *arm_spe__alloc_per_cpu_metadata(u64 *buf, int per_cpu_size)
1504 {
1505 	u64 *metadata;
1506 
1507 	metadata = zalloc(per_cpu_size);
1508 	if (!metadata)
1509 		return NULL;
1510 
1511 	memcpy(metadata, buf, per_cpu_size);
1512 	return metadata;
1513 }
1514 
1515 static void arm_spe__free_metadata(u64 **metadata, int nr_cpu)
1516 {
1517 	int i;
1518 
1519 	for (i = 0; i < nr_cpu; i++)
1520 		zfree(&metadata[i]);
1521 	free(metadata);
1522 }
1523 
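/*
 * Parse the auxtrace info record into per-CPU metadata arrays. Version 1
 * metadata carries no per-CPU section, so only the version is reported and
 * NULL is returned.
 */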
1524 static u64 **arm_spe__alloc_metadata(struct perf_record_auxtrace_info *info,
1525 				     u64 *ver, int *nr_cpu)
1526 {
1527 	u64 *ptr = (u64 *)info->priv;
1528 	u64 metadata_size;
1529 	u64 **metadata = NULL;
1530 	int hdr_sz, per_cpu_sz, i;
1531 
1532 	metadata_size = info->header.size -
1533 		sizeof(struct perf_record_auxtrace_info);
1534 
1535 	/* Metadata version 1 */
1536 	if (metadata_size == ARM_SPE_AUXTRACE_V1_PRIV_SIZE) {
1537 		*ver = 1;
1538 		*nr_cpu = 0;
1539 		/* No per CPU metadata */
1540 		return NULL;
1541 	}
1542 
1543 	*ver = ptr[ARM_SPE_HEADER_VERSION];
1544 	hdr_sz = ptr[ARM_SPE_HEADER_SIZE];
1545 	*nr_cpu = ptr[ARM_SPE_CPUS_NUM];
1546 
1547 	metadata = calloc(*nr_cpu, sizeof(*metadata));
1548 	if (!metadata)
1549 		return NULL;
1550 
1551 	/* Locate the start address of per CPU metadata */
1552 	ptr += hdr_sz;
1553 	per_cpu_sz = (metadata_size - (hdr_sz * sizeof(u64))) / (*nr_cpu);
1554 
1555 	for (i = 0; i < *nr_cpu; i++) {
1556 		metadata[i] = arm_spe__alloc_per_cpu_metadata(ptr, per_cpu_sz);
1557 		if (!metadata[i])
1558 			goto err_per_cpu_metadata;
1559 
1560 		ptr += per_cpu_sz / sizeof(u64);
1561 	}
1562 
1563 	return metadata;
1564 
1565 err_per_cpu_metadata:
1566 	arm_spe__free_metadata(metadata, *nr_cpu);
1567 	return NULL;
1568 }
1569 
1570 static void arm_spe_free_queue(void *priv)
1571 {
1572 	struct arm_spe_queue *speq = priv;
1573 
1574 	if (!speq)
1575 		return;
1576 	thread__zput(speq->thread);
1577 	arm_spe_decoder_free(speq->decoder);
1578 	zfree(&speq->event_buf);
1579 	zfree(&speq->last_branch);
1580 	free(speq);
1581 }
1582 
1583 static void arm_spe_free_events(struct perf_session *session)
1584 {
1585 	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
1586 					     auxtrace);
1587 	struct auxtrace_queues *queues = &spe->queues;
1588 	unsigned int i;
1589 
1590 	for (i = 0; i < queues->nr_queues; i++) {
1591 		arm_spe_free_queue(queues->queue_array[i].priv);
1592 		queues->queue_array[i].priv = NULL;
1593 	}
1594 	auxtrace_queues__free(queues);
1595 }
1596 
1597 static void arm_spe_free(struct perf_session *session)
1598 {
1599 	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe,
1600 					     auxtrace);
1601 
1602 	auxtrace_heap__free(&spe->heap);
1603 	arm_spe_free_events(session);
1604 	session->auxtrace = NULL;
1605 	arm_spe__free_metadata(spe->metadata, spe->metadata_nr_cpu);
1606 	free(spe);
1607 }
1608 
1609 static bool arm_spe_evsel_is_auxtrace(struct perf_session *session,
1610 				      struct evsel *evsel)
1611 {
1612 	struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, auxtrace);
1613 
1614 	return evsel->core.attr.type == spe->pmu_type;
1615 }
1616 
1617 static const char * const metadata_hdr_v1_fmts[] = {
1618 	[ARM_SPE_PMU_TYPE]		= "  PMU Type           :%"PRId64"\n",
1619 	[ARM_SPE_PER_CPU_MMAPS]		= "  Per CPU mmaps      :%"PRId64"\n",
1620 };
1621 
1622 static const char * const metadata_hdr_fmts[] = {
1623 	[ARM_SPE_HEADER_VERSION]	= "  Header version     :%"PRId64"\n",
1624 	[ARM_SPE_HEADER_SIZE]		= "  Header size        :%"PRId64"\n",
1625 	[ARM_SPE_PMU_TYPE_V2]		= "  PMU type v2        :%"PRId64"\n",
1626 	[ARM_SPE_CPUS_NUM]		= "  CPU number         :%"PRId64"\n",
1627 };
1628 
1629 static const char * const metadata_per_cpu_fmts[] = {
1630 	[ARM_SPE_MAGIC]			= "    Magic            :0x%"PRIx64"\n",
1631 	[ARM_SPE_CPU]			= "    CPU #            :%"PRId64"\n",
1632 	[ARM_SPE_CPU_NR_PARAMS]		= "    Num of params    :%"PRId64"\n",
1633 	[ARM_SPE_CPU_MIDR]		= "    MIDR             :0x%"PRIx64"\n",
1634 	[ARM_SPE_CPU_PMU_TYPE]		= "    PMU Type         :%"PRId64"\n",
1635 	[ARM_SPE_CAP_MIN_IVAL]		= "    Min Interval     :%"PRId64"\n",
1636 	[ARM_SPE_CAP_EVENT_FILTER]	= "    Event Filter     :0x%"PRIx64"\n",
1637 };
1638 
1639 static void arm_spe_print_info(struct arm_spe *spe, __u64 *arr)
1640 {
1641 	unsigned int i, cpu, hdr_size, cpu_num, cpu_size;
1642 	const char * const *hdr_fmts;
1643 
1644 	if (!dump_trace)
1645 		return;
1646 
1647 	if (spe->metadata_ver == 1) {
1648 		cpu_num = 0;
1649 		hdr_size = ARM_SPE_AUXTRACE_V1_PRIV_MAX;
1650 		hdr_fmts = metadata_hdr_v1_fmts;
1651 	} else {
1652 		cpu_num = arr[ARM_SPE_CPUS_NUM];
1653 		hdr_size = arr[ARM_SPE_HEADER_SIZE];
1654 		hdr_fmts = metadata_hdr_fmts;
1655 	}
1656 
1657 	for (i = 0; i < hdr_size; i++)
1658 		fprintf(stdout, hdr_fmts[i], arr[i]);
1659 
1660 	arr += hdr_size;
1661 	for (cpu = 0; cpu < cpu_num; cpu++) {
1662 		/*
1663 		 * The parameters from ARM_SPE_MAGIC to ARM_SPE_CPU_NR_PARAMS
1664 		 * are fixed. The number of subsequent parameters is given by
1665 		 * the field 'ARM_SPE_CPU_NR_PARAMS'.
1666 		 */
1667 		cpu_size = (ARM_SPE_CPU_NR_PARAMS + 1) + arr[ARM_SPE_CPU_NR_PARAMS];
1668 		for (i = 0; i < cpu_size; i++)
1669 			fprintf(stdout, metadata_per_cpu_fmts[i], arr[i]);
1670 		arr += cpu_size;
1671 	}
1672 }
1673 
1674 static void arm_spe_set_event_name(struct evlist *evlist, u64 id,
1675 				    const char *name)
1676 {
1677 	struct evsel *evsel;
1678 
1679 	evlist__for_each_entry(evlist, evsel) {
1680 		if (evsel->core.id && evsel->core.id[0] == id) {
1681 			if (evsel->name)
1682 				zfree(&evsel->name);
1683 			evsel->name = strdup(name);
1684 			break;
1685 		}
1686 	}
1687 }
1688 
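/*
 * Set up the synthetic events requested via the itrace options (cache, TLB,
 * branch, remote access, memory, instructions), deriving their attributes
 * from the original SPE event and assigning each one a new sample id.
 */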
1689 static int
1690 arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
1691 {
1692 	struct evlist *evlist = session->evlist;
1693 	struct evsel *evsel;
1694 	struct perf_event_attr attr;
1695 	bool found = false;
1696 	u64 id;
1697 	int err;
1698 
1699 	evlist__for_each_entry(evlist, evsel) {
1700 		if (evsel->core.attr.type == spe->pmu_type) {
1701 			found = true;
1702 			break;
1703 		}
1704 	}
1705 
1706 	if (!found) {
1707 		pr_debug("No selected events with SPE trace data\n");
1708 		return 0;
1709 	}
1710 
1711 	memset(&attr, 0, sizeof(struct perf_event_attr));
1712 	attr.size = sizeof(struct perf_event_attr);
1713 	attr.type = PERF_TYPE_HARDWARE;
1714 	attr.sample_type = evsel->core.attr.sample_type &
1715 				(PERF_SAMPLE_MASK | PERF_SAMPLE_PHYS_ADDR);
1716 	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
1717 			    PERF_SAMPLE_PERIOD | PERF_SAMPLE_DATA_SRC |
1718 			    PERF_SAMPLE_WEIGHT | PERF_SAMPLE_ADDR;
1719 	if (spe->timeless_decoding)
1720 		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
1721 	else
1722 		attr.sample_type |= PERF_SAMPLE_TIME;
1723 
1724 	spe->sample_type = attr.sample_type;
1725 
1726 	attr.exclude_user = evsel->core.attr.exclude_user;
1727 	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
1728 	attr.exclude_hv = evsel->core.attr.exclude_hv;
1729 	attr.exclude_host = evsel->core.attr.exclude_host;
1730 	attr.exclude_guest = evsel->core.attr.exclude_guest;
1731 	attr.sample_id_all = evsel->core.attr.sample_id_all;
1732 	attr.read_format = evsel->core.attr.read_format;
1733 	attr.sample_period = spe->synth_opts.period;
1734 
1735 	/* create new id val to be a fixed offset from evsel id */
1736 	id = auxtrace_synth_id_range_start(evsel);
1737 
1738 	if (spe->synth_opts.flc) {
1739 		spe->sample_flc = true;
1740 
1741 		/* Level 1 data cache miss */
1742 		err = perf_session__deliver_synth_attr_event(session, &attr, id);
1743 		if (err)
1744 			return err;
1745 		spe->l1d_miss_id = id;
1746 		arm_spe_set_event_name(evlist, id, "l1d-miss");
1747 		id += 1;
1748 
1749 		/* Level 1 data cache access */
1750 		err = perf_session__deliver_synth_attr_event(session, &attr, id);
1751 		if (err)
1752 			return err;
1753 		spe->l1d_access_id = id;
1754 		arm_spe_set_event_name(evlist, id, "l1d-access");
1755 		id += 1;
1756 	}
1757 
1758 	if (spe->synth_opts.llc) {
1759 		spe->sample_llc = true;
1760 
1761 		/* Last level cache miss */
1762 		err = perf_session__deliver_synth_attr_event(session, &attr, id);
1763 		if (err)
1764 			return err;
1765 		spe->llc_miss_id = id;
1766 		arm_spe_set_event_name(evlist, id, "llc-miss");
1767 		id += 1;
1768 
1769 		/* Last level cache access */
1770 		err = perf_session__deliver_synth_attr_event(session, &attr, id);
1771 		if (err)
1772 			return err;
1773 		spe->llc_access_id = id;
1774 		arm_spe_set_event_name(evlist, id, "llc-access");
1775 		id += 1;
1776 	}
1777 
1778 	if (spe->synth_opts.tlb) {
1779 		spe->sample_tlb = true;
1780 
1781 		/* TLB miss */
1782 		err = perf_session__deliver_synth_attr_event(session, &attr, id);
1783 		if (err)
1784 			return err;
1785 		spe->tlb_miss_id = id;
1786 		arm_spe_set_event_name(evlist, id, "tlb-miss");
1787 		id += 1;
1788 
1789 		/* TLB access */
1790 		err = perf_session__deliver_synth_attr_event(session, &attr, id);
1791 		if (err)
1792 			return err;
1793 		spe->tlb_access_id = id;
1794 		arm_spe_set_event_name(evlist, id, "tlb-access");
1795 		id += 1;
1796 	}
1797 
1798 	if (spe->synth_opts.last_branch) {
1799 		if (spe->synth_opts.last_branch_sz > 2)
1800 			pr_debug("Arm SPE supports only two bstack entries (PBT+TGT).\n");
1801 
1802 		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
1803 		/*
1804 		 * We don't use the hardware index, but the sample generation
1805 		 * code uses the new format branch_stack with this field,
1806 		 * so the event attributes must indicate that it's present.
1807 		 */
1808 		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
1809 	}
1810 
1811 	if (spe->synth_opts.branches) {
1812 		spe->sample_branch = true;
1813 
1814 		/* Branch */
1815 		err = perf_session__deliver_synth_attr_event(session, &attr, id);
1816 		if (err)
1817 			return err;
1818 		spe->branch_id = id;
1819 		arm_spe_set_event_name(evlist, id, "branch");
1820 		id += 1;
1821 	}
1822 
1823 	if (spe->synth_opts.remote_access) {
1824 		spe->sample_remote_access = true;
1825 
1826 		/* Remote access */
1827 		err = perf_session__deliver_synth_attr_event(session, &attr, id);
1828 		if (err)
1829 			return err;
1830 		spe->remote_access_id = id;
1831 		arm_spe_set_event_name(evlist, id, "remote-access");
1832 		id += 1;
1833 	}
1834 
1835 	if (spe->synth_opts.mem) {
1836 		spe->sample_memory = true;
1837 
1838 		err = perf_session__deliver_synth_attr_event(session, &attr, id);
1839 		if (err)
1840 			return err;
1841 		spe->memory_id = id;
1842 		arm_spe_set_event_name(evlist, id, "memory");
1843 		id += 1;
1844 	}
1845 
1846 	if (spe->synth_opts.instructions) {
1847 		spe->sample_instructions = true;
1848 		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
1849 
1850 		err = perf_session__deliver_synth_attr_event(session, &attr, id);
1851 		if (err)
1852 			return err;
1853 		spe->instructions_id = id;
1854 		arm_spe_set_event_name(evlist, id, "instructions");
1855 	}
1856 
1857 	return 0;
1858 }
1859 
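/*
 * The system is treated as homogeneous only when every CPU in the metadata
 * reports the same MIDR.
 */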
1860 static bool arm_spe__is_homogeneous(u64 **metadata, int nr_cpu)
1861 {
1862 	u64 midr;
1863 	int i;
1864 
1865 	if (!nr_cpu)
1866 		return false;
1867 
1868 	for (i = 0; i < nr_cpu; i++) {
1869 		if (!metadata[i])
1870 			return false;
1871 
1872 		if (i == 0) {
1873 			midr = metadata[i][ARM_SPE_CPU_MIDR];
1874 			continue;
1875 		}
1876 
1877 		if (midr != metadata[i][ARM_SPE_CPU_MIDR])
1878 			return false;
1879 	}
1880 
1881 	return true;
1882 }
1883 
1884 int arm_spe_process_auxtrace_info(union perf_event *event,
1885 				  struct perf_session *session)
1886 {
1887 	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
1888 	size_t min_sz = ARM_SPE_AUXTRACE_V1_PRIV_SIZE;
1889 	struct perf_record_time_conv *tc = &session->time_conv;
1890 	struct arm_spe *spe;
1891 	u64 **metadata = NULL;
1892 	u64 metadata_ver;
1893 	int nr_cpu, err;
1894 
1895 	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
1896 					min_sz)
1897 		return -EINVAL;
1898 
1899 	metadata = arm_spe__alloc_metadata(auxtrace_info, &metadata_ver,
1900 					   &nr_cpu);
1901 	if (!metadata && metadata_ver != 1) {
1902 		pr_err("Failed to parse Arm SPE metadata.\n");
1903 		return -EINVAL;
1904 	}
1905 
1906 	spe = zalloc(sizeof(struct arm_spe));
1907 	if (!spe) {
1908 		err = -ENOMEM;
1909 		goto err_free_metadata;
1910 	}
1911 
1912 	err = auxtrace_queues__init(&spe->queues);
1913 	if (err)
1914 		goto err_free;
1915 
1916 	spe->session = session;
1917 	spe->machine = &session->machines.host; /* No kvm support */
1918 	spe->auxtrace_type = auxtrace_info->type;
1919 	if (metadata_ver == 1)
1920 		spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE];
1921 	else
1922 		spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE_V2];
1923 	spe->metadata = metadata;
1924 	spe->metadata_ver = metadata_ver;
1925 	spe->metadata_nr_cpu = nr_cpu;
1926 	spe->is_homogeneous = arm_spe__is_homogeneous(metadata, nr_cpu);
1927 
1928 	spe->timeless_decoding = arm_spe__is_timeless_decoding(spe);
1929 
1930 	/*
1931 	 * The synthesized event PERF_RECORD_TIME_CONV has been handled
1932 	 * earlier and the parameters for the hardware clock are stored in
1933 	 * the session context.  Pass these parameters to the struct
1934 	 * perf_tsc_conversion in "spe->tc", which is used later for
1935 	 * converting between the clock counter and timestamps.
1936 	 *
1937 	 * For backward compatibility, copy the fields starting from
1938 	 * "time_cycles" only if they are contained in the event.
1939 	 */
1940 	spe->tc.time_shift = tc->time_shift;
1941 	spe->tc.time_mult = tc->time_mult;
1942 	spe->tc.time_zero = tc->time_zero;
1943 
1944 	if (event_contains(*tc, time_cycles)) {
1945 		spe->tc.time_cycles = tc->time_cycles;
1946 		spe->tc.time_mask = tc->time_mask;
1947 		spe->tc.cap_user_time_zero = tc->cap_user_time_zero;
1948 		spe->tc.cap_user_time_short = tc->cap_user_time_short;
1949 	}
1950 
1951 	spe->auxtrace.process_event = arm_spe_process_event;
1952 	spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event;
1953 	spe->auxtrace.flush_events = arm_spe_flush;
1954 	spe->auxtrace.free_events = arm_spe_free_events;
1955 	spe->auxtrace.free = arm_spe_free;
1956 	spe->auxtrace.evsel_is_auxtrace = arm_spe_evsel_is_auxtrace;
1957 	session->auxtrace = &spe->auxtrace;
1958 
1959 	arm_spe_print_info(spe, &auxtrace_info->priv[0]);
1960 
1961 	if (dump_trace)
1962 		return 0;
1963 
1964 	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
1965 		spe->synth_opts = *session->itrace_synth_opts;
1966 	} else {
1967 		itrace_synth_opts__set_default(&spe->synth_opts, false);
1968 		/* Default nanoseconds period not supported */
1969 		spe->synth_opts.period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
1970 		spe->synth_opts.period = 1;
1971 	}
1972 
1973 	if (spe->synth_opts.period_type != PERF_ITRACE_PERIOD_INSTRUCTIONS) {
1974 		ui__error("You must only use i (instructions) --itrace period with Arm SPE. e.g --itrace=i1i\n");
1975 		err = -EINVAL;
1976 		goto err_free_queues;
1977 	}
1978 	if (spe->synth_opts.period > 1)
1979 		ui__warning("Arm SPE has a hardware-based sampling period.\n\n"
1980 			    "--itrace periods > 1i downsample by an interval of n SPE samples rather than n instructions.\n");
1981 
1982 	err = arm_spe_synth_events(spe, session);
1983 	if (err)
1984 		goto err_free_queues;
1985 
1986 	err = auxtrace_queues__process_index(&spe->queues, session);
1987 	if (err)
1988 		goto err_free_queues;
1989 
1990 	if (spe->queues.populated)
1991 		spe->data_queued = true;
1992 
1993 	return 0;
1994 
1995 err_free_queues:
1996 	auxtrace_queues__free(&spe->queues);
1997 	session->auxtrace = NULL;
1998 err_free:
1999 	free(spe);
2000 err_free_metadata:
2001 	arm_spe__free_metadata(metadata, nr_cpu);
2002 	return err;
2003 }
2004