// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <api/fs/fs.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "cs-etm.h"
#include "../../util/debug.h"
#include "../../util/record.h"
#include "../../util/auxtrace.h"
#include "../../util/cpumap.h"
#include "../../util/event.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/pmu.h"
#include "../../util/cs-etm.h"
#include "../../util/util.h"

#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>

struct cs_etm_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*cs_etm_pmu;
	struct evlist		*evlist;
	int			wrapped_cnt;
	bool			*wrapped;
	bool			snapshot_mode;
	size_t			snapshot_size;
};

static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};

static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
};

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);

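/*
 * Check through sysfs whether the tracer on @cpu advertises context ID
 * tracing (TRCIDR2.CIDSIZE) and, if so, set ETM_OPT_CTXTID in the event
 * configuration so the driver enables the feature.
 */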
static int cs_etm_set_context_id(struct auxtrace_record *itr,
				 struct evsel *evsel, int cpu)
{
	struct cs_etm_recording *ptr;
	struct perf_pmu *cs_etm_pmu;
	char path[PATH_MAX];
	int err = -EINVAL;
	u32 val;

	ptr = container_of(itr, struct cs_etm_recording, itr);
	cs_etm_pmu = ptr->cs_etm_pmu;

	if (!cs_etm_is_etmv4(itr, cpu))
		goto out;

	/* Get a handle on TRCIDR2 */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
	err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* There was a problem reading the file, bailing out */
	if (err != 1) {
		pr_err("%s: can't read file %s\n",
		       CORESIGHT_ETM_PMU_NAME, path);
		goto out;
	}

	/*
	 * TRCIDR2.CIDSIZE, bits [9:5], indicates whether contextID tracing
	 * is supported:
	 *  0b00000 Context ID tracing is not supported.
	 *  0b00100 Maximum of 32-bit Context ID size.
	 *  All other values are reserved.
	 */
	val = BMVAL(val, 5, 9);
	if (val != 0x4) {
		err = -EINVAL;
		goto out;
	}

	/* All good, let the kernel know */
	evsel->core.attr.config |= (1 << ETM_OPT_CTXTID);
	err = 0;

out:
	return err;
}

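/*
 * Check through sysfs whether the tracer on @cpu implements global
 * timestamping (TRCIDR0.TSSIZE) and, if so, set ETM_OPT_TS in the event
 * configuration.
 */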
static int cs_etm_set_timestamp(struct auxtrace_record *itr,
				struct evsel *evsel, int cpu)
{
	struct cs_etm_recording *ptr;
	struct perf_pmu *cs_etm_pmu;
	char path[PATH_MAX];
	int err = -EINVAL;
	u32 val;

	ptr = container_of(itr, struct cs_etm_recording, itr);
	cs_etm_pmu = ptr->cs_etm_pmu;

	if (!cs_etm_is_etmv4(itr, cpu))
		goto out;

	/* Get a handle on TRCIDR0 */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* There was a problem reading the file, bailing out */
	if (err != 1) {
		pr_err("%s: can't read file %s\n",
		       CORESIGHT_ETM_PMU_NAME, path);
		goto out;
	}

	/*
	 * TRCIDR0.TSSIZE, bits [28:24], indicates whether global timestamping
	 * is supported:
	 *  0b00000 Global timestamping is not implemented.
	 *  0b00110 Implementation supports a maximum timestamp of 48 bits.
	 *  0b01000 Implementation supports a maximum timestamp of 64 bits.
	 */
	val &= GENMASK(28, 24);
	if (!val) {
		err = -EINVAL;
		goto out;
	}

	/* All good, let the kernel know */
	evsel->core.attr.config |= (1 << ETM_OPT_TS);
	err = 0;

out:
	return err;
}

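/*
 * Apply the requested option bits to every online CPU the event is
 * scheduled on; fail if one of those CPUs lacks the feature or if an
 * unsupported option is requested.
 */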
static int cs_etm_set_option(struct auxtrace_record *itr,
			     struct evsel *evsel, u32 option)
{
	int i, err = -EINVAL;
	struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);

	/* Set option of each CPU we have */
	for (i = 0; i < cpu__max_cpu(); i++) {
		if (!cpu_map__has(event_cpus, i) ||
		    !cpu_map__has(online_cpus, i))
			continue;

		if (option & BIT(ETM_OPT_CTXTID)) {
			err = cs_etm_set_context_id(itr, evsel, i);
			if (err)
				goto out;
		}
		if (option & BIT(ETM_OPT_TS)) {
			err = cs_etm_set_timestamp(itr, evsel, i);
			if (err)
				goto out;
		}
		if (option & ~(BIT(ETM_OPT_CTXTID) | BIT(ETM_OPT_TS))) {
			/* Nothing else is currently supported */
			err = -EINVAL;
			goto out;
		}
	}

	err = 0;
out:
	perf_cpu_map__put(online_cpus);
	return err;
}

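/*
 * Parse the argument of the -S/--snapshot record option: an optional
 * snapshot buffer size in bytes.  A size of zero means the default sizing
 * logic in cs_etm_recording_options() applies.
 */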
static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
					 struct record_opts *opts,
					 const char *str)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;
	ptr->snapshot_size = snapshot_size;

	return 0;
}

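/*
 * Resolve a @sink=<name> config term to the hash the driver exposes under
 * sinks/<name> in sysfs and store it in perf_event_attr::config2, unless a
 * sink has already been selected there.
 */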
static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
				struct evsel *evsel)
{
	char msg[BUFSIZ], path[PATH_MAX], *sink;
	struct perf_evsel_config_term *term;
	int ret = -EINVAL;
	u32 hash;

	if (evsel->core.attr.config2 & GENMASK(31, 0))
		return 0;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
			continue;

		sink = term->val.drv_cfg;
		snprintf(path, PATH_MAX, "sinks/%s", sink);

		ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
		if (ret != 1) {
			pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
			       sink, perf_evsel__name(evsel), errno,
			       str_error_r(errno, msg, sizeof(msg)));
			return ret;
		}

		evsel->core.attr.config2 |= hash;
		return 0;
	}

	/*
	 * No sink was provided on the command line - for _now_ treat
	 * this as an error.
	 */
	return ret;
}

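/*
 * Validate and complete the record options for a CoreSight session: select
 * a sink, size the AUX ring buffer for snapshot or full-trace mode, enable
 * the required config options and add the tracking event that carries the
 * side-band data.
 */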
static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct evlist *evlist,
				    struct record_opts *opts)
{
	int ret;
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evsel *evsel, *cs_etm_evsel = NULL;
	struct perf_cpu_map *cpus = evlist->core.cpus;
	bool privileged = perf_event_paranoid_check(-1);
	int err = 0;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	if (perf_can_record_switch_events())
		opts->record_switch_events = true;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			evsel->core.attr.freq = 0;
			evsel->core.attr.sample_period = 1;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	/* No need to continue if no event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
	if (ret)
		return ret;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* we are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size was given to '-S' or '-m,', so go with
		 * the default
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
						opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * -Sxyz was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages provided by user */
	if (opts->auxtrace_mmap_pages) {
		unsigned int max_page = (KiB(128) / page_size);
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;

		if (!privileged &&
		    opts->auxtrace_mmap_pages > max_page) {
			opts->auxtrace_mmap_pages = max_page;
			pr_err("auxtrace too big, truncating to %d\n",
			       max_page);
		}

		if (!is_power_of_2(sz)) {
			pr_err("Invalid mmap size for %s: must be a power of 2\n",
			       CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	perf_evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * In the case of per-cpu mmaps, we need the CPU on the
	 * AUX event.  We also need the contextID in order to be notified
	 * when a context switch happened.
	 */
	if (!perf_cpu_map__empty(cpus)) {
		perf_evsel__set_sample_bit(cs_etm_evsel, CPU);

		err = cs_etm_set_option(itr, cs_etm_evsel,
					BIT(ETM_OPT_CTXTID) | BIT(ETM_OPT_TS));
		if (err)
			goto out;
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct evsel *tracking_evsel;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			goto out;

		tracking_evsel = perf_evlist__last(evlist);
		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->core.attr.freq = 0;
		tracking_evsel->core.attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!perf_cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
	}

out:
	return err;
}

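/*
 * Return the raw perf_event_attr::config of the session's CoreSight event,
 * which for ETMv3/PTM maps directly onto the ETMCR register layout.
 */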
static u64 cs_etm_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evlist *evlist = ptr->evlist;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			/*
			 * Variable perf_event_attr::config is assigned to
			 * ETMv3/PTM.  The bit fields have been made to match
			 * the ETMv3.5 ETMCR register specification.  See the
			 * PMU_FORMAT_ATTR() declarations in
			 * drivers/hwtracing/coresight/coresight-etm-perf.c
			 * for details.
			 */
			config = evsel->core.attr.config;
			break;
		}
	}

	return config;
}

#ifndef BIT
#define BIT(N) (1UL << (N))
#endif

static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	u64 config_opts = 0;

	/*
	 * The perf event variable config bits represent both
	 * the command line options and register programming
	 * bits in ETMv3/PTM. For ETMv4 we must remap options
	 * to real bits
	 */
	config_opts = cs_etm_get_config(itr);
	if (config_opts & BIT(ETM_OPT_CYCACC))
		config |= BIT(ETM4_CFG_BIT_CYCACC);
	if (config_opts & BIT(ETM_OPT_CTXTID))
		config |= BIT(ETM4_CFG_BIT_CTXTID);
	if (config_opts & BIT(ETM_OPT_TS))
		config |= BIT(ETM4_CFG_BIT_TS);
	if (config_opts & BIT(ETM_OPT_RETSTK))
		config |= BIT(ETM4_CFG_BIT_RETSTK);

	return config;
}

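/*
 * Compute the size of the AUXTRACE_INFO private data: a fixed-size header
 * plus one per-CPU block whose size depends on the tracer version (ETMv3
 * or ETMv4) found on each CPU that will be traced.
 */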
static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
		      struct evlist *evlist __maybe_unused)
{
	int i;
	int etmv3 = 0, etmv4 = 0;
	struct perf_cpu_map *event_cpus = evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);

	/* cpu map is not empty, we have specific CPUs to work with */
	if (!perf_cpu_map__empty(event_cpus)) {
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (!cpu_map__has(event_cpus, i) ||
			    !cpu_map__has(online_cpus, i))
				continue;

			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	} else {
		/* get configuration for all CPUs in the system */
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (!cpu_map__has(online_cpus, i))
				continue;

			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	}

	perf_cpu_map__put(online_cpus);

	return (CS_ETM_HEADER_SIZE +
	       (etmv4 * CS_ETMV4_PRIV_SIZE) +
	       (etmv3 * CS_ETMV3_PRIV_SIZE));
}

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
{
	bool ret = false;
	char path[PATH_MAX];
	int scan;
	unsigned int val;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Take any of the RO files for ETMv4 and see if it is present */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* The file was read successfully, we have a winner */
	if (scan == 1)
		ret = true;

	return ret;
}

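/* Read a read-only metadata register exposed by the driver through sysfs. */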
static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
{
	char pmu_path[PATH_MAX];
	int scan;
	unsigned int val = 0;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);

	scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
	if (scan != 1)
		pr_err("%s: error reading: %s\n", __func__, pmu_path);

	return val;
}

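/*
 * Fill the per-CPU block of the AUXTRACE_INFO record for @cpu and advance
 * @offset past it.  The layout must match what the decoding side in
 * util/cs-etm.c expects to find.
 */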
static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct perf_record_auxtrace_info *info)
{
	u32 increment;
	u64 magic;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* First see what kind of tracer this cpu is affined to */
	if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		/* Get trace configuration register */
		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
						cs_etmv4_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysfs */
		info->priv[*offset + CS_ETMV4_TRCIDR0] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
		info->priv[*offset + CS_ETMV4_TRCIDR1] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
		info->priv[*offset + CS_ETMV4_TRCIDR2] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
		info->priv[*offset + CS_ETMV4_TRCIDR8] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro
				      [CS_ETMV4_TRCAUTHSTATUS]);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysfs */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	/* Where the next CPU entry should start from */
	*offset += increment;
}

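/*
 * Assemble the complete AUXTRACE_INFO record: a session header followed by
 * one metadata block for each CPU being traced.
 */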
static int cs_etm_info_fill(struct auxtrace_record *itr,
			    struct perf_session *session,
			    struct perf_record_auxtrace_info *info,
			    size_t priv_size)
{
	int i;
	u32 offset;
	u64 nr_cpu, type;
	struct perf_cpu_map *cpu_map;
	struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
		return -EINVAL;

	if (!session->evlist->nr_mmaps)
		return -EINVAL;

	/* If the cpu_map is empty all online CPUs are involved */
	if (perf_cpu_map__empty(event_cpus)) {
		cpu_map = online_cpus;
	} else {
		/* Make sure all specified CPUs are online */
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (cpu_map__has(event_cpus, i) &&
			    !cpu_map__has(online_cpus, i))
				return -EINVAL;
		}

		cpu_map = event_cpus;
	}

	nr_cpu = perf_cpu_map__nr(cpu_map);
	/* Get PMU type as dynamically assigned by the core */
	type = cs_etm_pmu->type;

	/* First fill out the session header */
	info->type = PERF_AUXTRACE_CS_ETM;
	info->priv[CS_HEADER_VERSION_0] = 0;
	info->priv[CS_PMU_TYPE_CPUS] = type << 32;
	info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
	info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;

	offset = CS_ETM_SNAPSHOT + 1;

	for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
		if (cpu_map__has(cpu_map, i))
			cs_etm_get_metadata(i, &offset, itr, info);

	perf_cpu_map__put(online_cpus);

	return 0;
}

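/*
 * Grow the ptr->wrapped array so that it has a slot for AUX mmap @idx,
 * with all new slots initialised to false.
 */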
static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
{
	bool *wrapped;
	int cnt = ptr->wrapped_cnt;

	/* Make @ptr->wrapped as big as @idx */
	while (cnt <= idx)
		cnt++;

	/*
	 * Free'ed in cs_etm_recording_free().  Using realloc() to avoid
	 * cross compilation problems where the host's system supports
	 * reallocarray() but not the target.
	 */
	wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
	if (!wrapped)
		return -ENOMEM;

	/* Initialise all newly allocated slots, not just the last one */
	memset(wrapped + ptr->wrapped_cnt, 0,
	       (cnt - ptr->wrapped_cnt) * sizeof(bool));
	ptr->wrapped_cnt = cnt;
	ptr->wrapped = wrapped;

	return 0;
}

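/*
 * Heuristically decide whether the AUX ring buffer has wrapped: either
 * @head has grown past the buffer size, or non-zero trace data is found in
 * the last 512 bytes of the initially zero-filled buffer.
 */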
static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
				      size_t buffer_size, u64 head)
{
	u64 i, watermark;
	u64 *buf = (u64 *)buffer;
	size_t buf_size = buffer_size;

	/*
	 * We want to look at the very last 512 bytes (chosen arbitrarily) of
	 * the ring buffer.
	 */
	watermark = buf_size - 512;

	/*
	 * @head is continuously increasing - if its value is equal to or
	 * greater than the size of the ring buffer, it has wrapped around.
	 */
	if (head >= buffer_size)
		return true;

	/*
	 * The value of @head is somewhere within the size of the ring buffer.
	 * This can mean that there hasn't been enough data to fill the ring
	 * buffer yet or that the trace time was so long that @head has
	 * numerically wrapped around.  To find out, we need to check if we
	 * have data at the very end of the ring buffer.  We can reliably do
	 * this because mmap'ed pages are zeroed out and there is a fresh
	 * mapping with every new session.
	 */

	/* @head is less than 512 bytes from the end of the ring buffer */
	if (head > watermark)
		watermark = head;

	/*
	 * Speed things up by using 64 bit transactions (see "u64 *buf" above)
	 */
	watermark >>= 3;
	buf_size >>= 3;

	/*
	 * If we find trace data at the end of the ring buffer, @head has
	 * been there and has numerically wrapped around at least once.
	 */
	for (i = watermark; i < buf_size; i++)
		if (buf[i])
			return true;

	return false;
}

static int cs_etm_find_snapshot(struct auxtrace_record *itr,
				int idx, struct auxtrace_mmap *mm,
				unsigned char *data,
				u64 *head, u64 *old)
{
	int err;
	bool wrapped;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	/*
	 * Allocate memory to keep track of wrapping if this is the first
	 * time we deal with this *mm.
	 */
	if (idx >= ptr->wrapped_cnt) {
		err = cs_etm_alloc_wrapped_array(ptr, idx);
		if (err)
			return err;
	}

	/*
	 * Check to see if *head has wrapped around.  If it hasn't, only the
	 * amount of data between *head and *old is snapshotted to avoid
	 * bloating the perf.data file with zeros.  But as soon as *head has
	 * wrapped around, the entire size of the AUX ring buffer is taken.
	 */
	wrapped = ptr->wrapped[idx];
	if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
		wrapped = true;
		ptr->wrapped[idx] = true;
	}

	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	/* No wrap has occurred, we can just use *head and *old. */
	if (!wrapped)
		return 0;

	/*
	 * *head has wrapped around - adjust *head and *old to pick up the
	 * entire content of the AUX buffer.
	 */
	if (*head >= mm->len) {
		*old = *head - mm->len;
	} else {
		*head += mm->len;
		*old = *head - mm->len;
	}

	return 0;
}

static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__disable(evsel);
	}
	return -EINVAL;
}

static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__enable(evsel);
	}
	return -EINVAL;
}

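/*
 * Provide the reference value stored in each generated AUXTRACE record so
 * that buffers can be matched back to this session; two rand() calls are
 * combined because a single one does not cover all 64 bits.
 */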
static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
{
	return (((u64) rand() <<  0) & 0x00000000FFFFFFFFull) |
		(((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
}

static void cs_etm_recording_free(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	zfree(&ptr->wrapped);
	free(ptr);
}

static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return perf_evlist__enable_event_idx(ptr->evlist,
							     evsel, idx);
	}

	return -EINVAL;
}

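/*
 * Instantiate the CoreSight recording support: locate the cs_etm PMU and,
 * if present, allocate the recording state and wire up the auxtrace_record
 * callbacks used by perf record.
 */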
struct auxtrace_record *cs_etm_record_init(int *err)
{
	struct perf_pmu *cs_etm_pmu;
	struct cs_etm_recording *ptr;

	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);

	if (!cs_etm_pmu) {
		*err = -EINVAL;
		goto out;
	}

	ptr = zalloc(sizeof(struct cs_etm_recording));
	if (!ptr) {
		*err = -ENOMEM;
		goto out;
	}

	ptr->cs_etm_pmu			= cs_etm_pmu;
	ptr->itr.parse_snapshot_options	= cs_etm_parse_snapshot_options;
	ptr->itr.recording_options	= cs_etm_recording_options;
	ptr->itr.info_priv_size		= cs_etm_info_priv_size;
	ptr->itr.info_fill		= cs_etm_info_fill;
	ptr->itr.find_snapshot		= cs_etm_find_snapshot;
	ptr->itr.snapshot_start		= cs_etm_snapshot_start;
	ptr->itr.snapshot_finish	= cs_etm_snapshot_finish;
	ptr->itr.reference		= cs_etm_reference;
	ptr->itr.free			= cs_etm_recording_free;
	ptr->itr.read_finish		= cs_etm_read_finish;

	*err = 0;
	return &ptr->itr;
out:
	return NULL;
}
909