xref: /linux/drivers/hwtracing/coresight/coresight-trbe.c (revision feafee284579d29537a5a56ba8f23894f0463f3d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
 * This driver enables the Trace Buffer Extension (TRBE) as a per-cpu
 * coresight sink device, which can then pair with an appropriate per-cpu
 * coresight source device (ETE), thus generating the required trace data.
 * Trace can be enabled via the perf framework.
7  *
8  * The AUX buffer handling is inspired from Arm SPE PMU driver.
9  *
10  * Copyright (C) 2020 ARM Ltd.
11  *
12  * Author: Anshuman Khandual <anshuman.khandual@arm.com>
13  */
14 #define DRVNAME "arm_trbe"
15 
16 #define pr_fmt(fmt) DRVNAME ": " fmt
17 
18 #include <asm/barrier.h>
19 #include <asm/cpufeature.h>
20 #include <linux/kvm_host.h>
21 #include <linux/vmalloc.h>
22 
23 #include "coresight-self-hosted-trace.h"
24 #include "coresight-trbe.h"
25 
26 #define PERF_IDX2OFF(idx, buf) \
27 	((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
28 
29 /*
30  * A padding packet that will help the user space tools
31  * in skipping relevant sections in the captured trace
32  * data which could not be decoded. TRBE doesn't support
33  * formatting the trace data, unlike the legacy CoreSight
34  * sinks and thus we use ETE trace packets to pad the
35  * sections of the buffer.
36  */
37 #define ETE_IGNORE_PACKET		0x70
38 
39 /*
40  * Minimum amount of meaningful trace will contain:
41  * A-Sync, Trace Info, Trace On, Address, Atom.
42  * This is about 44bytes of ETE trace. To be on
43  * the safer side, we assume 64bytes is the minimum
44  * space required for a meaningful session, before
45  * we hit a "WRAP" event.
46  */
47 #define TRBE_TRACE_MIN_BUF_SIZE		64
48 
49 enum trbe_fault_action {
50 	TRBE_FAULT_ACT_WRAP,
51 	TRBE_FAULT_ACT_SPURIOUS,
52 	TRBE_FAULT_ACT_FATAL,
53 };
54 
struct trbe_buf {
	/*
	 * Even though trbe_base represents vmap()
	 * mapped allocated buffer's start address,
	 * it is kept as an unsigned long for various
	 * arithmetic and comparison operations and
	 * also to be consistent with the trbe_write
	 * and trbe_limit sibling pointers.
	 */
	unsigned long trbe_base;
	/* The base programmed into the TRBE */
	unsigned long trbe_hw_base;
	/* Limit programmed into TRBLIMITR_EL1 for the current run */
	unsigned long trbe_limit;
	/* Write pointer programmed into TRBPTR_EL1 for the current run */
	unsigned long trbe_write;
	/* Number of pages backing the AUX ring buffer */
	int nr_pages;
	/* Kernel addresses of the individual AUX buffer pages */
	void **pages;
	/* True when the perf session runs in snapshot mode */
	bool snapshot;
	/* Back pointer to the owning per-CPU TRBE instance data */
	struct trbe_cpudata *cpudata;
};
74 
75 /*
76  * TRBE erratum list
77  *
78  * The errata are defined in arm64 generic cpu_errata framework.
79  * Since the errata work arounds could be applied individually
80  * to the affected CPUs inside the TRBE driver, we need to know if
81  * a given CPU is affected by the erratum. Unlike the other erratum
82  * work arounds, TRBE driver needs to check multiple times during
83  * a trace session. Thus we need a quicker access to per-CPU
84  * errata and not issue costly this_cpu_has_cap() everytime.
85  * We keep a set of the affected errata in trbe_cpudata, per TRBE.
86  *
87  * We rely on the corresponding cpucaps to be defined for a given
88  * TRBE erratum. We map the given cpucap into a TRBE internal number
89  * to make the tracking of the errata lean.
90  *
91  * This helps in :
92  *   - Not duplicating the detection logic
93  *   - Streamlined detection of erratum across the system
94  */
95 #define TRBE_WORKAROUND_OVERWRITE_FILL_MODE	0
96 #define TRBE_WORKAROUND_WRITE_OUT_OF_RANGE	1
97 #define TRBE_NEEDS_DRAIN_AFTER_DISABLE		2
98 #define TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE	3
99 #define TRBE_IS_BROKEN				4
100 
101 static int trbe_errata_cpucaps[] = {
102 	[TRBE_WORKAROUND_OVERWRITE_FILL_MODE] = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
103 	[TRBE_WORKAROUND_WRITE_OUT_OF_RANGE] = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
104 	[TRBE_NEEDS_DRAIN_AFTER_DISABLE] = ARM64_WORKAROUND_2064142,
105 	[TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE] = ARM64_WORKAROUND_2038923,
106 	[TRBE_IS_BROKEN] = ARM64_WORKAROUND_1902691,
107 	-1,		/* Sentinel, must be the last entry */
108 };
109 
110 /* The total number of listed errata in trbe_errata_cpucaps */
111 #define TRBE_ERRATA_MAX			(ARRAY_SIZE(trbe_errata_cpucaps) - 1)
112 
113 /*
114  * Safe limit for the number of bytes that may be overwritten
115  * when ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE is triggered.
116  */
117 #define TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES	256
118 
119 /*
120  * struct trbe_cpudata: TRBE instance specific data
121  * @trbe_flag		- TRBE dirty/access flag support
122  * @trbe_hw_align	- Actual TRBE alignment required for TRBPTR_EL1.
123  * @trbe_align		- Software alignment used for the TRBPTR_EL1.
124  * @cpu			- CPU this TRBE belongs to.
125  * @mode		- Mode of current operation. (perf/disabled)
126  * @drvdata		- TRBE specific drvdata
127  * @errata		- Bit map for the errata on this TRBE.
128  */
129 struct trbe_cpudata {
130 	bool trbe_flag;
131 	u64 trbe_hw_align;
132 	u64 trbe_align;
133 	int cpu;
134 	enum cs_mode mode;
135 	struct trbe_buf *buf;
136 	struct trbe_drvdata *drvdata;
137 	DECLARE_BITMAP(errata, TRBE_ERRATA_MAX);
138 };
139 
/* Driver wide data, shared by all per-CPU TRBE instances */
struct trbe_drvdata {
	/* Per-CPU TRBE instance data */
	struct trbe_cpudata __percpu *cpudata;
	/* Per-CPU pointer to the in-flight perf AUX handle, if any */
	struct perf_output_handle * __percpu *handle;
	/* Node for the CPU hotplug state machine */
	struct hlist_node hotplug_node;
	/* TRBE maintenance interrupt (per-CPU IRQ) */
	int irq;
	/* CPUs on which a TRBE has been probed successfully */
	cpumask_t supported_cpus;
	/* Dynamically allocated CPU hotplug state for this driver */
	enum cpuhp_state trbe_online;
	/* Platform device that owns this driver instance */
	struct platform_device *pdev;
};
149 
trbe_check_errata(struct trbe_cpudata * cpudata)150 static void trbe_check_errata(struct trbe_cpudata *cpudata)
151 {
152 	int i;
153 
154 	for (i = 0; i < TRBE_ERRATA_MAX; i++) {
155 		int cap = trbe_errata_cpucaps[i];
156 
157 		if (WARN_ON_ONCE(cap < 0))
158 			return;
159 		if (this_cpu_has_cap(cap))
160 			set_bit(i, cpudata->errata);
161 	}
162 }
163 
trbe_has_erratum(struct trbe_cpudata * cpudata,int i)164 static bool trbe_has_erratum(struct trbe_cpudata *cpudata, int i)
165 {
166 	return (i < TRBE_ERRATA_MAX) && test_bit(i, cpudata->errata);
167 }
168 
trbe_may_overwrite_in_fill_mode(struct trbe_cpudata * cpudata)169 static bool trbe_may_overwrite_in_fill_mode(struct trbe_cpudata *cpudata)
170 {
171 	return trbe_has_erratum(cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE);
172 }
173 
trbe_may_write_out_of_range(struct trbe_cpudata * cpudata)174 static bool trbe_may_write_out_of_range(struct trbe_cpudata *cpudata)
175 {
176 	return trbe_has_erratum(cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE);
177 }
178 
trbe_needs_drain_after_disable(struct trbe_cpudata * cpudata)179 static bool trbe_needs_drain_after_disable(struct trbe_cpudata *cpudata)
180 {
181 	/*
182 	 * Errata affected TRBE implementation will need TSB CSYNC and
183 	 * DSB in order to prevent subsequent writes into certain TRBE
184 	 * system registers from being ignored and not effected.
185 	 */
186 	return trbe_has_erratum(cpudata, TRBE_NEEDS_DRAIN_AFTER_DISABLE);
187 }
188 
trbe_needs_ctxt_sync_after_enable(struct trbe_cpudata * cpudata)189 static bool trbe_needs_ctxt_sync_after_enable(struct trbe_cpudata *cpudata)
190 {
191 	/*
192 	 * Errata affected TRBE implementation will need an additional
193 	 * context synchronization in order to prevent an inconsistent
194 	 * TRBE prohibited region view on the CPU which could possibly
195 	 * corrupt the TRBE buffer or the TRBE state.
196 	 */
197 	return trbe_has_erratum(cpudata, TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE);
198 }
199 
trbe_is_broken(struct trbe_cpudata * cpudata)200 static bool trbe_is_broken(struct trbe_cpudata *cpudata)
201 {
202 	return trbe_has_erratum(cpudata, TRBE_IS_BROKEN);
203 }
204 
trbe_alloc_node(struct perf_event * event)205 static int trbe_alloc_node(struct perf_event *event)
206 {
207 	if (event->cpu == -1)
208 		return NUMA_NO_NODE;
209 	return cpu_to_node(event->cpu);
210 }
211 
/*
 * Push any trace data accepted by the TRBE out to memory: TSB CSYNC
 * completes the outstanding trace writes, and the following DSB
 * (non-shareable) orders them before subsequent accesses by this PE.
 */
static void trbe_drain_buffer(void)
{
	tsb_csync();
	dsb(nsh);
}
217 
/*
 * Enable the TRBE by setting TRBLIMITR_EL1.E on top of the caller
 * supplied @trblimitr value, and synchronize the enable. An extra
 * context synchronization is added for erratum affected parts.
 */
static void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
{
	/*
	 * Enable the TRBE without clearing LIMITPTR which
	 * might be required for fetching the buffer limits.
	 */
	trblimitr |= TRBLIMITR_EL1_E;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
	kvm_enable_trbe();

	/* Synchronize the TRBE enable event */
	isb();

	/* Erratum affected parts need one more context synchronization */
	if (trbe_needs_ctxt_sync_after_enable(cpudata))
		isb();
}
234 
/*
 * Disable the TRBE by clearing TRBLIMITR_EL1.E, draining afterwards
 * on erratum affected parts, and synchronize the disable.
 */
static void set_trbe_disabled(struct trbe_cpudata *cpudata)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	/*
	 * Disable the TRBE without clearing LIMITPTR which
	 * might be required for fetching the buffer limits.
	 */
	trblimitr &= ~TRBLIMITR_EL1_E;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
	kvm_disable_trbe();

	/*
	 * Erratum affected parts need a drain here so that subsequent
	 * writes to certain TRBE system registers take effect.
	 */
	if (trbe_needs_drain_after_disable(cpudata))
		trbe_drain_buffer();
	isb();
}
251 
/*
 * Drain the buffered trace data on this CPU and then disable the TRBE.
 * The drain runs first so that trace already accepted by the TRBE
 * reaches memory before collection stops.
 */
static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
{
	trbe_drain_buffer();
	set_trbe_disabled(cpudata);
}
257 
/*
 * Fully reset the TRBE on this CPU: disable it (clearing all of
 * TRBLIMITR_EL1, including the LIMIT pointer), drain any buffered
 * trace, and zero the remaining TRBE state registers.
 */
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
	write_sysreg_s(0, SYS_TRBLIMITR_EL1);
	trbe_drain_buffer();
	write_sysreg_s(0, SYS_TRBPTR_EL1);
	write_sysreg_s(0, SYS_TRBBASER_EL1);
	write_sysreg_s(0, SYS_TRBSR_EL1);
}
266 
/* Flag a TRBE WRAP event on the AUX handle without stopping the event */
static void trbe_report_wrap_event(struct perf_output_handle *handle)
{
	/*
	 * Mark the buffer to indicate that there was a WRAP event by
	 * setting the COLLISION flag. This indicates to the user that
	 * the TRBE trace collection was stopped without stopping the
	 * ETE and thus there might be some amount of trace that was
	 * lost between the time the WRAP was detected and the IRQ
	 * was consumed by the CPU.
	 *
	 * Setting the TRUNCATED flag would move the event to STOPPED
	 * state unnecessarily, even when there is space left in the
	 * ring buffer. Using the COLLISION flag doesn't have this side
	 * effect. We only set TRUNCATED flag when there is no space
	 * left in the ring buffer.
	 */
	perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
}
285 
/*
 * Abort the current trace session: disable the TRBE, mark the AUX
 * buffer TRUNCATED, close out the handle with zero size and drop the
 * per-CPU handle reference so the IRQ handler won't touch it again.
 */
static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);

	/*
	 * We cannot proceed with the buffer collection and we
	 * do not have any data for the current session. The
	 * etm_perf driver expects to close out the aux_buffer
	 * at event_stop(). So disable the TRBE here and leave
	 * the update_buffer() to return a 0 size.
	 */
	trbe_drain_and_disable_local(buf->cpudata);
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
	*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
}
302 
303 /*
304  * TRBE Buffer Management
305  *
306  * The TRBE buffer spans from the base pointer till the limit pointer. When enabled,
307  * it starts writing trace data from the write pointer onward till the limit pointer.
308  * When the write pointer reaches the address just before the limit pointer, it gets
309  * wrapped around again to the base pointer. This is called a TRBE wrap event, which
310  * generates a maintenance interrupt when operated in WRAP or FILL mode. This driver
311  * uses FILL mode, where the TRBE stops the trace collection at wrap event. The IRQ
312  * handler updates the AUX buffer and re-enables the TRBE with updated WRITE and
313  * LIMIT pointers.
314  *
315  *	Wrap around with an IRQ
316  *	------ < ------ < ------- < ----- < -----
317  *	|					|
318  *	------ > ------ > ------- > ----- > -----
319  *
320  *	+---------------+-----------------------+
321  *	|		|			|
322  *	+---------------+-----------------------+
323  *	Base Pointer	Write Pointer		Limit Pointer
324  *
 * The base and limit pointers always need to be PAGE_SIZE aligned. But the write
326  * pointer can be aligned to the implementation defined TRBE trace buffer alignment
327  * as captured in trbe_cpudata->trbe_align.
328  *
329  *
330  *		head		tail		wakeup
331  *	+---------------------------------------+----- ~ ~ ------
332  *	|$$$$$$$|################|$$$$$$$$$$$$$$|		|
333  *	+---------------------------------------+----- ~ ~ ------
334  *	Base Pointer	Write Pointer		Limit Pointer
335  *
 * The perf_output_handle indices (head, tail, wakeup) are monotonically increasing
 * values which track all the driver writes and user reads from the perf auxiliary
 * buffer. Generally [head..tail] is the area where the driver can write into unless
 * the wakeup is behind the tail. The enabled TRBE buffer span needs to be adjusted and
 * configured depending on the perf_output_handle indices, so that the driver does
 * not overwrite areas in the perf auxiliary buffer which are being, or are yet to be,
 * consumed from the user space. The enabled TRBE buffer area is a moving subset of
343  * the allocated perf auxiliary buffer.
344  */
345 
/* Fill @len bytes at @offset within the ring buffer with ETE IGNORE packets */
static void __trbe_pad_buf(struct trbe_buf *buf, u64 offset, int len)
{
	void *start = (void *)(buf->trbe_base + offset);

	memset(start, ETE_IGNORE_PACKET, len);
}
350 
trbe_pad_buf(struct perf_output_handle * handle,int len)351 static void trbe_pad_buf(struct perf_output_handle *handle, int len)
352 {
353 	struct trbe_buf *buf = etm_perf_sink_config(handle);
354 	u64 head = PERF_IDX2OFF(handle->head, buf);
355 
356 	__trbe_pad_buf(buf, head, len);
357 	if (!buf->snapshot)
358 		perf_aux_output_skip(handle, len);
359 }
360 
trbe_snapshot_offset(struct perf_output_handle * handle)361 static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle)
362 {
363 	struct trbe_buf *buf = etm_perf_sink_config(handle);
364 
365 	/*
366 	 * The ETE trace has alignment synchronization packets allowing
367 	 * the decoder to reset in case of an overflow or corruption.
368 	 * So we can use the entire buffer for the snapshot mode.
369 	 */
370 	return buf->nr_pages * PAGE_SIZE;
371 }
372 
trbe_min_trace_buf_size(struct perf_output_handle * handle)373 static u64 trbe_min_trace_buf_size(struct perf_output_handle *handle)
374 {
375 	u64 size = TRBE_TRACE_MIN_BUF_SIZE;
376 	struct trbe_buf *buf = etm_perf_sink_config(handle);
377 	struct trbe_cpudata *cpudata = buf->cpudata;
378 
379 	/*
380 	 * When the TRBE is affected by an erratum that could make it
381 	 * write to the next "virtually addressed" page beyond the LIMIT.
382 	 * We need to make sure there is always a PAGE after the LIMIT,
383 	 * within the buffer. Thus we ensure there is at least an extra
384 	 * page than normal. With this we could then adjust the LIMIT
385 	 * pointer down by a PAGE later.
386 	 */
387 	if (trbe_may_write_out_of_range(cpudata))
388 		size += PAGE_SIZE;
389 	return size;
390 }
391 
392 /*
393  * TRBE Limit Calculation
394  *
395  * The following markers are used to illustrate various TRBE buffer situations.
396  *
397  * $$$$ - Data area, unconsumed captured trace data, not to be overridden
398  * #### - Free area, enabled, trace will be written
399  * %%%% - Free area, disabled, trace will not be written
400  * ==== - Free area, padded with ETE_IGNORE_PACKET, trace will be skipped
401  */
static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;
	const u64 bufsize = buf->nr_pages * PAGE_SIZE;
	u64 limit = bufsize;
	u64 head, tail, wakeup;

	head = PERF_IDX2OFF(handle->head, buf);

	/*
	 *		head
	 *	------->|
	 *	|
	 *	head	TRBE align	tail
	 * +----|-------|---------------|-------+
	 * |$$$$|=======|###############|$$$$$$$|
	 * +----|-------|---------------|-------+
	 * trbe_base				trbe_base + nr_pages
	 *
	 * The perf aux buffer output head position can be misaligned depending
	 * on various factors including user space reads. In case misaligned,
	 * head needs to be aligned before TRBE can be configured. Pad the
	 * alignment gap with ETE_IGNORE_PACKET bytes that will be ignored by
	 * user tools and skip this section thus advancing the head.
	 */
	if (!IS_ALIGNED(head, cpudata->trbe_align)) {
		unsigned long delta = roundup(head, cpudata->trbe_align) - head;

		/* Never pad more than the space the handle has left */
		delta = min(delta, handle->size);
		trbe_pad_buf(handle, delta);
		head = PERF_IDX2OFF(handle->head, buf);
	}

	/*
	 *	head = tail (size = 0)
	 * +----|-------------------------------+
	 * |$$$$|$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$	|
	 * +----|-------------------------------+
	 * trbe_base				trbe_base + nr_pages
	 *
	 * The perf aux buffer does not have any space for the driver to write
	 * into.
	 */
	if (!handle->size)
		return 0;

	/* Compute the tail and wakeup indices now that we've aligned head */
	tail = PERF_IDX2OFF(handle->head + handle->size, buf);
	wakeup = PERF_IDX2OFF(handle->wakeup, buf);

	/*
	 * Let's calculate the buffer area which TRBE could write into. There
	 * are three possible scenarios here. Limit needs to be aligned with
	 * PAGE_SIZE per the TRBE requirement. Always avoid clobbering the
	 * unconsumed data.
	 *
	 * 1) head < tail
	 *
	 *	head			tail
	 * +----|-----------------------|-------+
	 * |$$$$|#######################|$$$$$$$|
	 * +----|-----------------------|-------+
	 * trbe_base			limit	trbe_base + nr_pages
	 *
	 * TRBE could write into [head..tail] area. Unless the tail is right at
	 * the end of the buffer, neither a wrap around nor an IRQ is expected
	 * while being enabled.
	 *
	 * 2) head == tail
	 *
	 *	head = tail (size > 0)
	 * +----|-------------------------------+
	 * |%%%%|###############################|
	 * +----|-------------------------------+
	 * trbe_base				limit = trbe_base + nr_pages
	 *
	 * TRBE should just write into [head..base + nr_pages] area even though
	 * the entire buffer is empty. Reason being, when the trace reaches the
	 * end of the buffer, it will just wrap around with an IRQ giving an
	 * opportunity to reconfigure the buffer.
	 *
	 * 3) tail < head
	 *
	 *	tail			head
	 * +----|-----------------------|-------+
	 * |%%%%|$$$$$$$$$$$$$$$$$$$$$$$|#######|
	 * +----|-----------------------|-------+
	 * trbe_base				limit = trbe_base + nr_pages
	 *
	 * TRBE should just write into [head..base + nr_pages] area even though
	 * the [trbe_base..tail] is also empty. Reason being, when the trace
	 * reaches the end of the buffer, it will just wrap around with an IRQ
	 * giving an opportunity to reconfigure the buffer.
	 */
	if (head < tail)
		limit = round_down(tail, PAGE_SIZE);

	/*
	 * Wakeup may be arbitrarily far into the future. If it's not in the
	 * current generation, either we'll wrap before hitting it, or it's
	 * in the past and has been handled already.
	 *
	 * If there's a wakeup before we wrap, arrange to be woken up by the
	 * page boundary following it. Keep the tail boundary if that's lower.
	 *
	 *	head		wakeup	tail
	 * +----|---------------|-------|-------+
	 * |$$$$|###############|%%%%%%%|$$$$$$$|
	 * +----|---------------|-------|-------+
	 * trbe_base		limit		trbe_base + nr_pages
	 */
	if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
		limit = min(limit, round_up(wakeup, PAGE_SIZE));

	/*
	 * There are two situations when this can happen i.e limit is before
	 * the head and hence TRBE cannot be configured.
	 *
	 * 1) head < tail (aligned down with PAGE_SIZE) and also they are both
	 * within the same PAGE size range.
	 *
	 *			PAGE_SIZE
	 *		|----------------------|
	 *
	 *		limit	head	tail
	 * +------------|------|--------|-------+
	 * |$$$$$$$$$$$$$$$$$$$|========|$$$$$$$|
	 * +------------|------|--------|-------+
	 * trbe_base				trbe_base + nr_pages
	 *
	 * 2) head < wakeup (aligned up with PAGE_SIZE) < tail and also both
	 * head and wakeup are within same PAGE size range.
	 *
	 *		PAGE_SIZE
	 *	|----------------------|
	 *
	 *	limit	head	wakeup  tail
	 * +----|------|-------|--------|-------+
	 * |$$$$$$$$$$$|=======|========|$$$$$$$|
	 * +----|------|-------|--------|-------+
	 * trbe_base				trbe_base + nr_pages
	 */
	if (limit > head)
		return limit;

	/* No usable area; pad out the rest and report no space */
	trbe_pad_buf(handle, handle->size);
	return 0;
}
550 
trbe_normal_offset(struct perf_output_handle * handle)551 static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
552 {
553 	struct trbe_buf *buf = etm_perf_sink_config(handle);
554 	u64 limit = __trbe_normal_offset(handle);
555 	u64 head = PERF_IDX2OFF(handle->head, buf);
556 
557 	/*
558 	 * If the head is too close to the limit and we don't
559 	 * have space for a meaningful run, we rather pad it
560 	 * and start fresh.
561 	 *
562 	 * We might have to do this more than once to make sure
563 	 * we have enough required space.
564 	 */
565 	while (limit && ((limit - head) < trbe_min_trace_buf_size(handle))) {
566 		trbe_pad_buf(handle, limit - head);
567 		limit = __trbe_normal_offset(handle);
568 		head = PERF_IDX2OFF(handle->head, buf);
569 	}
570 	return limit;
571 }
572 
compute_trbe_buffer_limit(struct perf_output_handle * handle)573 static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle)
574 {
575 	struct trbe_buf *buf = etm_perf_sink_config(handle);
576 	unsigned long offset;
577 
578 	if (buf->snapshot)
579 		offset = trbe_snapshot_offset(handle);
580 	else
581 		offset = trbe_normal_offset(handle);
582 	return buf->trbe_base + offset;
583 }
584 
clr_trbe_status(void)585 static void clr_trbe_status(void)
586 {
587 	u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
588 
589 	WARN_ON(is_trbe_enabled());
590 	trbsr &= ~TRBSR_EL1_IRQ;
591 	trbsr &= ~TRBSR_EL1_TRG;
592 	trbsr &= ~TRBSR_EL1_WRAP;
593 	trbsr &= ~TRBSR_EL1_EC_MASK;
594 	trbsr &= ~TRBSR_EL1_BSC_MASK;
595 	trbsr &= ~TRBSR_EL1_S;
596 	write_sysreg_s(trbsr, SYS_TRBSR_EL1);
597 }
598 
/*
 * Program TRBLIMITR_EL1 with the buffer limit from @buf, configure
 * FILL mode with the trigger ignored, and enable the TRBE.
 */
static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
	unsigned long addr = buf->trbe_limit;

	/* The LIMIT pointer must be PAGE aligned per the TRBE requirement */
	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_EL1_LIMIT_SHIFT)));
	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));

	trblimitr &= ~TRBLIMITR_EL1_nVM;
	trblimitr &= ~TRBLIMITR_EL1_FM_MASK;
	trblimitr &= ~TRBLIMITR_EL1_TM_MASK;
	trblimitr &= ~TRBLIMITR_EL1_LIMIT_MASK;

	/*
	 * Fill trace buffer mode is used here while configuring the
	 * TRBE for trace capture. In this particular mode, the trace
	 * collection is stopped and a maintenance interrupt is raised
	 * when the current write pointer wraps. This pause in trace
	 * collection gives the software an opportunity to capture the
	 * trace data in the interrupt handler, before reconfiguring
	 * the TRBE.
	 */
	trblimitr |= (TRBLIMITR_EL1_FM_FILL << TRBLIMITR_EL1_FM_SHIFT) &
		     TRBLIMITR_EL1_FM_MASK;

	/*
	 * Trigger mode is not used here while configuring the TRBE for
	 * the trace capture. Hence just keep this in the ignore mode.
	 */
	trblimitr |= (TRBLIMITR_EL1_TM_IGNR << TRBLIMITR_EL1_TM_SHIFT) &
		     TRBLIMITR_EL1_TM_MASK;
	trblimitr |= (addr & PAGE_MASK);
	set_trbe_enabled(buf->cpudata, trblimitr);
}
633 
/*
 * Program the TRBE base/write pointers from @buf and enable tracing.
 * The pointers must satisfy base <= write < limit within the buffer.
 */
static void trbe_enable_hw(struct trbe_buf *buf)
{
	WARN_ON(buf->trbe_hw_base < buf->trbe_base);
	WARN_ON(buf->trbe_write < buf->trbe_hw_base);
	WARN_ON(buf->trbe_write >= buf->trbe_limit);
	/* Make sure the TRBE is quiescent before reprogramming it */
	set_trbe_disabled(buf->cpudata);
	clr_trbe_status();
	set_trbe_base_pointer(buf->trbe_hw_base);
	set_trbe_write_pointer(buf->trbe_write);

	/*
	 * Synchronize all the register updates
	 * till now before enabling the TRBE.
	 */
	isb();
	set_trbe_limit_pointer_enabled(buf);
}
651 
/*
 * Classify a TRBE maintenance event from the @trbsr status into the
 * action the driver must take: handle a buffer WRAP, treat it as a
 * fatal error, or dismiss it as spurious.
 */
static enum trbe_fault_action trbe_get_fault_act(struct perf_output_handle *handle,
						 u64 trbsr)
{
	int ec = get_trbe_ec(trbsr);
	int bsc = get_trbe_bsc(trbsr);
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;

	/* The TRBE should have stopped before the status is inspected */
	WARN_ON(is_trbe_running(trbsr));
	if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr))
		return TRBE_FAULT_ACT_FATAL;

	if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT))
		return TRBE_FAULT_ACT_FATAL;

	/*
	 * If the trbe is affected by TRBE_WORKAROUND_OVERWRITE_FILL_MODE,
	 * it might write data after a WRAP event in the fill mode.
	 * Thus the check TRBPTR == TRBBASER will not be honored.
	 */
	if ((is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) &&
	    (trbe_may_overwrite_in_fill_mode(cpudata) ||
	     get_trbe_write_pointer() == get_trbe_base_pointer()))
		return TRBE_FAULT_ACT_WRAP;

	return TRBE_FAULT_ACT_SPURIOUS;
}
679 
/*
 * Compute the amount of trace collected in the current run, as the
 * distance from the handle's head to the effective write pointer.
 * Applies the OVERWRITE_FILL_MODE erratum fix-up by padding over the
 * potentially clobbered bytes at the start of the run.
 */
static unsigned long trbe_get_trace_size(struct perf_output_handle *handle,
					 struct trbe_buf *buf, bool wrap)
{
	u64 write;
	u64 start_off, end_off;
	u64 size;
	u64 overwrite_skip = TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;

	/*
	 * If the TRBE has wrapped around the write pointer has
	 * wrapped and should be treated as limit.
	 *
	 * When the TRBE is affected by TRBE_WORKAROUND_WRITE_OUT_OF_RANGE,
	 * it may write up to 64 bytes beyond the "LIMIT". The driver already
	 * keeps a valid page next to the LIMIT and we could potentially
	 * consume the trace data that may have been collected there. But we
	 * cannot be really sure it is available, and the TRBPTR may not
	 * indicate the same. Also, affected cores are also affected by another
	 * erratum which forces the PAGE_SIZE alignment on the TRBPTR, and thus
	 * could potentially pad an entire PAGE_SIZE - 64 bytes, to get those
	 * 64 bytes. Thus we ignore the potential triggering of the erratum
	 * on WRAP and limit the data to LIMIT.
	 */
	if (wrap)
		write = get_trbe_limit_pointer();
	else
		write = get_trbe_write_pointer();

	/*
	 * TRBE may use a different base address than the base
	 * of the ring buffer. Thus use the beginning of the ring
	 * buffer to compute the offsets.
	 */
	end_off = write - buf->trbe_base;
	start_off = PERF_IDX2OFF(handle->head, buf);

	/* A write pointer behind the head would indicate a driver bug */
	if (WARN_ON_ONCE(end_off < start_off))
		return 0;

	size = end_off - start_off;
	/*
	 * If the TRBE is affected by the following erratum, we must fill
	 * the space we skipped with IGNORE packets. And we are always
	 * guaranteed to have at least a PAGE_SIZE space in the buffer.
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE) &&
	    !WARN_ON(size < overwrite_skip))
		__trbe_pad_buf(buf, start_off, overwrite_skip);

	return size;
}
731 
/*
 * Allocate the sink buffer state for a perf session: a trbe_buf
 * descriptor plus a contiguous kernel VA mapping (vmap) of the
 * discontiguous perf AUX pages in @pages.
 *
 * Returns the new trbe_buf, NULL when fewer than two pages are given,
 * or ERR_PTR(-ENOMEM) on allocation failure. The caller must release
 * the result with arm_trbe_free_buffer().
 *
 * Uses the idiomatic goto-based cleanup chain so each resource is
 * released exactly once on every error path.
 */
static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
				   struct perf_event *event, void **pages,
				   int nr_pages, bool snapshot)
{
	struct trbe_buf *buf;
	struct page **pglist;
	int i;

	/*
	 * TRBE LIMIT and TRBE WRITE pointers must be page aligned. But with
	 * just a single page, there would not be any room left while writing
	 * into a partially filled TRBE buffer after the page size alignment.
	 * Hence restrict the minimum buffer size as two pages.
	 */
	if (nr_pages < 2)
		return NULL;

	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* Temporary page array only needed for the vmap() call below */
	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
	if (!pglist)
		goto free_buf;

	for (i = 0; i < nr_pages; i++)
		pglist[i] = virt_to_page(pages[i]);

	/* Map the AUX pages into one virtually contiguous range */
	buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!buf->trbe_base)
		goto free_pglist;

	buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
	buf->trbe_write = buf->trbe_base;
	buf->snapshot = snapshot;
	buf->nr_pages = nr_pages;
	buf->pages = pages;
	kfree(pglist);
	return buf;

free_pglist:
	kfree(pglist);
free_buf:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}
776 
arm_trbe_free_buffer(void * config)777 static void arm_trbe_free_buffer(void *config)
778 {
779 	struct trbe_buf *buf = config;
780 
781 	vunmap((void *)buf->trbe_base);
782 	kfree(buf);
783 }
784 
/*
 * Close out the current TRBE run and report how much trace was
 * collected. Runs with IRQs disabled to avoid racing against the TRBE
 * maintenance IRQ handler over the shared perf handle; any pending
 * maintenance event is consumed here instead.
 */
static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
					    struct perf_output_handle *handle,
					    void *config)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct trbe_buf *buf = config;
	enum trbe_fault_action act;
	unsigned long size, status;
	unsigned long flags;
	bool wrap = false;

	WARN_ON(buf->cpudata != cpudata);
	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	if (cpudata->mode != CS_MODE_PERF)
		return 0;

	/*
	 * We are about to disable the TRBE. And this could in turn
	 * fill up the buffer, triggering an IRQ. This could be consumed
	 * by the PE asynchronously, causing a race here against
	 * the IRQ handler in closing out the handle. So, let us
	 * make sure the IRQ can't trigger while we are collecting
	 * the buffer. We also make sure that a WRAP event is handled
	 * accordingly.
	 */
	local_irq_save(flags);

	/*
	 * If the TRBE was disabled due to lack of space in the AUX buffer or a
	 * spurious fault, the driver leaves it disabled, truncating the buffer.
	 * Since the etm_perf driver expects to close out the AUX buffer, the
	 * driver skips it. Thus, just pass in 0 size here to indicate that the
	 * buffer was truncated.
	 */
	if (!is_trbe_enabled()) {
		size = 0;
		goto done;
	}
	/*
	 * The perf handle structure needs to be shared with the TRBE IRQ
	 * handler for capturing trace data and restarting the handle. There is
	 * a probability of an undefined reference based crash when an etm event
	 * is being stopped while a TRBE IRQ is also getting processed. This
	 * happens due to the release of the perf handle via
	 * perf_aux_output_end() in etm_event_stop(). Stopping the TRBE here
	 * will ensure that no IRQ could be generated when the perf handle gets
	 * freed in etm_event_stop().
	 */
	trbe_drain_and_disable_local(cpudata);

	/* Check if there is a pending interrupt and handle it here */
	status = read_sysreg_s(SYS_TRBSR_EL1);
	if (is_trbe_irq(status)) {

		/*
		 * Now that we are handling the IRQ here, clear the IRQ
		 * from the status, to let the irq handler know that it
		 * is taken care of.
		 */
		clr_trbe_irq();
		isb();

		act = trbe_get_fault_act(handle, status);
		/*
		 * If this was not due to a WRAP event, we have some
		 * errors and as such buffer is empty.
		 */
		if (act != TRBE_FAULT_ACT_WRAP) {
			size = 0;
			goto done;
		}

		trbe_report_wrap_event(handle);
		wrap = true;
	}

	size = trbe_get_trace_size(handle, buf, wrap);

done:
	local_irq_restore(flags);

	/* In snapshot mode the driver, not perf core, advances the head */
	if (buf->snapshot)
		handle->head += size;
	return size;
}
871 
872 
/*
 * Apply the erratum work-arounds that must take effect before the TRBE is
 * enabled, adjusting the proposed trbe_hw_base, trbe_write and trbe_limit
 * in @buf.  Returns 0 on success, or -EINVAL if the pre-computed buffer
 * geometry violates a work-around's alignment/space requirements.
 */
static int trbe_apply_work_around_before_enable(struct trbe_buf *buf)
{
	/*
	 * TRBE_WORKAROUND_OVERWRITE_FILL_MODE causes the TRBE to overwrite a few cache
	 * line size from the "TRBBASER_EL1" in the event of a "FILL".
	 * Thus, we could lose some amount of the trace at the base.
	 *
	 * Before Fix:
	 *
	 *  normal-BASE     head (normal-TRBPTR)         tail (normal-LIMIT)
	 *  |                   \/                       /
	 *   -------------------------------------------------------------
	 *  |   Pg0      |   Pg1       |           |          |  PgN     |
	 *   -------------------------------------------------------------
	 *
	 * In the normal course of action, we would set the TRBBASER to the
	 * beginning of the ring-buffer (normal-BASE). But with the erratum,
	 * the TRBE could overwrite the contents at the "normal-BASE", after
	 * hitting the "normal-LIMIT", since it doesn't stop as expected. And
	 * this is wrong. This could result in overwriting trace collected in
	 * one of the previous runs, being consumed by the user. So we must
	 * always make sure that the TRBBASER is within the region
	 * [head, head+size]. Note that TRBBASER must be PAGE aligned.
	 *
	 *  After moving the BASE:
	 *
	 *  normal-BASE     head (normal-TRBPTR)         tail (normal-LIMIT)
	 *  |                   \/                       /
	 *   -------------------------------------------------------------
	 *  |         |          |xyzdef.     |..   tuvw|                |
	 *   -------------------------------------------------------------
	 *                      /
	 *              New-BASER
	 *
	 * Also, we would set the TRBPTR to head (after adjusting for
	 * alignment) at normal-PTR. This would mean that the last few bytes
	 * of the trace (say, "xyz") might overwrite the first few bytes of
	 * trace written ("abc"). More importantly they will appear in what
	 * userspace sees as the beginning of the trace, which is wrong. We may
	 * not always have space to move the latest trace "xyz" to the correct
	 * order as it must appear beyond the LIMIT. (i.e, [head..head+size]).
	 * Thus it is easier to ignore those bytes than to complicate the
	 * driver to move it, assuming that the erratum was triggered and
	 * doing additional checks to see if there is indeed allowed space at
	 * TRBLIMITR.LIMIT.
	 *
	 *  Thus the full workaround will move the BASE and the PTR and would
	 *  look like (after padding at the skipped bytes at the end of
	 *  session) :
	 *
	 *  normal-BASE     head (normal-TRBPTR)         tail (normal-LIMIT)
	 *  |                   \/                       /
	 *   -------------------------------------------------------------
	 *  |         |          |///abc..     |..  rst|                |
	 *   -------------------------------------------------------------
	 *                      /    |
	 *              New-BASER    New-TRBPTR
	 *
	 * To summarize, with the work around:
	 *
	 *  - We always align the offset for the next session to PAGE_SIZE
	 *    (This is to ensure we can program the TRBBASER to this offset
	 *    within the region [head...head+size]).
	 *
	 *  - At TRBE enable:
	 *     - Set the TRBBASER to the page aligned offset of the current
	 *       proposed write offset. (which is guaranteed to be aligned
	 *       as above)
	 *     - Move the TRBPTR to skip first 256bytes (that might be
	 *       overwritten with the erratum). This ensures that the trace
	 *       generated in the session is not re-written.
	 *
	 *  - At trace collection:
	 *     - Pad the 256bytes skipped above again with IGNORE packets.
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE)) {
		if (WARN_ON(!IS_ALIGNED(buf->trbe_write, PAGE_SIZE)))
			return -EINVAL;
		buf->trbe_hw_base = buf->trbe_write;
		buf->trbe_write += TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;
	}

	/*
	 * TRBE_WORKAROUND_WRITE_OUT_OF_RANGE could cause the TRBE to write to
	 * the next page after the TRBLIMITR.LIMIT. For perf, the "next page"
	 * may be:
	 *     - The page beyond the ring buffer. This could mean, TRBE could
	 *       corrupt another entity (kernel / user)
	 *     - A portion of the "ring buffer" consumed by the userspace.
	 *       i.e, a page outside [head, head + size].
	 *
	 * We work around this by:
	 *     - Making sure that we have at least an extra space of PAGE left
	 *       in the ring buffer [head, head + size], than we normally do
	 *       without the erratum. See trbe_min_trace_buf_size().
	 *
	 *     - Adjust the TRBLIMITR.LIMIT to leave the extra PAGE outside
	 *       the TRBE's range (i.e [TRBBASER, TRBLIMITR.LIMIT] ).
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE)) {
		s64 space = buf->trbe_limit - buf->trbe_write;
		/*
		 * We must have more than a PAGE_SIZE worth space in the proposed
		 * range for the TRBE.
		 */
		if (WARN_ON(space <= PAGE_SIZE ||
			    !IS_ALIGNED(buf->trbe_limit, PAGE_SIZE)))
			return -EINVAL;
		buf->trbe_limit -= PAGE_SIZE;
	}

	return 0;
}
986 
__arm_trbe_enable(struct trbe_buf * buf,struct perf_output_handle * handle)987 static int __arm_trbe_enable(struct trbe_buf *buf,
988 			     struct perf_output_handle *handle)
989 {
990 	int ret = 0;
991 
992 	perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
993 	buf->trbe_limit = compute_trbe_buffer_limit(handle);
994 	buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
995 	if (buf->trbe_limit == buf->trbe_base) {
996 		ret = -ENOSPC;
997 		goto err;
998 	}
999 	/* Set the base of the TRBE to the buffer base */
1000 	buf->trbe_hw_base = buf->trbe_base;
1001 
1002 	ret = trbe_apply_work_around_before_enable(buf);
1003 	if (ret)
1004 		goto err;
1005 
1006 	*this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
1007 	trbe_enable_hw(buf);
1008 	return 0;
1009 err:
1010 	trbe_stop_and_truncate_event(handle);
1011 	return ret;
1012 }
1013 
arm_trbe_enable(struct coresight_device * csdev,enum cs_mode mode,void * data)1014 static int arm_trbe_enable(struct coresight_device *csdev, enum cs_mode mode,
1015 			   void *data)
1016 {
1017 	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1018 	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
1019 	struct perf_output_handle *handle = data;
1020 	struct trbe_buf *buf = etm_perf_sink_config(handle);
1021 
1022 	WARN_ON(cpudata->cpu != smp_processor_id());
1023 	WARN_ON(cpudata->drvdata != drvdata);
1024 	if (mode != CS_MODE_PERF)
1025 		return -EINVAL;
1026 
1027 	cpudata->buf = buf;
1028 	cpudata->mode = mode;
1029 	buf->cpudata = cpudata;
1030 
1031 	return __arm_trbe_enable(buf, handle);
1032 }
1033 
arm_trbe_disable(struct coresight_device * csdev)1034 static int arm_trbe_disable(struct coresight_device *csdev)
1035 {
1036 	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1037 	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
1038 	struct trbe_buf *buf = cpudata->buf;
1039 
1040 	WARN_ON(buf->cpudata != cpudata);
1041 	WARN_ON(cpudata->cpu != smp_processor_id());
1042 	WARN_ON(cpudata->drvdata != drvdata);
1043 	if (cpudata->mode != CS_MODE_PERF)
1044 		return -EINVAL;
1045 
1046 	trbe_drain_and_disable_local(cpudata);
1047 	buf->cpudata = NULL;
1048 	cpudata->buf = NULL;
1049 	cpudata->mode = CS_MODE_DISABLED;
1050 	return 0;
1051 }
1052 
trbe_handle_spurious(struct perf_output_handle * handle)1053 static void trbe_handle_spurious(struct perf_output_handle *handle)
1054 {
1055 	struct trbe_buf *buf = etm_perf_sink_config(handle);
1056 	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
1057 
1058 	/*
1059 	 * If the IRQ was spurious, simply re-enable the TRBE
1060 	 * back without modifying the buffer parameters to
1061 	 * retain the trace collected so far.
1062 	 */
1063 	set_trbe_enabled(buf->cpudata, trblimitr);
1064 }
1065 
/*
 * Handle a TRBE wrap (buffer full) event: collect the trace gathered so
 * far, close out the current AUX handle and try to restart tracing on a
 * fresh handle. Returns 0 on a successful restart, negative otherwise
 * (in which case the TRBE is left disabled).
 */
static int trbe_handle_overflow(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	unsigned long size;
	struct etm_event_data *event_data;

	size = trbe_get_trace_size(handle, buf, true);
	/* In snapshot mode the driver advances the head itself */
	if (buf->snapshot)
		handle->head += size;

	trbe_report_wrap_event(handle);
	perf_aux_output_end(handle, size);
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data) {
		/*
		 * We are unable to restart the trace collection,
		 * thus leave the TRBE disabled. The etm-perf driver
		 * is able to detect this with a disconnected handle
		 * (handle->event = NULL).
		 */
		trbe_drain_and_disable_local(buf->cpudata);
		*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
		return -EINVAL;
	}

	return __arm_trbe_enable(buf, handle);
}
1094 
is_perf_trbe(struct perf_output_handle * handle)1095 static bool is_perf_trbe(struct perf_output_handle *handle)
1096 {
1097 	struct trbe_buf *buf = etm_perf_sink_config(handle);
1098 	struct trbe_cpudata *cpudata = buf->cpudata;
1099 	struct trbe_drvdata *drvdata = cpudata->drvdata;
1100 	int cpu = smp_processor_id();
1101 
1102 	WARN_ON(buf->trbe_hw_base != get_trbe_base_pointer());
1103 	WARN_ON(buf->trbe_limit != get_trbe_limit_pointer());
1104 
1105 	if (cpudata->mode != CS_MODE_PERF)
1106 		return false;
1107 
1108 	if (cpudata->cpu != cpu)
1109 		return false;
1110 
1111 	if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1112 		return false;
1113 
1114 	return true;
1115 }
1116 
cpu_prohibit_trace(void)1117 static u64 cpu_prohibit_trace(void)
1118 {
1119 	u64 trfcr = read_trfcr();
1120 
1121 	/* Prohibit tracing at EL0 & the kernel EL */
1122 	write_trfcr(trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE));
1123 	/* Return the original value of the TRFCR */
1124 	return trfcr;
1125 }
1126 
/*
 * Per-CPU TRBE interrupt handler. Quiesces tracing and the TRBE, then
 * dispatches on the fault cause: restart after a wrap, re-enable after a
 * spurious IRQ, or truncate the event on a fatal fault. The ordering
 * (prohibit trace -> drain/disable -> clear IRQ -> isb) must be
 * preserved.
 */
static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
{
	struct perf_output_handle **handle_ptr = dev;
	struct perf_output_handle *handle = *handle_ptr;
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	enum trbe_fault_action act;
	u64 status;
	bool truncated = false;
	u64 trfcr;

	/* Reads to TRBSR_EL1 is fine when TRBE is active */
	status = read_sysreg_s(SYS_TRBSR_EL1);
	/*
	 * If the pending IRQ was handled by update_buffer callback
	 * we have nothing to do here.
	 */
	if (!is_trbe_irq(status))
		return IRQ_NONE;

	/* Prohibit the CPU from tracing before we disable the TRBE */
	trfcr = cpu_prohibit_trace();
	/*
	 * Ensure the trace is visible to the CPUs and
	 * any external aborts have been resolved.
	 */
	trbe_drain_and_disable_local(buf->cpudata);
	clr_trbe_irq();
	isb();

	/* The handle may have been released by update_buffer in parallel */
	if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle))
		return IRQ_NONE;

	if (!is_perf_trbe(handle))
		return IRQ_NONE;

	act = trbe_get_fault_act(handle, status);
	switch (act) {
	case TRBE_FAULT_ACT_WRAP:
		/* A failed restart leaves the event truncated */
		truncated = !!trbe_handle_overflow(handle);
		break;
	case TRBE_FAULT_ACT_SPURIOUS:
		trbe_handle_spurious(handle);
		break;
	case TRBE_FAULT_ACT_FATAL:
		trbe_stop_and_truncate_event(handle);
		truncated = true;
		break;
	}

	/*
	 * If the buffer was truncated, ensure perf callbacks
	 * have completed, which will disable the event.
	 *
	 * Otherwise, restore the trace filter controls to
	 * allow the tracing.
	 */
	if (truncated)
		irq_work_run();
	else
		write_trfcr(trfcr);

	return IRQ_HANDLED;
}
1190 
/* Sink callbacks wired into the coresight core for each per-CPU TRBE */
static const struct coresight_ops_sink arm_trbe_sink_ops = {
	.enable		= arm_trbe_enable,
	.disable	= arm_trbe_disable,
	.alloc_buffer	= arm_trbe_alloc_buffer,
	.free_buffer	= arm_trbe_free_buffer,
	.update_buffer	= arm_trbe_update_buffer,
};

/* TRBE is a pure sink: only sink_ops are populated */
static const struct coresight_ops arm_trbe_cs_ops = {
	.sink_ops	= &arm_trbe_sink_ops,
};
1202 
align_show(struct device * dev,struct device_attribute * attr,char * buf)1203 static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf)
1204 {
1205 	struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
1206 
1207 	return sprintf(buf, "%llx\n", cpudata->trbe_hw_align);
1208 }
1209 static DEVICE_ATTR_RO(align);
1210 
flag_show(struct device * dev,struct device_attribute * attr,char * buf)1211 static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf)
1212 {
1213 	struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
1214 
1215 	return sprintf(buf, "%d\n", cpudata->trbe_flag);
1216 }
1217 static DEVICE_ATTR_RO(flag);
1218 
/* Sysfs attributes exposed on every registered TRBE coresight device */
static struct attribute *arm_trbe_attrs[] = {
	&dev_attr_align.attr,
	&dev_attr_flag.attr,
	NULL,
};

static const struct attribute_group arm_trbe_group = {
	.attrs = arm_trbe_attrs,
};

/* NULL-terminated group list handed to coresight_register() */
static const struct attribute_group *arm_trbe_groups[] = {
	&arm_trbe_group,
	NULL,
};
1233 
arm_trbe_enable_cpu(void * info)1234 static void arm_trbe_enable_cpu(void *info)
1235 {
1236 	struct trbe_drvdata *drvdata = info;
1237 	struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);
1238 
1239 	trbe_reset_local(cpudata);
1240 	enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
1241 }
1242 
arm_trbe_disable_cpu(void * info)1243 static void arm_trbe_disable_cpu(void *info)
1244 {
1245 	struct trbe_drvdata *drvdata = info;
1246 	struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);
1247 
1248 	disable_percpu_irq(drvdata->irq);
1249 	trbe_reset_local(cpudata);
1250 }
1251 
1252 
arm_trbe_register_coresight_cpu(struct trbe_drvdata * drvdata,int cpu)1253 static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
1254 {
1255 	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
1256 	struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
1257 	struct coresight_desc desc = { 0 };
1258 	struct device *dev;
1259 
1260 	if (WARN_ON(trbe_csdev))
1261 		return;
1262 
1263 	/* If the TRBE was not probed on the CPU, we shouldn't be here */
1264 	if (WARN_ON(!cpudata->drvdata))
1265 		return;
1266 
1267 	dev = &cpudata->drvdata->pdev->dev;
1268 	desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
1269 	if (!desc.name)
1270 		goto cpu_clear;
1271 	/*
1272 	 * TRBE coresight devices do not need regular connections
1273 	 * information, as the paths get built between all percpu
1274 	 * source and their respective percpu sink devices. Though
1275 	 * coresight_register() expect device connections via the
1276 	 * platform_data, which TRBE devices do not have. As they
1277 	 * are not real ACPI devices, coresight_get_platform_data()
1278 	 * ends up failing. Instead let's allocate a dummy zeroed
1279 	 * coresight_platform_data structure and assign that back
1280 	 * into the device for that purpose.
1281 	 */
1282 	desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL);
1283 	if (IS_ERR(desc.pdata))
1284 		goto cpu_clear;
1285 
1286 	desc.type = CORESIGHT_DEV_TYPE_SINK;
1287 	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
1288 	desc.ops = &arm_trbe_cs_ops;
1289 	desc.groups = arm_trbe_groups;
1290 	desc.dev = dev;
1291 	trbe_csdev = coresight_register(&desc);
1292 	if (IS_ERR(trbe_csdev))
1293 		goto cpu_clear;
1294 
1295 	dev_set_drvdata(&trbe_csdev->dev, cpudata);
1296 	coresight_set_percpu_sink(cpu, trbe_csdev);
1297 	return;
1298 cpu_clear:
1299 	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
1300 }
1301 
1302 /*
1303  * Must be called with preemption disabled, for trbe_check_errata().
1304  */
arm_trbe_probe_cpu(void * info)1305 static void arm_trbe_probe_cpu(void *info)
1306 {
1307 	struct trbe_drvdata *drvdata = info;
1308 	int cpu = smp_processor_id();
1309 	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
1310 	u64 trbidr;
1311 
1312 	if (WARN_ON(!cpudata))
1313 		goto cpu_clear;
1314 
1315 	if (!is_trbe_available()) {
1316 		pr_err("TRBE is not implemented on cpu %d\n", cpu);
1317 		goto cpu_clear;
1318 	}
1319 
1320 	trbidr = read_sysreg_s(SYS_TRBIDR_EL1);
1321 	if (!is_trbe_programmable(trbidr)) {
1322 		pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu);
1323 		goto cpu_clear;
1324 	}
1325 
1326 	cpudata->trbe_hw_align = 1ULL << get_trbe_address_align(trbidr);
1327 	if (cpudata->trbe_hw_align > SZ_2K) {
1328 		pr_err("Unsupported alignment on cpu %d\n", cpu);
1329 		goto cpu_clear;
1330 	}
1331 
1332 	/*
1333 	 * Run the TRBE erratum checks, now that we know
1334 	 * this instance is about to be registered.
1335 	 */
1336 	trbe_check_errata(cpudata);
1337 
1338 	if (trbe_is_broken(cpudata)) {
1339 		pr_err("Disabling TRBE on cpu%d due to erratum\n", cpu);
1340 		goto cpu_clear;
1341 	}
1342 
1343 	/*
1344 	 * If the TRBE is affected by erratum TRBE_WORKAROUND_OVERWRITE_FILL_MODE,
1345 	 * we must always program the TBRPTR_EL1, 256bytes from a page
1346 	 * boundary, with TRBBASER_EL1 set to the page, to prevent
1347 	 * TRBE over-writing 256bytes at TRBBASER_EL1 on FILL event.
1348 	 *
1349 	 * Thus make sure we always align our write pointer to a PAGE_SIZE,
1350 	 * which also guarantees that we have at least a PAGE_SIZE space in
1351 	 * the buffer (TRBLIMITR is PAGE aligned) and thus we can skip
1352 	 * the required bytes at the base.
1353 	 */
1354 	if (trbe_may_overwrite_in_fill_mode(cpudata))
1355 		cpudata->trbe_align = PAGE_SIZE;
1356 	else
1357 		cpudata->trbe_align = cpudata->trbe_hw_align;
1358 
1359 	cpudata->trbe_flag = get_trbe_flag_update(trbidr);
1360 	cpudata->cpu = cpu;
1361 	cpudata->drvdata = drvdata;
1362 	return;
1363 cpu_clear:
1364 	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
1365 }
1366 
arm_trbe_remove_coresight_cpu(struct trbe_drvdata * drvdata,int cpu)1367 static void arm_trbe_remove_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
1368 {
1369 	struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
1370 
1371 	if (trbe_csdev) {
1372 		coresight_unregister(trbe_csdev);
1373 		coresight_set_percpu_sink(cpu, NULL);
1374 	}
1375 }
1376 
arm_trbe_probe_coresight(struct trbe_drvdata * drvdata)1377 static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
1378 {
1379 	int cpu;
1380 
1381 	drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
1382 	if (!drvdata->cpudata)
1383 		return -ENOMEM;
1384 
1385 	for_each_cpu(cpu, &drvdata->supported_cpus) {
1386 		/* If we fail to probe the CPU, let us defer it to hotplug callbacks */
1387 		if (smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1))
1388 			continue;
1389 		if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1390 			arm_trbe_register_coresight_cpu(drvdata, cpu);
1391 		if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1392 			smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
1393 	}
1394 	return 0;
1395 }
1396 
/*
 * Tear down all per-CPU TRBE sinks: quiesce each CPU's TRBE, unregister
 * its coresight device, then free the per-CPU state.
 */
static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
{
	int cpu;

	for_each_cpu(cpu, &drvdata->supported_cpus) {
		smp_call_function_single(cpu, arm_trbe_disable_cpu, drvdata, 1);
		arm_trbe_remove_coresight_cpu(drvdata, cpu);
	}
	free_percpu(drvdata->cpudata);
	return 0;
}
1408 
/*
 * Probe the TRBE on a CPU coming online. arm_trbe_probe_cpu() requires
 * preemption disabled (it reads this-CPU sysregs and runs errata checks).
 */
static void arm_trbe_probe_hotplugged_cpu(struct trbe_drvdata *drvdata)
{
	preempt_disable();
	arm_trbe_probe_cpu(drvdata);
	preempt_enable();
}
1415 
arm_trbe_cpu_startup(unsigned int cpu,struct hlist_node * node)1416 static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
1417 {
1418 	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
1419 
1420 	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
1421 
1422 		/*
1423 		 * If this CPU was not probed for TRBE,
1424 		 * initialize it now.
1425 		 */
1426 		if (!coresight_get_percpu_sink(cpu)) {
1427 			arm_trbe_probe_hotplugged_cpu(drvdata);
1428 			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1429 				arm_trbe_register_coresight_cpu(drvdata, cpu);
1430 			if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1431 				arm_trbe_enable_cpu(drvdata);
1432 		} else {
1433 			arm_trbe_enable_cpu(drvdata);
1434 		}
1435 	}
1436 	return 0;
1437 }
1438 
/*
 * CPU hotplug offline callback: quiesce the TRBE on @cpu before it
 * goes away. CPUs that never probed successfully are skipped.
 */
static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
{
	struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);

	if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
		arm_trbe_disable_cpu(drvdata);
	return 0;
}
1447 
arm_trbe_probe_cpuhp(struct trbe_drvdata * drvdata)1448 static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
1449 {
1450 	enum cpuhp_state trbe_online;
1451 	int ret;
1452 
1453 	trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
1454 					      arm_trbe_cpu_startup, arm_trbe_cpu_teardown);
1455 	if (trbe_online < 0)
1456 		return trbe_online;
1457 
1458 	ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node);
1459 	if (ret) {
1460 		cpuhp_remove_multi_state(trbe_online);
1461 		return ret;
1462 	}
1463 	drvdata->trbe_online = trbe_online;
1464 	return 0;
1465 }
1466 
/* Detach this instance from the hotplug state and remove the state */
static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
{
	cpuhp_state_remove_instance(drvdata->trbe_online, &drvdata->hotplug_node);
	cpuhp_remove_multi_state(drvdata->trbe_online);
}
1472 
arm_trbe_probe_irq(struct platform_device * pdev,struct trbe_drvdata * drvdata)1473 static int arm_trbe_probe_irq(struct platform_device *pdev,
1474 			      struct trbe_drvdata *drvdata)
1475 {
1476 	int ret;
1477 
1478 	drvdata->irq = platform_get_irq(pdev, 0);
1479 	if (drvdata->irq < 0) {
1480 		pr_err("IRQ not found for the platform device\n");
1481 		return drvdata->irq;
1482 	}
1483 
1484 	if (!irq_is_percpu(drvdata->irq)) {
1485 		pr_err("IRQ is not a PPI\n");
1486 		return -EINVAL;
1487 	}
1488 
1489 	if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
1490 		return -EINVAL;
1491 
1492 	drvdata->handle = alloc_percpu(struct perf_output_handle *);
1493 	if (!drvdata->handle)
1494 		return -ENOMEM;
1495 
1496 	ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
1497 	if (ret) {
1498 		free_percpu(drvdata->handle);
1499 		return ret;
1500 	}
1501 	return 0;
1502 }
1503 
/* Release the per-CPU IRQ and the per-CPU handle pointers it used */
static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
{
	free_percpu_irq(drvdata->irq, drvdata->handle);
	free_percpu(drvdata->handle);
}
1509 
/*
 * Platform driver probe: allocate driver state, then set up the IRQ,
 * the per-CPU coresight sinks and the hotplug callbacks, unwinding in
 * reverse order on failure.
 */
static int arm_trbe_device_probe(struct platform_device *pdev)
{
	struct trbe_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int ret;

	/* Trace capture is not possible with kernel page table isolation */
	if (arm64_kernel_unmapped_at_el0()) {
		pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
		return -EOPNOTSUPP;
	}

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	dev_set_drvdata(dev, drvdata);
	drvdata->pdev = pdev;
	ret = arm_trbe_probe_irq(pdev, drvdata);
	if (ret)
		return ret;

	ret = arm_trbe_probe_coresight(drvdata);
	if (ret)
		goto probe_failed;

	ret = arm_trbe_probe_cpuhp(drvdata);
	if (ret)
		goto cpuhp_failed;

	return 0;
cpuhp_failed:
	arm_trbe_remove_coresight(drvdata);
probe_failed:
	arm_trbe_remove_irq(drvdata);
	return ret;
}
1547 
/* Platform driver remove: tear down in the reverse order of probe */
static void arm_trbe_device_remove(struct platform_device *pdev)
{
	struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);

	arm_trbe_remove_cpuhp(drvdata);
	arm_trbe_remove_coresight(drvdata);
	arm_trbe_remove_irq(drvdata);
}
1556 
/* Devicetree match table for the TRBE platform device */
static const struct of_device_id arm_trbe_of_match[] = {
	{ .compatible = "arm,trace-buffer-extension"},
	{},
};
MODULE_DEVICE_TABLE(of, arm_trbe_of_match);

#ifdef CONFIG_ACPI
/* On ACPI systems the TRBE device is created by the architecture code */
static const struct platform_device_id arm_trbe_acpi_match[] = {
	{ ARMV8_TRBE_PDEV_NAME, 0 },
	{ }
};
MODULE_DEVICE_TABLE(platform, arm_trbe_acpi_match);
#endif
1570 
/* Platform driver glue; bind/unbind via sysfs is suppressed */
static struct platform_driver arm_trbe_driver = {
	.id_table = ACPI_PTR(arm_trbe_acpi_match),
	.driver	= {
		.name = DRVNAME,
		.of_match_table = of_match_ptr(arm_trbe_of_match),
		.suppress_bind_attrs = true,
	},
	.probe	= arm_trbe_device_probe,
	.remove = arm_trbe_device_remove,
};
1581 
arm_trbe_init(void)1582 static int __init arm_trbe_init(void)
1583 {
1584 	int ret;
1585 
1586 	ret = platform_driver_register(&arm_trbe_driver);
1587 	if (!ret)
1588 		return 0;
1589 
1590 	pr_err("Error registering %s platform driver\n", DRVNAME);
1591 	return ret;
1592 }
1593 
/* Module exit: unregister the platform driver */
static void __exit arm_trbe_exit(void)
{
	platform_driver_unregister(&arm_trbe_driver);
}
module_init(arm_trbe_init);
module_exit(arm_trbe_exit);

MODULE_AUTHOR("Anshuman Khandual <anshuman.khandual@arm.com>");
MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
MODULE_LICENSE("GPL v2");
1604