1 // SPDX-License-Identifier: GPL-2.0
/*
 * This driver enables the Trace Buffer Extension (TRBE) as a per-cpu
 * coresight sink device, which can then pair with an appropriate per-cpu
 * coresight source device (ETE), thus generating the required trace data.
 * Trace can be enabled via the perf framework.
 *
 * The AUX buffer handling is inspired from Arm SPE PMU driver.
 *
 * Copyright (C) 2020 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
14 #define DRVNAME "arm_trbe"
15
16 #define pr_fmt(fmt) DRVNAME ": " fmt
17
18 #include <asm/barrier.h>
19 #include <asm/cpufeature.h>
20 #include <linux/kvm_host.h>
21 #include <linux/vmalloc.h>
22
23 #include "coresight-self-hosted-trace.h"
24 #include "coresight-trbe.h"
25
26 #define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
27
28 /*
29 * A padding packet that will help the user space tools
30 * in skipping relevant sections in the captured trace
31 * data which could not be decoded. TRBE doesn't support
32 * formatting the trace data, unlike the legacy CoreSight
33 * sinks and thus we use ETE trace packets to pad the
34 * sections of the buffer.
35 */
36 #define ETE_IGNORE_PACKET 0x70
37
38 /*
39 * Minimum amount of meaningful trace will contain:
40 * A-Sync, Trace Info, Trace On, Address, Atom.
41 * This is about 44bytes of ETE trace. To be on
42 * the safer side, we assume 64bytes is the minimum
43 * space required for a meaningful session, before
44 * we hit a "WRAP" event.
45 */
46 #define TRBE_TRACE_MIN_BUF_SIZE 64
47
/* Possible resolutions of a TRBE maintenance interrupt / fault */
enum trbe_fault_action {
	TRBE_FAULT_ACT_WRAP,		/* Buffer wrapped: collect trace, restart */
	TRBE_FAULT_ACT_SPURIOUS,	/* Nothing to do; TRBE stays disabled */
	TRBE_FAULT_ACT_FATAL,		/* Abort/trigger: unrecoverable, stop session */
};
53
/* Per-session state for the TRBE ring buffer backing a perf AUX buffer */
struct trbe_buf {
	/*
	 * Even though trbe_base represents vmap()
	 * mapped allocated buffer's start address,
	 * it's being as unsigned long for various
	 * arithmetic and comparison operations &
	 * also to be consistent with trbe_write &
	 * trbe_limit sibling pointers.
	 */
	unsigned long trbe_base;
	/* The base programmed into the TRBE */
	unsigned long trbe_hw_base;
	/* Limit pointer for the current TRBE run */
	unsigned long trbe_limit;
	/* Write pointer for the current TRBE run */
	unsigned long trbe_write;
	/* Number of pages in the vmap()'ed ring buffer */
	int nr_pages;
	/* Pages backing the perf AUX ring buffer */
	void **pages;
	/* True when the perf session runs in snapshot mode */
	bool snapshot;
	/* Back pointer to the owning per-cpu TRBE instance data */
	struct trbe_cpudata *cpudata;
};
73
74 /*
75 * TRBE erratum list
76 *
77 * The errata are defined in arm64 generic cpu_errata framework.
78 * Since the errata work arounds could be applied individually
79 * to the affected CPUs inside the TRBE driver, we need to know if
80 * a given CPU is affected by the erratum. Unlike the other erratum
81 * work arounds, TRBE driver needs to check multiple times during
82 * a trace session. Thus we need a quicker access to per-CPU
83 * errata and not issue costly this_cpu_has_cap() everytime.
84 * We keep a set of the affected errata in trbe_cpudata, per TRBE.
85 *
86 * We rely on the corresponding cpucaps to be defined for a given
87 * TRBE erratum. We map the given cpucap into a TRBE internal number
88 * to make the tracking of the errata lean.
89 *
90 * This helps in :
91 * - Not duplicating the detection logic
92 * - Streamlined detection of erratum across the system
93 */
94 #define TRBE_WORKAROUND_OVERWRITE_FILL_MODE 0
95 #define TRBE_WORKAROUND_WRITE_OUT_OF_RANGE 1
96 #define TRBE_NEEDS_DRAIN_AFTER_DISABLE 2
97 #define TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE 3
98 #define TRBE_IS_BROKEN 4
99
/* Maps each TRBE internal erratum number to its arm64 cpucap */
static int trbe_errata_cpucaps[] = {
	[TRBE_WORKAROUND_OVERWRITE_FILL_MODE] = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
	[TRBE_WORKAROUND_WRITE_OUT_OF_RANGE] = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
	[TRBE_NEEDS_DRAIN_AFTER_DISABLE] = ARM64_WORKAROUND_2064142,
	[TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE] = ARM64_WORKAROUND_2038923,
	[TRBE_IS_BROKEN] = ARM64_WORKAROUND_1902691,
	-1,		/* Sentinel, must be the last entry */
};
108
109 /* The total number of listed errata in trbe_errata_cpucaps */
110 #define TRBE_ERRATA_MAX (ARRAY_SIZE(trbe_errata_cpucaps) - 1)
111
112 /*
113 * Safe limit for the number of bytes that may be overwritten
114 * when ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE is triggered.
115 */
116 #define TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES 256
117
/*
 * struct trbe_cpudata: TRBE instance specific data
 * @trbe_flag - TRBE dirty/access flag support
 * @trbe_hw_align - Actual TRBE alignment required for TRBPTR_EL1.
 * @trbe_align - Software alignment used for the TRBPTR_EL1.
 * @cpu - CPU this TRBE belongs to.
 * @mode - Mode of current operation. (perf/disabled)
 * @buf - TRBE buffer of the currently active perf session.
 * @drvdata - TRBE specific drvdata
 * @errata - Bit map for the errata on this TRBE.
 */
struct trbe_cpudata {
	bool trbe_flag;
	u64 trbe_hw_align;
	u64 trbe_align;
	int cpu;
	enum cs_mode mode;
	struct trbe_buf *buf;
	struct trbe_drvdata *drvdata;
	DECLARE_BITMAP(errata, TRBE_ERRATA_MAX);
};
138
/* Driver wide data, shared by all the per-cpu TRBE instances */
struct trbe_drvdata {
	/* Per-cpu TRBE instance state */
	struct trbe_cpudata __percpu *cpudata;
	/* Per-cpu perf handle, shared with the TRBE IRQ handler */
	struct perf_output_handle * __percpu *handle;
	/* Node for the CPU hotplug callback registration */
	struct hlist_node hotplug_node;
	/* TRBE maintenance interrupt line */
	int irq;
	/* CPUs that have a usable TRBE instance */
	cpumask_t supported_cpus;
	/* CPU hotplug state registered for this driver */
	enum cpuhp_state trbe_online;
	struct platform_device *pdev;
};
148
trbe_check_errata(struct trbe_cpudata * cpudata)149 static void trbe_check_errata(struct trbe_cpudata *cpudata)
150 {
151 int i;
152
153 for (i = 0; i < TRBE_ERRATA_MAX; i++) {
154 int cap = trbe_errata_cpucaps[i];
155
156 if (WARN_ON_ONCE(cap < 0))
157 return;
158 if (this_cpu_has_cap(cap))
159 set_bit(i, cpudata->errata);
160 }
161 }
162
trbe_has_erratum(struct trbe_cpudata * cpudata,int i)163 static bool trbe_has_erratum(struct trbe_cpudata *cpudata, int i)
164 {
165 return (i < TRBE_ERRATA_MAX) && test_bit(i, cpudata->errata);
166 }
167
/* True if this TRBE may overwrite data at TRBBASER after a FILL mode wrap (erratum) */
static bool trbe_may_overwrite_in_fill_mode(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE);
}
172
/* True if this TRBE may write beyond the programmed LIMIT pointer (erratum) */
static bool trbe_may_write_out_of_range(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE);
}
177
/* True if this TRBE needs a drain (TSB CSYNC + DSB) right after disabling */
static bool trbe_needs_drain_after_disable(struct trbe_cpudata *cpudata)
{
	/*
	 * Errata affected TRBE implementation will need TSB CSYNC and
	 * DSB in order to prevent subsequent writes into certain TRBE
	 * system registers from being ignored and not effected.
	 */
	return trbe_has_erratum(cpudata, TRBE_NEEDS_DRAIN_AFTER_DISABLE);
}
187
/* True if this TRBE needs an extra context synchronization (ISB) after enable */
static bool trbe_needs_ctxt_sync_after_enable(struct trbe_cpudata *cpudata)
{
	/*
	 * Errata affected TRBE implementation will need an additional
	 * context synchronization in order to prevent an inconsistent
	 * TRBE prohibited region view on the CPU which could possibly
	 * corrupt the TRBE buffer or the TRBE state.
	 */
	return trbe_has_erratum(cpudata, TRBE_NEEDS_CTXT_SYNC_AFTER_ENABLE);
}
198
/* True if this TRBE instance is unusable due to an erratum */
static bool trbe_is_broken(struct trbe_cpudata *cpudata)
{
	return trbe_has_erratum(cpudata, TRBE_IS_BROKEN);
}
203
trbe_alloc_node(struct perf_event * event)204 static int trbe_alloc_node(struct perf_event *event)
205 {
206 if (event->cpu == -1)
207 return NUMA_NO_NODE;
208 return cpu_to_node(event->cpu);
209 }
210
/*
 * Push trace data buffered inside the TRBE out to memory: TSB CSYNC
 * completes the outstanding trace writes, the DSB makes them visible.
 */
static void trbe_drain_buffer(void)
{
	tsb_csync();
	dsb(nsh);
}
216
/*
 * Enable the TRBE by setting TRBLIMITR_EL1.E on top of the caller
 * supplied TRBLIMITR value, then synchronize the enable.
 */
static void set_trbe_enabled(struct trbe_cpudata *cpudata, u64 trblimitr)
{
	/*
	 * Enable the TRBE without clearing LIMITPTR which
	 * might be required for fetching the buffer limits.
	 */
	trblimitr |= TRBLIMITR_EL1_E;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
	/* Tell KVM the TRBE is now in use on this CPU */
	kvm_enable_trbe();

	/* Synchronize the TRBE enable event */
	isb();

	/* Erratum: affected cores need a second context synchronization */
	if (trbe_needs_ctxt_sync_after_enable(cpudata))
		isb();
}
233
/* Disable the TRBE by clearing TRBLIMITR_EL1.E, applying erratum drains */
static void set_trbe_disabled(struct trbe_cpudata *cpudata)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	/*
	 * Disable the TRBE without clearing LIMITPTR which
	 * might be required for fetching the buffer limits.
	 */
	trblimitr &= ~TRBLIMITR_EL1_E;
	write_sysreg_s(trblimitr, SYS_TRBLIMITR_EL1);
	/* Tell KVM the TRBE is no longer in use on this CPU */
	kvm_disable_trbe();

	/* Erratum: drain so that subsequent TRBE register writes take effect */
	if (trbe_needs_drain_after_disable(cpudata))
		trbe_drain_buffer();
	isb();
}
250
/* Drain any buffered trace data, then disable the TRBE on this CPU */
static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
{
	trbe_drain_buffer();
	set_trbe_disabled(cpudata);
}
256
/* Fully disable the TRBE and clear all its programming on the local CPU */
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
	/* Clear LIMITR first: this both disables the TRBE and drops the limit */
	write_sysreg_s(0, SYS_TRBLIMITR_EL1);
	trbe_drain_buffer();
	write_sysreg_s(0, SYS_TRBPTR_EL1);
	write_sysreg_s(0, SYS_TRBBASER_EL1);
	write_sysreg_s(0, SYS_TRBSR_EL1);
}
265
/* Flag a TRBE WRAP event to userspace via the perf AUX record */
static void trbe_report_wrap_event(struct perf_output_handle *handle)
{
	/*
	 * Mark the buffer to indicate that there was a WRAP event by
	 * setting the COLLISION flag. This indicates to the user that
	 * the TRBE trace collection was stopped without stopping the
	 * ETE and thus there might be some amount of trace that was
	 * lost between the time the WRAP was detected and the IRQ
	 * was consumed by the CPU.
	 *
	 * Setting the TRUNCATED flag would move the event to STOPPED
	 * state unnecessarily, even when there is space left in the
	 * ring buffer. Using the COLLISION flag doesn't have this side
	 * effect. We only set TRUNCATED flag when there is no space
	 * left in the ring buffer.
	 */
	perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
}
284
/*
 * Disable the TRBE and close out the current perf session with a
 * TRUNCATED, zero sized AUX record, releasing the shared handle.
 */
static void trbe_stop_and_truncate_event(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);

	/*
	 * We cannot proceed with the buffer collection and we
	 * do not have any data for the current session. The
	 * etm_perf driver expects to close out the aux_buffer
	 * at event_stop(). So disable the TRBE here and leave
	 * the update_buffer() to return a 0 size.
	 */
	trbe_drain_and_disable_local(buf->cpudata);
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
	/* Drop the per-cpu handle so the IRQ handler won't touch it any more */
	*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
}
301
302 /*
303 * TRBE Buffer Management
304 *
305 * The TRBE buffer spans from the base pointer till the limit pointer. When enabled,
306 * it starts writing trace data from the write pointer onward till the limit pointer.
307 * When the write pointer reaches the address just before the limit pointer, it gets
308 * wrapped around again to the base pointer. This is called a TRBE wrap event, which
309 * generates a maintenance interrupt when operated in WRAP or FILL mode. This driver
310 * uses FILL mode, where the TRBE stops the trace collection at wrap event. The IRQ
311 * handler updates the AUX buffer and re-enables the TRBE with updated WRITE and
312 * LIMIT pointers.
313 *
314 * Wrap around with an IRQ
315 * ------ < ------ < ------- < ----- < -----
316 * | |
317 * ------ > ------ > ------- > ----- > -----
318 *
319 * +---------------+-----------------------+
320 * | | |
321 * +---------------+-----------------------+
322 * Base Pointer Write Pointer Limit Pointer
323 *
324 * The base and limit pointers always needs to be PAGE_SIZE aligned. But the write
325 * pointer can be aligned to the implementation defined TRBE trace buffer alignment
326 * as captured in trbe_cpudata->trbe_align.
327 *
328 *
329 * head tail wakeup
330 * +---------------------------------------+----- ~ ~ ------
331 * |$$$$$$$|################|$$$$$$$$$$$$$$| |
332 * +---------------------------------------+----- ~ ~ ------
333 * Base Pointer Write Pointer Limit Pointer
334 *
335 * The perf_output_handle indices (head, tail, wakeup) are monotonically increasing
336 * values which tracks all the driver writes and user reads from the perf auxiliary
337 * buffer. Generally [head..tail] is the area where the driver can write into unless
338 * the wakeup is behind the tail. Enabled TRBE buffer span needs to be adjusted and
339 * configured depending on the perf_output_handle indices, so that the driver does
340 * not override into areas in the perf auxiliary buffer which is being or yet to be
341 * consumed from the user space. The enabled TRBE buffer area is a moving subset of
342 * the allocated perf auxiliary buffer.
343 */
344
/* Fill @len bytes at @offset into the ring buffer with ETE ignore packets */
static void __trbe_pad_buf(struct trbe_buf *buf, u64 offset, int len)
{
	void *pad_start = (void *)(buf->trbe_base + offset);

	memset(pad_start, ETE_IGNORE_PACKET, len);
}
349
/*
 * Pad @len bytes at the current AUX head with ETE_IGNORE_PACKET and,
 * for non-snapshot sessions, advance the handle past the padded area.
 */
static void trbe_pad_buf(struct perf_output_handle *handle, int len)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	u64 head = PERF_IDX2OFF(handle->head, buf);

	__trbe_pad_buf(buf, head, len);
	/* Snapshot mode manages its own head; don't skip in that case */
	if (!buf->snapshot)
		perf_aux_output_skip(handle, len);
}
359
trbe_snapshot_offset(struct perf_output_handle * handle)360 static unsigned long trbe_snapshot_offset(struct perf_output_handle *handle)
361 {
362 struct trbe_buf *buf = etm_perf_sink_config(handle);
363
364 /*
365 * The ETE trace has alignment synchronization packets allowing
366 * the decoder to reset in case of an overflow or corruption.
367 * So we can use the entire buffer for the snapshot mode.
368 */
369 return buf->nr_pages * PAGE_SIZE;
370 }
371
trbe_min_trace_buf_size(struct perf_output_handle * handle)372 static u64 trbe_min_trace_buf_size(struct perf_output_handle *handle)
373 {
374 u64 size = TRBE_TRACE_MIN_BUF_SIZE;
375 struct trbe_buf *buf = etm_perf_sink_config(handle);
376 struct trbe_cpudata *cpudata = buf->cpudata;
377
378 /*
379 * When the TRBE is affected by an erratum that could make it
380 * write to the next "virtually addressed" page beyond the LIMIT.
381 * We need to make sure there is always a PAGE after the LIMIT,
382 * within the buffer. Thus we ensure there is at least an extra
383 * page than normal. With this we could then adjust the LIMIT
384 * pointer down by a PAGE later.
385 */
386 if (trbe_may_write_out_of_range(cpudata))
387 size += PAGE_SIZE;
388 return size;
389 }
390
391 /*
392 * TRBE Limit Calculation
393 *
394 * The following markers are used to illustrate various TRBE buffer situations.
395 *
396 * $$$$ - Data area, unconsumed captured trace data, not to be overridden
397 * #### - Free area, enabled, trace will be written
398 * %%%% - Free area, disabled, trace will not be written
399 * ==== - Free area, padded with ETE_IGNORE_PACKET, trace will be skipped
400 */
/*
 * Compute the byte offset (from trbe_base) at which the TRBE limit
 * pointer should be placed for a normal (non-snapshot) session, given
 * the current perf handle indices. Pads unusable gaps with ETE ignore
 * packets as a side effect. Returns 0 when no usable space remains.
 */
static unsigned long __trbe_normal_offset(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;
	const u64 bufsize = buf->nr_pages * PAGE_SIZE;
	u64 limit = bufsize;
	u64 head, tail, wakeup;

	head = PERF_IDX2OFF(handle->head, buf);

	/*
	 *		head
	 *	------->|
	 *	|
	 *	head	TRBE align	tail
	 * +----|-------|---------------|-------+
	 * |$$$$|=======|###############|$$$$$$$|
	 * +----|-------|---------------|-------+
	 * trbe_base				trbe_base + nr_pages
	 *
	 * Perf aux buffer output head position can be misaligned depending on
	 * various factors including user space reads. In case misaligned, head
	 * needs to be aligned before TRBE can be configured. Pad the alignment
	 * gap with ETE_IGNORE_PACKET bytes that will be ignored by user tools
	 * and skip this section thus advancing the head.
	 */
	if (!IS_ALIGNED(head, cpudata->trbe_align)) {
		unsigned long delta = roundup(head, cpudata->trbe_align) - head;

		delta = min(delta, handle->size);
		trbe_pad_buf(handle, delta);
		head = PERF_IDX2OFF(handle->head, buf);
	}

	/*
	 *	head = tail (size = 0)
	 * +----|-------------------------------+
	 * |$$$$|$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$	|
	 * +----|-------------------------------+
	 * trbe_base				trbe_base + nr_pages
	 *
	 * Perf aux buffer does not have any space for the driver to write into.
	 */
	if (!handle->size)
		return 0;

	/* Compute the tail and wakeup indices now that we've aligned head */
	tail = PERF_IDX2OFF(handle->head + handle->size, buf);
	wakeup = PERF_IDX2OFF(handle->wakeup, buf);

	/*
	 * Lets calculate the buffer area which TRBE could write into. There
	 * are three possible scenarios here. Limit needs to be aligned with
	 * PAGE_SIZE per the TRBE requirement. Always avoid clobbering the
	 * unconsumed data.
	 *
	 * 1) head < tail
	 *
	 *	head			tail
	 * +----|-----------------------|-------+
	 * |$$$$|#######################|$$$$$$$|
	 * +----|-----------------------|-------+
	 * trbe_base			limit	trbe_base + nr_pages
	 *
	 * TRBE could write into [head..tail] area. Unless the tail is right at
	 * the end of the buffer, neither a wrap around nor an IRQ is expected
	 * while being enabled.
	 *
	 * 2) head == tail
	 *
	 *	head = tail (size > 0)
	 * +----|-------------------------------+
	 * |%%%%|###############################|
	 * +----|-------------------------------+
	 * trbe_base				limit = trbe_base + nr_pages
	 *
	 * TRBE should just write into [head..base + nr_pages] area even though
	 * the entire buffer is empty. Reason being, when the trace reaches the
	 * end of the buffer, it will just wrap around with an IRQ giving an
	 * opportunity to reconfigure the buffer.
	 *
	 * 3) tail < head
	 *
	 *	tail			head
	 * +----|-----------------------|-------+
	 * |%%%%|$$$$$$$$$$$$$$$$$$$$$$$|#######|
	 * +----|-----------------------|-------+
	 * trbe_base				limit = trbe_base + nr_pages
	 *
	 * TRBE should just write into [head..base + nr_pages] area even though
	 * the [trbe_base..tail] is also empty. Reason being, when the trace
	 * reaches the end of the buffer, it will just wrap around with an IRQ
	 * giving an opportunity to reconfigure the buffer.
	 */
	if (head < tail)
		limit = round_down(tail, PAGE_SIZE);

	/*
	 * Wakeup may be arbitrarily far into the future. If it's not in the
	 * current generation, either we'll wrap before hitting it, or it's
	 * in the past and has been handled already.
	 *
	 * If there's a wakeup before we wrap, arrange to be woken up by the
	 * page boundary following it. Keep the tail boundary if that's lower.
	 *
	 *	head		wakeup	tail
	 * +----|---------------|-------|-------+
	 * |$$$$|###############|%%%%%%%|$$$$$$$|
	 * +----|---------------|-------|-------+
	 * trbe_base		limit		trbe_base + nr_pages
	 */
	if (handle->wakeup < (handle->head + handle->size) && head <= wakeup)
		limit = min(limit, round_up(wakeup, PAGE_SIZE));

	/*
	 * There are two situation when this can happen i.e limit is before
	 * the head and hence TRBE cannot be configured.
	 *
	 * 1) head < tail (aligned down with PAGE_SIZE) and also they are both
	 * within the same PAGE size range.
	 *
	 *			PAGE_SIZE
	 *		|----------------------|
	 *
	 *		limit	head	tail
	 * +------------|------|--------|-------+
	 * |$$$$$$$$$$$$$$$$$$$|========|$$$$$$$|
	 * +------------|------|--------|-------+
	 * trbe_base				trbe_base + nr_pages
	 *
	 * 2) head < wakeup (aligned up with PAGE_SIZE) < tail and also both
	 * head and wakeup are within same PAGE size range.
	 *
	 *		PAGE_SIZE
	 *	|----------------------|
	 *
	 *	limit	head	wakeup	tail
	 * +----|------|-------|--------|-------+
	 * |$$$$$$$$$$$|=======|========|$$$$$$$|
	 * +----|------|-------|--------|-------+
	 * trbe_base				trbe_base + nr_pages
	 */
	if (limit > head)
		return limit;

	/* No usable run: pad out the remaining space and report no room */
	trbe_pad_buf(handle, handle->size);
	return 0;
}
549
/*
 * Compute the limit offset for a normal session, padding and retrying
 * until the usable area can hold at least a minimal trace run (or no
 * space remains, in which case 0 is returned).
 */
static unsigned long trbe_normal_offset(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	u64 limit = __trbe_normal_offset(handle);
	u64 head = PERF_IDX2OFF(handle->head, buf);

	/*
	 * If the head is too close to the limit and we don't
	 * have space for a meaningful run, we rather pad it
	 * and start fresh.
	 *
	 * We might have to do this more than once to make sure
	 * we have enough required space.
	 */
	while (limit && ((limit - head) < trbe_min_trace_buf_size(handle))) {
		trbe_pad_buf(handle, limit - head);
		limit = __trbe_normal_offset(handle);
		head = PERF_IDX2OFF(handle->head, buf);
	}
	return limit;
}
571
compute_trbe_buffer_limit(struct perf_output_handle * handle)572 static unsigned long compute_trbe_buffer_limit(struct perf_output_handle *handle)
573 {
574 struct trbe_buf *buf = etm_perf_sink_config(handle);
575 unsigned long offset;
576
577 if (buf->snapshot)
578 offset = trbe_snapshot_offset(handle);
579 else
580 offset = trbe_normal_offset(handle);
581 return buf->trbe_base + offset;
582 }
583
clr_trbe_status(void)584 static void clr_trbe_status(void)
585 {
586 u64 trbsr = read_sysreg_s(SYS_TRBSR_EL1);
587
588 WARN_ON(is_trbe_enabled());
589 trbsr &= ~TRBSR_EL1_IRQ;
590 trbsr &= ~TRBSR_EL1_TRG;
591 trbsr &= ~TRBSR_EL1_WRAP;
592 trbsr &= ~TRBSR_EL1_EC_MASK;
593 trbsr &= ~TRBSR_EL1_BSC_MASK;
594 trbsr &= ~TRBSR_EL1_S;
595 write_sysreg_s(trbsr, SYS_TRBSR_EL1);
596 }
597
/*
 * Program TRBLIMITR_EL1 with the session's limit pointer and mode bits
 * (FILL trace mode, ignored trigger mode), then enable the TRBE.
 */
static void set_trbe_limit_pointer_enabled(struct trbe_buf *buf)
{
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);
	unsigned long addr = buf->trbe_limit;

	/* The LIMIT pointer must be both LIMIT-field and page aligned */
	WARN_ON(!IS_ALIGNED(addr, (1UL << TRBLIMITR_EL1_LIMIT_SHIFT)));
	WARN_ON(!IS_ALIGNED(addr, PAGE_SIZE));

	trblimitr &= ~TRBLIMITR_EL1_nVM;
	trblimitr &= ~TRBLIMITR_EL1_FM_MASK;
	trblimitr &= ~TRBLIMITR_EL1_TM_MASK;
	trblimitr &= ~TRBLIMITR_EL1_LIMIT_MASK;

	/*
	 * Fill trace buffer mode is used here while configuring the
	 * TRBE for trace capture. In this particular mode, the trace
	 * collection is stopped and a maintenance interrupt is raised
	 * when the current write pointer wraps. This pause in trace
	 * collection gives the software an opportunity to capture the
	 * trace data in the interrupt handler, before reconfiguring
	 * the TRBE.
	 */
	trblimitr |= (TRBLIMITR_EL1_FM_FILL << TRBLIMITR_EL1_FM_SHIFT) &
		     TRBLIMITR_EL1_FM_MASK;

	/*
	 * Trigger mode is not used here while configuring the TRBE for
	 * the trace capture. Hence just keep this in the ignore mode.
	 */
	trblimitr |= (TRBLIMITR_EL1_TM_IGNR << TRBLIMITR_EL1_TM_SHIFT) &
		     TRBLIMITR_EL1_TM_MASK;
	trblimitr |= (addr & PAGE_MASK);
	set_trbe_enabled(buf->cpudata, trblimitr);
}
632
/*
 * Program the TRBE base/write pointers from @buf and enable tracing.
 * The TRBE is disabled and its status cleared before the new pointers
 * are installed, and the writes are synchronized before the enable.
 */
static void trbe_enable_hw(struct trbe_buf *buf)
{
	/* Sanity check the window: hw_base <= write < limit, within the buffer */
	WARN_ON(buf->trbe_hw_base < buf->trbe_base);
	WARN_ON(buf->trbe_write < buf->trbe_hw_base);
	WARN_ON(buf->trbe_write >= buf->trbe_limit);
	set_trbe_disabled(buf->cpudata);
	clr_trbe_status();
	set_trbe_base_pointer(buf->trbe_hw_base);
	set_trbe_write_pointer(buf->trbe_write);

	/*
	 * Synchronize all the register updates
	 * till now before enabling the TRBE.
	 */
	isb();
	set_trbe_limit_pointer_enabled(buf);
}
650
/*
 * Decode TRBSR_EL1 (@trbsr) into the action required: FATAL for aborts
 * or trigger events, WRAP for a genuine buffer-filled event, otherwise
 * SPURIOUS.
 */
static enum trbe_fault_action trbe_get_fault_act(struct perf_output_handle *handle,
						 u64 trbsr)
{
	int ec = get_trbe_ec(trbsr);
	int bsc = get_trbe_bsc(trbsr);
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	struct trbe_cpudata *cpudata = buf->cpudata;

	WARN_ON(is_trbe_running(trbsr));
	/* Trigger events and external aborts are unrecoverable */
	if (is_trbe_trg(trbsr) || is_trbe_abort(trbsr))
		return TRBE_FAULT_ACT_FATAL;

	/* So are stage-1/stage-2 translation aborts */
	if ((ec == TRBE_EC_STAGE1_ABORT) || (ec == TRBE_EC_STAGE2_ABORT))
		return TRBE_FAULT_ACT_FATAL;

	/*
	 * If the trbe is affected by TRBE_WORKAROUND_OVERWRITE_FILL_MODE,
	 * it might write data after a WRAP event in the fill mode.
	 * Thus the check TRBPTR == TRBBASER will not be honored.
	 */
	if ((is_trbe_wrap(trbsr) && (ec == TRBE_EC_OTHERS) && (bsc == TRBE_BSC_FILLED)) &&
	    (trbe_may_overwrite_in_fill_mode(cpudata) ||
	     get_trbe_write_pointer() == get_trbe_base_pointer()))
		return TRBE_FAULT_ACT_WRAP;

	return TRBE_FAULT_ACT_SPURIOUS;
}
678
/*
 * Number of bytes of trace captured in this run: the distance from the
 * AUX head to the final write pointer (or the limit pointer, on @wrap).
 * Pads the OVERWRITE_FILL_MODE erratum skip region with ignore packets.
 */
static unsigned long trbe_get_trace_size(struct perf_output_handle *handle,
					 struct trbe_buf *buf, bool wrap)
{
	u64 write;
	u64 start_off, end_off;
	u64 size;
	u64 overwrite_skip = TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;

	/*
	 * If the TRBE has wrapped around the write pointer has
	 * wrapped and should be treated as limit.
	 *
	 * When the TRBE is affected by TRBE_WORKAROUND_WRITE_OUT_OF_RANGE,
	 * it may write upto 64bytes beyond the "LIMIT". The driver already
	 * keeps a valid page next to the LIMIT and we could potentially
	 * consume the trace data that may have been collected there. But we
	 * cannot be really sure it is available, and the TRBPTR may not
	 * indicate the same. Also, affected cores are also affected by another
	 * erratum which forces the PAGE_SIZE alignment on the TRBPTR, and thus
	 * could potentially pad an entire PAGE_SIZE - 64bytes, to get those
	 * 64bytes. Thus we ignore the potential triggering of the erratum
	 * on WRAP and limit the data to LIMIT.
	 */
	if (wrap)
		write = get_trbe_limit_pointer();
	else
		write = get_trbe_write_pointer();

	/*
	 * TRBE may use a different base address than the base
	 * of the ring buffer. Thus use the beginning of the ring
	 * buffer to compute the offsets.
	 */
	end_off = write - buf->trbe_base;
	start_off = PERF_IDX2OFF(handle->head, buf);

	/* The write pointer must never be behind the head */
	if (WARN_ON_ONCE(end_off < start_off))
		return 0;

	size = end_off - start_off;
	/*
	 * If the TRBE is affected by the following erratum, we must fill
	 * the space we skipped with IGNORE packets. And we are always
	 * guaranteed to have at least a PAGE_SIZE space in the buffer.
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE) &&
	    !WARN_ON(size < overwrite_skip))
		__trbe_pad_buf(buf, start_off, overwrite_skip);

	return size;
}
730
/*
 * Allocate the per-session TRBE buffer state and vmap() the perf AUX
 * pages into a contiguous virtual range. Returns the new trbe_buf,
 * NULL if fewer than two pages were offered, or ERR_PTR(-ENOMEM).
 */
static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
				   struct perf_event *event, void **pages,
				   int nr_pages, bool snapshot)
{
	struct trbe_buf *buf;
	struct page **pglist;
	int i;

	/*
	 * TRBE LIMIT and TRBE WRITE pointers must be page aligned. But with
	 * just a single page, there would not be any room left while writing
	 * into a partially filled TRBE buffer after the page size alignment.
	 * Hence restrict the minimum buffer size as two pages.
	 */
	if (nr_pages < 2)
		return NULL;

	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
	if (!buf)
		return ERR_PTR(-ENOMEM);

	pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
	if (!pglist)
		goto free_buf;

	for (i = 0; i < nr_pages; i++)
		pglist[i] = virt_to_page(pages[i]);

	buf->trbe_base = (unsigned long)vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!buf->trbe_base)
		goto free_pglist;

	buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
	buf->trbe_write = buf->trbe_base;
	buf->snapshot = snapshot;
	buf->nr_pages = nr_pages;
	buf->pages = pages;
	/* The page list was only needed for vmap() */
	kfree(pglist);
	return buf;

free_pglist:
	kfree(pglist);
free_buf:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}
775
/* Tear down a trbe_buf: unmap the vmap()'ed range and free the state */
static void arm_trbe_free_buffer(void *config)
{
	struct trbe_buf *buf = config;

	vunmap((void *)buf->trbe_base);
	kfree(buf);
}
783
/*
 * Stop the TRBE and return the number of bytes of trace collected in
 * the current session, consuming any pending TRBE IRQ locally. Returns
 * 0 when the buffer was truncated, the sink was not in perf mode, or a
 * non-WRAP fault occurred.
 */
static unsigned long arm_trbe_update_buffer(struct coresight_device *csdev,
					    struct perf_output_handle *handle,
					    void *config)
{
	struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
	struct trbe_buf *buf = config;
	enum trbe_fault_action act;
	unsigned long size, status;
	unsigned long flags;
	bool wrap = false;

	WARN_ON(buf->cpudata != cpudata);
	WARN_ON(cpudata->cpu != smp_processor_id());
	WARN_ON(cpudata->drvdata != drvdata);
	if (cpudata->mode != CS_MODE_PERF)
		return 0;

	/*
	 * We are about to disable the TRBE. And this could in turn
	 * fill up the buffer triggering, an IRQ. This could be consumed
	 * by the PE asynchronously, causing a race here against
	 * the IRQ handler in closing out the handle. So, let us
	 * make sure the IRQ can't trigger while we are collecting
	 * the buffer. We also make sure that a WRAP event is handled
	 * accordingly.
	 */
	local_irq_save(flags);

	/*
	 * If the TRBE was disabled due to lack of space in the AUX buffer or a
	 * spurious fault, the driver leaves it disabled, truncating the buffer.
	 * Since the etm_perf driver expects to close out the AUX buffer, the
	 * driver skips it. Thus, just pass in 0 size here to indicate that the
	 * buffer was truncated.
	 */
	if (!is_trbe_enabled()) {
		size = 0;
		goto done;
	}
	/*
	 * perf handle structure needs to be shared with the TRBE IRQ handler for
	 * capturing trace data and restarting the handle. There is a probability
	 * of an undefined reference based crash when etm event is being stopped
	 * while a TRBE IRQ also getting processed. This happens due the release
	 * of perf handle via perf_aux_output_end() in etm_event_stop(). Stopping
	 * the TRBE here will ensure that no IRQ could be generated when the perf
	 * handle gets freed in etm_event_stop().
	 */
	trbe_drain_and_disable_local(cpudata);

	/* Check if there is a pending interrupt and handle it here */
	status = read_sysreg_s(SYS_TRBSR_EL1);
	if (is_trbe_irq(status)) {

		/*
		 * Now that we are handling the IRQ here, clear the IRQ
		 * from the status, to let the irq handler know that it
		 * is taken care of.
		 */
		clr_trbe_irq();
		isb();

		act = trbe_get_fault_act(handle, status);
		/*
		 * If this was not due to a WRAP event, we have some
		 * errors and as such buffer is empty.
		 */
		if (act != TRBE_FAULT_ACT_WRAP) {
			size = 0;
			goto done;
		}

		trbe_report_wrap_event(handle);
		wrap = true;
	}

	size = trbe_get_trace_size(handle, buf, wrap);

done:
	local_irq_restore(flags);

	/* In snapshot mode the driver, not perf core, advances the head */
	if (buf->snapshot)
		handle->head += size;
	return size;
}
870
871
/*
 * Adjust the TRBE programming (base, write and limit pointers in @buf)
 * to account for known errata, before the TRBE is enabled. Returns 0 on
 * success, -EINVAL if the pre-computed pointers violate the alignment or
 * space requirements the workarounds rely on.
 */
static int trbe_apply_work_around_before_enable(struct trbe_buf *buf)
{
	/*
	 * TRBE_WORKAROUND_OVERWRITE_FILL_MODE causes the TRBE to overwrite a few cache
	 * line size from the "TRBBASER_EL1" in the event of a "FILL".
	 * Thus, we could lose some amount of the trace at the base.
	 *
	 * Before Fix:
	 *
	 * normal-BASE     head (normal-TRBPTR)         tail (normal-LIMIT)
	 * |                   \/                       /
	 *  -------------------------------------------------------------
	 * |   Pg0      |   Pg1       |           |          |  PgN     |
	 *  -------------------------------------------------------------
	 *
	 * In the normal course of action, we would set the TRBBASER to the
	 * beginning of the ring-buffer (normal-BASE). But with the erratum,
	 * the TRBE could overwrite the contents at the "normal-BASE", after
	 * hitting the "normal-LIMIT", since it doesn't stop as expected. And
	 * this is wrong. This could result in overwriting trace collected in
	 * one of the previous runs, being consumed by the user. So we must
	 * always make sure that the TRBBASER is within the region
	 * [head, head+size]. Note that TRBBASER must be PAGE aligned,
	 *
	 *      After moving the BASE:
	 *
	 * normal-BASE     head (normal-TRBPTR)         tail (normal-LIMIT)
	 * |                   \/                       /
	 *  -------------------------------------------------------------
	 * |         |          |xyzdef.     |..  tuvw|                  |
	 *  -------------------------------------------------------------
	 *                      /
	 *              New-BASER
	 *
	 * Also, we would set the TRBPTR to head (after adjusting for
	 * alignment) at normal-PTR. This would mean that the last few bytes
	 * of the trace (say, "xyz") might overwrite the first few bytes of
	 * trace written ("abc"). More importantly they will appear in what
	 * userspace sees as the beginning of the trace, which is wrong. We may
	 * not always have space to move the latest trace "xyz" to the correct
	 * order as it must appear beyond the LIMIT. (i.e, [head..head+size]).
	 * Thus it is easier to ignore those bytes than to complicate the
	 * driver to move it, assuming that the erratum was triggered and
	 * doing additional checks to see if there is indeed allowed space at
	 * TRBLIMITR.LIMIT.
	 *
	 *      Thus the full workaround will move the BASE and the PTR and would
	 *      look like (after padding at the skipped bytes at the end of
	 *      session) :
	 *
	 * normal-BASE     head (normal-TRBPTR)         tail (normal-LIMIT)
	 * |                   \/                       /
	 *  -------------------------------------------------------------
	 * |         |          |///abc..     |..  rst|                  |
	 *  -------------------------------------------------------------
	 *                      /    |
	 *              New-BASER    New-TRBPTR
	 *
	 * To summarize, with the work around:
	 *
	 *  - We always align the offset for the next session to PAGE_SIZE
	 *    (This is to ensure we can program the TRBBASER to this offset
	 *    within the region [head...head+size]).
	 *
	 *  - At TRBE enable:
	 *     - Set the TRBBASER to the page aligned offset of the current
	 *       proposed write offset. (which is guaranteed to be aligned
	 *       as above)
	 *     - Move the TRBPTR to skip first 256bytes (that might be
	 *       overwritten with the erratum). This ensures that the trace
	 *       generated in the session is not re-written.
	 *
	 *  - At trace collection:
	 *     - Pad the 256bytes skipped above again with IGNORE packets.
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_OVERWRITE_FILL_MODE)) {
		if (WARN_ON(!IS_ALIGNED(buf->trbe_write, PAGE_SIZE)))
			return -EINVAL;
		buf->trbe_hw_base = buf->trbe_write;
		buf->trbe_write += TRBE_WORKAROUND_OVERWRITE_FILL_MODE_SKIP_BYTES;
	}

	/*
	 * TRBE_WORKAROUND_WRITE_OUT_OF_RANGE could cause the TRBE to write to
	 * the next page after the TRBLIMITR.LIMIT. For perf, the "next page"
	 * may be:
	 *     - The page beyond the ring buffer. This could mean, TRBE could
	 *       corrupt another entity (kernel / user)
	 *     - A portion of the "ring buffer" consumed by the userspace.
	 *       i.e, a page outside [head, head + size].
	 *
	 * We work around this by:
	 *     - Making sure that we have at least an extra space of PAGE left
	 *       in the ring buffer [head, head + size], than we normally do
	 *       without the erratum. See trbe_min_trace_buf_size().
	 *
	 *     - Adjust the TRBLIMITR.LIMIT to leave the extra PAGE outside
	 *       the TRBE's range (i.e [TRBBASER, TRBLIMITR.LIMIT] ).
	 */
	if (trbe_has_erratum(buf->cpudata, TRBE_WORKAROUND_WRITE_OUT_OF_RANGE)) {
		s64 space = buf->trbe_limit - buf->trbe_write;
		/*
		 * We must have more than a PAGE_SIZE worth space in the proposed
		 * range for the TRBE.
		 */
		if (WARN_ON(space <= PAGE_SIZE ||
			    !IS_ALIGNED(buf->trbe_limit, PAGE_SIZE)))
			return -EINVAL;
		buf->trbe_limit -= PAGE_SIZE;
	}

	return 0;
}
985
/*
 * Program the TRBE pointers for the perf session described by @handle,
 * apply erratum workarounds and enable the hardware. On any failure the
 * event is truncated and stopped via trbe_stop_and_truncate_event().
 * Returns 0 on success or a negative error code.
 */
static int __arm_trbe_enable(struct trbe_buf *buf,
			     struct perf_output_handle *handle)
{
	int ret = 0;

	/* TRBE produces unformatted (raw ETE) trace data */
	perf_aux_output_flag(handle, PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
	buf->trbe_limit = compute_trbe_buffer_limit(handle);
	buf->trbe_write = buf->trbe_base + PERF_IDX2OFF(handle->head, buf);
	/* limit == base means no usable space was found for this session */
	if (buf->trbe_limit == buf->trbe_base) {
		ret = -ENOSPC;
		goto err;
	}
	/* Set the base of the TRBE to the buffer base */
	buf->trbe_hw_base = buf->trbe_base;

	/* May adjust trbe_hw_base/trbe_write/trbe_limit for errata */
	ret = trbe_apply_work_around_before_enable(buf);
	if (ret)
		goto err;

	/* Publish the handle for the IRQ handler before enabling the TRBE */
	*this_cpu_ptr(buf->cpudata->drvdata->handle) = handle;
	trbe_enable_hw(buf);
	return 0;
err:
	trbe_stop_and_truncate_event(handle);
	return ret;
}
1012
arm_trbe_enable(struct coresight_device * csdev,enum cs_mode mode,void * data)1013 static int arm_trbe_enable(struct coresight_device *csdev, enum cs_mode mode,
1014 void *data)
1015 {
1016 struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1017 struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
1018 struct perf_output_handle *handle = data;
1019 struct trbe_buf *buf = etm_perf_sink_config(handle);
1020
1021 WARN_ON(cpudata->cpu != smp_processor_id());
1022 WARN_ON(cpudata->drvdata != drvdata);
1023 if (mode != CS_MODE_PERF)
1024 return -EINVAL;
1025
1026 cpudata->buf = buf;
1027 cpudata->mode = mode;
1028 buf->cpudata = cpudata;
1029
1030 return __arm_trbe_enable(buf, handle);
1031 }
1032
arm_trbe_disable(struct coresight_device * csdev)1033 static int arm_trbe_disable(struct coresight_device *csdev)
1034 {
1035 struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1036 struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
1037 struct trbe_buf *buf = cpudata->buf;
1038
1039 WARN_ON(buf->cpudata != cpudata);
1040 WARN_ON(cpudata->cpu != smp_processor_id());
1041 WARN_ON(cpudata->drvdata != drvdata);
1042 if (cpudata->mode != CS_MODE_PERF)
1043 return -EINVAL;
1044
1045 trbe_drain_and_disable_local(cpudata);
1046 buf->cpudata = NULL;
1047 cpudata->buf = NULL;
1048 cpudata->mode = CS_MODE_DISABLED;
1049 return 0;
1050 }
1051
/*
 * Handle a TRBE IRQ that carried no actionable fault: re-enable the
 * TRBE with its current TRBLIMITR programming so the session continues
 * where it left off.
 */
static void trbe_handle_spurious(struct perf_output_handle *handle)
{
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	u64 trblimitr = read_sysreg_s(SYS_TRBLIMITR_EL1);

	/*
	 * If the IRQ was spurious, simply re-enable the TRBE
	 * back without modifying the buffer parameters to
	 * retain the trace collected so far.
	 */
	set_trbe_enabled(buf->cpudata, trblimitr);
}
1064
/*
 * Handle a TRBE buffer wrap (overflow): close out the current perf AUX
 * handle with the collected trace size, then try to begin a fresh AUX
 * transaction and re-enable the TRBE. Returns 0 on successful restart,
 * negative error otherwise (TRBE left disabled with a cleared handle).
 */
static int trbe_handle_overflow(struct perf_output_handle *handle)
{
	struct perf_event *event = handle->event;
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	unsigned long size;
	struct etm_event_data *event_data;

	size = trbe_get_trace_size(handle, buf, true);
	/* In snapshot mode, userspace doesn't move head - do it for them */
	if (buf->snapshot)
		handle->head += size;

	trbe_report_wrap_event(handle);
	perf_aux_output_end(handle, size);
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data) {
		/*
		 * We are unable to restart the trace collection,
		 * thus leave the TRBE disabled. The etm-perf driver
		 * is able to detect this with a disconnected handle
		 * (handle->event = NULL).
		 */
		trbe_drain_and_disable_local(buf->cpudata);
		*this_cpu_ptr(buf->cpudata->drvdata->handle) = NULL;
		return -EINVAL;
	}

	return __arm_trbe_enable(buf, handle);
}
1093
is_perf_trbe(struct perf_output_handle * handle)1094 static bool is_perf_trbe(struct perf_output_handle *handle)
1095 {
1096 struct trbe_buf *buf = etm_perf_sink_config(handle);
1097 struct trbe_cpudata *cpudata = buf->cpudata;
1098 struct trbe_drvdata *drvdata = cpudata->drvdata;
1099 int cpu = smp_processor_id();
1100
1101 WARN_ON(buf->trbe_hw_base != get_trbe_base_pointer());
1102 WARN_ON(buf->trbe_limit != get_trbe_limit_pointer());
1103
1104 if (cpudata->mode != CS_MODE_PERF)
1105 return false;
1106
1107 if (cpudata->cpu != cpu)
1108 return false;
1109
1110 if (!cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1111 return false;
1112
1113 return true;
1114 }
1115
cpu_prohibit_trace(void)1116 static u64 cpu_prohibit_trace(void)
1117 {
1118 u64 trfcr = read_trfcr();
1119
1120 /* Prohibit tracing at EL0 & the kernel EL */
1121 write_trfcr(trfcr & ~(TRFCR_EL1_ExTRE | TRFCR_EL1_E0TRE));
1122 /* Return the original value of the TRFCR */
1123 return trfcr;
1124 }
1125
/*
 * Per-cpu TRBE interrupt handler. Prohibits further tracing, drains and
 * disables the TRBE, then dispatches on the fault cause: restart after a
 * wrap, re-enable on a spurious IRQ, or truncate the event on a fatal
 * error. Tracing (TRFCR) is restored unless the event was truncated.
 */
static irqreturn_t arm_trbe_irq_handler(int irq, void *dev)
{
	struct perf_output_handle **handle_ptr = dev;
	struct perf_output_handle *handle = *handle_ptr;
	struct trbe_buf *buf = etm_perf_sink_config(handle);
	enum trbe_fault_action act;
	u64 status;
	bool truncated = false;
	u64 trfcr;

	/* Reads to TRBSR_EL1 is fine when TRBE is active */
	status = read_sysreg_s(SYS_TRBSR_EL1);
	/*
	 * If the pending IRQ was handled by update_buffer callback
	 * we have nothing to do here.
	 */
	if (!is_trbe_irq(status))
		return IRQ_NONE;

	/* Prohibit the CPU from tracing before we disable the TRBE */
	trfcr = cpu_prohibit_trace();
	/*
	 * Ensure the trace is visible to the CPUs and
	 * any external aborts have been resolved.
	 */
	trbe_drain_and_disable_local(buf->cpudata);
	clr_trbe_irq();
	isb();

	/* The handle may have been released while the IRQ was in flight */
	if (WARN_ON_ONCE(!handle) || !perf_get_aux(handle))
		return IRQ_NONE;

	if (!is_perf_trbe(handle))
		return IRQ_NONE;

	act = trbe_get_fault_act(handle, status);
	switch (act) {
	case TRBE_FAULT_ACT_WRAP:
		truncated = !!trbe_handle_overflow(handle);
		break;
	case TRBE_FAULT_ACT_SPURIOUS:
		trbe_handle_spurious(handle);
		break;
	case TRBE_FAULT_ACT_FATAL:
		trbe_stop_and_truncate_event(handle);
		truncated = true;
		break;
	}

	/*
	 * If the buffer was truncated, ensure perf callbacks
	 * have completed, which will disable the event.
	 *
	 * Otherwise, restore the trace filter controls to
	 * allow the tracing.
	 */
	if (truncated)
		irq_work_run();
	else
		write_trfcr(trfcr);

	return IRQ_HANDLED;
}
1189
/* Sink operations exposed to the coresight core for each per-cpu TRBE */
static const struct coresight_ops_sink arm_trbe_sink_ops = {
	.enable		= arm_trbe_enable,
	.disable	= arm_trbe_disable,
	.alloc_buffer	= arm_trbe_alloc_buffer,
	.free_buffer	= arm_trbe_free_buffer,
	.update_buffer	= arm_trbe_update_buffer,
};

static const struct coresight_ops arm_trbe_cs_ops = {
	.sink_ops	= &arm_trbe_sink_ops,
};
1201
align_show(struct device * dev,struct device_attribute * attr,char * buf)1202 static ssize_t align_show(struct device *dev, struct device_attribute *attr, char *buf)
1203 {
1204 struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
1205
1206 return sprintf(buf, "%llx\n", cpudata->trbe_hw_align);
1207 }
1208 static DEVICE_ATTR_RO(align);
1209
flag_show(struct device * dev,struct device_attribute * attr,char * buf)1210 static ssize_t flag_show(struct device *dev, struct device_attribute *attr, char *buf)
1211 {
1212 struct trbe_cpudata *cpudata = dev_get_drvdata(dev);
1213
1214 return sprintf(buf, "%d\n", cpudata->trbe_flag);
1215 }
1216 static DEVICE_ATTR_RO(flag);
1217
/* sysfs attributes published for every per-cpu TRBE coresight device */
static struct attribute *arm_trbe_attrs[] = {
	&dev_attr_align.attr,
	&dev_attr_flag.attr,
	NULL,
};

static const struct attribute_group arm_trbe_group = {
	.attrs = arm_trbe_attrs,
};

static const struct attribute_group *arm_trbe_groups[] = {
	&arm_trbe_group,
	NULL,
};
1232
/*
 * Bring up the TRBE on the current CPU: reset the local instance first,
 * then enable the per-cpu interrupt. Runs on the target CPU (via
 * smp_call_function_single() or a cpuhp callback).
 */
static void arm_trbe_enable_cpu(void *info)
{
	struct trbe_drvdata *drvdata = info;
	struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);

	trbe_reset_local(cpudata);
	enable_percpu_irq(drvdata->irq, IRQ_TYPE_NONE);
}
1241
/*
 * Quiesce the TRBE on the current CPU: disable the per-cpu interrupt
 * before resetting the local instance. Mirror of arm_trbe_enable_cpu().
 */
static void arm_trbe_disable_cpu(void *info)
{
	struct trbe_drvdata *drvdata = info;
	struct trbe_cpudata *cpudata = this_cpu_ptr(drvdata->cpudata);

	disable_percpu_irq(drvdata->irq);
	trbe_reset_local(cpudata);
}
1250
1251
arm_trbe_register_coresight_cpu(struct trbe_drvdata * drvdata,int cpu)1252 static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
1253 {
1254 struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
1255 struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
1256 struct coresight_desc desc = { 0 };
1257 struct device *dev;
1258
1259 if (WARN_ON(trbe_csdev))
1260 return;
1261
1262 /* If the TRBE was not probed on the CPU, we shouldn't be here */
1263 if (WARN_ON(!cpudata->drvdata))
1264 return;
1265
1266 dev = &cpudata->drvdata->pdev->dev;
1267 desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu);
1268 if (!desc.name)
1269 goto cpu_clear;
1270 /*
1271 * TRBE coresight devices do not need regular connections
1272 * information, as the paths get built between all percpu
1273 * source and their respective percpu sink devices. Though
1274 * coresight_register() expect device connections via the
1275 * platform_data, which TRBE devices do not have. As they
1276 * are not real ACPI devices, coresight_get_platform_data()
1277 * ends up failing. Instead let's allocate a dummy zeroed
1278 * coresight_platform_data structure and assign that back
1279 * into the device for that purpose.
1280 */
1281 desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL);
1282 if (IS_ERR(desc.pdata))
1283 goto cpu_clear;
1284
1285 desc.type = CORESIGHT_DEV_TYPE_SINK;
1286 desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PERCPU_SYSMEM;
1287 desc.ops = &arm_trbe_cs_ops;
1288 desc.groups = arm_trbe_groups;
1289 desc.dev = dev;
1290 trbe_csdev = coresight_register(&desc);
1291 if (IS_ERR(trbe_csdev))
1292 goto cpu_clear;
1293
1294 dev_set_drvdata(&trbe_csdev->dev, cpudata);
1295 coresight_set_percpu_sink(cpu, trbe_csdev);
1296 return;
1297 cpu_clear:
1298 cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
1299 }
1300
/*
 * Must be called with preemption disabled, for trbe_check_errata().
 *
 * Probe the TRBE on the current CPU: verify availability and EL1
 * ownership, read the alignment requirement, run erratum checks and
 * record the results in this CPU's trbe_cpudata. On any failure the
 * CPU is removed from the supported mask.
 */
static void arm_trbe_probe_cpu(void *info)
{
	struct trbe_drvdata *drvdata = info;
	int cpu = smp_processor_id();
	struct trbe_cpudata *cpudata = per_cpu_ptr(drvdata->cpudata, cpu);
	u64 trbidr;

	if (WARN_ON(!cpudata))
		goto cpu_clear;

	if (!is_trbe_available()) {
		pr_err("TRBE is not implemented on cpu %d\n", cpu);
		goto cpu_clear;
	}

	trbidr = read_sysreg_s(SYS_TRBIDR_EL1);
	if (!is_trbe_programmable(trbidr)) {
		pr_err("TRBE is owned in higher exception level on cpu %d\n", cpu);
		goto cpu_clear;
	}

	/* Hardware alignment requirement for TRBPTR, from TRBIDR */
	cpudata->trbe_hw_align = 1ULL << get_trbe_address_align(trbidr);
	if (cpudata->trbe_hw_align > SZ_2K) {
		pr_err("Unsupported alignment on cpu %d\n", cpu);
		goto cpu_clear;
	}

	/*
	 * Run the TRBE erratum checks, now that we know
	 * this instance is about to be registered.
	 */
	trbe_check_errata(cpudata);

	if (trbe_is_broken(cpudata)) {
		pr_err("Disabling TRBE on cpu%d due to erratum\n", cpu);
		goto cpu_clear;
	}

	/*
	 * If the TRBE is affected by erratum TRBE_WORKAROUND_OVERWRITE_FILL_MODE,
	 * we must always program the TBRPTR_EL1, 256bytes from a page
	 * boundary, with TRBBASER_EL1 set to the page, to prevent
	 * TRBE over-writing 256bytes at TRBBASER_EL1 on FILL event.
	 *
	 * Thus make sure we always align our write pointer to a PAGE_SIZE,
	 * which also guarantees that we have at least a PAGE_SIZE space in
	 * the buffer (TRBLIMITR is PAGE aligned) and thus we can skip
	 * the required bytes at the base.
	 */
	if (trbe_may_overwrite_in_fill_mode(cpudata))
		cpudata->trbe_align = PAGE_SIZE;
	else
		cpudata->trbe_align = cpudata->trbe_hw_align;

	cpudata->trbe_flag = get_trbe_flag_update(trbidr);
	cpudata->cpu = cpu;
	/* A non-NULL drvdata marks this CPU as successfully probed */
	cpudata->drvdata = drvdata;
	return;
cpu_clear:
	cpumask_clear_cpu(cpu, &drvdata->supported_cpus);
}
1365
arm_trbe_remove_coresight_cpu(struct trbe_drvdata * drvdata,int cpu)1366 static void arm_trbe_remove_coresight_cpu(struct trbe_drvdata *drvdata, int cpu)
1367 {
1368 struct coresight_device *trbe_csdev = coresight_get_percpu_sink(cpu);
1369
1370 if (trbe_csdev) {
1371 coresight_unregister(trbe_csdev);
1372 coresight_set_percpu_sink(cpu, NULL);
1373 }
1374 }
1375
arm_trbe_probe_coresight(struct trbe_drvdata * drvdata)1376 static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata)
1377 {
1378 int cpu;
1379
1380 drvdata->cpudata = alloc_percpu(typeof(*drvdata->cpudata));
1381 if (!drvdata->cpudata)
1382 return -ENOMEM;
1383
1384 for_each_cpu(cpu, &drvdata->supported_cpus) {
1385 /* If we fail to probe the CPU, let us defer it to hotplug callbacks */
1386 if (smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1))
1387 continue;
1388 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1389 arm_trbe_register_coresight_cpu(drvdata, cpu);
1390 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1391 smp_call_function_single(cpu, arm_trbe_enable_cpu, drvdata, 1);
1392 }
1393 return 0;
1394 }
1395
/*
 * Tear down every per-cpu TRBE: disable the hardware/IRQ on each CPU,
 * unregister the coresight sinks and free the per-cpu state.
 */
static int arm_trbe_remove_coresight(struct trbe_drvdata *drvdata)
{
	int cpu;

	for_each_cpu(cpu, &drvdata->supported_cpus) {
		smp_call_function_single(cpu, arm_trbe_disable_cpu, drvdata, 1);
		arm_trbe_remove_coresight_cpu(drvdata, cpu);
	}
	free_percpu(drvdata->cpudata);
	return 0;
}
1407
/*
 * Probe the TRBE on the current (hotplugged) CPU. Preemption must be
 * disabled around arm_trbe_probe_cpu() for trbe_check_errata().
 */
static void arm_trbe_probe_hotplugged_cpu(struct trbe_drvdata *drvdata)
{
	preempt_disable();
	arm_trbe_probe_cpu(drvdata);
	preempt_enable();
}
1414
arm_trbe_cpu_startup(unsigned int cpu,struct hlist_node * node)1415 static int arm_trbe_cpu_startup(unsigned int cpu, struct hlist_node *node)
1416 {
1417 struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
1418
1419 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) {
1420
1421 /*
1422 * If this CPU was not probed for TRBE,
1423 * initialize it now.
1424 */
1425 if (!coresight_get_percpu_sink(cpu)) {
1426 arm_trbe_probe_hotplugged_cpu(drvdata);
1427 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1428 arm_trbe_register_coresight_cpu(drvdata, cpu);
1429 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1430 arm_trbe_enable_cpu(drvdata);
1431 } else {
1432 arm_trbe_enable_cpu(drvdata);
1433 }
1434 }
1435 return 0;
1436 }
1437
arm_trbe_cpu_teardown(unsigned int cpu,struct hlist_node * node)1438 static int arm_trbe_cpu_teardown(unsigned int cpu, struct hlist_node *node)
1439 {
1440 struct trbe_drvdata *drvdata = hlist_entry_safe(node, struct trbe_drvdata, hotplug_node);
1441
1442 if (cpumask_test_cpu(cpu, &drvdata->supported_cpus))
1443 arm_trbe_disable_cpu(drvdata);
1444 return 0;
1445 }
1446
arm_trbe_probe_cpuhp(struct trbe_drvdata * drvdata)1447 static int arm_trbe_probe_cpuhp(struct trbe_drvdata *drvdata)
1448 {
1449 enum cpuhp_state trbe_online;
1450 int ret;
1451
1452 trbe_online = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DRVNAME,
1453 arm_trbe_cpu_startup, arm_trbe_cpu_teardown);
1454 if (trbe_online < 0)
1455 return trbe_online;
1456
1457 ret = cpuhp_state_add_instance(trbe_online, &drvdata->hotplug_node);
1458 if (ret) {
1459 cpuhp_remove_multi_state(trbe_online);
1460 return ret;
1461 }
1462 drvdata->trbe_online = trbe_online;
1463 return 0;
1464 }
1465
/* Remove our instance from the cpuhp state and free the dynamic slot. */
static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
{
	cpuhp_state_remove_instance(drvdata->trbe_online, &drvdata->hotplug_node);
	cpuhp_remove_multi_state(drvdata->trbe_online);
}
1471
arm_trbe_probe_irq(struct platform_device * pdev,struct trbe_drvdata * drvdata)1472 static int arm_trbe_probe_irq(struct platform_device *pdev,
1473 struct trbe_drvdata *drvdata)
1474 {
1475 int ret;
1476
1477 drvdata->irq = platform_get_irq(pdev, 0);
1478 if (drvdata->irq < 0) {
1479 pr_err("IRQ not found for the platform device\n");
1480 return drvdata->irq;
1481 }
1482
1483 if (!irq_is_percpu(drvdata->irq)) {
1484 pr_err("IRQ is not a PPI\n");
1485 return -EINVAL;
1486 }
1487
1488 if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
1489 return -EINVAL;
1490
1491 drvdata->handle = alloc_percpu(struct perf_output_handle *);
1492 if (!drvdata->handle)
1493 return -ENOMEM;
1494
1495 ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
1496 if (ret) {
1497 free_percpu(drvdata->handle);
1498 return ret;
1499 }
1500 return 0;
1501 }
1502
/* Release the per-cpu IRQ and the per-cpu handle slots it referenced. */
static void arm_trbe_remove_irq(struct trbe_drvdata *drvdata)
{
	free_percpu_irq(drvdata->irq, drvdata->handle);
	free_percpu(drvdata->handle);
}
1508
/*
 * Platform driver probe: set up the IRQ, the per-cpu coresight sinks
 * and the cpuhp callbacks, unwinding each stage (goto chain) on error.
 */
static int arm_trbe_device_probe(struct platform_device *pdev)
{
	struct trbe_drvdata *drvdata;
	struct device *dev = &pdev->dev;
	int ret;

	/* Trace capture is not possible with kernel page table isolation */
	if (arm64_kernel_unmapped_at_el0()) {
		pr_err("TRBE wouldn't work if kernel gets unmapped at EL0\n");
		return -EOPNOTSUPP;
	}

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	dev_set_drvdata(dev, drvdata);
	drvdata->pdev = pdev;
	ret = arm_trbe_probe_irq(pdev, drvdata);
	if (ret)
		return ret;

	ret = arm_trbe_probe_coresight(drvdata);
	if (ret)
		goto probe_failed;

	ret = arm_trbe_probe_cpuhp(drvdata);
	if (ret)
		goto cpuhp_failed;

	return 0;
cpuhp_failed:
	arm_trbe_remove_coresight(drvdata);
probe_failed:
	arm_trbe_remove_irq(drvdata);
	return ret;
}
1546
/*
 * Platform driver remove: unwind probe in reverse order - cpuhp
 * callbacks first, then the coresight sinks, finally the IRQ.
 */
static void arm_trbe_device_remove(struct platform_device *pdev)
{
	struct trbe_drvdata *drvdata = platform_get_drvdata(pdev);

	arm_trbe_remove_cpuhp(drvdata);
	arm_trbe_remove_coresight(drvdata);
	arm_trbe_remove_irq(drvdata);
}
1555
/* Device-tree match table */
static const struct of_device_id arm_trbe_of_match[] = {
	{ .compatible = "arm,trace-buffer-extension"},
	{},
};
MODULE_DEVICE_TABLE(of, arm_trbe_of_match);

#ifdef CONFIG_ACPI
/* ACPI match via the MADT-created platform device name */
static const struct platform_device_id arm_trbe_acpi_match[] = {
	{ ARMV8_TRBE_PDEV_NAME, 0 },
	{ }
};
MODULE_DEVICE_TABLE(platform, arm_trbe_acpi_match);
#endif
1569
/* Platform driver; bind/unbind via sysfs is suppressed on purpose */
static struct platform_driver arm_trbe_driver = {
	.id_table = ACPI_PTR(arm_trbe_acpi_match),
	.driver	= {
		.name = DRVNAME,
		.of_match_table = of_match_ptr(arm_trbe_of_match),
		.suppress_bind_attrs = true,
	},
	.probe	= arm_trbe_device_probe,
	.remove = arm_trbe_device_remove,
};
1580
arm_trbe_init(void)1581 static int __init arm_trbe_init(void)
1582 {
1583 int ret;
1584
1585 ret = platform_driver_register(&arm_trbe_driver);
1586 if (!ret)
1587 return 0;
1588
1589 pr_err("Error registering %s platform driver\n", DRVNAME);
1590 return ret;
1591 }
1592
/* Module exit point: unregister the TRBE platform driver. */
static void __exit arm_trbe_exit(void)
{
	platform_driver_unregister(&arm_trbe_driver);
}
module_init(arm_trbe_init);
module_exit(arm_trbe_exit);

MODULE_AUTHOR("Anshuman Khandual <anshuman.khandual@arm.com>");
MODULE_DESCRIPTION("Arm Trace Buffer Extension (TRBE) driver");
MODULE_LICENSE("GPL v2");
1603