// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle);

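/*
 * Program the TMC for circular-buffer (ETB) operation: wait for the
 * unit to become ready, configure formatting, flush and trigger
 * behaviour in FFCR along with the trigger counter, then start capture.
 */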
static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;
	u32 ffcr;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

	ffcr = TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | TMC_FFCR_FON_FLIN |
	       TMC_FFCR_FON_TRIG_EVT | TMC_FFCR_TRIGON_TRIGIN;
	if (drvdata->stop_on_flush)
		ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

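/* Claim the device before programming it, backing out on failure. */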
static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etb_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}

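/*
 * Drain the TMC SRAM through the RRD register into drvdata->buf, one
 * 32-bit word at a time, until the TMC signals empty by returning
 * 0xFFFFFFFF. If the buffer wrapped (and trace was lost), a barrier
 * packet is inserted at the start of the buffer.
 */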
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		read_data = readl_relaxed(drvdata->base + TMC_RRD);
		if (read_data == 0xFFFFFFFF)
			break;
		memcpy(bufp, &read_data, 4);
		bufp += 4;
		drvdata->len += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
}

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->csdev);
}

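/*
 * Program the TMC for hardware-FIFO (link) operation: formatting and
 * trigger insertion on, buffer watermark at zero, then start capture.
 */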
static int __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC is not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etf_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}

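/* Flush and stop the FIFO, then release our claim on the device. */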
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
}

/*
 * Return the amount of trace data available in the buffer from @pos,
 * up to a maximum of @len, updating @bufpp to point at it.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;

	/* Adjust the length to the size available at @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;
	if (actual > 0)
		*bufpp = drvdata->buf + pos;
	return actual;
}

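/*
 * sysFS enable path. The backing buffer is allocated with the spinlock
 * dropped, so a concurrent writer may install a buffer in the meantime;
 * the 'used' flag tracks whether our allocation was consumed so that an
 * unused buffer can be freed outside the lock.
 */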
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate the memory while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
		csdev->refcnt++;
		goto out;
	}

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read. If so, simply zero out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etb() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		coresight_set_mode(csdev, CS_MODE_SYSFS);
		csdev->refcnt++;
	} else {
		/* Free up the buffer if we failed to enable */
		used = false;
	}
out:
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

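/*
 * perf enable path. A sink can only service one perf session at a time:
 * enabling fails with -EBUSY if the device is driven from sysFS or is
 * already owned by another pid, while re-enabling for the owning pid
 * only takes an extra reference.
 */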
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;
		/*
		 * No need to continue if the ETB/ETF is already operated
		 * from sysFS.
		 */
		if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = buf->pid;

		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session.
		 */
		if (drvdata->pid == pid) {
			csdev->refcnt++;
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			coresight_set_mode(csdev, CS_MODE_PERF);
			csdev->refcnt++;
		}
	} while (0);
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev,
			       enum cs_mode mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev, data);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	csdev->refcnt--;
	if (csdev->refcnt) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	coresight_set_mode(csdev, CS_MODE_DISABLED);

	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}

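/*
 * Link (ETF in hardware-FIFO mode) enable/disable. The first enable
 * programs the HW and the last disable shuts it down; calls in between
 * only adjust the reference count under the spinlock.
 */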
static int tmc_enable_etf_link(struct coresight_device *csdev,
			       struct coresight_connection *in,
			       struct coresight_connection *out)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool first_enable = false;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (csdev->refcnt == 0) {
		ret = tmc_etf_enable_hw(drvdata);
		if (!ret) {
			coresight_set_mode(csdev, CS_MODE_SYSFS);
			first_enable = true;
		}
	}
	if (!ret)
		csdev->refcnt++;
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (first_enable)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 struct coresight_connection *in,
				 struct coresight_connection *out)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool last_disable = false;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	csdev->refcnt--;
	if (csdev->refcnt == 0) {
		tmc_etf_disable_hw(drvdata);
		coresight_set_mode(csdev, CS_MODE_DISABLED);
		last_disable = true;
	}
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (last_disable)
		dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

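/*
 * Allocate the cs_buffers structure used to hand trace data to perf,
 * on the node of the CPU the event runs on (or any node for per-task
 * events), and record the AUX pages tmc_update_etf_buffer() copies to.
 */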
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

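/*
 * Derive the current write position in the perf AUX ring buffer from
 * handle->head: the page to start writing to and the offset within it.
 */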
static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & (((unsigned long)buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

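/*
 * Stop the TMC and drain its SRAM into the perf AUX pages. Returns the
 * number of bytes copied so the caller can advance the AUX head.
 */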
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_event *event = handle->event;

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(coresight_get_mode(csdev) != CS_MODE_PERF))
		return 0;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (csdev->refcnt != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred. If so, adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so, advance the RRP so that we
	 * get the latest trace data. In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	/* Copy the trace data out, one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of
	 * bytes that were written. User space will figure out how many bytes
	 * to get from the AUX buffer based on the position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	/*
	 * CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
	 * data before the aux_head is updated via perf_aux_output_end(), which
	 * is expected by the perf ring buffer.
	 */
	CS_LOCK(drvdata->base);

	/*
	 * If the event is still active, this update was triggered by an AUX
	 * pause. Re-enable the sink so that it is ready when AUX resume is
	 * invoked.
	 */
	if (!event->hw.state)
		__tmc_etb_enable_hw(drvdata);

out:
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

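/*
 * Panic handler: snapshot the TMC registers into the crash metadata
 * region and drain the internal SRAM into the reserved trace buffer so
 * the data can be recovered after a reboot.
 */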
static int tmc_panic_sync_etf(struct coresight_device *csdev)
{
	u32 val;
	struct tmc_crash_metadata *mdata;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr;

	/* Make sure we have valid reserved memory */
	if (!tmc_has_reserved_buffer(drvdata) ||
	    !tmc_has_crash_mdata_buffer(drvdata))
		return 0;

	tmc_crashdata_set_invalid(drvdata);

	CS_UNLOCK(drvdata->base);

	/* Proceed only if the ETF is enabled and configured as a sink */
	val = readl(drvdata->base + TMC_CTL);
	if (!(val & TMC_CTL_CAPT_EN))
		goto out;
	val = readl(drvdata->base + TMC_MODE);
	if (val != TMC_MODE_CIRCULAR_BUFFER)
		goto out;

	val = readl(drvdata->base + TMC_FFSR);
	/* Do a manual flush and stop only if it hasn't auto-stopped */
	if (!(val & TMC_FFSR_FT_STOPPED)) {
		dev_dbg(&csdev->dev,
			"%s: Triggering manual flush\n", __func__);
		tmc_flush_and_stop(drvdata);
	} else {
		tmc_wait_for_tmcready(drvdata);
	}

	/* Sync registers from hardware to metadata region */
	mdata->tmc_sts = readl(drvdata->base + TMC_STS);
	mdata->tmc_mode = readl(drvdata->base + TMC_MODE);
	mdata->tmc_ffcr = readl(drvdata->base + TMC_FFCR);
	mdata->tmc_ffsr = readl(drvdata->base + TMC_FFSR);

	/* Sync internal SRAM to the reserved trace buffer region */
	drvdata->buf = drvdata->resrv_buf.vaddr;
	tmc_etb_dump_hw(drvdata);
	/* Store as per the RSZ register convention */
	mdata->tmc_ram_size = drvdata->len >> 2;

	/* Other fields for processing trace buffer reads */
	mdata->tmc_rrp = 0;
	mdata->tmc_dba = 0;
	mdata->tmc_rwp = drvdata->len;
	mdata->trace_paddr = drvdata->resrv_buf.paddr;

	mdata->version = CS_CRASHDATA_VERSION;

	/*
	 * Make sure all previous writes are ordered before we mark the
	 * metadata valid.
	 */
	dmb(sy);
	mdata->valid = true;
	/*
	 * The order below must be maintained, since the metadata CRC
	 * depends on the trace data CRC being computed first.
	 */
	mdata->crc32_tdata = find_crash_tracedata_crc(drvdata, mdata);
	mdata->crc32_mdata = find_crash_metadata_crc(mdata);

	tmc_disable_hw(drvdata);

	dev_dbg(&csdev->dev, "%s: success\n", __func__);
out:
	CS_LOCK(drvdata->base);
	return 0;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable = tmc_enable_etf_sink,
	.disable = tmc_disable_etf_sink,
	.alloc_buffer = tmc_alloc_etf_buffer,
	.free_buffer = tmc_free_etf_buffer,
	.update_buffer = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable = tmc_enable_etf_link,
	.disable = tmc_disable_etf_link,
};

static const struct coresight_ops_panic tmc_etf_sync_ops = {
	.sync = tmc_panic_sync_etf,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
	.link_ops = &tmc_etf_link_ops,
	.panic_ops = &tmc_etf_sync_ops,
};

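/*
 * Prepare the device for reading through the character device: in sysFS
 * mode stop the capture (which drains the SRAM into drvdata->buf) and
 * mark the device as busy reading.
 */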
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			ret = -EINVAL;
			goto out;
		}
		__tmc_etb_disable_hw(drvdata);
	}

	drvdata->reading = true;
out:
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

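/*
 * Counterpart of tmc_read_prepare_etb(): restart the capture if a sysFS
 * session is still active, otherwise release the trace buffer.
 */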
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return -EINVAL;
		}
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such, zero out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf can't be
		 * NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		/*
		 * Ignore failures to enable the TMC to make sure we don't
		 * leave the TMC in a "reading" state.
		 */
		__tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock. There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}