// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle);

static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;
	u32 ffcr;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);

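	/*
	 * Configure the Formatter and Flush Control Register: enable
	 * formatting and trigger insertion, flush on FLUSHIN and on
	 * trigger events, and raise the trigger on TRIGIN.
	 */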
	ffcr = TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI | TMC_FFCR_FON_FLIN |
		TMC_FFCR_FON_TRIG_EVT | TMC_FFCR_TRIGON_TRIGIN;
	if (drvdata->stop_on_flush)
		ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etb_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}

static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
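	/*
	 * Drain the trace data through the RRD register, one 32-bit word
	 * at a time; the TMC returns 0xFFFFFFFF once the buffer is empty.
	 */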
	while (1) {
		read_data = readl_relaxed(drvdata->base + TMC_RRD);
		if (read_data == 0xFFFFFFFF)
			break;
		memcpy(bufp, &read_data, 4);
		bufp += 4;
		drvdata->len += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
}

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->csdev);
}

static int __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC is not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etf_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
}

/*
 * Return the trace data available in the buffer from @pos, limited
 * to a maximum of @len bytes, and update @bufpp to point at it.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;

	/* Adjust the length to the data available at @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;
	if (actual > 0)
		*bufpp = drvdata->buf + pos;
	return actual;
}

static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink.  Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
		csdev->refcnt++;
		goto out;
	}

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read.  If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etb() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}
	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		coresight_set_mode(csdev, CS_MODE_SYSFS);
		csdev->refcnt++;
	} else {
		/* Free up the buffer if we failed to enable */
		used = false;
	}
out:
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

static int tmc_enable_etf_sink_perf(struct coresight_device *csdev,
				    struct coresight_path *path)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = path->handle;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;
		/*
		 * No need to continue if the ETB/ETF is already operated
		 * from sysFS.
		 */
		if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = buf->pid;

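		/* Refuse if the sink is already owned by another process. */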
		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session.
		 */
		if (drvdata->pid == pid) {
			csdev->refcnt++;
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			coresight_set_mode(csdev, CS_MODE_PERF);
			csdev->refcnt++;
		}
	} while (0);
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev,
			       enum cs_mode mode,
			       struct coresight_path *path)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev, path);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	csdev->refcnt--;
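	/* Other sessions still use this sink; don't touch the HW yet. */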
	if (csdev->refcnt) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	coresight_set_mode(csdev, CS_MODE_DISABLED);

	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
			       struct coresight_connection *in,
			       struct coresight_connection *out)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool first_enable = false;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

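	/* Only the first enabler configures the HW; later ones just take a reference. */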
	if (csdev->refcnt == 0) {
		ret = tmc_etf_enable_hw(drvdata);
		if (!ret) {
			coresight_set_mode(csdev, CS_MODE_SYSFS);
			first_enable = true;
		}
	}
	if (!ret)
		csdev->refcnt++;
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (first_enable)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 struct coresight_connection *in,
				 struct coresight_connection *out)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool last_disable = false;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	csdev->refcnt--;
	if (csdev->refcnt == 0) {
		tmc_etf_disable_hw(drvdata);
		coresight_set_mode(csdev, CS_MODE_DISABLED);
		last_disable = true;
	}
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (last_disable)
		dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

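	/*
	 * Allocate on the NUMA node of the traced CPU when the event is
	 * bound to a CPU.
	 */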
	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & (((unsigned long)buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_event *event = handle->event;

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(coresight_get_mode(csdev) != CS_MODE_PERF))
		return 0;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (csdev->refcnt != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.  In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

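	/*
	 * If the trace wrapped, the first words read out are replaced with
	 * a barrier packet below so that decoders can resynchronise.
	 */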
	/* Read the trace data out, one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of
	 * bytes that were written.  User space will figure out how many
	 * bytes to get from the AUX buffer based on the position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	/*
	 * CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
	 * data before the aux_head is updated via perf_aux_output_end(), which
	 * is expected by the perf ring buffer.
	 */
	CS_LOCK(drvdata->base);

	/*
	 * If the event is still active, this update was triggered by an AUX
	 * pause.  Re-enable the sink so that it is ready when AUX resume is
	 * invoked.
	 */
	if (!event->hw.state)
		__tmc_etb_enable_hw(drvdata);

out:
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

static int tmc_panic_sync_etf(struct coresight_device *csdev)
{
	u32 val;
	struct tmc_crash_metadata *mdata;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	mdata = (struct tmc_crash_metadata *)drvdata->crash_mdata.vaddr;

	/* Make sure we have valid reserved memory */
	if (!tmc_has_reserved_buffer(drvdata) ||
	    !tmc_has_crash_mdata_buffer(drvdata))
		return 0;

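	/*
	 * Invalidate the previous snapshot; it is marked valid again below,
	 * once fresh register state and trace data have been captured.
	 */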
	tmc_crashdata_set_invalid(drvdata);

	CS_UNLOCK(drvdata->base);

	/* Proceed only if the ETF is enabled and configured as a sink */
	val = readl(drvdata->base + TMC_CTL);
	if (!(val & TMC_CTL_CAPT_EN))
		goto out;
	val = readl(drvdata->base + TMC_MODE);
	if (val != TMC_MODE_CIRCULAR_BUFFER)
		goto out;

	val = readl(drvdata->base + TMC_FFSR);
	/* Do a manual flush and stop only if it hasn't auto-stopped */
	if (!(val & TMC_FFSR_FT_STOPPED)) {
		dev_dbg(&csdev->dev,
			"%s: Triggering manual flush\n", __func__);
		tmc_flush_and_stop(drvdata);
	} else {
		tmc_wait_for_tmcready(drvdata);
	}

	/* Sync registers from hardware to metadata region */
	mdata->tmc_sts = readl(drvdata->base + TMC_STS);
	mdata->tmc_mode = readl(drvdata->base + TMC_MODE);
	mdata->tmc_ffcr = readl(drvdata->base + TMC_FFCR);
	mdata->tmc_ffsr = readl(drvdata->base + TMC_FFSR);

	/* Sync internal SRAM to the reserved trace buffer region */
	drvdata->buf = drvdata->resrv_buf.vaddr;
	tmc_etb_dump_hw(drvdata);
	/* Store as per the RSZ register convention, i.e. in 32-bit words */
	mdata->tmc_ram_size = drvdata->len >> 2;

	/* Other fields for processing trace buffer reads */
	mdata->tmc_rrp = 0;
	mdata->tmc_dba = 0;
	mdata->tmc_rwp = drvdata->len;
	mdata->trace_paddr = drvdata->resrv_buf.paddr;

	mdata->version = CS_CRASHDATA_VERSION;

	/*
	 * Make sure all previous writes are ordered
	 * before we mark the metadata valid.
	 */
	dmb(sy);
	mdata->valid = true;
	/*
	 * The order below must be maintained: the metadata CRC depends
	 * on the trace data CRC being computed first.
	 */
	mdata->crc32_tdata = find_crash_tracedata_crc(drvdata, mdata);
	mdata->crc32_mdata = find_crash_metadata_crc(mdata);

	tmc_disable_hw(drvdata);

	dev_dbg(&csdev->dev, "%s: success\n", __func__);
out:
	CS_LOCK(drvdata->base);
	return 0;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

static const struct coresight_ops_panic tmc_etf_sync_ops = {
	.sync		= tmc_panic_sync_etf,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
	.panic_ops	= &tmc_etf_sync_ops,
};

int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			ret = -EINVAL;
			goto out;
		}
		__tmc_etb_disable_hw(drvdata);
	}

	drvdata->reading = true;
out:
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;

	/* Config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return -EINVAL;
		}
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		/*
		 * Ignore failures to enable the TMC to make sure we don't
		 * leave the TMC in a "reading" state.
		 */
		__tmc_etb_enable_hw(drvdata);
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock.  There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}