// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Trace Memory Controller driver
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>

#include "coresight-priv.h"
#include "coresight-tmc.h"

DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");

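/*
 * tmc_wait_for_tmcready - poll TMC_STS until the TMCReady bit is set,
 * i.e. the formatter, unformatter and hardware FIFO have drained.
 * Returns 0 on success, -EBUSY if the controller never becomes ready.
 */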
int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
        struct coresight_device *csdev = drvdata->csdev;
        struct csdev_access *csa = &csdev->access;

        /* Ensure formatter, unformatter and hardware fifo are empty */
        if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
                dev_err(&csdev->dev,
                        "timeout while waiting for TMC to be Ready\n");
                return -EBUSY;
        }
        return 0;
}

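/*
 * tmc_flush_and_stop - stop trace capture gracefully: enable StopOnFlush,
 * request a manual flush, wait for the flush to complete and then wait
 * for the TMC to report ready.
 */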
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
        struct coresight_device *csdev = drvdata->csdev;
        struct csdev_access *csa = &csdev->access;
        u32 ffcr;

        ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
        ffcr |= TMC_FFCR_STOP_ON_FLUSH;
        writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
        ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
        writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
        /* Ensure flush completes */
        if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
                dev_err(&csdev->dev,
                        "timeout while waiting for completion of Manual Flush\n");
        }

        tmc_wait_for_tmcready(drvdata);
}

void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
        writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
        writel_relaxed(0x0, drvdata->base + TMC_CTL);
}

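/*
 * tmc_get_memwidth_mask - return the mask of RRP/offset bits that must be
 * preserved so that pointer updates stay aligned to both the trace memory
 * databus width and the 16-byte frame boundary.
 */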
u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
{
        u32 mask = 0;

        /*
         * When moving RRP or an offset address forward, the new values must
         * be byte-address aligned to the width of the trace memory databus
         * _and_ to a frame boundary (16 byte), whichever is larger. For
         * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
         * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
         * be 0s.
         */
        switch (drvdata->memwidth) {
        case TMC_MEM_INTF_WIDTH_32BITS:
        case TMC_MEM_INTF_WIDTH_64BITS:
        case TMC_MEM_INTF_WIDTH_128BITS:
                mask = GENMASK(31, 4);
                break;
        case TMC_MEM_INTF_WIDTH_256BITS:
                mask = GENMASK(31, 5);
                break;
        }

        return mask;
}

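/*
 * is_tmc_crashdata_valid - check whether the reserved buffer holds usable
 * crash trace data from a previous boot: the metadata version, valid flag,
 * trace buffer address and both CRCs must all check out.
 */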
static bool is_tmc_crashdata_valid(struct tmc_drvdata *drvdata)
{
        struct tmc_crash_metadata *mdata;

        if (!tmc_has_reserved_buffer(drvdata) ||
            !tmc_has_crash_mdata_buffer(drvdata))
                return false;

        mdata = drvdata->crash_mdata.vaddr;

        /* Check version match */
        if (mdata->version != CS_CRASHDATA_VERSION)
                return false;

        /* Check for valid metadata */
        if (!mdata->valid) {
                dev_dbg(&drvdata->csdev->dev,
                        "Data invalid in tmc crash metadata\n");
                return false;
        }

        /*
         * The buffer address given by the metadata for retrieval of trace
         * data from the previous boot is expected to be the same as the
         * reserved trace buffer memory region provided through the DTS.
         */
        if (drvdata->resrv_buf.paddr != mdata->trace_paddr) {
                dev_dbg(&drvdata->csdev->dev,
                        "Trace buffer address of previous boot invalid\n");
                return false;
        }

        /* Check data integrity of metadata */
        if (mdata->crc32_mdata != find_crash_metadata_crc(mdata)) {
                dev_err(&drvdata->csdev->dev,
                        "CRC mismatch in tmc crash metadata\n");
                return false;
        }
        /* Check data integrity of tracedata */
        if (mdata->crc32_tdata != find_crash_tracedata_crc(drvdata, mdata)) {
                dev_err(&drvdata->csdev->dev,
                        "CRC mismatch in tmc crash tracedata\n");
                return false;
        }

        return true;
}

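/*
 * tmc_get_resvbuf_trace - return a pointer into the reserved (crash) trace
 * buffer for the data at @pos, limiting the length so a single call never
 * crosses the end of the circular buffer. Returns the number of bytes
 * available at *bufpp, or 0 when @pos is past the end of the data.
 */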
static inline ssize_t tmc_get_resvbuf_trace(struct tmc_drvdata *drvdata,
                                            loff_t pos, size_t len, char **bufpp)
{
        s64 offset;
        ssize_t actual = len;
        struct tmc_resrv_buf *rbuf = &drvdata->resrv_buf;

        if (pos + actual > rbuf->len)
                actual = rbuf->len - pos;
        if (actual <= 0)
                return 0;

        /* Compute the offset from which we read the data */
        offset = rbuf->offset + pos;
        if (offset >= rbuf->size)
                offset -= rbuf->size;

        /* Adjust the length to limit this transaction to end of buffer */
        actual = (actual < (rbuf->size - offset)) ?
                 actual : rbuf->size - offset;

        *bufpp = (char *)rbuf->vaddr + offset;

        return actual;
}

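/*
 * tmc_prepare_crashdata - derive the readable window of the previous boot's
 * trace buffer from the saved RRP/RWP/DBA registers, sanity check it against
 * the buffer size and, if the buffer had wrapped, insert a barrier packet at
 * the wrap point and refresh the CRCs.
 */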
static int tmc_prepare_crashdata(struct tmc_drvdata *drvdata)
{
        char *bufp;
        ssize_t len;
        u32 status, size;
        u64 rrp, rwp, dba;
        struct tmc_resrv_buf *rbuf;
        struct tmc_crash_metadata *mdata;

        mdata = drvdata->crash_mdata.vaddr;
        rbuf = &drvdata->resrv_buf;

        rrp = mdata->tmc_rrp;
        rwp = mdata->tmc_rwp;
        dba = mdata->tmc_dba;
        status = mdata->tmc_sts;
        size = mdata->tmc_ram_size << 2;

        /* Sync the buffer pointers */
        rbuf->offset = rrp - dba;
        if (status & TMC_STS_FULL)
                rbuf->len = size;
        else
                rbuf->len = rwp - rrp;

        /* Additional sanity checks for validating metadata */
        if ((rbuf->offset > size) ||
            (rbuf->len > size)) {
                dev_dbg(&drvdata->csdev->dev,
                        "Offset and length invalid in tmc crash metadata\n");
                return -EINVAL;
        }

        if (status & TMC_STS_FULL) {
                len = tmc_get_resvbuf_trace(drvdata, 0x0,
                                            CORESIGHT_BARRIER_PKT_SIZE, &bufp);
                if (len >= CORESIGHT_BARRIER_PKT_SIZE) {
                        coresight_insert_barrier_packet(bufp);
                        /* Recalculate crc */
                        mdata->crc32_tdata = find_crash_tracedata_crc(drvdata,
                                                                      mdata);
                        mdata->crc32_mdata = find_crash_metadata_crc(mdata);
                }
        }

        return 0;
}

static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
        int ret = 0;

        switch (drvdata->config_type) {
        case TMC_CONFIG_TYPE_ETB:
        case TMC_CONFIG_TYPE_ETF:
                ret = tmc_read_prepare_etb(drvdata);
                break;
        case TMC_CONFIG_TYPE_ETR:
                ret = tmc_read_prepare_etr(drvdata);
                break;
        default:
                ret = -EINVAL;
        }

        if (!ret)
                dev_dbg(&drvdata->csdev->dev, "TMC read start\n");

        return ret;
}

static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
        int ret = 0;

        switch (drvdata->config_type) {
        case TMC_CONFIG_TYPE_ETB:
        case TMC_CONFIG_TYPE_ETF:
                ret = tmc_read_unprepare_etb(drvdata);
                break;
        case TMC_CONFIG_TYPE_ETR:
                ret = tmc_read_unprepare_etr(drvdata);
                break;
        default:
                ret = -EINVAL;
        }

        if (!ret)
                dev_dbg(&drvdata->csdev->dev, "TMC read end\n");

        return ret;
}

static int tmc_open(struct inode *inode, struct file *file)
{
        int ret;
        struct tmc_drvdata *drvdata = container_of(file->private_data,
                                                   struct tmc_drvdata, miscdev);

        ret = tmc_read_prepare(drvdata);
        if (ret)
                return ret;

        nonseekable_open(inode, file);

        dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
        return 0;
}

static ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata, loff_t pos, size_t len,
                                   char **bufpp)
{
        switch (drvdata->config_type) {
        case TMC_CONFIG_TYPE_ETB:
        case TMC_CONFIG_TYPE_ETF:
                return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
        case TMC_CONFIG_TYPE_ETR:
                return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
        }

        return -EINVAL;
}

static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
                        loff_t *ppos)
{
        char *bufp;
        ssize_t actual;
        struct tmc_drvdata *drvdata = container_of(file->private_data,
                                                   struct tmc_drvdata, miscdev);

        actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
        if (actual <= 0)
                return 0;

        if (copy_to_user(data, bufp, actual)) {
                dev_dbg(&drvdata->csdev->dev,
                        "%s: copy_to_user failed\n", __func__);
                return -EFAULT;
        }

        *ppos += actual;
        dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);

        return actual;
}

static int tmc_release(struct inode *inode, struct file *file)
{
        int ret;
        struct tmc_drvdata *drvdata = container_of(file->private_data,
                                                   struct tmc_drvdata, miscdev);

        ret = tmc_read_unprepare(drvdata);
        if (ret)
                return ret;

        dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
        return 0;
}

static const struct file_operations tmc_fops = {
        .owner          = THIS_MODULE,
        .open           = tmc_open,
        .read           = tmc_read,
        .release        = tmc_release,
};

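/*
 * tmc_crashdata_open - open the crash data device. Only succeeds when the
 * crash metadata is marked valid; otherwise -ENOENT is returned. While the
 * file is open the reserved buffer is flagged as being read.
 */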
static int tmc_crashdata_open(struct inode *inode, struct file *file)
{
        int err = 0;
        unsigned long flags;
        struct tmc_resrv_buf *rbuf;
        struct tmc_crash_metadata *mdata;
        struct tmc_drvdata *drvdata = container_of(file->private_data,
                                                   struct tmc_drvdata,
                                                   crashdev);

        mdata = drvdata->crash_mdata.vaddr;
        rbuf = &drvdata->resrv_buf;

        raw_spin_lock_irqsave(&drvdata->spinlock, flags);
        if (mdata->valid)
                rbuf->reading = true;
        else
                err = -ENOENT;
        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
        if (err)
                goto exit;

        nonseekable_open(inode, file);
        dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
exit:
        return err;
}

static ssize_t tmc_crashdata_read(struct file *file, char __user *data,
                                  size_t len, loff_t *ppos)
{
        char *bufp;
        ssize_t actual;
        struct tmc_drvdata *drvdata = container_of(file->private_data,
                                                   struct tmc_drvdata,
                                                   crashdev);

        actual = tmc_get_resvbuf_trace(drvdata, *ppos, len, &bufp);
        if (actual <= 0)
                return 0;

        if (copy_to_user(data, bufp, actual)) {
                dev_dbg(&drvdata->csdev->dev,
                        "%s: copy_to_user failed\n", __func__);
                return -EFAULT;
        }

        *ppos += actual;
        dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);

        return actual;
}

static int tmc_crashdata_release(struct inode *inode, struct file *file)
{
        int ret = 0;
        unsigned long flags;
        struct tmc_resrv_buf *rbuf;
        struct tmc_drvdata *drvdata = container_of(file->private_data,
                                                   struct tmc_drvdata,
                                                   crashdev);

        rbuf = &drvdata->resrv_buf;
        raw_spin_lock_irqsave(&drvdata->spinlock, flags);
        rbuf->reading = false;
        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
        return ret;
}

static const struct file_operations tmc_crashdata_fops = {
        .owner          = THIS_MODULE,
        .open           = tmc_crashdata_open,
        .read           = tmc_crashdata_read,
        .release        = tmc_crashdata_release,
};

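/*
 * tmc_get_memwidth - decode DEVID::MEMWIDTH[10:8] into the memory interface
 * width enumeration; returns 0 for reserved/unknown encodings.
 */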
static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
{
        enum tmc_mem_intf_width memwidth;

        /*
         * Excerpt from the TRM:
         *
         * DEVID::MEMWIDTH[10:8]
         * 0x2 Memory interface databus is 32 bits wide.
         * 0x3 Memory interface databus is 64 bits wide.
         * 0x4 Memory interface databus is 128 bits wide.
         * 0x5 Memory interface databus is 256 bits wide.
         */
        switch (BMVAL(devid, 8, 10)) {
        case 0x2:
                memwidth = TMC_MEM_INTF_WIDTH_32BITS;
                break;
        case 0x3:
                memwidth = TMC_MEM_INTF_WIDTH_64BITS;
                break;
        case 0x4:
                memwidth = TMC_MEM_INTF_WIDTH_128BITS;
                break;
        case 0x5:
                memwidth = TMC_MEM_INTF_WIDTH_256BITS;
                break;
        default:
                memwidth = 0;
        }

        return memwidth;
}

static struct attribute *coresight_tmc_mgmt_attrs[] = {
        coresight_simple_reg32(rsz, TMC_RSZ),
        coresight_simple_reg32(sts, TMC_STS),
        coresight_simple_reg64(rrp, TMC_RRP, TMC_RRPHI),
        coresight_simple_reg64(rwp, TMC_RWP, TMC_RWPHI),
        coresight_simple_reg32(trg, TMC_TRG),
        coresight_simple_reg32(ctl, TMC_CTL),
        coresight_simple_reg32(ffsr, TMC_FFSR),
        coresight_simple_reg32(ffcr, TMC_FFCR),
        coresight_simple_reg32(mode, TMC_MODE),
        coresight_simple_reg32(pscr, TMC_PSCR),
        coresight_simple_reg32(devid, CORESIGHT_DEVID),
        coresight_simple_reg64(dba, TMC_DBALO, TMC_DBAHI),
        coresight_simple_reg32(axictl, TMC_AXICTL),
        coresight_simple_reg32(authstatus, TMC_AUTHSTATUS),
        NULL,
};

static ssize_t trigger_cntr_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
        unsigned long val = drvdata->trigger_cntr;

        return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtoul(buf, 16, &val);
        if (ret)
                return ret;

        drvdata->trigger_cntr = val;
        return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

static ssize_t buffer_size_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

        return sprintf(buf, "%#x\n", drvdata->size);
}

static ssize_t buffer_size_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t size)
{
        int ret;
        unsigned long val;
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

        /* Only permitted for TMC-ETRs */
        if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
                return -EPERM;

        ret = kstrtoul(buf, 0, &val);
        if (ret)
                return ret;
        /* The buffer size should be page aligned */
        if (val & (PAGE_SIZE - 1))
                return -EINVAL;
        drvdata->size = val;
        return size;
}

static DEVICE_ATTR_RW(buffer_size);

static ssize_t stop_on_flush_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

        return sprintf(buf, "%#x\n", drvdata->stop_on_flush);
}

static ssize_t stop_on_flush_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t size)
{
        int ret;
        u8 val;
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

        ret = kstrtou8(buf, 0, &val);
        if (ret)
                return ret;
        if (val)
                drvdata->stop_on_flush = true;
        else
                drvdata->stop_on_flush = false;

        return size;
}

static DEVICE_ATTR_RW(stop_on_flush);

static struct attribute *coresight_tmc_attrs[] = {
        &dev_attr_trigger_cntr.attr,
        &dev_attr_buffer_size.attr,
        &dev_attr_stop_on_flush.attr,
        NULL,
};

static const struct attribute_group coresight_tmc_group = {
        .attrs = coresight_tmc_attrs,
};

static const struct attribute_group coresight_tmc_mgmt_group = {
        .attrs = coresight_tmc_mgmt_attrs,
        .name = "mgmt",
};

static const struct attribute_group *coresight_etf_groups[] = {
        &coresight_tmc_group,
        &coresight_tmc_mgmt_group,
        NULL,
};

static const struct attribute_group *coresight_etr_groups[] = {
        &coresight_etr_group,
        &coresight_tmc_group,
        &coresight_tmc_mgmt_group,
        NULL,
};

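/*
 * tmc_etr_can_use_sg - check the firmware description for ETR scatter-gather
 * support: the presence of "arm,scatter-gather" on DT platforms, or the
 * "arm-armhc97c-sg-enable" property (falling back to the deprecated
 * "arm,scatter-gather") on ACPI platforms.
 */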
static bool tmc_etr_can_use_sg(struct device *dev)
{
        int ret;
        u8 val_u8;

        /*
         * Presence of the property 'arm,scatter-gather' is checked
         * on the platform for the feature support, rather than its
         * value.
         */
        if (is_of_node(dev->fwnode)) {
                return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
        } else if (is_acpi_device_node(dev->fwnode)) {
                /*
                 * The TMC_DEVID_NOSCAT test in tmc_etr_setup_caps() has
                 * already ensured this property is only checked for a
                 * Coresight SoC 400 TMC configured as ETR.
                 */
                ret = fwnode_property_read_u8(dev->fwnode, "arm-armhc97c-sg-enable", &val_u8);
                if (!ret)
                        return !!val_u8;

                if (fwnode_property_present(dev->fwnode, "arm,scatter-gather")) {
                        pr_warn_once("Deprecated ACPI property - arm,scatter-gather\n");
                        return true;
                }
        }
        return false;
}

static bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
{
        u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);

        return (auth & TMC_AUTH_NSID_MASK) == 0x3;
}

static const struct amba_id tmc_ids[];

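/*
 * of_tmc_get_reserved_resource_by_name - look up a named entry in the DT
 * "memory-region" list and return its address range in @res. Returns 0 on
 * success, -ENODEV if the node is absent or the region is empty.
 */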
static int of_tmc_get_reserved_resource_by_name(struct device *dev,
                                                const char *name,
                                                struct resource *res)
{
        int index, rc = -ENODEV;
        struct device_node *node;

        if (!is_of_node(dev->fwnode))
                return -ENODEV;

        index = of_property_match_string(dev->of_node, "memory-region-names",
                                         name);
        if (index < 0)
                return rc;

        node = of_parse_phandle(dev->of_node, "memory-region", index);
        if (!node)
                return rc;

        if (!of_address_to_resource(node, 0, res) &&
            res->start != 0 && resource_size(res) != 0)
                rc = 0;
        of_node_put(node);

        return rc;
}

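/*
 * tmc_get_reserved_region - map the optional "tracedata" and "metadata"
 * reserved memory regions described in the DT. Failure is not fatal; the
 * crash data interface is simply not available without them.
 */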
static void tmc_get_reserved_region(struct device *parent)
{
        struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
        struct resource res;

        if (of_tmc_get_reserved_resource_by_name(parent, "tracedata", &res))
                return;

        drvdata->resrv_buf.vaddr = memremap(res.start,
                                            resource_size(&res),
                                            MEMREMAP_WC);
        if (IS_ERR_OR_NULL(drvdata->resrv_buf.vaddr)) {
                dev_err(parent, "Reserved trace buffer mapping failed\n");
                return;
        }

        drvdata->resrv_buf.paddr = res.start;
        drvdata->resrv_buf.size = resource_size(&res);

        if (of_tmc_get_reserved_resource_by_name(parent, "metadata", &res))
                return;

        drvdata->crash_mdata.vaddr = memremap(res.start,
                                              resource_size(&res),
                                              MEMREMAP_WC);
        if (IS_ERR_OR_NULL(drvdata->crash_mdata.vaddr)) {
                dev_err(parent, "Metadata memory mapping failed\n");
                return;
        }

        drvdata->crash_mdata.paddr = res.start;
        drvdata->crash_mdata.size = resource_size(&res);
}

/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid,
                              struct csdev_access *access)
{
        int rc;
        u32 tmc_pid, dma_mask = 0;
        struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
        void *dev_caps;

        if (!tmc_etr_has_non_secure_access(drvdata))
                return -EACCES;

        tmc_pid = coresight_get_pid(access);
        dev_caps = coresight_get_uci_data_from_amba(tmc_ids, tmc_pid);

        /* Set the unadvertised capabilities */
        tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);

        if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
                tmc_etr_set_cap(drvdata, TMC_ETR_SG);

        /* Check if the AXI address width is available */
        if (devid & TMC_DEVID_AXIAW_VALID)
                dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
                            TMC_DEVID_AXIAW_MASK);

        /*
         * Unless specified in the device configuration, ETR uses a 40-bit
         * AXI master in place of the embedded SRAM of ETB/ETF.
         */
        switch (dma_mask) {
        case 32:
        case 40:
        case 44:
        case 48:
        case 52:
                dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
                break;
        default:
                dma_mask = 40;
        }

        rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
        if (rc)
                dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
        return rc;
}

static u32 tmc_etr_get_default_buffer_size(struct device *dev)
{
        u32 size;

        if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
                size = SZ_1M;
        return size;
}

static u32 tmc_etr_get_max_burst_size(struct device *dev)
{
        u32 burst_size;

        if (fwnode_property_read_u32(dev->fwnode, "arm,max-burst-size",
                                     &burst_size))
                return TMC_AXICTL_WR_BURST_16;

        /* Only permissible values are 0 to 15 */
        if (burst_size > 0xF)
                burst_size = TMC_AXICTL_WR_BURST_16;

        return burst_size;
}

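/*
 * register_crash_dev_interface - expose the previous boot's trace data to
 * userspace through a "crash_<name>" misc device. On registration failure
 * the fops pointer is cleared so __tmc_remove() knows not to deregister it.
 */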
static void register_crash_dev_interface(struct tmc_drvdata *drvdata,
                                         const char *name)
{
        drvdata->crashdev.name =
                devm_kasprintf(&drvdata->csdev->dev, GFP_KERNEL, "%s_%s", "crash", name);
        drvdata->crashdev.minor = MISC_DYNAMIC_MINOR;
        drvdata->crashdev.fops = &tmc_crashdata_fops;
        if (misc_register(&drvdata->crashdev)) {
                dev_dbg(&drvdata->csdev->dev,
                        "Failed to setup user interface for crashdata\n");
                drvdata->crashdev.fops = NULL;
        } else
                dev_info(&drvdata->csdev->dev,
                         "Valid crash tracedata found\n");
}

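/*
 * __tmc_probe - common probe path for the AMBA and platform devices: map the
 * registers, identify the TMC configuration (ETB/ETF/ETR) from DEVID, set up
 * the matching coresight descriptor and register the coresight and misc
 * devices.
 */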
static int __tmc_probe(struct device *dev, struct resource *res)
{
        int ret = 0;
        u32 devid;
        void __iomem *base;
        struct coresight_platform_data *pdata = NULL;
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
        struct coresight_desc desc = { 0 };
        struct coresight_dev_list *dev_list = NULL;

        ret = -ENOMEM;

        /* Validity for the resource is already checked by the AMBA core */
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base)) {
                ret = PTR_ERR(base);
                goto out;
        }

        drvdata->base = base;
        desc.access = CSDEV_ACCESS_IOMEM(base);

        raw_spin_lock_init(&drvdata->spinlock);

        devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
        drvdata->config_type = BMVAL(devid, 6, 7);
        drvdata->memwidth = tmc_get_memwidth(devid);
        /* This device is not associated with a session */
        drvdata->pid = -1;
        drvdata->etr_mode = ETR_MODE_AUTO;

        if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
                drvdata->size = tmc_etr_get_default_buffer_size(dev);
                drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
        } else {
                drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
        }

        tmc_get_reserved_region(dev);

        desc.dev = dev;

        switch (drvdata->config_type) {
        case TMC_CONFIG_TYPE_ETB:
                desc.groups = coresight_etf_groups;
                desc.type = CORESIGHT_DEV_TYPE_SINK;
                desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
                desc.ops = &tmc_etb_cs_ops;
                dev_list = &etb_devs;
                break;
        case TMC_CONFIG_TYPE_ETR:
                desc.groups = coresight_etr_groups;
                desc.type = CORESIGHT_DEV_TYPE_SINK;
                desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
                desc.ops = &tmc_etr_cs_ops;
                ret = tmc_etr_setup_caps(dev, devid, &desc.access);
                if (ret)
                        goto out;
                idr_init(&drvdata->idr);
                mutex_init(&drvdata->idr_mutex);
                dev_list = &etr_devs;
                break;
        case TMC_CONFIG_TYPE_ETF:
                desc.groups = coresight_etf_groups;
                desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
                desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
                desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
                desc.ops = &tmc_etf_cs_ops;
                dev_list = &etf_devs;
                break;
        default:
                pr_err("%s: Unsupported TMC config\n", desc.name);
                ret = -EINVAL;
                goto out;
        }

        desc.name = coresight_alloc_device_name(dev_list, dev);
        if (!desc.name) {
                ret = -ENOMEM;
                goto out;
        }

        pdata = coresight_get_platform_data(dev);
        if (IS_ERR(pdata)) {
                ret = PTR_ERR(pdata);
                goto out;
        }
        dev->platform_data = pdata;
        desc.pdata = pdata;

        coresight_clear_self_claim_tag(&desc.access);
        drvdata->csdev = coresight_register(&desc);
        if (IS_ERR(drvdata->csdev)) {
                ret = PTR_ERR(drvdata->csdev);
                goto out;
        }

        drvdata->miscdev.name = desc.name;
        drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
        drvdata->miscdev.fops = &tmc_fops;
        ret = misc_register(&drvdata->miscdev);
        if (ret) {
                coresight_unregister(drvdata->csdev);
                goto out;
        }

out:
        if (is_tmc_crashdata_valid(drvdata) &&
            !tmc_prepare_crashdata(drvdata))
                register_crash_dev_interface(drvdata, desc.name);
        return ret;
}

static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
        struct tmc_drvdata *drvdata;
        int ret;

        drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        amba_set_drvdata(adev, drvdata);
        ret = __tmc_probe(&adev->dev, &adev->res);
        if (!ret)
                pm_runtime_put(&adev->dev);

        return ret;
}

static void tmc_shutdown(struct amba_device *adev)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = amba_get_drvdata(adev);

        raw_spin_lock_irqsave(&drvdata->spinlock, flags);

        if (coresight_get_mode(drvdata->csdev) == CS_MODE_DISABLED)
                goto out;

        if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
                tmc_etr_disable_hw(drvdata);

        /*
         * Unlike the remove callback, which must unregister the coresight
         * device to keep the driver modular, we skip that here since the
         * system is going down after this.
         */
out:
        raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
}

static void __tmc_remove(struct device *dev)
{
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev);

        /*
         * Since misc_open() holds a refcount on the f_ops, which is the
         * etb fops in this case, the device stays around until the last
         * file handle to it is closed.
         */
        misc_deregister(&drvdata->miscdev);
        if (drvdata->crashdev.fops)
                misc_deregister(&drvdata->crashdev);
        coresight_unregister(drvdata->csdev);
}

static void tmc_remove(struct amba_device *adev)
{
        __tmc_remove(&adev->dev);
}

static const struct amba_id tmc_ids[] = {
        CS_AMBA_ID(0x000bb961),
        /* Coresight SoC 600 TMC-ETR/ETS */
        CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
        /* Coresight SoC 600 TMC-ETB */
        CS_AMBA_ID(0x000bb9e9),
        /* Coresight SoC 600 TMC-ETF */
        CS_AMBA_ID(0x000bb9ea),
        { 0, 0, NULL },
};

MODULE_DEVICE_TABLE(amba, tmc_ids);

static struct amba_driver tmc_driver = {
        .drv = {
                .name   = "coresight-tmc",
                .suppress_bind_attrs = true,
        },
        .probe          = tmc_probe,
        .shutdown       = tmc_shutdown,
        .remove         = tmc_remove,
        .id_table       = tmc_ids,
};

static int tmc_platform_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct tmc_drvdata *drvdata;
        int ret = 0;

        drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
                return -ENOMEM;

        drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
        if (IS_ERR(drvdata->pclk))
                return -ENODEV;

        dev_set_drvdata(&pdev->dev, drvdata);
        pm_runtime_get_noresume(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

        ret = __tmc_probe(&pdev->dev, res);
        pm_runtime_put(&pdev->dev);
        if (ret)
                pm_runtime_disable(&pdev->dev);

        return ret;
}

static void tmc_platform_remove(struct platform_device *pdev)
{
        struct tmc_drvdata *drvdata = dev_get_drvdata(&pdev->dev);

        if (WARN_ON(!drvdata))
                return;

        __tmc_remove(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        if (!IS_ERR_OR_NULL(drvdata->pclk))
                clk_put(drvdata->pclk);
}

#ifdef CONFIG_PM
static int tmc_runtime_suspend(struct device *dev)
{
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
                clk_disable_unprepare(drvdata->pclk);
        return 0;
}

static int tmc_runtime_resume(struct device *dev)
{
        struct tmc_drvdata *drvdata = dev_get_drvdata(dev);

        if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
                clk_prepare_enable(drvdata->pclk);
        return 0;
}
#endif

static const struct dev_pm_ops tmc_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(tmc_runtime_suspend, tmc_runtime_resume, NULL)
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id tmc_acpi_ids[] = {
        {"ARMHC501", 0, 0, 0}, /* ARM CoreSight ETR */
        {"ARMHC97C", 0, 0, 0}, /* ARM CoreSight SoC-400 TMC, SoC-600 ETF/ETB */
        {},
};
MODULE_DEVICE_TABLE(acpi, tmc_acpi_ids);
#endif

static struct platform_driver tmc_platform_driver = {
        .probe  = tmc_platform_probe,
        .remove = tmc_platform_remove,
        .driver = {
                .name                   = "coresight-tmc-platform",
                .acpi_match_table       = ACPI_PTR(tmc_acpi_ids),
                .suppress_bind_attrs    = true,
                .pm                     = &tmc_dev_pm_ops,
        },
};

static int __init tmc_init(void)
{
        return coresight_init_driver("tmc", &tmc_driver, &tmc_platform_driver, THIS_MODULE);
}

static void __exit tmc_exit(void)
{
        coresight_remove_driver(&tmc_driver, &tmc_platform_driver);
}
module_init(tmc_init);
module_exit(tmc_exit);

MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver");
MODULE_LICENSE("GPL v2");