1 // SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Trace Memory Controller driver
 */
6
7 #include <linux/acpi.h>
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/types.h>
11 #include <linux/device.h>
12 #include <linux/idr.h>
13 #include <linux/io.h>
14 #include <linux/iommu.h>
15 #include <linux/err.h>
16 #include <linux/fs.h>
17 #include <linux/miscdevice.h>
18 #include <linux/mutex.h>
19 #include <linux/property.h>
20 #include <linux/uaccess.h>
21 #include <linux/slab.h>
22 #include <linux/dma-mapping.h>
23 #include <linux/spinlock.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/of.h>
26 #include <linux/of_address.h>
27 #include <linux/of_reserved_mem.h>
28 #include <linux/coresight.h>
29 #include <linux/amba/bus.h>
30 #include <linux/platform_device.h>
31
32 #include "coresight-priv.h"
33 #include "coresight-tmc.h"
34
tmc_wait_for_tmcready(struct tmc_drvdata * drvdata)35 int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
36 {
37 struct coresight_device *csdev = drvdata->csdev;
38 struct csdev_access *csa = &csdev->access;
39
40 /* Ensure formatter, unformatter and hardware fifo are empty */
41 if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
42 dev_err(&csdev->dev,
43 "timeout while waiting for TMC to be Ready\n");
44 return -EBUSY;
45 }
46 return 0;
47 }
48
/*
 * Stop trace collection via a manual flush: set StopOnFl so capture
 * halts once the flush completes, trigger the flush, then wait for
 * both the flush and the TMC's Ready indication.
 */
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa = &csdev->access;
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* FlushMan is set in a second write, after StopOnFl is in place */
	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(&csdev->dev,
		"timeout while waiting for completion of Manual Flush\n");
	}

	tmc_wait_for_tmcready(drvdata);
}
68
/* Enable trace capture by setting CaptEn in the control register. */
void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}
73
/* Disable trace capture by clearing the control register. */
void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(0x0, drvdata->base + TMC_CTL);
}
78
/*
 * Return the address-alignment mask implied by the trace memory databus
 * width, for use when adjusting RRP or buffer offsets.
 */
u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
{
	u32 mask = 0;

	/*
	 * When moving RRP or an offset address forward, the new values must
	 * be byte-address aligned to the width of the trace memory databus
	 * _and_ to a frame boundary (16 byte), whichever is the biggest. For
	 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
	 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
	 * be 0s.
	 */
	switch (drvdata->memwidth) {
	case TMC_MEM_INTF_WIDTH_32BITS:
	case TMC_MEM_INTF_WIDTH_64BITS:
	case TMC_MEM_INTF_WIDTH_128BITS:
		mask = GENMASK(31, 4);
		break;
	case TMC_MEM_INTF_WIDTH_256BITS:
		mask = GENMASK(31, 5);
		break;
	}

	return mask;
}
104
/*
 * Decide whether the reserved-memory regions hold usable crashdata from
 * a previous boot: both regions must exist, the metadata version and
 * valid flag must check out, the recorded trace buffer address must
 * match this boot's reserved region, and both CRCs must verify.
 */
static bool is_tmc_crashdata_valid(struct tmc_drvdata *drvdata)
{
	struct tmc_crash_metadata *mdata;

	if (!tmc_has_reserved_buffer(drvdata) ||
	    !tmc_has_crash_mdata_buffer(drvdata))
		return false;

	mdata = drvdata->crash_mdata.vaddr;

	/* Check version match */
	if (mdata->version != CS_CRASHDATA_VERSION)
		return false;

	/* Check for valid metadata */
	if (!mdata->valid) {
		dev_dbg(&drvdata->csdev->dev,
			"Data invalid in tmc crash metadata\n");
		return false;
	}

	/*
	 * Buffer address given by metadata for retrieval of trace data
	 * from previous boot is expected to be same as the reserved
	 * trace buffer memory region provided through DTS
	 */
	if (drvdata->resrv_buf.paddr != mdata->trace_paddr) {
		dev_dbg(&drvdata->csdev->dev,
			"Trace buffer address of previous boot invalid\n");
		return false;
	}

	/* Check data integrity of metadata */
	if (mdata->crc32_mdata != find_crash_metadata_crc(mdata)) {
		dev_err(&drvdata->csdev->dev,
			"CRC mismatch in tmc crash metadata\n");
		return false;
	}
	/* Check data integrity of tracedata */
	if (mdata->crc32_tdata != find_crash_tracedata_crc(drvdata, mdata)) {
		dev_err(&drvdata->csdev->dev,
			"CRC mismatch in tmc crash tracedata\n");
		return false;
	}

	return true;
}
152
/*
 * Return a pointer (via @bufpp) into the reserved trace buffer for the
 * data at logical position @pos, treating the buffer as circular
 * starting at rbuf->offset.  Returns the number of contiguous bytes
 * available (possibly less than @len when the read would cross the end
 * of the buffer), or 0 when @pos is at/after the end of the data.
 */
static inline ssize_t tmc_get_resvbuf_trace(struct tmc_drvdata *drvdata,
					    loff_t pos, size_t len, char **bufpp)
{
	s64 offset;
	ssize_t actual = len;
	struct tmc_resrv_buf *rbuf = &drvdata->resrv_buf;

	if (pos + actual > rbuf->len)
		actual = rbuf->len - pos;
	if (actual <= 0)
		return 0;

	/* Compute the offset from which we read the data */
	offset = rbuf->offset + pos;
	/* Wrap around the physical end of the circular buffer */
	if (offset >= rbuf->size)
		offset -= rbuf->size;

	/* Adjust the length to limit this transaction to end of buffer */
	actual = (actual < (rbuf->size - offset)) ?
		 actual : rbuf->size - offset;

	*bufpp = (char *)rbuf->vaddr + offset;

	return actual;
}
178
/*
 * Derive the readable window (offset/length) of the previous boot's
 * trace buffer from the saved TMC register snapshot in the crash
 * metadata, sanity-check it, and, for a full (wrapped) buffer, stamp a
 * barrier packet at the wrap point and refresh the CRCs.
 * Returns 0 on success or -EINVAL when the metadata is inconsistent.
 */
static int tmc_prepare_crashdata(struct tmc_drvdata *drvdata)
{
	char *bufp;
	ssize_t len;
	u32 status, size;
	u64 rrp, rwp, dba;
	struct tmc_resrv_buf *rbuf;
	struct tmc_crash_metadata *mdata;

	mdata = drvdata->crash_mdata.vaddr;
	rbuf = &drvdata->resrv_buf;

	/* Saved TMC state: read/write pointers, buffer base and status */
	rrp = mdata->tmc_rrp;
	rwp = mdata->tmc_rwp;
	dba = mdata->tmc_dba;
	status = mdata->tmc_sts;
	/* RAM size is recorded in 32-bit words; convert to bytes */
	size = mdata->tmc_ram_size << 2;

	/* Sync the buffer pointers */
	rbuf->offset = rrp - dba;
	/* A full buffer means the whole RAM holds data; otherwise RRP..RWP */
	if (status & TMC_STS_FULL)
		rbuf->len = size;
	else
		rbuf->len = rwp - rrp;

	/* Additional sanity checks for validating metadata */
	if ((rbuf->offset > size) ||
	    (rbuf->len > size)) {
		dev_dbg(&drvdata->csdev->dev,
			"Offset and length invalid in tmc crash metadata\n");
		return -EINVAL;
	}

	if (status & TMC_STS_FULL) {
		/* Mark the wrap point so decoders can resynchronise */
		len = tmc_get_resvbuf_trace(drvdata, 0x0,
					    CORESIGHT_BARRIER_PKT_SIZE, &bufp);
		if (len >= CORESIGHT_BARRIER_PKT_SIZE) {
			coresight_insert_barrier_packet(bufp);
			/* Recalculate crc */
			mdata->crc32_tdata = find_crash_tracedata_crc(drvdata,
								      mdata);
			mdata->crc32_mdata = find_crash_metadata_crc(mdata);
		}
	}

	return 0;
}
226
tmc_read_prepare(struct tmc_drvdata * drvdata)227 static int tmc_read_prepare(struct tmc_drvdata *drvdata)
228 {
229 int ret = 0;
230
231 switch (drvdata->config_type) {
232 case TMC_CONFIG_TYPE_ETB:
233 case TMC_CONFIG_TYPE_ETF:
234 ret = tmc_read_prepare_etb(drvdata);
235 break;
236 case TMC_CONFIG_TYPE_ETR:
237 ret = tmc_read_prepare_etr(drvdata);
238 break;
239 default:
240 ret = -EINVAL;
241 }
242
243 if (!ret)
244 dev_dbg(&drvdata->csdev->dev, "TMC read start\n");
245
246 return ret;
247 }
248
tmc_read_unprepare(struct tmc_drvdata * drvdata)249 static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
250 {
251 int ret = 0;
252
253 switch (drvdata->config_type) {
254 case TMC_CONFIG_TYPE_ETB:
255 case TMC_CONFIG_TYPE_ETF:
256 ret = tmc_read_unprepare_etb(drvdata);
257 break;
258 case TMC_CONFIG_TYPE_ETR:
259 ret = tmc_read_unprepare_etr(drvdata);
260 break;
261 default:
262 ret = -EINVAL;
263 }
264
265 if (!ret)
266 dev_dbg(&drvdata->csdev->dev, "TMC read end\n");
267
268 return ret;
269 }
270
/*
 * open() handler for the trace misc device: put the TMC into reading
 * state before userspace starts pulling data.  The device node is
 * non-seekable.
 */
static int tmc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_prepare(drvdata);
	if (ret)
		return ret;

	nonseekable_open(inode, file);

	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
	return 0;
}
286
tmc_get_sysfs_trace(struct tmc_drvdata * drvdata,loff_t pos,size_t len,char ** bufpp)287 static ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata, loff_t pos, size_t len,
288 char **bufpp)
289 {
290 switch (drvdata->config_type) {
291 case TMC_CONFIG_TYPE_ETB:
292 case TMC_CONFIG_TYPE_ETF:
293 return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
294 case TMC_CONFIG_TYPE_ETR:
295 return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
296 }
297
298 return -EINVAL;
299 }
300
/*
 * read() handler for the trace misc device: fetch a contiguous chunk of
 * trace data and copy it to userspace, advancing *ppos.
 */
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	char *bufp;
	ssize_t actual;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
	/*
	 * NOTE(review): a negative return (e.g. -EINVAL) is folded into 0
	 * (EOF) here rather than propagated — looks intentional, confirm.
	 */
	if (actual <= 0)
		return 0;

	if (copy_to_user(data, bufp, actual)) {
		dev_dbg(&drvdata->csdev->dev,
			"%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += actual;
	dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);

	return actual;
}
323
/*
 * release() handler for the trace misc device: take the TMC out of
 * reading state once userspace closes the node.
 */
static int tmc_release(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_unprepare(drvdata);
	if (ret)
		return ret;

	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
	return 0;
}
337
/* File operations for the per-TMC trace-read misc device */
static const struct file_operations tmc_fops = {
	.owner = THIS_MODULE,
	.open = tmc_open,
	.read = tmc_read,
	.release = tmc_release,
};
344
/*
 * open() handler for the crashdata misc device: mark the reserved
 * buffer as being read, provided the crash metadata is still flagged
 * valid.  Returns -ENOENT when no valid crashdata is present.
 */
static int tmc_crashdata_open(struct inode *inode, struct file *file)
{
	int err = 0;
	unsigned long flags;
	struct tmc_resrv_buf *rbuf;
	struct tmc_crash_metadata *mdata;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata,
						   crashdev);

	mdata = drvdata->crash_mdata.vaddr;
	rbuf = &drvdata->resrv_buf;

	/* Check the valid flag and set 'reading' atomically w.r.t. the lock */
	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	if (mdata->valid)
		rbuf->reading = true;
	else
		err = -ENOENT;
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
	if (err)
		goto exit;

	nonseekable_open(inode, file);
	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
exit:
	return err;
}
372
/*
 * read() handler for the crashdata misc device: copy trace data saved
 * by the previous boot from the reserved buffer to userspace.
 */
static ssize_t tmc_crashdata_read(struct file *file, char __user *data,
				  size_t len, loff_t *ppos)
{
	char *bufp;
	ssize_t actual;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata,
						   crashdev);

	actual = tmc_get_resvbuf_trace(drvdata, *ppos, len, &bufp);
	if (actual <= 0)
		return 0;

	if (copy_to_user(data, bufp, actual)) {
		dev_dbg(&drvdata->csdev->dev,
			"%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += actual;
	dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);

	return actual;
}
397
/*
 * release() handler for the crashdata misc device: clear the 'reading'
 * flag under the device spinlock.
 */
static int tmc_crashdata_release(struct inode *inode, struct file *file)
{
	unsigned long flags;
	struct tmc_resrv_buf *rbuf;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata,
						   crashdev);

	rbuf = &drvdata->resrv_buf;
	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
	rbuf->reading = false;
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
	return 0;
}
414
/* File operations for the previous-boot crashdata misc device */
static const struct file_operations tmc_crashdata_fops = {
	.owner = THIS_MODULE,
	.open = tmc_crashdata_open,
	.read = tmc_crashdata_read,
	.release = tmc_crashdata_release,
};
421
/*
 * Decode the trace memory databus width from DEVID.
 *
 * Excerpt from the TRM:
 *
 * DEVID::MEMWIDTH[10:8]
 * 0x2 Memory interface databus is 32 bits wide.
 * 0x3 Memory interface databus is 64 bits wide.
 * 0x4 Memory interface databus is 128 bits wide.
 * 0x5 Memory interface databus is 256 bits wide.
 *
 * Any other encoding yields 0.
 */
static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
{
	switch (BMVAL(devid, 8, 10)) {
	case 0x2:
		return TMC_MEM_INTF_WIDTH_32BITS;
	case 0x3:
		return TMC_MEM_INTF_WIDTH_64BITS;
	case 0x4:
		return TMC_MEM_INTF_WIDTH_128BITS;
	case 0x5:
		return TMC_MEM_INTF_WIDTH_256BITS;
	default:
		return 0;
	}
}
454
/* Read-only register dumps exposed under the sysfs "mgmt" group */
static struct attribute *coresight_tmc_mgmt_attrs[] = {
	coresight_simple_reg32(rsz, TMC_RSZ),
	coresight_simple_reg32(sts, TMC_STS),
	coresight_simple_reg64(rrp, TMC_RRP, TMC_RRPHI),
	coresight_simple_reg64(rwp, TMC_RWP, TMC_RWPHI),
	coresight_simple_reg32(trg, TMC_TRG),
	coresight_simple_reg32(ctl, TMC_CTL),
	coresight_simple_reg32(ffsr, TMC_FFSR),
	coresight_simple_reg32(ffcr, TMC_FFCR),
	coresight_simple_reg32(mode, TMC_MODE),
	coresight_simple_reg32(pscr, TMC_PSCR),
	coresight_simple_reg32(devid, CORESIGHT_DEVID),
	coresight_simple_reg64(dba, TMC_DBALO, TMC_DBAHI),
	coresight_simple_reg32(axictl, TMC_AXICTL),
	coresight_simple_reg32(authstatus, TMC_AUTHSTATUS),
	NULL,
};
472
trigger_cntr_show(struct device * dev,struct device_attribute * attr,char * buf)473 static ssize_t trigger_cntr_show(struct device *dev,
474 struct device_attribute *attr, char *buf)
475 {
476 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
477 unsigned long val = drvdata->trigger_cntr;
478
479 return sprintf(buf, "%#lx\n", val);
480 }
481
trigger_cntr_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)482 static ssize_t trigger_cntr_store(struct device *dev,
483 struct device_attribute *attr,
484 const char *buf, size_t size)
485 {
486 int ret;
487 unsigned long val;
488 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
489
490 ret = kstrtoul(buf, 16, &val);
491 if (ret)
492 return ret;
493
494 drvdata->trigger_cntr = val;
495 return size;
496 }
497 static DEVICE_ATTR_RW(trigger_cntr);
498
buffer_size_show(struct device * dev,struct device_attribute * attr,char * buf)499 static ssize_t buffer_size_show(struct device *dev,
500 struct device_attribute *attr, char *buf)
501 {
502 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
503
504 return sprintf(buf, "%#x\n", drvdata->size);
505 }
506
buffer_size_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)507 static ssize_t buffer_size_store(struct device *dev,
508 struct device_attribute *attr,
509 const char *buf, size_t size)
510 {
511 int ret;
512 unsigned long val;
513 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
514
515 /* Only permitted for TMC-ETRs */
516 if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
517 return -EPERM;
518
519 ret = kstrtoul(buf, 0, &val);
520 if (ret)
521 return ret;
522 /* The buffer size should be page aligned */
523 if (val & (PAGE_SIZE - 1))
524 return -EINVAL;
525 drvdata->size = val;
526 return size;
527 }
528
529 static DEVICE_ATTR_RW(buffer_size);
530
stop_on_flush_show(struct device * dev,struct device_attribute * attr,char * buf)531 static ssize_t stop_on_flush_show(struct device *dev,
532 struct device_attribute *attr, char *buf)
533 {
534 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
535
536 return sprintf(buf, "%#x\n", drvdata->stop_on_flush);
537 }
538
stop_on_flush_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t size)539 static ssize_t stop_on_flush_store(struct device *dev,
540 struct device_attribute *attr,
541 const char *buf, size_t size)
542 {
543 int ret;
544 u8 val;
545 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
546
547 ret = kstrtou8(buf, 0, &val);
548 if (ret)
549 return ret;
550 if (val)
551 drvdata->stop_on_flush = true;
552 else
553 drvdata->stop_on_flush = false;
554
555 return size;
556 }
557
558 static DEVICE_ATTR_RW(stop_on_flush);
559
560
/* Writable configuration attributes exposed at the device's sysfs root */
static struct attribute *coresight_tmc_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_buffer_size.attr,
	&dev_attr_stop_on_flush.attr,
	NULL,
};

static const struct attribute_group coresight_tmc_group = {
	.attrs = coresight_tmc_attrs,
};

/* Register dumps live in a "mgmt" subdirectory */
static const struct attribute_group coresight_tmc_mgmt_group = {
	.attrs = coresight_tmc_mgmt_attrs,
	.name = "mgmt",
};

/* Groups installed for ETB/ETF configurations */
static const struct attribute_group *coresight_etf_groups[] = {
	&coresight_tmc_group,
	&coresight_tmc_mgmt_group,
	NULL,
};

/* ETR additionally gets the ETR-specific group (defined elsewhere) */
static const struct attribute_group *coresight_etr_groups[] = {
	&coresight_etr_group,
	&coresight_tmc_group,
	&coresight_tmc_mgmt_group,
	NULL,
};
589
/*
 * Determine from firmware (DT or ACPI) whether the ETR may use its
 * scatter-gather mode instead of requiring a physically contiguous
 * buffer.
 */
static bool tmc_etr_can_use_sg(struct device *dev)
{
	int ret;
	u8 val_u8;

	/*
	 * Presence of the property 'arm,scatter-gather' is checked
	 * on the platform for the feature support, rather than its
	 * value.
	 */
	if (is_of_node(dev->fwnode)) {
		return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
	} else if (is_acpi_device_node(dev->fwnode)) {
		/*
		 * TMC_DEVID_NOSCAT test in tmc_etr_setup_caps(), has already ensured
		 * this property is only checked for Coresight SoC 400 TMC configured
		 * as ETR.
		 */
		ret = fwnode_property_read_u8(dev->fwnode, "arm-armhc97c-sg-enable", &val_u8);
		if (!ret)
			return !!val_u8;

		/* Fall back to the deprecated ACPI spelling of the property */
		if (fwnode_property_present(dev->fwnode, "arm,scatter-gather")) {
			pr_warn_once("Deprecated ACPI property - arm,scatter-gather\n");
			return true;
		}
	}
	return false;
}
619
/*
 * Check AUTHSTATUS: the NSID field must read 0x3 for non-secure access
 * to the TMC to be enabled.
 */
static bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
{
	u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);

	return (auth & TMC_AUTH_NSID_MASK) == 0x3;
}
626
627 static const struct amba_id tmc_ids[];
628
of_tmc_get_reserved_resource_by_name(struct device * dev,const char * name,struct resource * res)629 static int of_tmc_get_reserved_resource_by_name(struct device *dev,
630 const char *name,
631 struct resource *res)
632 {
633 int rc = -ENODEV;
634
635 rc = of_reserved_mem_region_to_resource_byname(dev->of_node, name, res);
636 if (rc < 0)
637 return rc;
638
639 if (res->start == 0 || resource_size(res) == 0)
640 rc = -ENODEV;
641
642 return rc;
643 }
644
/*
 * Map the optional "tracedata" and "metadata" reserved-memory regions
 * used for crashdata retrieval.  Both are best-effort: missing regions
 * or mapping failures simply leave the corresponding buffer unset, and
 * metadata is only attempted once tracedata is mapped.
 */
static void tmc_get_reserved_region(struct device *parent)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
	struct resource res;

	if (of_tmc_get_reserved_resource_by_name(parent, "tracedata", &res))
		return;

	drvdata->resrv_buf.vaddr = memremap(res.start,
					    resource_size(&res),
					    MEMREMAP_WC);
	if (IS_ERR_OR_NULL(drvdata->resrv_buf.vaddr)) {
		dev_err(parent, "Reserved trace buffer mapping failed\n");
		return;
	}

	drvdata->resrv_buf.paddr = res.start;
	drvdata->resrv_buf.size = resource_size(&res);

	if (of_tmc_get_reserved_resource_by_name(parent, "metadata", &res))
		return;

	drvdata->crash_mdata.vaddr = memremap(res.start,
					      resource_size(&res),
					      MEMREMAP_WC);
	if (IS_ERR_OR_NULL(drvdata->crash_mdata.vaddr)) {
		dev_err(parent, "Metadata memory mapping failed\n");
		return;
	}

	drvdata->crash_mdata.paddr = res.start;
	drvdata->crash_mdata.size = resource_size(&res);
}
678
679 /* Detect and initialise the capabilities of a TMC ETR */
/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid,
			      struct csdev_access *access)
{
	int rc;
	u32 tmc_pid, dma_mask = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);
	void *dev_caps;

	/* Without non-secure access the ETR registers cannot be driven */
	if (!tmc_etr_has_non_secure_access(drvdata))
		return -EACCES;

	/* Look up per-part capability flags from the AMBA ID table */
	tmc_pid = coresight_get_pid(access);
	dev_caps = coresight_get_uci_data_from_amba(tmc_ids, tmc_pid);

	/* Set the unadvertised capabilities */
	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);

	if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
		tmc_etr_set_cap(drvdata, TMC_ETR_SG);

	/* Check if the AXI address width is available */
	if (devid & TMC_DEVID_AXIAW_VALID)
		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
			    TMC_DEVID_AXIAW_MASK);

	/*
	 * Unless specified in the device configuration, ETR uses a 40-bit
	 * AXI master in place of the embedded SRAM of ETB/ETF.
	 */
	switch (dma_mask) {
	case 32:
	case 40:
	case 44:
	case 48:
	case 52:
		dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
		break;
	default:
		/* Unrecognised or absent width: fall back to 40 bits */
		dma_mask = 40;
	}

	rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
	if (rc)
		dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
	return rc;
}
726
tmc_etr_get_default_buffer_size(struct device * dev)727 static u32 tmc_etr_get_default_buffer_size(struct device *dev)
728 {
729 u32 size;
730
731 if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
732 size = SZ_1M;
733 return size;
734 }
735
tmc_etr_get_max_burst_size(struct device * dev)736 static u32 tmc_etr_get_max_burst_size(struct device *dev)
737 {
738 u32 burst_size;
739
740 if (fwnode_property_read_u32(dev->fwnode, "arm,max-burst-size",
741 &burst_size))
742 return TMC_AXICTL_WR_BURST_16;
743
744 /* Only permissible values are 0 to 15 */
745 if (burst_size > 0xF)
746 burst_size = TMC_AXICTL_WR_BURST_16;
747
748 return burst_size;
749 }
750
register_crash_dev_interface(struct tmc_drvdata * drvdata,const char * name)751 static void register_crash_dev_interface(struct tmc_drvdata *drvdata,
752 const char *name)
753 {
754 drvdata->crashdev.name =
755 devm_kasprintf(&drvdata->csdev->dev, GFP_KERNEL, "%s_%s", "crash", name);
756 drvdata->crashdev.minor = MISC_DYNAMIC_MINOR;
757 drvdata->crashdev.fops = &tmc_crashdata_fops;
758 if (misc_register(&drvdata->crashdev)) {
759 dev_dbg(&drvdata->csdev->dev,
760 "Failed to setup user interface for crashdata\n");
761 drvdata->crashdev.fops = NULL;
762 } else
763 dev_info(&drvdata->csdev->dev,
764 "Valid crash tracedata found\n");
765 }
766
/*
 * Common probe path shared by the AMBA and platform drivers: map the
 * TMC registers, identify the configuration (ETB/ETF/ETR) from DEVID,
 * register the coresight device and the sysfs read misc device, and
 * expose any valid crashdata retained from a previous boot.
 */
static int __tmc_probe(struct device *dev, struct resource *res)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct coresight_desc desc = { 0 };
	const char *dev_list = NULL;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	dev_set_drvdata(dev, drvdata);

	ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
	if (ret)
		return ret;

	/* Default error code; failure paths below overwrite it */
	ret = -ENOMEM;

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out;
	}

	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	raw_spin_lock_init(&drvdata->spinlock);

	/* Configuration type is advertised in DEVID[7:6] */
	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);
	drvdata->memwidth = tmc_get_memwidth(devid);
	/* This device is not associated with a session */
	drvdata->pid = -1;
	drvdata->etr_mode = ETR_MODE_AUTO;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		drvdata->size = tmc_etr_get_default_buffer_size(dev);
		drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
	} else {
		/* ETB/ETF: RSZ counts 32-bit words, convert to bytes */
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
	}

	tmc_get_reserved_region(dev);

	desc.dev = dev;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
		desc.groups = coresight_etf_groups;
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.ops = &tmc_etb_cs_ops;
		dev_list = "tmc_etb";
		break;
	case TMC_CONFIG_TYPE_ETR:
		desc.groups = coresight_etr_groups;
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
		desc.ops = &tmc_etr_cs_ops;
		ret = tmc_etr_setup_caps(dev, devid, &desc.access);
		if (ret)
			goto out;
		/* The IDR maps user-created buffers for the ETR */
		idr_init(&drvdata->idr);
		mutex_init(&drvdata->idr_mutex);
		dev_list = "tmc_etr";
		break;
	case TMC_CONFIG_TYPE_ETF:
		/* ETF can act both as a link (FIFO) and a sink */
		desc.groups = coresight_etf_groups;
		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc.ops = &tmc_etf_cs_ops;
		dev_list = "tmc_etf";
		break;
	default:
		pr_err("%s: Unsupported TMC config\n", desc.name);
		ret = -EINVAL;
		goto out;
	}

	desc.name = coresight_alloc_device_name(dev_list, dev);
	if (!desc.name) {
		ret = -ENOMEM;
		goto out;
	}

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto out;
	}
	dev->platform_data = pdata;
	desc.pdata = pdata;

	coresight_clear_self_claim_tag(&desc.access);
	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto out;
	}

	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		goto out;
	}

out:
	/*
	 * Crashdata from a previous boot is exposed even when this probe
	 * fails, as long as the reserved regions hold valid data.
	 */
	if (is_tmc_crashdata_valid(drvdata) &&
	    !tmc_prepare_crashdata(drvdata))
		register_crash_dev_interface(drvdata, desc.name);
	return ret;
}
889
/* AMBA bus probe entry point; wraps the common __tmc_probe(). */
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;

	ret = __tmc_probe(&adev->dev, &adev->res);
	/* Drop the runtime-PM reference held across a successful probe */
	if (!ret)
		pm_runtime_put(&adev->dev);

	return ret;
}
900
/*
 * System shutdown handler: stop ETR DMA if the device is still active
 * so no writes target memory while the system goes down.
 */
static void tmc_shutdown(struct amba_device *adev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);

	raw_spin_lock_irqsave(&drvdata->spinlock, flags);

	if (coresight_get_mode(drvdata->csdev) == CS_MODE_DISABLED)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		tmc_etr_disable_hw(drvdata);

	/*
	 * We do not care about coresight unregister here unlike remove
	 * callback which is required for making coresight modular since
	 * the system is going down after this.
	 */
out:
	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
922
/*
 * Common remove path: tear down the misc device(s) and unregister the
 * coresight device.
 */
static void __tmc_remove(struct device *dev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev);

	/*
	 * Since misc_open() holds a refcount on the f_ops, which is
	 * etb fops in this case, device is there until last file
	 * handler to this device is closed.
	 */
	misc_deregister(&drvdata->miscdev);
	/* The crashdata device only exists when its fops were installed */
	if (drvdata->crashdev.fops)
		misc_deregister(&drvdata->crashdev);
	coresight_unregister(drvdata->csdev);
}
937
/* AMBA bus remove entry point; wraps the common __tmc_remove(). */
static void tmc_remove(struct amba_device *adev)
{
	__tmc_remove(&adev->dev);
}
942
/* AMBA peripheral IDs of supported TMC implementations */
static const struct amba_id tmc_ids[] = {
	CS_AMBA_ID(0x000bb961),
	/* Coresight SoC 600 TMC-ETR/ETS */
	CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
	/* Coresight SoC 600 TMC-ETB */
	CS_AMBA_ID(0x000bb9e9),
	/* Coresight SoC 600 TMC-ETF */
	CS_AMBA_ID(0x000bb9ea),
	{ 0, 0, NULL },
};
953
954 MODULE_DEVICE_TABLE(amba, tmc_ids);
955
/* AMBA driver for TMCs described via the AMBA bus */
static struct amba_driver tmc_driver = {
	.drv = {
		.name   = "coresight-tmc",
		.suppress_bind_attrs = true,
	},
	.probe		= tmc_probe,
	.shutdown	= tmc_shutdown,
	.remove		= tmc_remove,
	.id_table	= tmc_ids,
};
966
/*
 * Platform-device probe entry point (ACPI/DT platform devices): bring
 * runtime PM up around the common probe, disabling it again on failure.
 */
static int tmc_platform_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	int ret = 0;

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = __tmc_probe(&pdev->dev, res);
	pm_runtime_put(&pdev->dev);
	if (ret)
		pm_runtime_disable(&pdev->dev);

	return ret;
}
983
/* Platform-device remove entry point: common teardown plus PM disable. */
static void tmc_platform_remove(struct platform_device *pdev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(&pdev->dev);

	/* drvdata is set early in probe; a NULL here indicates a bug */
	if (WARN_ON(!drvdata))
		return;

	__tmc_remove(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
994
995 #ifdef CONFIG_PM
/*
 * Runtime suspend: gate the trace (atclk) and APB (pclk) clocks.
 * Either clock may be absent; the clk API treats a NULL clk as a no-op.
 */
static int tmc_runtime_suspend(struct device *dev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev);

	clk_disable_unprepare(drvdata->atclk);
	clk_disable_unprepare(drvdata->pclk);

	return 0;
}
1005
/*
 * Runtime resume: re-enable pclk then atclk, rolling back pclk if the
 * trace clock fails to come up.
 */
static int tmc_runtime_resume(struct device *dev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(drvdata->pclk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(drvdata->atclk);
	if (ret)
		clk_disable_unprepare(drvdata->pclk);

	return ret;
}
1021 #endif
1022
/* Runtime-PM callbacks (only used by the platform driver binding) */
static const struct dev_pm_ops tmc_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tmc_runtime_suspend, tmc_runtime_resume, NULL)
};
1026
1027 #ifdef CONFIG_ACPI
/* ACPI IDs matched by the platform driver */
static const struct acpi_device_id tmc_acpi_ids[] = {
	{"ARMHC501", 0, 0, 0}, /* ARM CoreSight ETR */
	{"ARMHC97C", 0, 0, 0}, /* ARM CoreSight SoC-400 TMC, SoC-600 ETF/ETB */
	{},
};
MODULE_DEVICE_TABLE(acpi, tmc_acpi_ids);
1034 #endif
1035
/* Platform driver for TMCs described via ACPI (or generic platform) */
static struct platform_driver tmc_platform_driver = {
	.probe	= tmc_platform_probe,
	.remove	= tmc_platform_remove,
	.driver	= {
		.name			= "coresight-tmc-platform",
		.acpi_match_table	= ACPI_PTR(tmc_acpi_ids),
		.suppress_bind_attrs	= true,
		.pm			= &tmc_dev_pm_ops,
	},
};
1046
/* Register both the AMBA and the platform flavours of the driver. */
static int __init tmc_init(void)
{
	return coresight_init_driver("tmc", &tmc_driver, &tmc_platform_driver, THIS_MODULE);
}
1051
/* Unregister both driver flavours on module exit. */
static void __exit tmc_exit(void)
{
	coresight_remove_driver(&tmc_driver, &tmc_platform_driver);
}
1056 module_init(tmc_init);
1057 module_exit(tmc_exit);
1058
1059 MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
1060 MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver");
1061 MODULE_LICENSE("GPL v2");
1062