xref: /linux/drivers/hwtracing/coresight/coresight-tmc-core.c (revision e6a901a00822659181c93c86d8bbc2a17779fddc)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Description: CoreSight Trace Memory Controller driver
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>

#include "coresight-priv.h"
#include "coresight-tmc.h"

DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");

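/*
 * Poll the status register until the TMC reports ready, i.e. its formatter,
 * unformatter and hardware FIFO have drained. Returns 0 on success or
 * -EBUSY if the timeout expires.
 */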
int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa = &csdev->access;

	/* Ensure formatter, unformatter and hardware fifo are empty */
	if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
		dev_err(&csdev->dev,
			"timeout while waiting for TMC to be Ready\n");
		return -EBUSY;
	}
	return 0;
}

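/*
 * Ask the TMC to stop on flush, trigger a manual flush and then wait for
 * both the flush and the subsequent drain to complete.
 */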
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;
	struct csdev_access *csa = &csdev->access;
	u32 ffcr;

	ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
	ffcr |= TMC_FFCR_STOP_ON_FLUSH;
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
	writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
	/* Ensure flush completes */
	if (coresight_timeout(csa, TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
		dev_err(&csdev->dev,
		"timeout while waiting for completion of Manual Flush\n");
	}

	tmc_wait_for_tmcready(drvdata);
}

void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
}

void tmc_disable_hw(struct tmc_drvdata *drvdata)
{
	writel_relaxed(0x0, drvdata->base + TMC_CTL);
}

u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata)
{
	u32 mask = 0;

	/*
	 * When moving RRP or an offset address forward, the new values must
	 * be byte-address aligned to the width of the trace memory databus
	 * _and_ to a frame boundary (16 byte), whichever is the biggest. For
	 * example, for 32-bit, 64-bit and 128-bit wide trace memory, the four
	 * LSBs must be 0s. For 256-bit wide trace memory, the five LSBs must
	 * be 0s.
	 */
	switch (drvdata->memwidth) {
	case TMC_MEM_INTF_WIDTH_32BITS:
	case TMC_MEM_INTF_WIDTH_64BITS:
	case TMC_MEM_INTF_WIDTH_128BITS:
		mask = GENMASK(31, 4);
		break;
	case TMC_MEM_INTF_WIDTH_256BITS:
		mask = GENMASK(31, 5);
		break;
	}

	return mask;
}

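/*
 * Prepare the TMC for reading out captured trace data, dispatching to the
 * ETB/ETF or ETR specific handler based on the configuration type.
 */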
static int tmc_read_prepare(struct tmc_drvdata *drvdata)
{
	int ret = 0;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		ret = tmc_read_prepare_etb(drvdata);
		break;
	case TMC_CONFIG_TYPE_ETR:
		ret = tmc_read_prepare_etr(drvdata);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		dev_dbg(&drvdata->csdev->dev, "TMC read start\n");

	return ret;
}

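/*
 * Undo what tmc_read_prepare() did once the reader is done, again based on
 * the configuration type.
 */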
static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
{
	int ret = 0;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		ret = tmc_read_unprepare_etb(drvdata);
		break;
	case TMC_CONFIG_TYPE_ETR:
		ret = tmc_read_unprepare_etr(drvdata);
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		dev_dbg(&drvdata->csdev->dev, "TMC read end\n");

	return ret;
}

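/* open() handler for the misc device: get the TMC ready for reading. */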
static int tmc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_prepare(drvdata);
	if (ret)
		return ret;

	nonseekable_open(inode, file);

	dev_dbg(&drvdata->csdev->dev, "%s: successfully opened\n", __func__);
	return 0;
}

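/*
 * Return, via @bufpp, a pointer to the trace data available at offset @pos
 * and how many bytes (up to @len) can be read from there.
 */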
static inline ssize_t tmc_get_sysfs_trace(struct tmc_drvdata *drvdata,
					  loff_t pos, size_t len, char **bufpp)
{
	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
	case TMC_CONFIG_TYPE_ETF:
		return tmc_etb_get_sysfs_trace(drvdata, pos, len, bufpp);
	case TMC_CONFIG_TYPE_ETR:
		return tmc_etr_get_sysfs_trace(drvdata, pos, len, bufpp);
	}

	return -EINVAL;
}

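/* read() handler for the misc device: copy trace data out to userspace. */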
static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	char *bufp;
	ssize_t actual;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);
	actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
	if (actual <= 0)
		return 0;

	if (copy_to_user(data, bufp, actual)) {
		dev_dbg(&drvdata->csdev->dev,
			"%s: copy_to_user failed\n", __func__);
		return -EFAULT;
	}

	*ppos += actual;
	dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);

	return actual;
}

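/* release() handler for the misc device: undo the read preparation. */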
static int tmc_release(struct inode *inode, struct file *file)
{
	int ret;
	struct tmc_drvdata *drvdata = container_of(file->private_data,
						   struct tmc_drvdata, miscdev);

	ret = tmc_read_unprepare(drvdata);
	if (ret)
		return ret;

	dev_dbg(&drvdata->csdev->dev, "%s: released\n", __func__);
	return 0;
}

static const struct file_operations tmc_fops = {
	.owner		= THIS_MODULE,
	.open		= tmc_open,
	.read		= tmc_read,
	.release	= tmc_release,
	.llseek		= no_llseek,
};

static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
{
	enum tmc_mem_intf_width memwidth;

	/*
	 * Excerpt from the TRM:
	 *
	 * DEVID::MEMWIDTH[10:8]
	 * 0x2 Memory interface databus is 32 bits wide.
	 * 0x3 Memory interface databus is 64 bits wide.
	 * 0x4 Memory interface databus is 128 bits wide.
	 * 0x5 Memory interface databus is 256 bits wide.
	 */
	switch (BMVAL(devid, 8, 10)) {
	case 0x2:
		memwidth = TMC_MEM_INTF_WIDTH_32BITS;
		break;
	case 0x3:
		memwidth = TMC_MEM_INTF_WIDTH_64BITS;
		break;
	case 0x4:
		memwidth = TMC_MEM_INTF_WIDTH_128BITS;
		break;
	case 0x5:
		memwidth = TMC_MEM_INTF_WIDTH_256BITS;
		break;
	default:
		memwidth = 0;
	}

	return memwidth;
}

static struct attribute *coresight_tmc_mgmt_attrs[] = {
	coresight_simple_reg32(rsz, TMC_RSZ),
	coresight_simple_reg32(sts, TMC_STS),
	coresight_simple_reg64(rrp, TMC_RRP, TMC_RRPHI),
	coresight_simple_reg64(rwp, TMC_RWP, TMC_RWPHI),
	coresight_simple_reg32(trg, TMC_TRG),
	coresight_simple_reg32(ctl, TMC_CTL),
	coresight_simple_reg32(ffsr, TMC_FFSR),
	coresight_simple_reg32(ffcr, TMC_FFCR),
	coresight_simple_reg32(mode, TMC_MODE),
	coresight_simple_reg32(pscr, TMC_PSCR),
	coresight_simple_reg32(devid, CORESIGHT_DEVID),
	coresight_simple_reg64(dba, TMC_DBALO, TMC_DBAHI),
	coresight_simple_reg32(axictl, TMC_AXICTL),
	coresight_simple_reg32(authstatus, TMC_AUTHSTATUS),
	NULL,
};

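/*
 * sysfs "trigger_cntr": trigger counter value kept by the driver and used
 * when programming the TRG register on enable. Read and written in
 * hexadecimal.
 */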
static ssize_t trigger_cntr_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val = drvdata->trigger_cntr;

	return sprintf(buf, "%#lx\n", val);
}

static ssize_t trigger_cntr_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	drvdata->trigger_cntr = val;
	return size;
}
static DEVICE_ATTR_RW(trigger_cntr);

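/*
 * sysfs "buffer_size": size of the trace buffer. Only an ETR accepts writes,
 * and the new size must be a multiple of PAGE_SIZE.
 */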
static ssize_t buffer_size_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	return sprintf(buf, "%#x\n", drvdata->size);
}

static ssize_t buffer_size_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);

	/* Only permitted for TMC-ETRs */
	if (drvdata->config_type != TMC_CONFIG_TYPE_ETR)
		return -EPERM;

	ret = kstrtoul(buf, 0, &val);
	if (ret)
		return ret;
	/* The buffer size should be page aligned */
	if (val & (PAGE_SIZE - 1))
		return -EINVAL;
	drvdata->size = val;
	return size;
}

static DEVICE_ATTR_RW(buffer_size);

static struct attribute *coresight_tmc_attrs[] = {
	&dev_attr_trigger_cntr.attr,
	&dev_attr_buffer_size.attr,
	NULL,
};

static const struct attribute_group coresight_tmc_group = {
	.attrs = coresight_tmc_attrs,
};

static const struct attribute_group coresight_tmc_mgmt_group = {
	.attrs = coresight_tmc_mgmt_attrs,
	.name = "mgmt",
};

static const struct attribute_group *coresight_etf_groups[] = {
	&coresight_tmc_group,
	&coresight_tmc_mgmt_group,
	NULL,
};

static const struct attribute_group *coresight_etr_groups[] = {
	&coresight_etr_group,
	&coresight_tmc_group,
	&coresight_tmc_mgmt_group,
	NULL,
};

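/* True if the firmware node advertises ETR scatter-gather capability. */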
static inline bool tmc_etr_can_use_sg(struct device *dev)
{
	return fwnode_property_present(dev->fwnode, "arm,scatter-gather");
}

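/*
 * The AUTHSTATUS NSID field must read 0b11, i.e. non-secure invasive debug
 * is implemented and enabled, before the ETR can be set up by this driver.
 */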
static inline bool tmc_etr_has_non_secure_access(struct tmc_drvdata *drvdata)
{
	u32 auth = readl_relaxed(drvdata->base + TMC_AUTHSTATUS);

	return (auth & TMC_AUTH_NSID_MASK) == 0x3;
}

/* Detect and initialise the capabilities of a TMC ETR */
static int tmc_etr_setup_caps(struct device *parent, u32 devid, void *dev_caps)
{
	int rc;
	u32 dma_mask = 0;
	struct tmc_drvdata *drvdata = dev_get_drvdata(parent);

	if (!tmc_etr_has_non_secure_access(drvdata))
		return -EACCES;

	/* Set the unadvertised capabilities */
	tmc_etr_init_caps(drvdata, (u32)(unsigned long)dev_caps);

	if (!(devid & TMC_DEVID_NOSCAT) && tmc_etr_can_use_sg(parent))
		tmc_etr_set_cap(drvdata, TMC_ETR_SG);

	/* Check if the AXI address width is available */
	if (devid & TMC_DEVID_AXIAW_VALID)
		dma_mask = ((devid >> TMC_DEVID_AXIAW_SHIFT) &
				TMC_DEVID_AXIAW_MASK);

	/*
	 * Unless specified in the device configuration, ETR uses a 40-bit
	 * AXI master in place of the embedded SRAM of ETB/ETF.
	 */
	switch (dma_mask) {
	case 32:
	case 40:
	case 44:
	case 48:
	case 52:
		dev_info(parent, "Detected dma mask %dbits\n", dma_mask);
		break;
	default:
		dma_mask = 40;
	}

	rc = dma_set_mask_and_coherent(parent, DMA_BIT_MASK(dma_mask));
	if (rc)
		dev_err(parent, "Failed to setup DMA mask: %d\n", rc);
	return rc;
}

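/*
 * Default ETR buffer size: the "arm,buffer-size" firmware property when
 * present, 1MB otherwise.
 */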
static u32 tmc_etr_get_default_buffer_size(struct device *dev)
{
	u32 size;

	if (fwnode_property_read_u32(dev->fwnode, "arm,buffer-size", &size))
		size = SZ_1M;
	return size;
}

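/*
 * Maximum AXI write burst size: the "arm,max-burst-size" firmware property
 * when present and within the 0-15 encoding range, otherwise the default of
 * TMC_AXICTL_WR_BURST_16.
 */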
static u32 tmc_etr_get_max_burst_size(struct device *dev)
{
	u32 burst_size;

	if (fwnode_property_read_u32(dev->fwnode, "arm,max-burst-size",
				     &burst_size))
		return TMC_AXICTL_WR_BURST_16;

	/* Only permissible values are 0 to 15 */
	if (burst_size > 0xF)
		burst_size = TMC_AXICTL_WR_BURST_16;

	return burst_size;
}

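/*
 * Probe an AMBA TMC device: map its registers, work out from DEVID whether
 * it is an ETB, ETF or ETR, register it with the coresight core and expose
 * a misc character device for dumping the captured trace.
 */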
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 devid;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct tmc_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc desc = { 0 };
	struct coresight_dev_list *dev_list = NULL;

	ret = -ENOMEM;
	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		goto out;

	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out;
	}

	drvdata->base = base;
	desc.access = CSDEV_ACCESS_IOMEM(base);

	spin_lock_init(&drvdata->spinlock);

	devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
	drvdata->config_type = BMVAL(devid, 6, 7);
	drvdata->memwidth = tmc_get_memwidth(devid);
	/* This device is not associated with a session */
	drvdata->pid = -1;
	drvdata->etr_mode = ETR_MODE_AUTO;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
		drvdata->size = tmc_etr_get_default_buffer_size(dev);
		drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
	} else {
		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
	}

	desc.dev = dev;

	switch (drvdata->config_type) {
	case TMC_CONFIG_TYPE_ETB:
		desc.groups = coresight_etf_groups;
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.ops = &tmc_etb_cs_ops;
		dev_list = &etb_devs;
		break;
	case TMC_CONFIG_TYPE_ETR:
		desc.groups = coresight_etr_groups;
		desc.type = CORESIGHT_DEV_TYPE_SINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
		desc.ops = &tmc_etr_cs_ops;
		ret = tmc_etr_setup_caps(dev, devid,
					 coresight_get_uci_data(id));
		if (ret)
			goto out;
		idr_init(&drvdata->idr);
		mutex_init(&drvdata->idr_mutex);
		dev_list = &etr_devs;
		break;
	case TMC_CONFIG_TYPE_ETF:
		desc.groups = coresight_etf_groups;
		desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
		desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
		desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
		desc.ops = &tmc_etf_cs_ops;
		dev_list = &etf_devs;
		break;
	default:
		dev_err(dev, "Unsupported TMC config\n");
		ret = -EINVAL;
		goto out;
	}

	desc.name = coresight_alloc_device_name(dev_list, dev);
	if (!desc.name) {
		ret = -ENOMEM;
		goto out;
	}

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto out;
	}
	adev->dev.platform_data = pdata;
	desc.pdata = pdata;

	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto out;
	}

	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &tmc_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret)
		coresight_unregister(drvdata->csdev);
	else
		pm_runtime_put(&adev->dev);
out:
	return ret;
}

static void tmc_shutdown(struct amba_device *adev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = amba_get_drvdata(adev);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (coresight_get_mode(drvdata->csdev) == CS_MODE_DISABLED)
		goto out;

	if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
		tmc_etr_disable_hw(drvdata);

	/*
	 * Unlike the remove callback, which needs to unregister the device
	 * to keep coresight modular, we do not bother with
	 * coresight_unregister() here since the system is going down after
	 * this.
	 */
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
}

static void tmc_remove(struct amba_device *adev)
{
	struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);

	/*
	 * Since misc_open() holds a refcount on the f_ops, which is
	 * tmc_fops in this case, the device sticks around until the last
	 * file handle to it is closed.
	 */
	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);
}

static const struct amba_id tmc_ids[] = {
	CS_AMBA_ID(0x000bb961),
	/* Coresight SoC 600 TMC-ETR/ETS */
	CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
	/* Coresight SoC 600 TMC-ETB */
	CS_AMBA_ID(0x000bb9e9),
	/* Coresight SoC 600 TMC-ETF */
	CS_AMBA_ID(0x000bb9ea),
	{ 0, 0, NULL },
};

MODULE_DEVICE_TABLE(amba, tmc_ids);

static struct amba_driver tmc_driver = {
	.drv = {
		.name   = "coresight-tmc",
		.owner  = THIS_MODULE,
		.suppress_bind_attrs = true,
	},
	.probe		= tmc_probe,
	.shutdown	= tmc_shutdown,
	.remove		= tmc_remove,
	.id_table	= tmc_ids,
};

module_amba_driver(tmc_driver);

MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver");
MODULE_LICENSE("GPL v2");