xref: /linux/drivers/hwtracing/coresight/ultrasoc-smb.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Siemens System Memory Buffer driver.
4  * Copyright(c) 2022, HiSilicon Limited.
5  */
6 
7 #include <linux/atomic.h>
8 #include <linux/acpi.h>
9 #include <linux/circ_buf.h>
10 #include <linux/err.h>
11 #include <linux/fs.h>
12 #include <linux/module.h>
13 #include <linux/mod_devicetable.h>
14 #include <linux/platform_device.h>
15 
16 #include "coresight-etm-perf.h"
17 #include "coresight-priv.h"
18 #include "ultrasoc-smb.h"
19 
20 DEFINE_CORESIGHT_DEVLIST(sink_devs, "ultra_smb");
21 
22 #define ULTRASOC_SMB_DSM_UUID	"82ae1283-7f6a-4cbe-aa06-53e8fb24db18"
23 
24 static bool smb_buffer_not_empty(struct smb_drv_data *drvdata)
25 {
26 	u32 buf_status = readl(drvdata->base + SMB_LB_INT_STS_REG);
27 
28 	return FIELD_GET(SMB_LB_INT_STS_NOT_EMPTY_MSK, buf_status);
29 }
30 
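/*
 * Recompute how much trace data is available to read. SMB_LB_WR_ADDR_REG
 * holds the current write position as a bus address, so subtracting
 * buf_hw_base converts it to an offset into the logical buffer. Note
 * that CIRC_CNT() assumes a power-of-two buffer size; as an illustration,
 * with buf_size = 0x1000, buf_rdptr = 0xf00 and buf_wrptr = 0x100,
 * CIRC_CNT() yields 0x200 bytes of pending data.
 */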
31 static void smb_update_data_size(struct smb_drv_data *drvdata)
32 {
33 	struct smb_data_buffer *sdb = &drvdata->sdb;
34 	u32 buf_wrptr;
35 
36 	buf_wrptr = readl(drvdata->base + SMB_LB_WR_ADDR_REG) -
37 			  sdb->buf_hw_base;
38 
39 	/* Buffer is full */
40 	if (buf_wrptr == sdb->buf_rdptr && smb_buffer_not_empty(drvdata)) {
41 		sdb->data_size = sdb->buf_size;
42 		return;
43 	}
44 
45 	/* Otherwise, the buffer works as a circular buffer */
46 	sdb->data_size = CIRC_CNT(buf_wrptr, sdb->buf_rdptr,
47 				  sdb->buf_size);
48 }
49 
50 /*
51  * Advance the read pointer by @nbytes (wrapping back to the start of
52  * the buffer if necessary) once the data has been read or discarded,
53  * and update the available data size accordingly.
54  */
55 static void smb_update_read_ptr(struct smb_drv_data *drvdata, u32 nbytes)
56 {
57 	struct smb_data_buffer *sdb = &drvdata->sdb;
58 
59 	sdb->buf_rdptr += nbytes;
60 	sdb->buf_rdptr %= sdb->buf_size;
61 	writel(sdb->buf_hw_base + sdb->buf_rdptr,
62 	       drvdata->base + SMB_LB_RD_ADDR_REG);
63 
64 	sdb->data_size -= nbytes;
65 }
66 
67 static void smb_reset_buffer(struct smb_drv_data *drvdata)
68 {
69 	struct smb_data_buffer *sdb = &drvdata->sdb;
70 	u32 write_ptr;
71 
72 	/*
73 	 * We must flush and discard any data left in the hardware path
74 	 * to avoid corrupting the next session.
75 	 * Note: The write pointer will never exceed the read pointer.
76 	 */
77 	writel(SMB_LB_PURGE_PURGED, drvdata->base + SMB_LB_PURGE_REG);
78 
79 	/* Reset SMB logical buffer status flags */
80 	writel(SMB_LB_INT_STS_RESET, drvdata->base + SMB_LB_INT_STS_REG);
81 
82 	write_ptr = readl(drvdata->base + SMB_LB_WR_ADDR_REG);
83 
84 	/* Do nothing, no data left in the hardware path */
85 	if (!write_ptr || write_ptr == sdb->buf_rdptr + sdb->buf_hw_base)
86 		return;
87 
88 	/*
89 	 * The SMB_LB_WR_ADDR_REG register is read-only, so synchronize
90 	 * the read pointer to the write pointer instead.
91 	 */
92 	writel(write_ptr, drvdata->base + SMB_LB_RD_ADDR_REG);
93 	sdb->buf_rdptr = write_ptr - sdb->buf_hw_base;
94 }
95 
96 static int smb_open(struct inode *inode, struct file *file)
97 {
98 	struct smb_drv_data *drvdata = container_of(file->private_data,
99 					struct smb_drv_data, miscdev);
100 
101 	guard(spinlock)(&drvdata->spinlock);
102 
103 	if (drvdata->reading)
104 		return -EBUSY;
105 
106 	if (drvdata->csdev->refcnt)
107 		return -EBUSY;
108 
109 	smb_update_data_size(drvdata);
110 	drvdata->reading = true;
111 
112 	return 0;
113 }
114 
115 static ssize_t smb_read(struct file *file, char __user *data, size_t len,
116 			loff_t *ppos)
117 {
118 	struct smb_drv_data *drvdata = container_of(file->private_data,
119 					struct smb_drv_data, miscdev);
120 	struct smb_data_buffer *sdb = &drvdata->sdb;
121 	struct device *dev = &drvdata->csdev->dev;
122 	ssize_t to_copy = 0;
123 
124 	if (!len)
125 		return 0;
126 
127 	if (!sdb->data_size)
128 		return 0;
129 
130 	to_copy = min(sdb->data_size, len);
131 
132 	/* Copy only up to the end of the buffer when the read pointer wraps */
133 	if (sdb->buf_rdptr + to_copy > sdb->buf_size)
134 		to_copy = sdb->buf_size - sdb->buf_rdptr;
135 
136 	if (copy_to_user(data, sdb->buf_base + sdb->buf_rdptr, to_copy)) {
137 		dev_dbg(dev, "Failed to copy data to user\n");
138 		return -EFAULT;
139 	}
140 
141 	*ppos += to_copy;
142 	smb_update_read_ptr(drvdata, to_copy);
143 	if (!sdb->data_size)
144 		smb_reset_buffer(drvdata);
145 
146 	dev_dbg(dev, "%zu bytes copied\n", to_copy);
147 	return to_copy;
148 }
149 
150 static int smb_release(struct inode *inode, struct file *file)
151 {
152 	struct smb_drv_data *drvdata = container_of(file->private_data,
153 					struct smb_drv_data, miscdev);
154 
155 	guard(spinlock)(&drvdata->spinlock);
156 	drvdata->reading = false;
157 
158 	return 0;
159 }
160 
161 static const struct file_operations smb_fops = {
162 	.owner		= THIS_MODULE,
163 	.open		= smb_open,
164 	.read		= smb_read,
165 	.release	= smb_release,
166 };
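/*
 * Illustrative sysfs-mode usage (assumes a sink named "ultra_smb0"; the
 * actual index depends on probe order). Trace data collected while the
 * sink is enabled is read back through the misc device registered in
 * smb_register_sink():
 *
 *   echo 1 > /sys/bus/coresight/devices/ultra_smb0/enable_sink
 *   ... enable a source and run the workload ...
 *   echo 0 > /sys/bus/coresight/devices/ultra_smb0/enable_sink
 *   cat /dev/ultra_smb0 > trace.bin
 *
 * smb_open() refuses the read while a trace session still holds the
 * sink (csdev->refcnt != 0), hence the disable step first.
 */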
167 
168 static ssize_t buf_size_show(struct device *dev, struct device_attribute *attr,
169 			     char *buf)
170 {
171 	struct smb_drv_data *drvdata = dev_get_drvdata(dev->parent);
172 
173 	return sysfs_emit(buf, "0x%lx\n", drvdata->sdb.buf_size);
174 }
175 static DEVICE_ATTR_RO(buf_size);
176 
177 static struct attribute *smb_sink_attrs[] = {
178 	coresight_simple_reg32(read_pos, SMB_LB_RD_ADDR_REG),
179 	coresight_simple_reg32(write_pos, SMB_LB_WR_ADDR_REG),
180 	coresight_simple_reg32(buf_status, SMB_LB_INT_STS_REG),
181 	&dev_attr_buf_size.attr,
182 	NULL
183 };
184 
185 static const struct attribute_group smb_sink_group = {
186 	.attrs = smb_sink_attrs,
187 	.name = "mgmt",
188 };
189 
190 static const struct attribute_group *smb_sink_groups[] = {
191 	&smb_sink_group,
192 	NULL
193 };
194 
195 static void smb_enable_hw(struct smb_drv_data *drvdata)
196 {
197 	writel(SMB_GLB_EN_HW_ENABLE, drvdata->base + SMB_GLB_EN_REG);
198 }
199 
200 static void smb_disable_hw(struct smb_drv_data *drvdata)
201 {
202 	writel(0x0, drvdata->base + SMB_GLB_EN_REG);
203 }
204 
205 static void smb_enable_sysfs(struct coresight_device *csdev)
206 {
207 	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
208 
209 	if (coresight_get_mode(csdev) != CS_MODE_DISABLED)
210 		return;
211 
212 	smb_enable_hw(drvdata);
213 	coresight_set_mode(csdev, CS_MODE_SYSFS);
214 }
215 
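/*
 * In perf mode the sink is claimed by the pid of the session owner
 * (recorded in smb_alloc_buffer()). A concurrent perf session from a
 * different process is refused with -EBUSY until the owning session
 * ends and smb_disable() resets drvdata->pid to -1.
 */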
216 static int smb_enable_perf(struct coresight_device *csdev, void *data)
217 {
218 	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
219 	struct perf_output_handle *handle = data;
220 	struct cs_buffers *buf = etm_perf_sink_config(handle);
221 	pid_t pid;
222 
223 	if (!buf)
224 		return -EINVAL;
225 
226 	/* Get a handle on the pid of the target process */
227 	pid = buf->pid;
228 
229 	/* Device is already in use by another session */
230 	if (drvdata->pid != -1 && drvdata->pid != pid)
231 		return -EBUSY;
232 
233 	if (drvdata->pid == -1) {
234 		smb_enable_hw(drvdata);
235 		drvdata->pid = pid;
236 		coresight_set_mode(csdev, CS_MODE_PERF);
237 	}
238 
239 	return 0;
240 }
241 
242 static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
243 		      void *data)
244 {
245 	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
246 	int ret = 0;
247 
248 	guard(spinlock)(&drvdata->spinlock);
249 
250 	/* Do nothing, the trace data is being read via another interface */
251 	if (drvdata->reading)
252 		return -EBUSY;
253 
254 	/* Do nothing, the SMB is already enabled in a different mode */
255 	if (coresight_get_mode(csdev) != CS_MODE_DISABLED &&
256 	    coresight_get_mode(csdev) != mode)
257 		return -EBUSY;
258 
259 	switch (mode) {
260 	case CS_MODE_SYSFS:
261 		smb_enable_sysfs(csdev);
262 		break;
263 	case CS_MODE_PERF:
264 		ret = smb_enable_perf(csdev, data);
265 		break;
266 	default:
267 		ret = -EINVAL;
268 	}
269 
270 	if (ret)
271 		return ret;
272 
273 	csdev->refcnt++;
274 	dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
275 
276 	return ret;
277 }
278 
279 static int smb_disable(struct coresight_device *csdev)
280 {
281 	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
282 
283 	guard(spinlock)(&drvdata->spinlock);
284 
285 	if (drvdata->reading)
286 		return -EBUSY;
287 
288 	csdev->refcnt--;
289 	if (csdev->refcnt)
290 		return -EBUSY;
291 
292 	/* Complain if we (somehow) got out of sync */
293 	WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
294 
295 	smb_disable_hw(drvdata);
296 
297 	/* Dissociate from the target process. */
298 	drvdata->pid = -1;
299 	coresight_set_mode(csdev, CS_MODE_DISABLED);
300 	dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");
301 
302 	return 0;
303 }
304 
305 static void *smb_alloc_buffer(struct coresight_device *csdev,
306 			      struct perf_event *event, void **pages,
307 			      int nr_pages, bool overwrite)
308 {
309 	struct cs_buffers *buf;
310 	int node;
311 
312 	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
313 	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
314 	if (!buf)
315 		return NULL;
316 
317 	buf->snapshot = overwrite;
318 	buf->nr_pages = nr_pages;
319 	buf->data_pages = pages;
320 	buf->pid = task_pid_nr(event->owner);
321 
322 	return buf;
323 }
324 
325 static void smb_free_buffer(void *config)
326 {
327 	struct cs_buffers *buf = config;
328 
329 	kfree(buf);
330 }
331 
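/*
 * Drain the SMB contents into the perf AUX pages, starting at offset
 * @head. Both sides may wrap: the SMB read pointer wraps through
 * smb_update_read_ptr(), and the destination page index wraps at
 * buf->nr_pages.
 */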
332 static void smb_sync_perf_buffer(struct smb_drv_data *drvdata,
333 				 struct cs_buffers *buf,
334 				 unsigned long head)
335 {
336 	struct smb_data_buffer *sdb = &drvdata->sdb;
337 	char **dst_pages = (char **)buf->data_pages;
338 	unsigned long to_copy;
339 	long pg_idx, pg_offset;
340 
341 	pg_idx = head >> PAGE_SHIFT;
342 	pg_offset = head & (PAGE_SIZE - 1);
343 
344 	while (sdb->data_size) {
345 		unsigned long pg_space = PAGE_SIZE - pg_offset;
346 
347 		to_copy = min(sdb->data_size, pg_space);
348 
349 		/* Copy only up to the end of the buffer when the read pointer wraps */
350 		if (sdb->buf_rdptr + to_copy > sdb->buf_size)
351 			to_copy = sdb->buf_size - sdb->buf_rdptr;
352 
353 		memcpy(dst_pages[pg_idx] + pg_offset,
354 			      sdb->buf_base + sdb->buf_rdptr, to_copy);
355 
356 		pg_offset += to_copy;
357 		if (pg_offset >= PAGE_SIZE) {
358 			pg_offset = 0;
359 			pg_idx++;
360 			pg_idx %= buf->nr_pages;
361 		}
362 		smb_update_read_ptr(drvdata, to_copy);
363 	}
364 
365 	smb_reset_buffer(drvdata);
366 }
367 
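/*
 * update_buffer sink callback: stop collection, compute how much data
 * the SMB holds and copy it into the perf AUX buffer. If the SMB holds
 * more than handle->size bytes, the oldest data is discarded and the
 * AUX record is marked truncated (unless running in snapshot mode).
 */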
368 static unsigned long smb_update_buffer(struct coresight_device *csdev,
369 				       struct perf_output_handle *handle,
370 				       void *sink_config)
371 {
372 	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
373 	struct smb_data_buffer *sdb = &drvdata->sdb;
374 	struct cs_buffers *buf = sink_config;
375 	unsigned long data_size;
376 	bool lost = false;
377 
378 	if (!buf)
379 		return 0;
380 
381 	guard(spinlock)(&drvdata->spinlock);
382 
383 	/* Don't do anything if another tracer is using this sink. */
384 	if (csdev->refcnt != 1)
385 		return 0;
386 
387 	smb_disable_hw(drvdata);
388 	smb_update_data_size(drvdata);
389 
390 	/*
391 	 * The SMB buffer may be bigger than the space available in the
392 	 * perf ring buffer (handle->size). If so, advance the read pointer so
393 	 * that we get the latest trace data.
394 	 */
395 	if (sdb->data_size > handle->size) {
396 		smb_update_read_ptr(drvdata, sdb->data_size - handle->size);
397 		lost = true;
398 	}
399 
400 	data_size = sdb->data_size;
401 	smb_sync_perf_buffer(drvdata, buf, handle->head);
402 	if (!buf->snapshot && lost)
403 		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
404 
405 	return data_size;
406 }
407 
408 static const struct coresight_ops_sink smb_cs_ops = {
409 	.enable		= smb_enable,
410 	.disable	= smb_disable,
411 	.alloc_buffer	= smb_alloc_buffer,
412 	.free_buffer	= smb_free_buffer,
413 	.update_buffer	= smb_update_buffer,
414 };
415 
416 static const struct coresight_ops cs_ops = {
417 	.sink_ops	= &smb_cs_ops,
418 };
419 
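/*
 * The platform device is expected to describe two memory resources:
 * SMB_REG_ADDR_RES for the control registers (ioremapped in smb_probe())
 * and SMB_BUF_ADDR_RES for the system memory trace buffer, which is
 * mapped here with devm_memremap().
 */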
420 static int smb_init_data_buffer(struct platform_device *pdev,
421 				struct smb_data_buffer *sdb)
422 {
423 	struct resource *res;
424 	void *base;
425 
426 	res = platform_get_resource(pdev, IORESOURCE_MEM, SMB_BUF_ADDR_RES);
427 	if (!res) {
428 		dev_err(&pdev->dev, "SMB device failed to get resource\n");
429 		return -EINVAL;
430 	}
431 
432 	sdb->buf_rdptr = 0;
433 	sdb->buf_hw_base = FIELD_GET(SMB_BUF_ADDR_LO_MSK, res->start);
434 	sdb->buf_size = resource_size(res);
435 	if (sdb->buf_size == 0)
436 		return -EINVAL;
437 
438 	/*
439 	 * The trace buffer is ordinary system memory, so use a cacheable
440 	 * memremap() mapping for better performance.
441 	 */
442 	base = devm_memremap(&pdev->dev, sdb->buf_hw_base, sdb->buf_size,
443 				MEMREMAP_WB);
444 	if (IS_ERR(base))
445 		return PTR_ERR(base);
446 
447 	sdb->buf_base = base;
448 
449 	return 0;
450 }
451 
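/*
 * Program the default buffer and interrupt configuration with trace
 * collection disabled; smb_enable_hw() switches collection on once a
 * session starts.
 */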
452 static void smb_init_hw(struct smb_drv_data *drvdata)
453 {
454 	smb_disable_hw(drvdata);
455 
456 	writel(SMB_LB_CFG_LO_DEFAULT, drvdata->base + SMB_LB_CFG_LO_REG);
457 	writel(SMB_LB_CFG_HI_DEFAULT, drvdata->base + SMB_LB_CFG_HI_REG);
458 	writel(SMB_GLB_CFG_DEFAULT, drvdata->base + SMB_GLB_CFG_REG);
459 	writel(SMB_GLB_INT_CFG, drvdata->base + SMB_GLB_INT_REG);
460 	writel(SMB_LB_INT_CTRL_CFG, drvdata->base + SMB_LB_INT_CTRL_REG);
461 }
462 
463 static int smb_register_sink(struct platform_device *pdev,
464 			     struct smb_drv_data *drvdata)
465 {
466 	struct coresight_platform_data *pdata = NULL;
467 	struct coresight_desc desc = { 0 };
468 	int ret;
469 
470 	pdata = coresight_get_platform_data(&pdev->dev);
471 	if (IS_ERR(pdata))
472 		return PTR_ERR(pdata);
473 
474 	desc.type = CORESIGHT_DEV_TYPE_SINK;
475 	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
476 	desc.ops = &cs_ops;
477 	desc.pdata = pdata;
478 	desc.dev = &pdev->dev;
479 	desc.groups = smb_sink_groups;
480 	desc.name = coresight_alloc_device_name(&sink_devs, &pdev->dev);
481 	if (!desc.name) {
482 		dev_err(&pdev->dev, "Failed to alloc coresight device name\n");
483 		return -ENOMEM;
484 	}
485 	desc.access = CSDEV_ACCESS_IOMEM(drvdata->base);
486 
487 	drvdata->csdev = coresight_register(&desc);
488 	if (IS_ERR(drvdata->csdev))
489 		return PTR_ERR(drvdata->csdev);
490 
491 	drvdata->miscdev.name = desc.name;
492 	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
493 	drvdata->miscdev.fops = &smb_fops;
494 	ret = misc_register(&drvdata->miscdev);
495 	if (ret) {
496 		coresight_unregister(drvdata->csdev);
497 		dev_err(&pdev->dev, "Failed to register misc, ret=%d\n", ret);
498 	}
499 
500 	return ret;
501 }
502 
503 static void smb_unregister_sink(struct smb_drv_data *drvdata)
504 {
505 	misc_deregister(&drvdata->miscdev);
506 	coresight_unregister(drvdata->csdev);
507 }
508 
509 static int smb_config_inport(struct device *dev, bool enable)
510 {
511 	u64 func = enable ? 1 : 0;
512 	union acpi_object *obj;
513 	guid_t guid;
514 	u64 rev = 0;
515 
516 	/*
517 	 * Use ACPI _DSM calls to enable/disable the UltraSoc hardware on the
518 	 * tracing path, so that the UltraSoc packet format is not exposed.
519 	 */
520 	if (guid_parse(ULTRASOC_SMB_DSM_UUID, &guid)) {
521 		dev_err(dev, "Get GUID failed\n");
522 		return -EINVAL;
523 	}
524 
525 	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, rev, func, NULL);
526 	if (!obj) {
527 		dev_err(dev, "ACPI handle failed\n");
528 		return -ENODEV;
529 	}
530 
531 	ACPI_FREE(obj);
532 
533 	return 0;
534 }
535 
536 static int smb_probe(struct platform_device *pdev)
537 {
538 	struct device *dev = &pdev->dev;
539 	struct smb_drv_data *drvdata;
540 	int ret;
541 
542 	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
543 	if (!drvdata)
544 		return -ENOMEM;
545 
546 	drvdata->base = devm_platform_ioremap_resource(pdev, SMB_REG_ADDR_RES);
547 	if (IS_ERR(drvdata->base)) {
548 		dev_err(dev, "Failed to ioremap resource\n");
549 		return PTR_ERR(drvdata->base);
550 	}
551 
552 	smb_init_hw(drvdata);
553 
554 	ret = smb_init_data_buffer(pdev, &drvdata->sdb);
555 	if (ret) {
556 		dev_err(dev, "Failed to init buffer, ret = %d\n", ret);
557 		return ret;
558 	}
559 
560 	ret = smb_config_inport(dev, true);
561 	if (ret)
562 		return ret;
563 
564 	smb_reset_buffer(drvdata);
565 	platform_set_drvdata(pdev, drvdata);
566 	spin_lock_init(&drvdata->spinlock);
567 	drvdata->pid = -1;
568 
569 	ret = smb_register_sink(pdev, drvdata);
570 	if (ret) {
571 		smb_config_inport(&pdev->dev, false);
572 		dev_err(dev, "Failed to register SMB sink\n");
573 		return ret;
574 	}
575 
576 	return 0;
577 }
578 
579 static void smb_remove(struct platform_device *pdev)
580 {
581 	struct smb_drv_data *drvdata = platform_get_drvdata(pdev);
582 
583 	smb_unregister_sink(drvdata);
584 
585 	smb_config_inport(&pdev->dev, false);
586 }
587 
588 #ifdef CONFIG_ACPI
589 static const struct acpi_device_id ultrasoc_smb_acpi_match[] = {
590 	{"HISI03A1", 0, 0, 0},
591 	{}
592 };
593 MODULE_DEVICE_TABLE(acpi, ultrasoc_smb_acpi_match);
594 #endif
595 
596 static struct platform_driver smb_driver = {
597 	.driver = {
598 		.name = "ultrasoc-smb",
599 		.acpi_match_table = ACPI_PTR(ultrasoc_smb_acpi_match),
600 		.suppress_bind_attrs = true,
601 	},
602 	.probe = smb_probe,
603 	.remove_new = smb_remove,
604 };
605 module_platform_driver(smb_driver);
606 
607 MODULE_DESCRIPTION("UltraSoc SMB CoreSight driver");
608 MODULE_LICENSE("Dual MIT/GPL");
609 MODULE_AUTHOR("Jonathan Zhou <jonathan.zhouwen@huawei.com>");
610 MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
611