1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3 * Siemens System Memory Buffer driver.
4 * Copyright(c) 2022, HiSilicon Limited.
5 */
6
7 #include <linux/atomic.h>
8 #include <linux/acpi.h>
9 #include <linux/circ_buf.h>
10 #include <linux/err.h>
11 #include <linux/fs.h>
12 #include <linux/module.h>
13 #include <linux/mod_devicetable.h>
14 #include <linux/platform_device.h>
15
16 #include "coresight-etm-perf.h"
17 #include "coresight-priv.h"
18 #include "ultrasoc-smb.h"
19
20 #define ULTRASOC_SMB_DSM_UUID "82ae1283-7f6a-4cbe-aa06-53e8fb24db18"
21
smb_buffer_not_empty(struct smb_drv_data * drvdata)22 static bool smb_buffer_not_empty(struct smb_drv_data *drvdata)
23 {
24 u32 buf_status = readl(drvdata->base + SMB_LB_INT_STS_REG);
25
26 return FIELD_GET(SMB_LB_INT_STS_NOT_EMPTY_MSK, buf_status);
27 }
28
smb_update_data_size(struct smb_drv_data * drvdata)29 static void smb_update_data_size(struct smb_drv_data *drvdata)
30 {
31 struct smb_data_buffer *sdb = &drvdata->sdb;
32 u32 buf_wrptr;
33
34 buf_wrptr = readl(drvdata->base + SMB_LB_WR_ADDR_REG) -
35 sdb->buf_hw_base;
36
37 /* Buffer is full */
38 if (buf_wrptr == sdb->buf_rdptr && smb_buffer_not_empty(drvdata)) {
39 sdb->data_size = sdb->buf_size;
40 return;
41 }
42
43 /* The buffer mode is circular buffer mode */
44 sdb->data_size = CIRC_CNT(buf_wrptr, sdb->buf_rdptr,
45 sdb->buf_size);
46 }
47
48 /*
49 * The read pointer adds @nbytes bytes (may round up to the beginning)
50 * after the data is read or discarded, while needing to update the
51 * available data size.
52 */
static void smb_update_read_ptr(struct smb_drv_data *drvdata, u32 nbytes)
{
	struct smb_data_buffer *sdb = &drvdata->sdb;
	u32 rdptr;

	/* Advance the software read pointer, wrapping at the buffer end. */
	rdptr = (sdb->buf_rdptr + nbytes) % sdb->buf_size;
	sdb->buf_rdptr = rdptr;

	/* Tell the hardware how far we have consumed. */
	writel(sdb->buf_hw_base + rdptr, drvdata->base + SMB_LB_RD_ADDR_REG);

	sdb->data_size -= nbytes;
}
64
/*
 * Purge any trace data still queued in the hardware path and clear the
 * logical-buffer status so the next session starts from a clean state.
 */
static void smb_reset_buffer(struct smb_drv_data *drvdata)
{
	struct smb_data_buffer *sdb = &drvdata->sdb;
	u32 write_ptr;

	/*
	 * We must flush and discard any data left in hardware path
	 * to avoid corrupting the next session.
	 * Note: The write pointer will never exceed the read pointer.
	 */
	writel(SMB_LB_PURGE_PURGED, drvdata->base + SMB_LB_PURGE_REG);

	/* Reset SMB logical buffer status flags */
	writel(SMB_LB_INT_STS_RESET, drvdata->base + SMB_LB_INT_STS_REG);

	write_ptr = readl(drvdata->base + SMB_LB_WR_ADDR_REG);

	/* Do nothing, no data left in hardware path */
	if (!write_ptr || write_ptr == sdb->buf_rdptr + sdb->buf_hw_base)
		return;

	/*
	 * The SMB_LB_WR_ADDR_REG register is read-only, so instead
	 * synchronize the read pointer (register and cached copy) to
	 * the write pointer, discarding the leftover data.
	 */
	writel(write_ptr, drvdata->base + SMB_LB_RD_ADDR_REG);
	sdb->buf_rdptr = write_ptr - sdb->buf_hw_base;
}
93
smb_open(struct inode * inode,struct file * file)94 static int smb_open(struct inode *inode, struct file *file)
95 {
96 struct smb_drv_data *drvdata = container_of(file->private_data,
97 struct smb_drv_data, miscdev);
98
99 guard(raw_spinlock)(&drvdata->spinlock);
100
101 if (drvdata->reading)
102 return -EBUSY;
103
104 if (drvdata->csdev->refcnt)
105 return -EBUSY;
106
107 smb_update_data_size(drvdata);
108 drvdata->reading = true;
109
110 return 0;
111 }
112
/*
 * Copy buffered trace data to userspace. At most one contiguous chunk is
 * copied per call: if the requested region would wrap past the end of the
 * SMB buffer, the copy stops at the buffer end and userspace must read
 * again for the remainder. Returns bytes copied, 0 on EOF, or -EFAULT.
 */
static ssize_t smb_read(struct file *file, char __user *data, size_t len,
			loff_t *ppos)
{
	struct smb_drv_data *drvdata = container_of(file->private_data,
					struct smb_drv_data, miscdev);
	struct smb_data_buffer *sdb = &drvdata->sdb;
	struct device *dev = &drvdata->csdev->dev;
	ssize_t to_copy = 0;

	if (!len)
		return 0;

	/* Nothing buffered: report EOF. */
	if (!sdb->data_size)
		return 0;

	to_copy = min(sdb->data_size, len);

	/* Copy parts of trace data when read pointer wrap around SMB buffer */
	if (sdb->buf_rdptr + to_copy > sdb->buf_size)
		to_copy = sdb->buf_size - sdb->buf_rdptr;

	if (copy_to_user(data, sdb->buf_base + sdb->buf_rdptr, to_copy)) {
		dev_dbg(dev, "Failed to copy data to user\n");
		return -EFAULT;
	}

	*ppos += to_copy;
	smb_update_read_ptr(drvdata, to_copy);
	/* All data consumed: flush whatever is left in the hardware path. */
	if (!sdb->data_size)
		smb_reset_buffer(drvdata);

	dev_dbg(dev, "%zu bytes copied\n", to_copy);
	return to_copy;
}
147
smb_release(struct inode * inode,struct file * file)148 static int smb_release(struct inode *inode, struct file *file)
149 {
150 struct smb_drv_data *drvdata = container_of(file->private_data,
151 struct smb_drv_data, miscdev);
152
153 guard(raw_spinlock)(&drvdata->spinlock);
154 drvdata->reading = false;
155
156 return 0;
157 }
158
/* Character-device interface for draining the SMB buffer from userspace. */
static const struct file_operations smb_fops = {
	.owner = THIS_MODULE,
	.open = smb_open,
	.read = smb_read,
	.release = smb_release,
};
165
/* sysfs: report the SMB trace buffer size in bytes, hex formatted. */
static ssize_t buf_size_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(dev->parent);

	return sysfs_emit(buf, "0x%lx\n", drvdata->sdb.buf_size);
}
static DEVICE_ATTR_RO(buf_size);
174
/* Management attributes exposed under the sink's "mgmt" sysfs group. */
static struct attribute *smb_sink_attrs[] = {
	coresight_simple_reg32(read_pos, SMB_LB_RD_ADDR_REG),
	coresight_simple_reg32(write_pos, SMB_LB_WR_ADDR_REG),
	coresight_simple_reg32(buf_status, SMB_LB_INT_STS_REG),
	&dev_attr_buf_size.attr,
	NULL
};

static const struct attribute_group smb_sink_group = {
	.attrs = smb_sink_attrs,
	.name = "mgmt",
};

static const struct attribute_group *smb_sink_groups[] = {
	&smb_sink_group,
	NULL
};
192
/* Start trace capture by writing the global enable pattern. */
static void smb_enable_hw(struct smb_drv_data *drvdata)
{
	writel(SMB_GLB_EN_HW_ENABLE, drvdata->base + SMB_GLB_EN_REG);
}

/* Stop trace capture by clearing the global enable register. */
static void smb_disable_hw(struct smb_drv_data *drvdata)
{
	writel(0x0, drvdata->base + SMB_GLB_EN_REG);
}
202
smb_enable_sysfs(struct coresight_device * csdev)203 static void smb_enable_sysfs(struct coresight_device *csdev)
204 {
205 struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
206
207 if (coresight_get_mode(csdev) != CS_MODE_DISABLED)
208 return;
209
210 smb_enable_hw(drvdata);
211 coresight_set_mode(csdev, CS_MODE_SYSFS);
212 }
213
/*
 * Claim the sink for a perf session. The SMB can be shared by events of
 * the same target process (matching pid); use from a different context
 * is refused with -EBUSY. Returns 0 on success or a negative errno.
 */
static int smb_enable_perf(struct coresight_device *csdev,
			   struct coresight_path *path)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = path->handle;
	struct cs_buffers *buf = etm_perf_sink_config(handle);
	pid_t pid;

	if (!buf)
		return -EINVAL;

	/* Get a handle on the pid of the target process */
	pid = buf->pid;

	/* Device is already in use by another session */
	if (drvdata->pid != -1 && drvdata->pid != pid)
		return -EBUSY;

	/* First user for this context: turn the hardware on. */
	if (drvdata->pid == -1) {
		smb_enable_hw(drvdata);
		drvdata->pid = pid;
		coresight_set_mode(csdev, CS_MODE_PERF);
	}

	return 0;
}
240
smb_enable(struct coresight_device * csdev,enum cs_mode mode,struct coresight_path * path)241 static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
242 struct coresight_path *path)
243 {
244 struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
245 int ret = 0;
246
247 guard(raw_spinlock)(&drvdata->spinlock);
248
249 /* Do nothing, the trace data is reading by other interface now */
250 if (drvdata->reading)
251 return -EBUSY;
252
253 /* Do nothing, the SMB is already enabled as other mode */
254 if (coresight_get_mode(csdev) != CS_MODE_DISABLED &&
255 coresight_get_mode(csdev) != mode)
256 return -EBUSY;
257
258 switch (mode) {
259 case CS_MODE_SYSFS:
260 smb_enable_sysfs(csdev);
261 break;
262 case CS_MODE_PERF:
263 ret = smb_enable_perf(csdev, path);
264 break;
265 default:
266 ret = -EINVAL;
267 }
268
269 if (ret)
270 return ret;
271
272 csdev->refcnt++;
273 dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
274
275 return ret;
276 }
277
/*
 * Drop one enable reference; the hardware is only stopped when the last
 * user goes away. Returns -EBUSY while the misc device is reading or
 * while other users still hold references.
 */
static int smb_disable(struct coresight_device *csdev)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);

	guard(raw_spinlock)(&drvdata->spinlock);

	if (drvdata->reading)
		return -EBUSY;

	csdev->refcnt--;
	if (csdev->refcnt)
		return -EBUSY;

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);

	smb_disable_hw(drvdata);

	/* Dissociate from the target process. */
	drvdata->pid = -1;
	coresight_set_mode(csdev, CS_MODE_DISABLED);
	dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");

	return 0;
}
303
/*
 * Allocate the per-event cs_buffers descriptor for a perf session,
 * preferring the NUMA node of the traced CPU when one is pinned.
 */
static void *smb_alloc_buffer(struct coresight_device *csdev,
			      struct perf_event *event, void **pages,
			      int nr_pages, bool overwrite)
{
	int node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
	struct cs_buffers *buf;

	buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;
	buf->pid = task_pid_nr(event->owner);

	return buf;
}
323
/* Release a cs_buffers descriptor created by smb_alloc_buffer(). */
static void smb_free_buffer(void *config)
{
	kfree(config);
}
330
/*
 * Copy all buffered SMB trace data into the perf AUX ring-buffer pages,
 * starting at offset @head. Handles wrap-around of both the SMB buffer
 * and the page array, then resets the SMB buffer.
 */
static void smb_sync_perf_buffer(struct smb_drv_data *drvdata,
				 struct cs_buffers *buf,
				 unsigned long head)
{
	struct smb_data_buffer *sdb = &drvdata->sdb;
	char **dst_pages = (char **)buf->data_pages;
	unsigned long to_copy;
	long pg_idx, pg_offset;

	/* Translate @head into a page index plus an in-page offset. */
	pg_idx = head >> PAGE_SHIFT;
	pg_offset = head & (PAGE_SIZE - 1);

	while (sdb->data_size) {
		unsigned long pg_space = PAGE_SIZE - pg_offset;

		/* Chunk is bounded by the space left in the current page... */
		to_copy = min(sdb->data_size, pg_space);

		/* ...and by the distance to the end of the SMB buffer. */
		if (sdb->buf_rdptr + to_copy > sdb->buf_size)
			to_copy = sdb->buf_size - sdb->buf_rdptr;

		memcpy(dst_pages[pg_idx] + pg_offset,
		       sdb->buf_base + sdb->buf_rdptr, to_copy);

		pg_offset += to_copy;
		if (pg_offset >= PAGE_SIZE) {
			pg_offset = 0;
			pg_idx++;
			/* The AUX pages are consumed circularly. */
			pg_idx %= buf->nr_pages;
		}
		smb_update_read_ptr(drvdata, to_copy);
	}

	smb_reset_buffer(drvdata);
}
366
/*
 * Stop capture and move the captured trace into the perf ring buffer.
 * Returns the amount of data produced, or 0 when the sink is shared or
 * not configured for this session.
 */
static unsigned long smb_update_buffer(struct coresight_device *csdev,
				       struct perf_output_handle *handle,
				       void *sink_config)
{
	struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct smb_data_buffer *sdb = &drvdata->sdb;
	struct cs_buffers *buf = sink_config;
	unsigned long data_size;
	bool lost = false;

	if (!buf)
		return 0;

	guard(raw_spinlock)(&drvdata->spinlock);

	/* Don't do anything if another tracer is using this sink. */
	if (csdev->refcnt != 1)
		return 0;

	smb_disable_hw(drvdata);
	smb_update_data_size(drvdata);

	/*
	 * The SMB buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the offset so
	 * that we get the latest trace data.
	 */
	if (sdb->data_size > handle->size) {
		smb_update_read_ptr(drvdata, sdb->data_size - handle->size);
		lost = true;
	}

	data_size = sdb->data_size;
	smb_sync_perf_buffer(drvdata, buf, handle->head);
	/* In non-snapshot mode, flag the dropped older data as truncation. */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	return data_size;
}
406
/* Sink callbacks wired into the CoreSight framework. */
static const struct coresight_ops_sink smb_cs_ops = {
	.enable = smb_enable,
	.disable = smb_disable,
	.alloc_buffer = smb_alloc_buffer,
	.free_buffer = smb_free_buffer,
	.update_buffer = smb_update_buffer,
};

static const struct coresight_ops cs_ops = {
	.sink_ops = &smb_cs_ops,
};
418
/*
 * Map the reserved memory region backing the SMB trace buffer and record
 * its geometry in @sdb. Returns 0 on success or a negative errno.
 */
static int smb_init_data_buffer(struct platform_device *pdev,
				struct smb_data_buffer *sdb)
{
	struct resource *res;
	void *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, SMB_BUF_ADDR_RES);
	if (!res) {
		dev_err(&pdev->dev, "SMB device failed to get resource\n");
		return -EINVAL;
	}

	sdb->buf_rdptr = 0;
	/* Hardware pointer registers carry only the low address bits. */
	sdb->buf_hw_base = FIELD_GET(SMB_BUF_ADDR_LO_MSK, res->start);
	sdb->buf_size = resource_size(res);
	if (sdb->buf_size == 0)
		return -EINVAL;

	/*
	 * This is a chunk of memory, use classic mapping (cacheable
	 * memremap rather than ioremap) with better performance.
	 */
	base = devm_memremap(&pdev->dev, sdb->buf_hw_base, sdb->buf_size,
			     MEMREMAP_WB);
	if (IS_ERR(base))
		return PTR_ERR(base);

	sdb->buf_base = base;

	return 0;
}
450
/* Bring the SMB to a known state: capture off, default configuration. */
static void smb_init_hw(struct smb_drv_data *drvdata)
{
	smb_disable_hw(drvdata);

	writel(SMB_LB_CFG_LO_DEFAULT, drvdata->base + SMB_LB_CFG_LO_REG);
	writel(SMB_LB_CFG_HI_DEFAULT, drvdata->base + SMB_LB_CFG_HI_REG);
	writel(SMB_GLB_CFG_DEFAULT, drvdata->base + SMB_GLB_CFG_REG);
	writel(SMB_GLB_INT_CFG, drvdata->base + SMB_GLB_INT_REG);
	writel(SMB_LB_INT_CTRL_CFG, drvdata->base + SMB_LB_INT_CTRL_REG);
}
461
/*
 * Register the SMB as a CoreSight sink and create the companion misc
 * device used to drain trace data from userspace. On misc registration
 * failure the coresight device is unregistered again before returning.
 */
static int smb_register_sink(struct platform_device *pdev,
			     struct smb_drv_data *drvdata)
{
	struct coresight_platform_data *pdata = NULL;
	struct coresight_desc desc = { 0 };
	int ret;

	pdata = coresight_get_platform_data(&pdev->dev);
	if (IS_ERR(pdata))
		return PTR_ERR(pdata);

	desc.type = CORESIGHT_DEV_TYPE_SINK;
	desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
	desc.ops = &cs_ops;
	desc.pdata = pdata;
	desc.dev = &pdev->dev;
	desc.groups = smb_sink_groups;
	desc.name = coresight_alloc_device_name("ultra_smb", &pdev->dev);
	if (!desc.name) {
		dev_err(&pdev->dev, "Failed to alloc coresight device name");
		return -ENOMEM;
	}
	desc.access = CSDEV_ACCESS_IOMEM(drvdata->base);

	drvdata->csdev = coresight_register(&desc);
	if (IS_ERR(drvdata->csdev))
		return PTR_ERR(drvdata->csdev);

	/* The misc device shares the coresight device's name. */
	drvdata->miscdev.name = desc.name;
	drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
	drvdata->miscdev.fops = &smb_fops;
	ret = misc_register(&drvdata->miscdev);
	if (ret) {
		coresight_unregister(drvdata->csdev);
		dev_err(&pdev->dev, "Failed to register misc, ret=%d\n", ret);
	}

	return ret;
}
501
/* Tear down both the misc device and the CoreSight sink registration. */
static void smb_unregister_sink(struct smb_drv_data *drvdata)
{
	misc_deregister(&drvdata->miscdev);
	coresight_unregister(drvdata->csdev);
}
507
/*
 * Enable (@enable true) or disable the UltraSoc components on the trace
 * path via an ACPI _DSM call. Returns 0 on success or a negative errno.
 */
static int smb_config_inport(struct device *dev, bool enable)
{
	u64 func = enable ? 1 : 0;
	union acpi_object *obj;
	guid_t guid;
	u64 rev = 0;

	/*
	 * Using DSM calls to enable/disable ultrasoc hardware on the
	 * tracing path, to prevent the ultrasoc packet format being exposed.
	 */
	if (guid_parse(ULTRASOC_SMB_DSM_UUID, &guid)) {
		dev_err(dev, "Get GUID failed\n");
		return -EINVAL;
	}

	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, rev, func, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed\n");
		return -ENODEV;
	}

	ACPI_FREE(obj);

	return 0;
}
534
/*
 * Probe: map the register space, initialize the hardware and the trace
 * buffer, enable the inport path via ACPI, then register the sink.
 * Device-managed (devm) resources cover the allocation and mappings.
 */
static int smb_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct smb_drv_data *drvdata;
	int ret;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->base = devm_platform_ioremap_resource(pdev, SMB_REG_ADDR_RES);
	if (IS_ERR(drvdata->base)) {
		dev_err(dev, "Failed to ioremap resource\n");
		return PTR_ERR(drvdata->base);
	}

	smb_init_hw(drvdata);

	ret = smb_init_data_buffer(pdev, &drvdata->sdb);
	if (ret) {
		dev_err(dev, "Failed to init buffer, ret = %d\n", ret);
		return ret;
	}

	ret = smb_config_inport(dev, true);
	if (ret)
		return ret;

	/* Discard any stale data before the first session. */
	smb_reset_buffer(drvdata);
	platform_set_drvdata(pdev, drvdata);
	raw_spin_lock_init(&drvdata->spinlock);
	/* -1 means the sink is not owned by any perf context yet. */
	drvdata->pid = -1;

	ret = smb_register_sink(pdev, drvdata);
	if (ret) {
		/* Undo the inport enable on failure. */
		smb_config_inport(&pdev->dev, false);
		dev_err(dev, "Failed to register SMB sink\n");
		return ret;
	}

	return 0;
}
577
/* Remove: unregister the sink, then disable the inport path via ACPI. */
static void smb_remove(struct platform_device *pdev)
{
	struct smb_drv_data *drvdata = platform_get_drvdata(pdev);

	smb_unregister_sink(drvdata);

	smb_config_inport(&pdev->dev, false);
}
586
#ifdef CONFIG_ACPI
/* Devices are discovered through ACPI with the HiSilicon HID below. */
static const struct acpi_device_id ultrasoc_smb_acpi_match[] = {
	{"HISI03A1", 0, 0, 0},
	{}
};
MODULE_DEVICE_TABLE(acpi, ultrasoc_smb_acpi_match);
#endif

static struct platform_driver smb_driver = {
	.driver = {
		.name = "ultrasoc-smb",
		.acpi_match_table = ACPI_PTR(ultrasoc_smb_acpi_match),
		/* Manual bind/unbind would race with trace sessions. */
		.suppress_bind_attrs = true,
	},
	.probe = smb_probe,
	.remove = smb_remove,
};
module_platform_driver(smb_driver);

MODULE_DESCRIPTION("UltraSoc SMB CoreSight driver");
MODULE_LICENSE("Dual MIT/GPL");
MODULE_AUTHOR("Jonathan Zhou <jonathan.zhouwen@huawei.com>");
MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
610