// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthrough DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <sanju.mehta@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "ptdma.h"
#include "../ae4dma/ae4dma.h"

/* DebugFS helpers: fields of the CMD_PT_VERSION register */
#define RI_VERSION_NUM	0x0000003F

#define RI_NUM_VQM	0x00078000
#define RI_NVQM_SHIFT	15

static int pt_debugfs_info_show(struct seq_file *s, void *p)
{
	struct pt_device *pt = s->private;
	struct ae4_device *ae4;
	unsigned int regval;

	seq_printf(s, "Device name: %s\n", dev_name(pt->dev));

	if (pt->ver == AE4_DMA_VERSION) {
		ae4 = container_of(pt, struct ae4_device, pt);
		seq_printf(s, " # Queues: %d\n", ae4->cmd_q_count);
		seq_printf(s, " # Cmds per queue: %d\n", CMD_Q_LEN);
	} else {
		seq_printf(s, " # Queues: %d\n", 1);
		seq_printf(s, " # Cmds: %d\n", pt->cmd_count);
	}

	regval = ioread32(pt->io_regs + CMD_PT_VERSION);

	seq_printf(s, " Version: %d\n", regval & RI_VERSION_NUM);
	seq_puts(s, " Engines:\n");
	seq_printf(s, " Queues: %d\n", (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT);

	return 0;
}

/*
 * Return a formatted buffer containing the current
 * statistics of the PTDMA queue
 */
static int pt_debugfs_stats_show(struct seq_file *s, void *p)
{
	struct pt_device *pt = s->private;

	seq_printf(s, "Total Interrupts Handled: %ld\n", pt->total_interrupts);

	return 0;
}

static int pt_debugfs_queue_show(struct seq_file *s, void *p)
{
	struct pt_cmd_queue *cmd_q = s->private;
	struct pt_device *pt;
	unsigned int regval;

	if (!cmd_q)
		return 0;

	seq_printf(s, " Pass-Thru: %ld\n", cmd_q->total_pt_ops);

	pt = cmd_q->pt;
	if (pt->ver == AE4_DMA_VERSION) {
		regval = readl(cmd_q->reg_control + 0x4);
		seq_printf(s, " Enabled Interrupts: status 0x%x\n", regval);
	} else {
		regval = ioread32(cmd_q->reg_control + 0x000C);

		seq_puts(s, " Enabled Interrupts:");
		if (regval & INT_EMPTY_QUEUE)
			seq_puts(s, " EMPTY");
		if (regval & INT_QUEUE_STOPPED)
			seq_puts(s, " STOPPED");
		if (regval & INT_ERROR)
			seq_puts(s, " ERROR");
		if (regval & INT_COMPLETION)
			seq_puts(s, " COMPLETION");
		seq_puts(s, "\n");
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(pt_debugfs_info);
DEFINE_SHOW_ATTRIBUTE(pt_debugfs_queue);
DEFINE_SHOW_ATTRIBUTE(pt_debugfs_stats);

void ptdma_debugfs_setup(struct pt_device *pt)
{
	struct dentry *debugfs_q_instance;
	struct ae4_cmd_queue *ae4cmd_q;
	struct pt_cmd_queue *cmd_q;
	struct ae4_device *ae4;
	char name[30];
	int i;

	if (!debugfs_initialized())
		return;

	debugfs_create_file("info", 0400, pt->dma_dev.dbg_dev_root, pt,
			    &pt_debugfs_info_fops);

	debugfs_create_file("stats", 0400, pt->dma_dev.dbg_dev_root, pt,
			    &pt_debugfs_stats_fops);

	if (pt->ver == AE4_DMA_VERSION) {
		/* One debugfs directory per AE4 command queue */
		ae4 = container_of(pt, struct ae4_device, pt);
		for (i = 0; i < ae4->cmd_q_count; i++) {
			ae4cmd_q = &ae4->ae4cmd_q[i];
			cmd_q = &ae4cmd_q->cmd_q;

			memset(name, 0, sizeof(name));
			snprintf(name, 29, "q%d", ae4cmd_q->id);

			debugfs_q_instance =
				debugfs_create_dir(name, pt->dma_dev.dbg_dev_root);

			debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
					    &pt_debugfs_queue_fops);
		}
	} else {
		/* PTDMA exposes a single command queue */
		debugfs_q_instance =
			debugfs_create_dir("q", pt->dma_dev.dbg_dev_root);
		cmd_q = &pt->cmd_q;
		debugfs_create_file("stats", 0400, debugfs_q_instance, cmd_q,
				    &pt_debugfs_queue_fops);
	}
}
EXPORT_SYMBOL_GPL(ptdma_debugfs_setup);