// SPDX-License-Identifier: GPL-2.0
/*
 * AMD IOMMU driver
 *
 * Copyright (C) 2018 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/debugfs.h>
#include <linux/pci.h>

#include "amd_iommu.h"
#include "../irq_remapping.h"

static struct dentry *amd_iommu_debugfs;

#define MAX_NAME_LEN	20
#define OFS_IN_SZ	8
#define DEVID_IN_SZ	16

static int sbdf = -1;

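/*
 * Example usage of the per-IOMMU "mmio" file created in
 * amd_iommu_debugfs_setup() below (a sketch; paths assume debugfs is
 * mounted at /sys/kernel/debug and the target is iommu00):
 *
 *   # echo 0x18 > /sys/kernel/debug/iommu/amd/iommu00/mmio
 *   # cat /sys/kernel/debug/iommu/amd/iommu00/mmio
 *   Offset:0x18 Value:0x<64-bit register contents>
 *
 * iommu_mmio_write() validates the offset against mmio_phys_end and
 * iommu_mmio_show() reads back the 64-bit register at that offset.
 */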
static ssize_t iommu_mmio_write(struct file *filp, const char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct amd_iommu *iommu = m->private;
	int ret;

	iommu->dbg_mmio_offset = -1;

	if (cnt > OFS_IN_SZ)
		return -EINVAL;

	ret = kstrtou32_from_user(ubuf, cnt, 0, &iommu->dbg_mmio_offset);
	if (ret)
		return ret;

	if (iommu->dbg_mmio_offset > iommu->mmio_phys_end - 4) {
		iommu->dbg_mmio_offset = -1;
		return -EINVAL;
	}

	return cnt;
}

static int iommu_mmio_show(struct seq_file *m, void *unused)
{
	struct amd_iommu *iommu = m->private;
	u64 value;

	if (iommu->dbg_mmio_offset < 0) {
		seq_puts(m, "Please provide mmio register's offset\n");
		return 0;
	}

	value = readq(iommu->mmio_base + iommu->dbg_mmio_offset);
	seq_printf(m, "Offset:0x%x Value:0x%016llx\n", iommu->dbg_mmio_offset, value);

	return 0;
}
DEFINE_SHOW_STORE_ATTRIBUTE(iommu_mmio);

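/*
 * Example usage of the per-IOMMU "capability" file (a sketch; paths assume
 * debugfs is mounted at /sys/kernel/debug):
 *
 *   # echo 0x10 > /sys/kernel/debug/iommu/amd/iommu00/capability
 *   # cat /sys/kernel/debug/iommu/amd/iommu00/capability
 *   Offset:0x10 Value:0x<32-bit capability register>
 *
 * The offset is relative to the IOMMU capability block (cap_ptr) in PCI
 * config space and is limited to the range [0x00 - 0x14].
 */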
static ssize_t iommu_capability_write(struct file *filp, const char __user *ubuf,
				      size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct amd_iommu *iommu = m->private;
	int ret;

	iommu->dbg_cap_offset = -1;

	if (cnt > OFS_IN_SZ)
		return -EINVAL;

	ret = kstrtou32_from_user(ubuf, cnt, 0, &iommu->dbg_cap_offset);
	if (ret)
		return ret;

	/* Capability register at offset 0x14 is the last IOMMU capability register. */
	if (iommu->dbg_cap_offset > 0x14) {
		iommu->dbg_cap_offset = -1;
		return -EINVAL;
	}

	return cnt;
}

static int iommu_capability_show(struct seq_file *m, void *unused)
{
	struct amd_iommu *iommu = m->private;
	u32 value;
	int err;

	if (iommu->dbg_cap_offset < 0) {
		seq_puts(m, "Please provide capability register's offset in the range [0x00 - 0x14]\n");
		return 0;
	}

	err = pci_read_config_dword(iommu->dev, iommu->cap_ptr + iommu->dbg_cap_offset, &value);
	if (err) {
		seq_printf(m, "Not able to read capability register at 0x%x\n",
			   iommu->dbg_cap_offset);
		return 0;
	}

	seq_printf(m, "Offset:0x%x Value:0x%08x\n", iommu->dbg_cap_offset, value);

	return 0;
}
DEFINE_SHOW_STORE_ATTRIBUTE(iommu_capability);

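/*
 * The read-only "cmdbuf" file dumps the command buffer head/tail pointers
 * and all CMD_BUFFER_ENTRIES entries while holding iommu->lock, giving a
 * consistent snapshot. Example read (a sketch, assuming debugfs is mounted
 * at /sys/kernel/debug):
 *
 *   # cat /sys/kernel/debug/iommu/amd/iommu00/cmdbuf
 */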
static int iommu_cmdbuf_show(struct seq_file *m, void *unused)
{
	struct amd_iommu *iommu = m->private;
	struct iommu_cmd *cmd;
	unsigned long flag;
	u32 head, tail;
	int i;

	raw_spin_lock_irqsave(&iommu->lock, flag);
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	seq_printf(m, "CMD Buffer Head Offset:%d Tail Offset:%d\n",
		   (head >> 4) & 0x7fff, (tail >> 4) & 0x7fff);
	for (i = 0; i < CMD_BUFFER_ENTRIES; i++) {
		cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd));
		seq_printf(m, "%3d: %08x %08x %08x %08x\n", i, cmd->data[0],
			   cmd->data[1], cmd->data[2], cmd->data[3]);
	}
	raw_spin_unlock_irqrestore(&iommu->lock, flag);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(iommu_cmdbuf);

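/*
 * The "devid" file selects the device that the "devtbl" and "irqtbl" files
 * operate on. Accepted input is either "seg:bus:dev.fn" or "bus:dev.fn"
 * (segment defaults to 0). A sketch of the expected flow, using an example
 * BDF and assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   # echo 0000:01:00.0 > /sys/kernel/debug/iommu/amd/devid
 *   # cat /sys/kernel/debug/iommu/amd/devid
 *   0000:01:00.0
 */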
static ssize_t devid_write(struct file *filp, const char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct amd_iommu_pci_seg *pci_seg;
	int seg, bus, slot, func;
	struct amd_iommu *iommu;
	char *srcid_ptr;
	u16 devid;
	int i;

	sbdf = -1;

	if (cnt >= DEVID_IN_SZ)
		return -EINVAL;

	srcid_ptr = memdup_user_nul(ubuf, cnt);
	if (IS_ERR(srcid_ptr))
		return PTR_ERR(srcid_ptr);

	i = sscanf(srcid_ptr, "%x:%x:%x.%x", &seg, &bus, &slot, &func);
	if (i != 4) {
		i = sscanf(srcid_ptr, "%x:%x.%x", &bus, &slot, &func);
		if (i != 3) {
			kfree(srcid_ptr);
			return -EINVAL;
		}
		seg = 0;
	}

	devid = PCI_DEVID(bus, PCI_DEVFN(slot, func));

	/* Check that the user-supplied device ID is valid */
	for_each_pci_segment(pci_seg) {
		if (pci_seg->id != seg)
			continue;
		if (devid > pci_seg->last_bdf) {
			kfree(srcid_ptr);
			return -EINVAL;
		}
		iommu = pci_seg->rlookup_table[devid];
		if (!iommu) {
			kfree(srcid_ptr);
			return -ENODEV;
		}
		break;
	}

	if (pci_seg->id != seg) {
		kfree(srcid_ptr);
		return -EINVAL;
	}

	sbdf = PCI_SEG_DEVID_TO_SBDF(seg, devid);

	kfree(srcid_ptr);

	return cnt;
}

static int devid_show(struct seq_file *m, void *unused)
{
	u16 devid;

	if (sbdf >= 0) {
		devid = PCI_SBDF_TO_DEVID(sbdf);
		seq_printf(m, "%04x:%02x:%02x.%x\n", PCI_SBDF_TO_SEGID(sbdf),
			   PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
	} else {
		seq_puts(m, "No or Invalid input provided\n");
	}

	return 0;
}
DEFINE_SHOW_STORE_ATTRIBUTE(devid);

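/*
 * Dump the four 64-bit words of the Device Table Entry (DTE) for the device
 * previously selected through the "devid" file. Example read (a sketch,
 * assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   # cat /sys/kernel/debug/iommu/amd/devtbl
 */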
static void dump_dte(struct seq_file *m, struct amd_iommu_pci_seg *pci_seg, u16 devid)
{
	struct dev_table_entry *dev_table;
	struct amd_iommu *iommu;

	iommu = pci_seg->rlookup_table[devid];
	if (!iommu)
		return;

	dev_table = get_dev_table(iommu);
	if (!dev_table) {
		seq_puts(m, "Device table not found");
		return;
	}

	seq_printf(m, "%-12s %16s %16s %16s %16s iommu\n", "DeviceId",
		   "QWORD[3]", "QWORD[2]", "QWORD[1]", "QWORD[0]");
	seq_printf(m, "%04x:%02x:%02x.%x ", pci_seg->id, PCI_BUS_NUM(devid),
		   PCI_SLOT(devid), PCI_FUNC(devid));
	for (int i = 3; i >= 0; --i)
		seq_printf(m, "%016llx ", dev_table[devid].data[i]);
	seq_printf(m, "iommu%d\n", iommu->index);
}

static int iommu_devtbl_show(struct seq_file *m, void *unused)
{
	struct amd_iommu_pci_seg *pci_seg;
	u16 seg, devid;

	if (sbdf < 0) {
		seq_puts(m, "Enter a valid device ID in the 'devid' file\n");
		return 0;
	}
	seg = PCI_SBDF_TO_SEGID(sbdf);
	devid = PCI_SBDF_TO_DEVID(sbdf);

	for_each_pci_segment(pci_seg) {
		if (pci_seg->id != seg)
			continue;
		dump_dte(m, pci_seg, devid);
		break;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(iommu_devtbl);

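/*
 * Interrupt remapping table entries come in two layouts: 128-bit entries
 * (struct irte_ga) when guest virtual APIC (GA) mode is in use, and 32-bit
 * entries (union irte) otherwise. Only valid entries are printed.
 */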
static void dump_128_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
{
	struct irte_ga *ptr, *irte;
	int index;

	for (index = 0; index < int_tab_len; index++) {
		ptr = (struct irte_ga *)table->table;
		irte = &ptr[index];

		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
		    !irte->lo.fields_vapic.valid)
			continue;
		else if (!irte->lo.fields_remap.valid)
			continue;
		seq_printf(m, "IRT[%04d] %016llx %016llx\n", index, irte->hi.val, irte->lo.val);
	}
}

static void dump_32_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
{
	union irte *ptr, *irte;
	int index;

	for (index = 0; index < int_tab_len; index++) {
		ptr = (union irte *)table->table;
		irte = &ptr[index];

		if (!irte->fields.valid)
			continue;
		seq_printf(m, "IRT[%04d] %08x\n", index, irte->val);
	}
}

static void dump_irte(struct seq_file *m, u16 devid, struct amd_iommu_pci_seg *pci_seg)
{
	struct dev_table_entry *dev_table;
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;
	u16 int_tab_len;

	table = pci_seg->irq_lookup_table[devid];
	if (!table) {
		seq_printf(m, "IRQ lookup table not set for %04x:%02x:%02x.%x\n",
			   pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
		return;
	}

	iommu = pci_seg->rlookup_table[devid];
	if (!iommu)
		return;

	dev_table = get_dev_table(iommu);
	if (!dev_table) {
		seq_puts(m, "Device table not found");
		return;
	}

	int_tab_len = dev_table[devid].data[2] & DTE_INTTABLEN_MASK;
	if (int_tab_len != DTE_INTTABLEN_512 && int_tab_len != DTE_INTTABLEN_2K) {
		seq_puts(m, "The device's DTE contains an invalid IRT length value.");
		return;
	}

	seq_printf(m, "DeviceId %04x:%02x:%02x.%x\n", pci_seg->id, PCI_BUS_NUM(devid),
		   PCI_SLOT(devid), PCI_FUNC(devid));

	raw_spin_lock_irqsave(&table->lock, flags);
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
		dump_128_irte(m, table, BIT(int_tab_len >> 1));
	else
		dump_32_irte(m, table, BIT(int_tab_len >> 1));
	seq_puts(m, "\n");
	raw_spin_unlock_irqrestore(&table->lock, flags);
}

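/*
 * The read-only "irqtbl" file dumps the valid interrupt remapping table
 * entries of the device selected via the "devid" file. Example flow (a
 * sketch with an example BDF, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *   # echo 0000:01:00.0 > /sys/kernel/debug/iommu/amd/devid
 *   # cat /sys/kernel/debug/iommu/amd/irqtbl
 */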
static int iommu_irqtbl_show(struct seq_file *m, void *unused)
{
	struct amd_iommu_pci_seg *pci_seg;
	u16 devid, seg;

	if (!irq_remapping_enabled) {
		seq_puts(m, "Interrupt remapping is disabled\n");
		return 0;
	}

	if (sbdf < 0) {
		seq_puts(m, "Enter a valid device ID in the 'devid' file\n");
		return 0;
	}

	seg = PCI_SBDF_TO_SEGID(sbdf);
	devid = PCI_SBDF_TO_DEVID(sbdf);

	for_each_pci_segment(pci_seg) {
		if (pci_seg->id != seg)
			continue;
		dump_irte(m, devid, pci_seg);
		break;
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(iommu_irqtbl);

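/*
 * Create the AMD IOMMU debugfs hierarchy (paths assume debugfs is mounted
 * at /sys/kernel/debug):
 *
 *   /sys/kernel/debug/iommu/amd/devid               (rw) select a device
 *   /sys/kernel/debug/iommu/amd/devtbl              (ro) DTE of selected device
 *   /sys/kernel/debug/iommu/amd/irqtbl              (ro) IRT of selected device
 *   /sys/kernel/debug/iommu/amd/iommuXX/mmio        (rw) MMIO register dump
 *   /sys/kernel/debug/iommu/amd/iommuXX/capability  (rw) capability registers
 *   /sys/kernel/debug/iommu/amd/iommuXX/cmdbuf      (ro) command buffer dump
 */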
void amd_iommu_debugfs_setup(void)
{
	struct amd_iommu *iommu;
	char name[MAX_NAME_LEN + 1];

	amd_iommu_debugfs = debugfs_create_dir("amd", iommu_debugfs_dir);

	for_each_iommu(iommu) {
		iommu->dbg_mmio_offset = -1;
		iommu->dbg_cap_offset = -1;

		snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
		iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);

		debugfs_create_file("mmio", 0644, iommu->debugfs, iommu,
				    &iommu_mmio_fops);
		debugfs_create_file("capability", 0644, iommu->debugfs, iommu,
				    &iommu_capability_fops);
		debugfs_create_file("cmdbuf", 0444, iommu->debugfs, iommu,
				    &iommu_cmdbuf_fops);
	}

	debugfs_create_file("devid", 0644, amd_iommu_debugfs, NULL,
			    &devid_fops);
	debugfs_create_file("devtbl", 0444, amd_iommu_debugfs, NULL,
			    &iommu_devtbl_fops);
	debugfs_create_file("irqtbl", 0444, amd_iommu_debugfs, NULL,
			    &iommu_irqtbl_fops);
}