xref: /linux/drivers/iommu/amd/debugfs.c (revision 30e0ff6d6a83586486674c343db5e9d933bd92e0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * AMD IOMMU driver
4  *
5  * Copyright (C) 2018 Advanced Micro Devices, Inc.
6  *
7  * Author: Gary R Hook <gary.hook@amd.com>
8  */
9 
10 #include <linux/debugfs.h>
11 #include <linux/pci.h>
12 
13 #include "amd_iommu.h"
14 #include "../irq_remapping.h"
15 
/* Root of the AMD IOMMU debugfs hierarchy: <debugfs>/iommu/amd */
static struct dentry *amd_iommu_debugfs;

#define	MAX_NAME_LEN	20	/* Max length of a per-IOMMU directory name */
#define	OFS_IN_SZ	8	/* Max bytes accepted for a register-offset write */
#define	DEVID_IN_SZ	16	/* Max bytes accepted for a "devid" file write */

/*
 * Segment/device ID most recently written (and validated) via the "devid"
 * debugfs file, encoded with PCI_SEG_DEVID_TO_SBDF(); -1 when no valid
 * input has been provided.  Shared by the devtbl/irqtbl show callbacks.
 */
static int sbdf = -1;
23 
iommu_mmio_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos)24 static ssize_t iommu_mmio_write(struct file *filp, const char __user *ubuf,
25 				size_t cnt, loff_t *ppos)
26 {
27 	struct seq_file *m = filp->private_data;
28 	struct amd_iommu *iommu = m->private;
29 	int ret, dbg_mmio_offset = iommu->dbg_mmio_offset = -1;
30 
31 	if (cnt > OFS_IN_SZ)
32 		return -EINVAL;
33 
34 	ret = kstrtos32_from_user(ubuf, cnt, 0, &dbg_mmio_offset);
35 	if (ret)
36 		return ret;
37 
38 	if (dbg_mmio_offset < 0 || dbg_mmio_offset >
39 			iommu->mmio_phys_end - sizeof(u64))
40 		return -EINVAL;
41 
42 	iommu->dbg_mmio_offset = dbg_mmio_offset;
43 	return cnt;
44 }
45 
/*
 * Read and print the 64-bit MMIO register at the offset previously written
 * to the "mmio" debugfs file; prompts for input if no valid offset has been
 * configured yet.
 */
static int iommu_mmio_show(struct seq_file *m, void *unused)
{
	struct amd_iommu *iommu = m->private;
	u64 value;
	/* Snapshot so the range check and the readq() use the same value. */
	int dbg_mmio_offset = iommu->dbg_mmio_offset;

	/* Same bounds check as the write side; -1 means "not configured". */
	if (dbg_mmio_offset < 0 || dbg_mmio_offset >
			iommu->mmio_phys_end - sizeof(u64)) {
		seq_puts(m, "Please provide mmio register's offset\n");
		return 0;
	}

	value = readq(iommu->mmio_base + dbg_mmio_offset);
	seq_printf(m, "Offset:0x%x Value:0x%016llx\n", dbg_mmio_offset, value);

	return 0;
}
DEFINE_SHOW_STORE_ATTRIBUTE(iommu_mmio);
64 
iommu_capability_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos)65 static ssize_t iommu_capability_write(struct file *filp, const char __user *ubuf,
66 				      size_t cnt, loff_t *ppos)
67 {
68 	struct seq_file *m = filp->private_data;
69 	struct amd_iommu *iommu = m->private;
70 	int ret, dbg_cap_offset = iommu->dbg_cap_offset = -1;
71 
72 	if (cnt > OFS_IN_SZ)
73 		return -EINVAL;
74 
75 	ret = kstrtos32_from_user(ubuf, cnt, 0, &dbg_cap_offset);
76 	if (ret)
77 		return ret;
78 
79 	/* Capability register at offset 0x14 is the last IOMMU capability register. */
80 	if (dbg_cap_offset < 0 || dbg_cap_offset > 0x14)
81 		return -EINVAL;
82 
83 	iommu->dbg_cap_offset = dbg_cap_offset;
84 	return cnt;
85 }
86 
iommu_capability_show(struct seq_file * m,void * unused)87 static int iommu_capability_show(struct seq_file *m, void *unused)
88 {
89 	struct amd_iommu *iommu = m->private;
90 	u32 value;
91 	int err, dbg_cap_offset = iommu->dbg_cap_offset;
92 
93 	if (dbg_cap_offset < 0 || dbg_cap_offset > 0x14) {
94 		seq_puts(m, "Please provide capability register's offset in the range [0x00 - 0x14]\n");
95 		return 0;
96 	}
97 
98 	err = pci_read_config_dword(iommu->dev, iommu->cap_ptr + dbg_cap_offset, &value);
99 	if (err) {
100 		seq_printf(m, "Not able to read capability register at 0x%x\n",
101 			   dbg_cap_offset);
102 		return 0;
103 	}
104 
105 	seq_printf(m, "Offset:0x%x Value:0x%08x\n", dbg_cap_offset, value);
106 
107 	return 0;
108 }
109 DEFINE_SHOW_STORE_ATTRIBUTE(iommu_capability);
110 
/*
 * Dump the IOMMU command buffer: the current head/tail entry indices
 * followed by all CMD_BUFFER_ENTRIES entries, four dwords each.
 */
static int iommu_cmdbuf_show(struct seq_file *m, void *unused)
{
	struct amd_iommu *iommu = m->private;
	struct iommu_cmd *cmd;
	unsigned long flag;
	u32 head, tail;
	int i;

	/* Hold the IOMMU lock so the buffer cannot change while dumping. */
	raw_spin_lock_irqsave(&iommu->lock, flag);
	head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
	/*
	 * Head/tail registers hold byte offsets; >> 4 converts to an entry
	 * index (entries appear to be 16 bytes: 4 x u32 -- see the dump loop
	 * below), masked to 15 bits.
	 */
	seq_printf(m, "CMD Buffer Head Offset:%d Tail Offset:%d\n",
		   (head >> 4) & 0x7fff, (tail >> 4) & 0x7fff);
	for (i = 0; i < CMD_BUFFER_ENTRIES; i++) {
		cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd));
		seq_printf(m, "%3d: %08x %08x %08x %08x\n", i, cmd->data[0],
			   cmd->data[1], cmd->data[2], cmd->data[3]);
	}
	raw_spin_unlock_irqrestore(&iommu->lock, flag);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(iommu_cmdbuf);
134 
devid_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos)135 static ssize_t devid_write(struct file *filp, const char __user *ubuf,
136 			   size_t cnt, loff_t *ppos)
137 {
138 	struct amd_iommu_pci_seg *pci_seg;
139 	int seg, bus, slot, func;
140 	struct amd_iommu *iommu;
141 	char *srcid_ptr;
142 	u16 devid;
143 	int i;
144 
145 	sbdf = -1;
146 
147 	if (cnt >= DEVID_IN_SZ)
148 		return -EINVAL;
149 
150 	srcid_ptr = memdup_user_nul(ubuf, cnt);
151 	if (IS_ERR(srcid_ptr))
152 		return PTR_ERR(srcid_ptr);
153 
154 	i = sscanf(srcid_ptr, "%x:%x:%x.%x", &seg, &bus, &slot, &func);
155 	if (i != 4) {
156 		i = sscanf(srcid_ptr, "%x:%x.%x", &bus, &slot, &func);
157 		if (i != 3) {
158 			kfree(srcid_ptr);
159 			return -EINVAL;
160 		}
161 		seg = 0;
162 	}
163 
164 	devid = PCI_DEVID(bus, PCI_DEVFN(slot, func));
165 
166 	/* Check if user device id input is a valid input */
167 	for_each_pci_segment(pci_seg) {
168 		if (pci_seg->id != seg)
169 			continue;
170 		if (devid > pci_seg->last_bdf) {
171 			kfree(srcid_ptr);
172 			return -EINVAL;
173 		}
174 		iommu = pci_seg->rlookup_table[devid];
175 		if (!iommu) {
176 			kfree(srcid_ptr);
177 			return -ENODEV;
178 		}
179 		break;
180 	}
181 
182 	if (pci_seg->id != seg) {
183 		kfree(srcid_ptr);
184 		return -EINVAL;
185 	}
186 
187 	sbdf = PCI_SEG_DEVID_TO_SBDF(seg, devid);
188 
189 	kfree(srcid_ptr);
190 
191 	return cnt;
192 }
193 
devid_show(struct seq_file * m,void * unused)194 static int devid_show(struct seq_file *m, void *unused)
195 {
196 	u16 devid;
197 	int sbdf_shadow = sbdf;
198 
199 	if (sbdf_shadow >= 0) {
200 		devid = PCI_SBDF_TO_DEVID(sbdf_shadow);
201 		seq_printf(m, "%04x:%02x:%02x.%x\n", PCI_SBDF_TO_SEGID(sbdf_shadow),
202 			   PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
203 	} else
204 		seq_puts(m, "No or Invalid input provided\n");
205 
206 	return 0;
207 }
208 DEFINE_SHOW_STORE_ATTRIBUTE(devid);
209 
/*
 * Print the four quadwords of the device table entry for @devid within
 * @pci_seg, preceded by a column header.  Silently does nothing when no
 * IOMMU serves the device.
 */
static void dump_dte(struct seq_file *m, struct amd_iommu_pci_seg *pci_seg, u16 devid)
{
	struct dev_table_entry *dev_table;
	struct amd_iommu *iommu;
	int qw;

	iommu = pci_seg->rlookup_table[devid];
	if (!iommu)
		return;

	dev_table = get_dev_table(iommu);
	if (!dev_table) {
		seq_puts(m, "Device table not found");
		return;
	}

	seq_printf(m, "%-12s %16s %16s %16s %16s iommu\n", "DeviceId",
		   "QWORD[3]", "QWORD[2]", "QWORD[1]", "QWORD[0]");
	seq_printf(m, "%04x:%02x:%02x.%x ", pci_seg->id, PCI_BUS_NUM(devid),
		   PCI_SLOT(devid), PCI_FUNC(devid));
	/* High-to-low to match the header column order. */
	for (qw = 3; qw >= 0; qw--)
		seq_printf(m, "%016llx ", dev_table[devid].data[qw]);
	seq_printf(m, "iommu%d\n", iommu->index);
}
233 
iommu_devtbl_show(struct seq_file * m,void * unused)234 static int iommu_devtbl_show(struct seq_file *m, void *unused)
235 {
236 	struct amd_iommu_pci_seg *pci_seg;
237 	u16 seg, devid;
238 	int sbdf_shadow = sbdf;
239 
240 	if (sbdf_shadow < 0) {
241 		seq_puts(m, "Enter a valid device ID to 'devid' file\n");
242 		return 0;
243 	}
244 	seg = PCI_SBDF_TO_SEGID(sbdf_shadow);
245 	devid = PCI_SBDF_TO_DEVID(sbdf_shadow);
246 
247 	for_each_pci_segment(pci_seg) {
248 		if (pci_seg->id != seg)
249 			continue;
250 		dump_dte(m, pci_seg, devid);
251 		break;
252 	}
253 
254 	return 0;
255 }
256 DEFINE_SHOW_ATTRIBUTE(iommu_devtbl);
257 
/*
 * Print every valid 128-bit (GA-format) interrupt remapping table entry.
 * The valid bit is tested via the layout that matches the active guest
 * interrupt mode (vAPIC vs. legacy remap view of the same union).
 */
static void dump_128_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
{
	struct irte_ga *entries = (struct irte_ga *)table->table;
	struct irte_ga *e;
	int idx;

	for (idx = 0; idx < int_tab_len; idx++) {
		e = &entries[idx];

		if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
		    !e->lo.fields_vapic.valid)
			continue;
		else if (!e->lo.fields_remap.valid)
			continue;
		seq_printf(m, "IRT[%04d] %016llx %016llx\n", idx, e->hi.val, e->lo.val);
	}
}
275 
/*
 * Print every valid 32-bit (legacy-format) interrupt remapping table entry.
 */
static void dump_32_irte(struct seq_file *m, struct irq_remap_table *table, u16 int_tab_len)
{
	union irte *entries = (union irte *)table->table;
	int idx;

	for (idx = 0; idx < int_tab_len; idx++) {
		if (!entries[idx].fields.valid)
			continue;
		seq_printf(m, "IRT[%04d] %08x\n", idx, entries[idx].val);
	}
}
290 
/*
 * Dump the interrupt remapping table for @devid within @pci_seg, in either
 * 128-bit (GA) or 32-bit (legacy) entry format depending on the active
 * interrupt remapping mode.
 */
static void dump_irte(struct seq_file *m, u16 devid, struct amd_iommu_pci_seg *pci_seg)
{
	struct dev_table_entry *dev_table;
	struct irq_remap_table *table;
	struct amd_iommu *iommu;
	unsigned long flags;
	u16 int_tab_len;

	table = pci_seg->irq_lookup_table[devid];
	if (!table) {
		seq_printf(m, "IRQ lookup table not set for %04x:%02x:%02x:%x\n",
			   pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid));
		return;
	}

	iommu = pci_seg->rlookup_table[devid];
	if (!iommu)
		return;

	dev_table = get_dev_table(iommu);
	if (!dev_table) {
		seq_puts(m, "Device table not found");
		return;
	}

	/*
	 * IntTabLen field of the DTE (third quadword); only the 512- and
	 * 2K-entry encodings are accepted here.
	 */
	int_tab_len = dev_table[devid].data[2] & DTE_INTTABLEN_MASK;
	if (int_tab_len != DTE_INTTABLEN_512 && int_tab_len != DTE_INTTABLEN_2K) {
		seq_puts(m, "The device's DTE contains an invalid IRT length value.");
		return;
	}

	seq_printf(m, "DeviceId %04x:%02x:%02x.%x\n", pci_seg->id, PCI_BUS_NUM(devid),
		   PCI_SLOT(devid), PCI_FUNC(devid));

	/* Lock out concurrent IRTE updates while walking the table. */
	raw_spin_lock_irqsave(&table->lock, flags);
	/*
	 * BIT(int_tab_len >> 1) recovers the entry count from the masked
	 * IntTabLen encoding -- NOTE(review): relies on the DTE_INTTABLEN_*
	 * bit layout; confirm against amd_iommu_types.h.
	 */
	if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
		dump_128_irte(m, table, BIT(int_tab_len >> 1));
	else
		dump_32_irte(m, table, BIT(int_tab_len >> 1));
	seq_puts(m, "\n");
	raw_spin_unlock_irqrestore(&table->lock, flags);
}
333 
iommu_irqtbl_show(struct seq_file * m,void * unused)334 static int iommu_irqtbl_show(struct seq_file *m, void *unused)
335 {
336 	struct amd_iommu_pci_seg *pci_seg;
337 	u16 devid, seg;
338 	int sbdf_shadow = sbdf;
339 
340 	if (!irq_remapping_enabled) {
341 		seq_puts(m, "Interrupt remapping is disabled\n");
342 		return 0;
343 	}
344 
345 	if (sbdf_shadow < 0) {
346 		seq_puts(m, "Enter a valid device ID to 'devid' file\n");
347 		return 0;
348 	}
349 
350 	seg = PCI_SBDF_TO_SEGID(sbdf_shadow);
351 	devid = PCI_SBDF_TO_DEVID(sbdf_shadow);
352 
353 	for_each_pci_segment(pci_seg) {
354 		if (pci_seg->id != seg)
355 			continue;
356 		dump_irte(m, devid, pci_seg);
357 		break;
358 	}
359 
360 	return 0;
361 }
362 DEFINE_SHOW_ATTRIBUTE(iommu_irqtbl);
363 
amd_iommu_debugfs_setup(void)364 void amd_iommu_debugfs_setup(void)
365 {
366 	struct amd_iommu *iommu;
367 	char name[MAX_NAME_LEN + 1];
368 
369 	amd_iommu_debugfs = debugfs_create_dir("amd", iommu_debugfs_dir);
370 
371 	for_each_iommu(iommu) {
372 		iommu->dbg_mmio_offset = -1;
373 		iommu->dbg_cap_offset = -1;
374 
375 		snprintf(name, MAX_NAME_LEN, "iommu%02d", iommu->index);
376 		iommu->debugfs = debugfs_create_dir(name, amd_iommu_debugfs);
377 
378 		debugfs_create_file("mmio", 0644, iommu->debugfs, iommu,
379 				    &iommu_mmio_fops);
380 		debugfs_create_file("capability", 0644, iommu->debugfs, iommu,
381 				    &iommu_capability_fops);
382 		debugfs_create_file("cmdbuf", 0444, iommu->debugfs, iommu,
383 				    &iommu_cmdbuf_fops);
384 	}
385 
386 	debugfs_create_file("devid", 0644, amd_iommu_debugfs, NULL,
387 			    &devid_fops);
388 	debugfs_create_file("devtbl", 0444, amd_iommu_debugfs, NULL,
389 			    &iommu_devtbl_fops);
390 	debugfs_create_file("irqtbl", 0444, amd_iommu_debugfs, NULL,
391 			    &iommu_irqtbl_fops);
392 }
393