// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2018 Intel Corporation.
 *
 * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
 *          Sohil Mehta <sohil.mehta@intel.com>
 *          Jacob Pan <jacob.jun.pan@linux.intel.com>
 *          Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/debugfs.h>
#include <linux/dmar.h>
#include <linux/pci.h>

#include <asm/irq_remapping.h>

#include "iommu.h"
#include "pasid.h"
#include "perf.h"

struct tbl_walk {
	u16 bus;
	u16 devfn;
	u32 pasid;
	struct root_entry *rt_entry;
	struct context_entry *ctx_entry;
	struct pasid_entry *pasid_tbl_entry;
};

struct iommu_regset {
	int offset;
	const char *regs;
};

#define DEBUG_BUFFER_SIZE	1024
static char debug_buf[DEBUG_BUFFER_SIZE];

#define IOMMU_REGSET_ENTRY(_reg_)					\
	{ DMAR_##_reg_##_REG, __stringify(_reg_) }

static const struct iommu_regset iommu_regs_32[] = {
	IOMMU_REGSET_ENTRY(VER),
	IOMMU_REGSET_ENTRY(GCMD),
	IOMMU_REGSET_ENTRY(GSTS),
	IOMMU_REGSET_ENTRY(FSTS),
	IOMMU_REGSET_ENTRY(FECTL),
	IOMMU_REGSET_ENTRY(FEDATA),
	IOMMU_REGSET_ENTRY(FEADDR),
	IOMMU_REGSET_ENTRY(FEUADDR),
	IOMMU_REGSET_ENTRY(PMEN),
	IOMMU_REGSET_ENTRY(PLMBASE),
	IOMMU_REGSET_ENTRY(PLMLIMIT),
	IOMMU_REGSET_ENTRY(ICS),
	IOMMU_REGSET_ENTRY(PRS),
	IOMMU_REGSET_ENTRY(PECTL),
	IOMMU_REGSET_ENTRY(PEDATA),
	IOMMU_REGSET_ENTRY(PEADDR),
	IOMMU_REGSET_ENTRY(PEUADDR),
};

static const struct iommu_regset iommu_regs_64[] = {
	IOMMU_REGSET_ENTRY(CAP),
	IOMMU_REGSET_ENTRY(ECAP),
	IOMMU_REGSET_ENTRY(RTADDR),
	IOMMU_REGSET_ENTRY(CCMD),
	IOMMU_REGSET_ENTRY(AFLOG),
	IOMMU_REGSET_ENTRY(PHMBASE),
	IOMMU_REGSET_ENTRY(PHMLIMIT),
	IOMMU_REGSET_ENTRY(IQH),
	IOMMU_REGSET_ENTRY(IQT),
	IOMMU_REGSET_ENTRY(IQA),
	IOMMU_REGSET_ENTRY(IRTA),
	IOMMU_REGSET_ENTRY(PQH),
	IOMMU_REGSET_ENTRY(PQT),
	IOMMU_REGSET_ENTRY(PQA),
	IOMMU_REGSET_ENTRY(MTRRCAP),
	IOMMU_REGSET_ENTRY(MTRRDEF),
	IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_80000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_A0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F8000),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
	IOMMU_REGSET_ENTRY(VCCAP),
	IOMMU_REGSET_ENTRY(VCMD),
	IOMMU_REGSET_ENTRY(VCRSP),
};

static struct dentry *intel_iommu_debug;

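/*
 * Dump the raw contents of the 32-bit and 64-bit DMAR registers of every
 * active IOMMU. This backs the "iommu_regset" debugfs file created in
 * intel_iommu_debugfs_init().
 */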
static int iommu_regset_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long flag;
	int i, ret = 0;
	u64 value;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!drhd->reg_base_addr) {
			seq_puts(m, "IOMMU: Invalid base address\n");
			ret = -EINVAL;
			goto out;
		}

		seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
			   iommu->name, drhd->reg_base_addr);
		seq_puts(m, "Name\t\t\tOffset\t\tContents\n");
		/*
		 * Publish the contents of the 32-bit and 64-bit hardware
		 * registers by adding the offset to the pointer (virtual
		 * address).
		 */
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
		for (i = 0 ; i < ARRAY_SIZE(iommu_regs_32); i++) {
			value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
			seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
				   iommu_regs_32[i].regs, iommu_regs_32[i].offset,
				   value);
		}
		for (i = 0 ; i < ARRAY_SIZE(iommu_regs_64); i++) {
			value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
			seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
				   iommu_regs_64[i].regs, iommu_regs_64[i].offset,
				   value);
		}
		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
		seq_putc(m, '\n');
	}
out:
	rcu_read_unlock();

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(iommu_regset);

static inline void print_tbl_walk(struct seq_file *m)
{
	struct tbl_walk *tbl_wlk = m->private;

	seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
		   tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
		   PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
		   tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
		   tbl_wlk->ctx_entry->lo);

	/*
	 * A legacy mode DMAR doesn't support PASID, hence default it to -1
	 * indicating that it's invalid. Also, default all PASID-related
	 * fields to 0.
	 */
	if (!tbl_wlk->pasid_tbl_entry)
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
			   (u64)0, (u64)0, (u64)0);
	else
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
			   tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2],
			   tbl_wlk->pasid_tbl_entry->val[1],
			   tbl_wlk->pasid_tbl_entry->val[0]);
}

static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
			   u16 dir_idx)
{
	struct tbl_walk *tbl_wlk = m->private;
	u8 tbl_idx;

	for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
		if (pasid_pte_is_present(tbl_entry)) {
			tbl_wlk->pasid_tbl_entry = tbl_entry;
			tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
			print_tbl_walk(m);
		}

		tbl_entry++;
	}
}

static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
			   u16 pasid_dir_size)
{
	struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
	struct pasid_entry *pasid_tbl;
	u16 dir_idx;

	for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
		pasid_tbl = get_pasid_table_from_pde(dir_entry);
		if (pasid_tbl)
			pasid_tbl_walk(m, pasid_tbl, dir_idx);

		dir_entry++;
	}
}

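/*
 * Walk the context table of @bus and dump the root and context entries of
 * every present device. In scalable mode, additionally walk the PASID
 * directory and PASID tables referenced by each context entry.
 */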
static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
{
	struct context_entry *context;
	u16 devfn, pasid_dir_size;
	u64 pasid_dir_ptr;

	for (devfn = 0; devfn < 256; devfn++) {
		struct tbl_walk tbl_wlk = {0};

		/*
		 * Scalable mode root entry points to upper scalable mode
		 * context table and lower scalable mode context table. Each
		 * scalable mode context table has 128 context entries, whereas
		 * a legacy mode context table has 256 context entries. So in
		 * scalable mode, the context entries for the first 128 devices
		 * are in the lower scalable mode context table, while the
		 * remaining 128 devices are in the upper scalable mode context
		 * table. In scalable mode, when devfn > 127, iommu_context_addr()
		 * automatically refers to the upper scalable mode context table
		 * and hence the caller doesn't have to worry about differences
		 * between scalable mode and non-scalable mode.
		 */
		context = iommu_context_addr(iommu, bus, devfn, 0);
		if (!context)
			return;

		if (!context_present(context))
			continue;

		tbl_wlk.bus = bus;
		tbl_wlk.devfn = devfn;
		tbl_wlk.rt_entry = &iommu->root_entry[bus];
		tbl_wlk.ctx_entry = context;
		m->private = &tbl_wlk;

		if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
			pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
			pasid_dir_size = get_pasid_dir_size(context);
			pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
			continue;
		}

		print_tbl_walk(m);
	}
}

static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
	u16 bus;

	spin_lock(&iommu->lock);
	seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
		   (u64)virt_to_phys(iommu->root_entry));
	seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");

	/*
	 * No need to check if the root entry is present or not because
	 * iommu_context_addr() performs the same check before returning
	 * the context entry.
	 */
	for (bus = 0; bus < 256; bus++)
		ctx_tbl_walk(m, iommu, bus);
	spin_unlock(&iommu->lock);
}

static int dmar_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u32 sts;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
		if (!(sts & DMA_GSTS_TES)) {
			seq_printf(m, "DMA Remapping is not enabled on %s\n",
				   iommu->name);
			continue;
		}
		root_tbl_walk(m, iommu);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);

static inline unsigned long level_to_directory_size(int level)
{
	return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1));
}

static inline void
dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
{
	seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx",
		   iova >> VTD_PAGE_SHIFT, path[5], path[4], path[3]);
	if (path[2]) {
		seq_printf(m, "\t0x%016llx", path[2]);
		if (path[1])
			seq_printf(m, "\t0x%016llx", path[1]);
	}
	seq_putc(m, '\n');
}

static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
			       int level, unsigned long start,
			       u64 *path)
{
	int i;

	if (level > 5 || level < 1)
		return;

	for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT);
	     i++, pde++, start += level_to_directory_size(level)) {
		if (!dma_pte_present(pde))
			continue;

		path[level] = pde->val;
		if (dma_pte_superpage(pde) || level == 1)
			dump_page_info(m, start, path);
		else
			pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)),
					   level - 1, start, path);
		path[level] = 0;
	}
}

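/*
 * Dump the page table of the domain attached to the device described by
 * @info. In scalable mode, the page table is located through the PASID
 * directory/table entry selected by @pasid; in legacy mode, the
 * second-level page table is taken directly from the context entry.
 */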
static int domain_translation_struct_show(struct seq_file *m,
					  struct device_domain_info *info,
					  ioasid_t pasid)
{
	bool scalable, found = false;
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u16 devfn, bus, seg;

	bus = info->bus;
	devfn = info->devfn;
	seg = info->segment;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		struct context_entry *context;
		u64 pgd, path[6] = { 0 };
		u32 sts, agaw;

		if (seg != iommu->segment)
			continue;

		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
		if (!(sts & DMA_GSTS_TES)) {
			seq_printf(m, "DMA Remapping is not enabled on %s\n",
				   iommu->name);
			continue;
		}
		if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT)
			scalable = true;
		else
			scalable = false;

		/*
		 * The iommu->lock is held across the callback, which will
		 * block calls to domain_attach/domain_detach. Hence,
		 * the domain of the device will not change during traversal.
		 *
		 * Traversing the page table possibly races with the
		 * iommu_unmap() interface. This could be solved by RCU-freeing
		 * the page table pages in the iommu_unmap() path.
		 */
		spin_lock(&iommu->lock);

		context = iommu_context_addr(iommu, bus, devfn, 0);
		if (!context || !context_present(context))
			goto iommu_unlock;

		if (scalable) {	/* scalable mode */
			struct pasid_entry *pasid_tbl, *pasid_tbl_entry;
			struct pasid_dir_entry *dir_tbl, *dir_entry;
			u16 dir_idx, tbl_idx, pgtt;
			u64 pasid_dir_ptr;

			pasid_dir_ptr = context->lo & VTD_PAGE_MASK;

			/* Dump specified device domain mappings with PASID. */
			dir_idx = pasid >> PASID_PDE_SHIFT;
			tbl_idx = pasid & PASID_PTE_MASK;

			dir_tbl = phys_to_virt(pasid_dir_ptr);
			dir_entry = &dir_tbl[dir_idx];

			pasid_tbl = get_pasid_table_from_pde(dir_entry);
			if (!pasid_tbl)
				goto iommu_unlock;

			pasid_tbl_entry = &pasid_tbl[tbl_idx];
			if (!pasid_pte_is_present(pasid_tbl_entry))
				goto iommu_unlock;

			/*
			 * According to the PASID Granular Translation Type
			 * (PGTT), get the page table pointer.
			 */
			pgtt = (u16)(pasid_tbl_entry->val[0] & GENMASK_ULL(8, 6)) >> 6;
			agaw = (u8)(pasid_tbl_entry->val[0] & GENMASK_ULL(4, 2)) >> 2;

			switch (pgtt) {
			case PASID_ENTRY_PGTT_FL_ONLY:
				pgd = pasid_tbl_entry->val[2];
				break;
			case PASID_ENTRY_PGTT_SL_ONLY:
			case PASID_ENTRY_PGTT_NESTED:
				pgd = pasid_tbl_entry->val[0];
				break;
			default:
				goto iommu_unlock;
			}
			pgd &= VTD_PAGE_MASK;
		} else { /* legacy mode */
			pgd = context->lo & VTD_PAGE_MASK;
			agaw = context->hi & 7;
		}

		seq_printf(m, "Device %04x:%02x:%02x.%x ",
			   iommu->segment, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));

		if (scalable)
			seq_printf(m, "with pasid %x @0x%llx\n", pasid, pgd);
		else
			seq_printf(m, "@0x%llx\n", pgd);

		seq_printf(m, "%-17s\t%-18s\t%-18s\t%-18s\t%-18s\t%-s\n",
			   "IOVA_PFN", "PML5E", "PML4E", "PDPE", "PDE", "PTE");
		pgtable_walk_level(m, phys_to_virt(pgd), agaw + 2, 0, path);

		found = true;
iommu_unlock:
		spin_unlock(&iommu->lock);
		if (found)
			break;
	}
	rcu_read_unlock();

	return 0;
}

static int dev_domain_translation_struct_show(struct seq_file *m, void *unused)
{
	struct device_domain_info *info = (struct device_domain_info *)m->private;

	return domain_translation_struct_show(m, info, IOMMU_NO_PASID);
}
DEFINE_SHOW_ATTRIBUTE(dev_domain_translation_struct);

static int pasid_domain_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dev_pasid_info *dev_pasid = (struct dev_pasid_info *)m->private;
	struct device_domain_info *info = dev_iommu_priv_get(dev_pasid->dev);

	return domain_translation_struct_show(m, info, dev_pasid->pasid);
}
DEFINE_SHOW_ATTRIBUTE(pasid_domain_translation_struct);

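/*
 * Dump the descriptors of an IOMMU's invalidation queue, one line per
 * descriptor (two qwords in legacy mode, four qwords in scalable mode),
 * together with the corresponding desc_status[] entry maintained by the
 * driver.
 */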
static void invalidation_queue_entry_show(struct seq_file *m,
					  struct intel_iommu *iommu)
{
	int index, shift = qi_shift(iommu);
	struct qi_desc *desc;
	int offset;

	if (ecap_smts(iommu->ecap))
		seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tqw2\t\t\tqw3\t\t\tstatus\n");
	else
		seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tstatus\n");

	for (index = 0; index < QI_LENGTH; index++) {
		offset = index << shift;
		desc = iommu->qi->desc + offset;
		if (ecap_smts(iommu->ecap))
			seq_printf(m, "%5d\t%016llx\t%016llx\t%016llx\t%016llx\t%016x\n",
				   index, desc->qw0, desc->qw1,
				   desc->qw2, desc->qw3,
				   iommu->qi->desc_status[index]);
		else
			seq_printf(m, "%5d\t%016llx\t%016llx\t%016x\n",
				   index, desc->qw0, desc->qw1,
				   iommu->qi->desc_status[index]);
	}
}

static int invalidation_queue_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long flags;
	struct q_inval *qi;
	int shift;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		qi = iommu->qi;
		shift = qi_shift(iommu);

		if (!qi || !ecap_qis(iommu->ecap))
			continue;

		seq_printf(m, "Invalidation queue on IOMMU: %s\n", iommu->name);

		raw_spin_lock_irqsave(&qi->q_lock, flags);
		seq_printf(m, " Base: 0x%llx\tHead: %lld\tTail: %lld\n",
			   (u64)virt_to_phys(qi->desc),
			   dmar_readq(iommu->reg + DMAR_IQH_REG) >> shift,
			   dmar_readq(iommu->reg + DMAR_IQT_REG) >> shift);
		invalidation_queue_entry_show(m, iommu);
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(invalidation_queue);

#ifdef CONFIG_IRQ_REMAP
static void ir_tbl_remap_entry_show(struct seq_file *m,
				    struct intel_iommu *iommu)
{
	struct irte *ri_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID   DstID    Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		ri_entry = &iommu->ir_table->base[idx];
		if (!ri_entry->present || ri_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %02x  %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(ri_entry->sid),
			   PCI_SLOT(ri_entry->sid), PCI_FUNC(ri_entry->sid),
			   ri_entry->dest_id, ri_entry->vector,
			   ri_entry->high, ri_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

static void ir_tbl_posted_entry_show(struct seq_file *m,
				     struct intel_iommu *iommu)
{
	struct irte *pi_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID   PDA_high PDA_low  Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		pi_entry = &iommu->ir_table->base[idx];
		if (!pi_entry->present || !pi_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %08x %02x  %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(pi_entry->sid),
			   PCI_SLOT(pi_entry->sid), PCI_FUNC(pi_entry->sid),
			   pi_entry->pda_h, pi_entry->pda_l << 6,
			   pi_entry->vector, pi_entry->high,
			   pi_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

/*
 * For the active IOMMUs, go through the interrupt remapping table and
 * print the valid entries in a table format for remapped and posted
 * interrupts.
 */
static int ir_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u64 irta;
	u32 sts;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
		if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_remap_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}

	seq_puts(m, "****\n\n");

	for_each_active_iommu(iommu, drhd) {
		if (!cap_pi_support(iommu->cap))
			continue;

		seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		if (iommu->ir_table) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_posted_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
#endif

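/*
 * dmar_perf_latency: reading the file prints a snapshot of the collected
 * latency statistics for each IOMMU; writing 0 disables all latency
 * counting, while writing 1, 2, 3 or 4 enables counting of IOTLB
 * invalidation, device-TLB invalidation, IEC invalidation or page
 * request latency, respectively. For example (assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   # echo 1 > /sys/kernel/debug/iommu/intel/dmar_perf_latency
 *   # cat /sys/kernel/debug/iommu/intel/dmar_perf_latency
 */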
static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu,
			     struct dmar_drhd_unit *drhd)
{
	int ret;

	seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
		   iommu->name, drhd->reg_base_addr);

	ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
	if (ret < 0)
		seq_puts(m, "Failed to get latency snapshot");
	else
		seq_puts(m, debug_buf);
	seq_puts(m, "\n");
}

static int latency_show(struct seq_file *m, void *v)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		latency_show_one(m, iommu, drhd);
	rcu_read_unlock();

	return 0;
}

static int dmar_perf_latency_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, latency_show, NULL);
}

static ssize_t dmar_perf_latency_write(struct file *filp,
				       const char __user *ubuf,
				       size_t cnt, loff_t *ppos)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int counting;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (kstrtoint(buf, 0, &counting))
		return -EINVAL;

	switch (counting) {
	case 0:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd) {
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_IOTLB);
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_DEVTLB);
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_IEC);
			dmar_latency_disable(iommu, DMAR_LATENCY_PRQ);
		}
		rcu_read_unlock();
		break;
	case 1:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_IOTLB);
		rcu_read_unlock();
		break;
	case 2:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_DEVTLB);
		rcu_read_unlock();
		break;
	case 3:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_IEC);
		rcu_read_unlock();
		break;
	case 4:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_PRQ);
		rcu_read_unlock();
		break;
	default:
		return -EINVAL;
	}

	*ppos += cnt;
	return cnt;
}

static const struct file_operations dmar_perf_latency_fops = {
	.open		= dmar_perf_latency_open,
	.write		= dmar_perf_latency_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

void __init intel_iommu_debugfs_init(void)
{
	intel_iommu_debug = debugfs_create_dir("intel", iommu_debugfs_dir);

	debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
			    &iommu_regset_fops);
	debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
			    NULL, &dmar_translation_struct_fops);
	debugfs_create_file("invalidation_queue", 0444, intel_iommu_debug,
			    NULL, &invalidation_queue_fops);
#ifdef CONFIG_IRQ_REMAP
	debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
			    NULL, &ir_translation_struct_fops);
#endif
	debugfs_create_file("dmar_perf_latency", 0644, intel_iommu_debug,
			    NULL, &dmar_perf_latency_fops);
}

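/*
 * With the files created above, the top-level debugfs layout is, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   /sys/kernel/debug/iommu/intel/iommu_regset
 *   /sys/kernel/debug/iommu/intel/dmar_translation_struct
 *   /sys/kernel/debug/iommu/intel/invalidation_queue
 *   /sys/kernel/debug/iommu/intel/ir_translation_struct (CONFIG_IRQ_REMAP only)
 *   /sys/kernel/debug/iommu/intel/dmar_perf_latency
 */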
/*
 * Create a debugfs directory for each device, and then create a
 * debugfs file in this directory for users to dump the page table
 * of the default domain. e.g.
 * /sys/kernel/debug/iommu/intel/0000:00:01.0/domain_translation_struct
 */
void intel_iommu_debugfs_create_dev(struct device_domain_info *info)
{
	info->debugfs_dentry = debugfs_create_dir(dev_name(info->dev), intel_iommu_debug);

	debugfs_create_file("domain_translation_struct", 0444, info->debugfs_dentry,
			    info, &dev_domain_translation_struct_fops);
}

/* Remove the device debugfs directory. */
void intel_iommu_debugfs_remove_dev(struct device_domain_info *info)
{
	debugfs_remove_recursive(info->debugfs_dentry);
}

/*
 * Create a debugfs directory per pair of {device, pasid}, then create the
 * corresponding debugfs file in this directory for users to dump its page
 * table. e.g.
 * /sys/kernel/debug/iommu/intel/0000:00:01.0/1/domain_translation_struct
 *
 * These debugfs files only dump the page tables whose mappings are created
 * and destroyed by the iommu_map/unmap() interfaces. Check the mapping type
 * of the domain before creating the debugfs directory.
 */
void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev_pasid->dev);
	char dir_name[10];

	sprintf(dir_name, "%x", dev_pasid->pasid);
	dev_pasid->debugfs_dentry = debugfs_create_dir(dir_name, info->debugfs_dentry);

	debugfs_create_file("domain_translation_struct", 0444, dev_pasid->debugfs_dentry,
			    dev_pasid, &pasid_domain_translation_struct_fops);
}

/* Remove the device pasid debugfs directory. */
void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid)
{
	debugfs_remove_recursive(dev_pasid->debugfs_dentry);
}