Lines matching "ats" + "supported" in the Intel VT-d IOMMU driver (drivers/iommu/intel/iommu.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright © 2006-2014 Intel Corporation.
17 #include <linux/dma-direct.h>
21 #include <linux/pci-ats.h>
28 #include "../dma-iommu.h"
30 #include "../iommu-pages.h"
38 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
39 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
40 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
41 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
49 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
50 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
55 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
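
These width macros just shift and subtract: __DOMAIN_MAX_PFN() gives the last page frame number and __DOMAIN_MAX_ADDR() the last byte address reachable with a given guest address width, and DOMAIN_MAX_PFN() additionally clamps to (unsigned long)-1. A minimal standalone sketch (userspace C, assuming VTD_PAGE_SHIFT is 12 for 4 KiB pages) of what they evaluate to for a 48-bit width:

    #include <stdint.h>
    #include <stdio.h>

    #define VTD_PAGE_SHIFT 12   /* 4 KiB pages */
    #define MAX_PFN(gaw)  ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
    #define MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)

    int main(void)
    {
        /* 48-bit GAW: last pfn is 2^36 - 1, last address is 2^48 - 1 */
        printf("max pfn  = %#llx\n", (unsigned long long)MAX_PFN(48));
        printf("max addr = %#llx\n", (unsigned long long)MAX_ADDR(48));
        return 0;
    }
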
 62  * set to 1 to panic the kernel if VT-d can't be successfully enabled
77 if (!(re->lo & 1)) in root_entry_lctp()
80 return re->lo & VTD_PAGE_MASK; in root_entry_lctp()
89 if (!(re->hi & 1)) in root_entry_uctp()
92 return re->hi & VTD_PAGE_MASK; in root_entry_uctp()
101 if (*rid_lhs < PCI_DEVID(info->bus, info->devfn)) in device_rid_cmp_key()
102 return -1; in device_rid_cmp_key()
104 if (*rid_lhs > PCI_DEVID(info->bus, info->devfn)) in device_rid_cmp_key()
114 u16 key = PCI_DEVID(info->bus, info->devfn); in device_rid_cmp()
120 * Looks up an IOMMU-probed device using its source ID.
136 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_find()
137 node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key); in device_rbtree_find()
140 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_find()
142 return info ? info->dev : NULL; in device_rbtree_find()
151 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_insert()
152 curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp); in device_rbtree_insert()
153 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_insert()
155 return -EEXIST; in device_rbtree_insert()
162 struct intel_iommu *iommu = info->iommu; in device_rbtree_remove()
165 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_remove()
166 rb_erase(&info->node, &iommu->device_rbtree); in device_rbtree_remove()
167 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_remove()
193 u8 atc_required:1; /* ATS is required */
223 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
228 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
235 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
237 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
243 return -EINVAL; in intel_iommu_setup()
263 pr_info("Disable supported super page\n"); in intel_iommu_setup()
272 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); in intel_iommu_setup()
275 pr_notice("Unknown option - '%s'\n", str); in intel_iommu_setup()
289 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in domain_pfn_supported()
295 * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
296 * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
303 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
304 sl_sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_sagaw()
307 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in __iommu_calculate_sagaw()
311 if (!ecap_slts(iommu->ecap)) in __iommu_calculate_sagaw()
323 for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) { in __iommu_calculate_agaw()
 342  * get a smaller supported agaw for iommus that don't support the default agaw.
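
Taken together, the SAGAW bitmap and the AGAW loop above implement a simple "widest width that fits" search: bit N of SAGAW set means an (N+2)-level table, i.e. a 30 + 9*N bit address width, is supported, and the loop walks down from the requested width until it finds a set bit. A hedged standalone sketch of that selection (plain C; the helper name and exact rounding are illustrative, not the kernel functions):

    /* Pick the widest supported AGAW that does not exceed max_gaw bits.
     * Bit N of sagaw set => 30 + 9*N bit width, an (N+2)-level table.
     */
    static int pick_agaw(unsigned int sagaw, int max_gaw)
    {
        int agaw;

        for (agaw = (max_gaw - 30) / 9; agaw >= 0; agaw--)
            if (sagaw & (1u << agaw))
                return agaw;    /* e.g. 2 => 48-bit, 4-level */
        return -1;              /* no usable width */
    }
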
352 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
355 /* Return the super pagesize bitmap if supported. */
361 * 1-level super page supports page size of 2MiB, 2-level super page in domain_super_pgsize_bitmap()
364 if (domain->iommu_superpage == 1) in domain_super_pgsize_bitmap()
366 else if (domain->iommu_superpage == 2) in domain_super_pgsize_bitmap()
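
The comment above boils down to a small page-size bitmap: a 1-level super page adds 2 MiB mappings and a 2-level one adds 1 GiB on top. A rough sketch of that helper's shape (using the generic SZ_2M/SZ_1G constants from <linux/sizes.h>; not a verbatim copy of the driver function):

    #include <linux/sizes.h>

    static unsigned long super_pgsize_bitmap(int iommu_superpage)
    {
        if (iommu_superpage == 1)
            return SZ_2M;              /* 2 MiB super pages only */
        else if (iommu_superpage == 2)
            return SZ_2M | SZ_1G;      /* 2 MiB and 1 GiB super pages */
        return 0;                      /* 4 KiB pages only */
    }
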
375 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
386 entry = &root->lo; in iommu_context_addr()
389 devfn -= 0x80; in iommu_context_addr()
390 entry = &root->hi; in iommu_context_addr()
401 context = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); in iommu_context_addr()
414 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
415 * sub-hierarchy of a candidate PCI-PCI bridge
416 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
417 * @bridge: the candidate PCI-PCI bridge
419 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
432 if (pbridge->subordinate && in is_downstream_to_pci_bridge()
433 pbridge->subordinate->number <= pdev->bus->number && in is_downstream_to_pci_bridge()
434 pbridge->subordinate->busn_res.end >= pdev->bus->number) in is_downstream_to_pci_bridge()
451 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); in quirk_ioat_snb_local_iommu()
454 dev_info(&pdev->dev, "failed to run vt-d quirk\n"); in quirk_ioat_snb_local_iommu()
461 if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) { in quirk_ioat_snb_local_iommu()
462 …pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"… in quirk_ioat_snb_local_iommu()
472 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
478 if (pdev->vendor == PCI_VENDOR_ID_INTEL && in iommu_is_dummy()
479 pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB && in iommu_is_dummy()
507 dev = &pf_pdev->dev; in device_lookup_iommu()
508 segment = pci_domain_nr(pdev->bus); in device_lookup_iommu()
510 dev = &ACPI_COMPANION(dev)->dev; in device_lookup_iommu()
514 if (pdev && segment != drhd->segment) in device_lookup_iommu()
517 for_each_active_dev_scope(drhd->devices, in device_lookup_iommu()
518 drhd->devices_cnt, i, tmp) { in device_lookup_iommu()
524 if (pdev && pdev->is_virtfn) in device_lookup_iommu()
528 *bus = drhd->devices[i].bus; in device_lookup_iommu()
529 *devfn = drhd->devices[i].devfn; in device_lookup_iommu()
538 if (pdev && drhd->include_all) { in device_lookup_iommu()
541 *bus = pdev->bus->number; in device_lookup_iommu()
542 *devfn = pdev->devfn; in device_lookup_iommu()
560 if (!domain->iommu_coherency) in domain_flush_cache()
569 if (!iommu->root_entry) in free_context_table()
585 iommu_free_page(iommu->root_entry); in free_context_table()
586 iommu->root_entry = NULL; in free_context_table()
600 pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val); in pgtable_walk()
603 pr_info("page table not present at level %d\n", level - 1); in pgtable_walk()
611 level--; in pgtable_walk()
627 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
630 if (!iommu->root_entry) { in dmar_fault_dump_ptes()
634 rt_entry = &iommu->root_entry[bus]; in dmar_fault_dump_ptes()
638 rt_entry->hi, rt_entry->lo); in dmar_fault_dump_ptes()
640 pr_info("root entry: 0x%016llx", rt_entry->lo); in dmar_fault_dump_ptes()
650 ctx_entry->hi, ctx_entry->lo); in dmar_fault_dump_ptes()
658 level = agaw_to_level(ctx_entry->hi & 7); in dmar_fault_dump_ptes()
659 pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
669 dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
671 /* For request-without-pasid, get the pasid from context entry */ in dmar_fault_dump_ptes()
677 pr_info("pasid dir entry: 0x%016llx\n", pde->val); in dmar_fault_dump_ptes()
687 for (i = 0; i < ARRAY_SIZE(pte->val); i++) in dmar_fault_dump_ptes()
688 pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]); in dmar_fault_dump_ptes()
696 level = pte->val[2] & BIT_ULL(2) ? 5 : 4; in dmar_fault_dump_ptes()
697 pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
699 level = agaw_to_level((pte->val[0] >> 2) & 0x7); in dmar_fault_dump_ptes()
700 pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
713 int level = agaw_to_level(domain->agaw); in pfn_to_dma_pte()
720 parent = domain->pgd; in pfn_to_dma_pte()
735 tmp_page = iommu_alloc_page_node(domain->nid, gfp); in pfn_to_dma_pte()
742 if (domain->use_first_level) in pfn_to_dma_pte()
746 if (!try_cmpxchg64(&pte->val, &tmp, pteval)) in pfn_to_dma_pte()
756 level--; in pfn_to_dma_pte()
771 int total = agaw_to_level(domain->agaw); in dma_pfn_level_pte()
774 parent = domain->pgd; in dma_pfn_level_pte()
792 total--; in dma_pfn_level_pte()
824 (void *)pte - (void *)first_pte); in dma_pte_clear_range()
848 dma_pte_free_level(domain, level - 1, retain_level, in dma_pte_free_level()
858 last_pfn < level_pfn + level_size(level) - 1)) { in dma_pte_free_level()
880 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level, in dma_pte_free_pagetable()
881 domain->pgd, 0, start_pfn, last_pfn); in dma_pte_free_pagetable()
884 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
885 iommu_free_page(domain->pgd); in dma_pte_free_pagetable()
886 domain->pgd = NULL; in dma_pte_free_pagetable()
893 know the hardware page-walk will no longer touch them.
903 list_add_tail(&pg->lru, freelist); in dma_pte_list_pagetables()
911 dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_list_pagetables()
934 last_pfn >= level_pfn + level_size(level) - 1) { in dma_pte_clear_level()
938 dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_clear_level()
946 dma_pte_clear_level(domain, level - 1, in dma_pte_clear_level()
957 (void *)++last_pte - (void *)first_pte); in dma_pte_clear_level()
971 dma_pte_clear_level(domain, agaw_to_level(domain->agaw), in domain_unmap()
972 domain->pgd, 0, start_pfn, last_pfn, freelist); in domain_unmap()
975 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in domain_unmap()
976 struct page *pgd_page = virt_to_page(domain->pgd); in domain_unmap()
977 list_add_tail(&pgd_page->lru, freelist); in domain_unmap()
978 domain->pgd = NULL; in domain_unmap()
987 root = iommu_alloc_page_node(iommu->node, GFP_ATOMIC); in iommu_alloc_root_entry()
990 iommu->name); in iommu_alloc_root_entry()
991 return -ENOMEM; in iommu_alloc_root_entry()
995 iommu->root_entry = root; in iommu_alloc_root_entry()
1006 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1010 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1011 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1013 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1019 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1025 if (cap_esrtps(iommu->cap)) in iommu_set_root_entry()
1028 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1031 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
1039 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1042 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1043 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1049 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1072 pr_warn("%s: Unexpected context-cache invalidation type 0x%llx\n", in __iommu_flush_context()
1073 iommu->name, type); in __iommu_flush_context()
1078 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1079 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1085 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1091 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1110 iommu->name, type); in __iommu_flush_iotlb()
1114 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1117 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1120 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1121 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1127 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1145 spin_lock_irqsave(&domain->lock, flags); in domain_lookup_dev_info()
1146 list_for_each_entry(info, &domain->devices, link) { in domain_lookup_dev_info()
1147 if (info->iommu == iommu && info->bus == bus && in domain_lookup_dev_info()
1148 info->devfn == devfn) { in domain_lookup_dev_info()
1149 spin_unlock_irqrestore(&domain->lock, flags); in domain_lookup_dev_info()
1153 spin_unlock_irqrestore(&domain->lock, flags); in domain_lookup_dev_info()
1161 * check because it applies only to the built-in QAT devices and it doesn't
1167 if (pdev->vendor != PCI_VENDOR_ID_INTEL) in dev_needs_extra_dtlb_flush()
1170 if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK) in dev_needs_extra_dtlb_flush()
1180 if (!dev_is_pci(info->dev)) in iommu_enable_pci_caps()
1183 pdev = to_pci_dev(info->dev); in iommu_enable_pci_caps()
1184 if (info->ats_supported && pci_ats_page_aligned(pdev) && in iommu_enable_pci_caps()
1186 info->ats_enabled = 1; in iommu_enable_pci_caps()
1193 if (!dev_is_pci(info->dev)) in iommu_disable_pci_caps()
1196 pdev = to_pci_dev(info->dev); in iommu_disable_pci_caps()
1198 if (info->ats_enabled) { in iommu_disable_pci_caps()
1200 info->ats_enabled = 0; in iommu_disable_pci_caps()
1214 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1217 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1218 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1220 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1226 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1234 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1235 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1236 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1242 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1250 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1251 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1254 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1255 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1256 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1262 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1269 ndomains = cap_ndoms(iommu->cap); in iommu_init_domains()
1270 pr_debug("%s: Number of Domains supported <%d>\n", in iommu_init_domains()
1271 iommu->name, ndomains); in iommu_init_domains()
1273 spin_lock_init(&iommu->lock); in iommu_init_domains()
1275 iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL); in iommu_init_domains()
1276 if (!iommu->domain_ids) in iommu_init_domains()
1277 return -ENOMEM; in iommu_init_domains()
1281 * with domain-id 0, hence we need to pre-allocate it. We also in iommu_init_domains()
1282 * use domain-id 0 as a marker for non-allocated domain-id, so in iommu_init_domains()
1285 set_bit(0, iommu->domain_ids); in iommu_init_domains()
1288 * Vt-d spec rev3.0 (section 6.2.3.1) requires that each pasid in iommu_init_domains()
1289 * entry for first-level or pass-through translation modes should in iommu_init_domains()
1291 * second-level or nested translation. We reserve a domain id for in iommu_init_domains()
1295 set_bit(FLPT_DEFAULT_DID, iommu->domain_ids); in iommu_init_domains()
1302 if (!iommu->domain_ids) in disable_dmar_iommu()
1309 if (WARN_ON(bitmap_weight(iommu->domain_ids, cap_ndoms(iommu->cap)) in disable_dmar_iommu()
1313 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1319 if (iommu->domain_ids) { in free_dmar_iommu()
1320 bitmap_free(iommu->domain_ids); in free_dmar_iommu()
1321 iommu->domain_ids = NULL; in free_dmar_iommu()
1324 if (iommu->copied_tables) { in free_dmar_iommu()
1325 bitmap_free(iommu->copied_tables); in free_dmar_iommu()
1326 iommu->copied_tables = NULL; in free_dmar_iommu()
1332 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1347 if (ecap_flts(iommu->ecap) ^ ecap_slts(iommu->ecap)) in first_level_by_default()
1348 return ecap_flts(iommu->ecap); in first_level_by_default()
1357 int num, ret = -ENOSPC; in domain_attach_iommu()
1359 if (domain->domain.type == IOMMU_DOMAIN_SVA) in domain_attach_iommu()
1364 return -ENOMEM; in domain_attach_iommu()
1366 spin_lock(&iommu->lock); in domain_attach_iommu()
1367 curr = xa_load(&domain->iommu_array, iommu->seq_id); in domain_attach_iommu()
1369 curr->refcnt++; in domain_attach_iommu()
1370 spin_unlock(&iommu->lock); in domain_attach_iommu()
1375 ndomains = cap_ndoms(iommu->cap); in domain_attach_iommu()
1376 num = find_first_zero_bit(iommu->domain_ids, ndomains); in domain_attach_iommu()
1378 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1382 set_bit(num, iommu->domain_ids); in domain_attach_iommu()
1383 info->refcnt = 1; in domain_attach_iommu()
1384 info->did = num; in domain_attach_iommu()
1385 info->iommu = iommu; in domain_attach_iommu()
1386 curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id, in domain_attach_iommu()
1389 ret = xa_err(curr) ? : -EBUSY; in domain_attach_iommu()
1393 spin_unlock(&iommu->lock); in domain_attach_iommu()
1397 clear_bit(info->did, iommu->domain_ids); in domain_attach_iommu()
1399 spin_unlock(&iommu->lock); in domain_attach_iommu()
1408 if (domain->domain.type == IOMMU_DOMAIN_SVA) in domain_detach_iommu()
1411 spin_lock(&iommu->lock); in domain_detach_iommu()
1412 info = xa_load(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1413 if (--info->refcnt == 0) { in domain_detach_iommu()
1414 clear_bit(info->did, iommu->domain_ids); in domain_detach_iommu()
1415 xa_erase(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1416 domain->nid = NUMA_NO_NODE; in domain_detach_iommu()
1419 spin_unlock(&iommu->lock); in domain_detach_iommu()
1424 if (domain->pgd) { in domain_exit()
1427 domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist); in domain_exit()
1431 if (WARN_ON(!list_empty(&domain->devices))) in domain_exit()
1434 kfree(domain->qi_batch); in domain_exit()
1440 * in-flight DMA and copied pgtable, but there is no unmapping
1442 * the newly-mapped device. For kdump, at this point, the device
1444 * in-flight DMA will exist, and we don't need to worry anymore
1456 assert_spin_locked(&iommu->lock); in copied_context_tear_down()
1461 if (did_old < cap_ndoms(iommu->cap)) { in copied_context_tear_down()
1462 iommu->flush.flush_context(iommu, did_old, in copied_context_tear_down()
1466 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in copied_context_tear_down()
1474 * It's a non-present to present mapping. If hardware doesn't cache
 1475  * non-present entries we only need to flush the write-buffer. If it
 1476  * _does_ cache non-present entries, then it does so in the special
1482 if (cap_caching_mode(iommu->cap)) { in context_present_cache_flush()
1483 iommu->flush.flush_context(iommu, 0, in context_present_cache_flush()
1487 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in context_present_cache_flush()
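
Per the comment above, the flush after a not-present to present context change depends on caching mode. A sketch of the whole branch, with the arguments that the listing truncates filled in from the usual pattern (treat those details as assumptions rather than a verbatim quote):

    if (cap_caching_mode(iommu->cap)) {
        /* Caching-mode HW may cache not-present entries under domain 0:
         * invalidate this device's context entry and do a domain-selective
         * IOTLB flush for the new domain id.
         */
        iommu->flush.flush_context(iommu, 0, PCI_DEVID(bus, devfn),
                                   DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
        iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
    } else {
        /* Not-present entries are never cached: flushing the write buffer
         * is enough to make the new entry visible to the hardware walker.
         */
        iommu_flush_write_buffer(iommu);
    }
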
1501 struct dma_pte *pgd = domain->pgd; in domain_context_mapping_one()
1508 spin_lock(&iommu->lock); in domain_context_mapping_one()
1509 ret = -ENOMEM; in domain_context_mapping_one()
1522 if (info && info->ats_supported) in domain_context_mapping_one()
1528 context_set_address_width(context, domain->agaw); in domain_context_mapping_one()
1532 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
1538 spin_unlock(&iommu->lock); in domain_context_mapping_one()
1546 struct device_domain_info *info = dev_iommu_priv_get(&pdev->dev); in domain_context_mapping_cb()
1547 struct intel_iommu *iommu = info->iommu; in domain_context_mapping_cb()
1558 struct intel_iommu *iommu = info->iommu; in domain_context_mapping()
1559 u8 bus = info->bus, devfn = info->devfn; in domain_context_mapping()
1575 support = domain->iommu_superpage; in hardware_largepage_caps()
1589 support--; in hardware_largepage_caps()
1613 start_pfn + lvl_pages - 1, in switch_to_super_page()
1638 if (unlikely(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1))) in __domain_mapping()
1639 return -EINVAL; in __domain_mapping()
1642 return -EINVAL; in __domain_mapping()
1644 if (!(prot & DMA_PTE_WRITE) && domain->nested_parent) { in __domain_mapping()
1645 …pr_err_ratelimited("Read-only mapping is disallowed on the domain which serves as the parent in a … in __domain_mapping()
1646 return -EINVAL; in __domain_mapping()
1651 if (domain->use_first_level) { in __domain_mapping()
1657 domain->has_mappings = true; in __domain_mapping()
1671 return -ENOMEM; in __domain_mapping()
1684 end_pfn = iov_pfn + pages_to_remove - 1; in __domain_mapping()
1695 if (!try_cmpxchg64_local(&pte->val, &tmp, pteval)) { in __domain_mapping()
1700 dumps--; in __domain_mapping()
1706 nr_pages -= lvl_pages; in __domain_mapping()
1727 (void *)pte - (void *)first_pte); in __domain_mapping()
1737 struct intel_iommu *iommu = info->iommu; in domain_context_clear_one()
1741 spin_lock(&iommu->lock); in domain_context_clear_one()
1744 spin_unlock(&iommu->lock); in domain_context_clear_one()
1751 spin_unlock(&iommu->lock); in domain_context_clear_one()
1797 struct dma_pte *pgd = domain->pgd; in domain_setup_first_level()
1800 level = agaw_to_level(domain->agaw); in domain_setup_first_level()
1802 return -EINVAL; in domain_setup_first_level()
1807 if (domain->force_snooping) in domain_setup_first_level()
1819 struct intel_iommu *iommu = info->iommu; in dmar_domain_attach_device()
1827 info->domain = domain; in dmar_domain_attach_device()
1828 spin_lock_irqsave(&domain->lock, flags); in dmar_domain_attach_device()
1829 list_add(&info->link, &domain->devices); in dmar_domain_attach_device()
1830 spin_unlock_irqrestore(&domain->lock, flags); in dmar_domain_attach_device()
1837 else if (domain->use_first_level) in dmar_domain_attach_device()
1861 * device_rmrr_is_relaxable - Test whether the RMRR of this device
1892 struct intel_iommu *iommu = info->iommu; in device_def_domain_type()
1898 if (!ecap_pass_through(iommu->ecap)) in device_def_domain_type()
1916 * (for example, while enabling interrupt-remapping) then in intel_iommu_init_qi()
1919 if (!iommu->qi) { in intel_iommu_init_qi()
1923 dmar_fault(-1, iommu); in intel_iommu_init_qi()
1925 * Disable queued invalidation if supported and already enabled in intel_iommu_init_qi()
1935 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
1936 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
1938 iommu->name); in intel_iommu_init_qi()
1940 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
1941 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
1942 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
1992 ret = -ENOMEM; in copy_context_table()
1998 new_ce = iommu_alloc_page_node(iommu->node, GFP_KERNEL); in copy_context_table()
2012 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
2013 set_bit(did, iommu->domain_ids); in copy_context_table()
2040 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
2051 return -EINVAL; in copy_translation_tables()
2053 iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL); in copy_translation_tables()
2054 if (!iommu->copied_tables) in copy_translation_tables()
2055 return -ENOMEM; in copy_translation_tables()
2059 return -EINVAL; in copy_translation_tables()
2063 return -ENOMEM; in copy_translation_tables()
2065 /* This is too big for the stack - allocate it from slab */ in copy_translation_tables()
2067 ret = -ENOMEM; in copy_translation_tables()
2077 iommu->name, bus); in copy_translation_tables()
2082 spin_lock(&iommu->lock); in copy_translation_tables()
2091 iommu->root_entry[bus].lo = val; in copy_translation_tables()
2098 iommu->root_entry[bus].hi = val; in copy_translation_tables()
2101 spin_unlock(&iommu->lock); in copy_translation_tables()
2105 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
2126 if (drhd->ignored) { in init_dmars()
2134 * than the smallest supported. in init_dmars()
2137 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
2155 iommu->name); in init_dmars()
2168 pr_info("Translation already enabled - trying to copy translation structures\n"); in init_dmars()
2174 * enabled - but failed to copy over the in init_dmars()
2175 * old root-entry table. Try to proceed in init_dmars()
2177 * allocating a clean root-entry table. in init_dmars()
2182 iommu->name); in init_dmars()
2187 iommu->name); in init_dmars()
2214 if (drhd->ignored) { in init_dmars()
2226 if (ecap_prs(iommu->ecap)) { in init_dmars()
2261 if (!drhd->include_all) { in init_no_remapping_devices()
2262 for_each_active_dev_scope(drhd->devices, in init_no_remapping_devices()
2263 drhd->devices_cnt, i, dev) in init_no_remapping_devices()
2266 if (i == drhd->devices_cnt) in init_no_remapping_devices()
2267 drhd->ignored = 1; in init_no_remapping_devices()
2272 if (drhd->include_all) in init_no_remapping_devices()
2275 for_each_active_dev_scope(drhd->devices, in init_no_remapping_devices()
2276 drhd->devices_cnt, i, dev) in init_no_remapping_devices()
2279 if (i < drhd->devices_cnt) in init_no_remapping_devices()
2284 drhd->gfx_dedicated = 1; in init_no_remapping_devices()
2286 drhd->ignored = 1; in init_no_remapping_devices()
2298 if (iommu->qi) { in init_iommu_hw()
2306 if (drhd->ignored) { in init_iommu_hw()
2331 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
2333 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
2349 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
2351 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
2352 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
2353 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
2354 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
2355 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
2356 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
2357 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
2358 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
2360 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
2381 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
2383 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
2384 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
2385 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
2386 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
2387 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
2388 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
2389 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
2390 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
2392 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
2412 if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) || in rmrr_sanity_check()
2413 !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) || in rmrr_sanity_check()
2414 rmrr->end_address <= rmrr->base_address || in rmrr_sanity_check()
2416 return -EINVAL; in rmrr_sanity_check()
2429 "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n" in dmar_parse_one_rmrr()
2431 rmrr->base_address, rmrr->end_address, in dmar_parse_one_rmrr()
2442 rmrru->hdr = header; in dmar_parse_one_rmrr()
2444 rmrru->base_address = rmrr->base_address; in dmar_parse_one_rmrr()
2445 rmrru->end_address = rmrr->end_address; in dmar_parse_one_rmrr()
2447 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), in dmar_parse_one_rmrr()
2448 ((void *)rmrr) + rmrr->header.length, in dmar_parse_one_rmrr()
2449 &rmrru->devices_cnt); in dmar_parse_one_rmrr()
2450 if (rmrru->devices_cnt && rmrru->devices == NULL) in dmar_parse_one_rmrr()
2453 list_add(&rmrru->list, &dmar_rmrr_units); in dmar_parse_one_rmrr()
2459 return -ENOMEM; in dmar_parse_one_rmrr()
2469 tmp = (struct acpi_dmar_atsr *)atsru->hdr; in dmar_find_atsr()
2470 if (atsr->segment != tmp->segment) in dmar_find_atsr()
2472 if (atsr->header.length != tmp->header.length) in dmar_find_atsr()
2474 if (memcmp(atsr, tmp, atsr->header.length) == 0) in dmar_find_atsr()
2494 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL); in dmar_parse_one_atsr()
2496 return -ENOMEM; in dmar_parse_one_atsr()
2503 atsru->hdr = (void *)(atsru + 1); in dmar_parse_one_atsr()
2504 memcpy(atsru->hdr, hdr, hdr->length); in dmar_parse_one_atsr()
2505 atsru->include_all = atsr->flags & 0x1; in dmar_parse_one_atsr()
2506 if (!atsru->include_all) { in dmar_parse_one_atsr()
2507 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), in dmar_parse_one_atsr()
2508 (void *)atsr + atsr->header.length, in dmar_parse_one_atsr()
2509 &atsru->devices_cnt); in dmar_parse_one_atsr()
2510 if (atsru->devices_cnt && atsru->devices == NULL) { in dmar_parse_one_atsr()
2512 return -ENOMEM; in dmar_parse_one_atsr()
2516 list_add_rcu(&atsru->list, &dmar_atsr_units); in dmar_parse_one_atsr()
2523 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt); in intel_iommu_free_atsr()
2535 list_del_rcu(&atsru->list); in dmar_release_one_atsr()
2555 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) { in dmar_check_one_atsr()
2556 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt, in dmar_check_one_atsr()
2558 return -EBUSY; in dmar_check_one_atsr()
2571 tmp = (struct acpi_dmar_satc *)satcu->hdr; in dmar_find_satc()
2572 if (satc->segment != tmp->segment) in dmar_find_satc()
2574 if (satc->header.length != tmp->header.length) in dmar_find_satc()
2576 if (memcmp(satc, tmp, satc->header.length) == 0) in dmar_find_satc()
2596 satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL); in dmar_parse_one_satc()
2598 return -ENOMEM; in dmar_parse_one_satc()
2600 satcu->hdr = (void *)(satcu + 1); in dmar_parse_one_satc()
2601 memcpy(satcu->hdr, hdr, hdr->length); in dmar_parse_one_satc()
2602 satcu->atc_required = satc->flags & 0x1; in dmar_parse_one_satc()
2603 satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1), in dmar_parse_one_satc()
2604 (void *)satc + satc->header.length, in dmar_parse_one_satc()
2605 &satcu->devices_cnt); in dmar_parse_one_satc()
2606 if (satcu->devices_cnt && !satcu->devices) { in dmar_parse_one_satc()
2608 return -ENOMEM; in dmar_parse_one_satc()
2610 list_add_rcu(&satcu->list, &dmar_satc_units); in dmar_parse_one_satc()
2617 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add()
2627 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
2638 if (dmaru->ignored) { in intel_iommu_add()
2650 if (ecap_prs(iommu->ecap)) { in intel_iommu_add()
2676 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug()
2681 return -EINVAL; in dmar_iommu_hotplug()
2700 list_del(&rmrru->list); in intel_iommu_free_dmars()
2701 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); in intel_iommu_free_dmars()
2706 list_del(&atsru->list); in intel_iommu_free_dmars()
2710 list_del(&satcu->list); in intel_iommu_free_dmars()
2711 dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt); in intel_iommu_free_dmars()
2727 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header); in dmar_find_matched_satc_unit()
2728 if (satc->segment != pci_domain_nr(dev->bus)) in dmar_find_matched_satc_unit()
2730 for_each_dev_scope(satcu->devices, satcu->devices_cnt, i, tmp) in dmar_find_matched_satc_unit()
2754 * This device supports ATS as it is in SATC table. in dmar_ats_supported()
2755 * When IOMMU is in legacy mode, enabling ATS is done in dmar_ats_supported()
2757 * ATS, hence OS should not enable this device ATS in dmar_ats_supported()
2760 return !(satcu->atc_required && !sm_supported(iommu)); in dmar_ats_supported()
2762 for (bus = dev->bus; bus; bus = bus->parent) { in dmar_ats_supported()
2763 bridge = bus->self; in dmar_ats_supported()
2764 /* If it's an integrated device, allow ATS */ in dmar_ats_supported()
2767 /* Connected via non-PCIe: no ATS */ in dmar_ats_supported()
2778 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); in dmar_ats_supported()
2779 if (atsr->segment != pci_domain_nr(dev->bus)) in dmar_ats_supported()
2782 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp) in dmar_ats_supported()
2783 if (tmp == &bridge->dev) in dmar_ats_supported()
2786 if (atsru->include_all) in dmar_ats_supported()
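
Condensing the ATS decision walked through above: a SATC-listed device may use ATS unless the SATC unit requires ATC while the IOMMU runs in legacy (non-scalable) mode; any other device needs its upstream root port to be covered by an ATSR unit (or one with include_all set). A condensed sketch, not the kernel function; bridge_covered_by_atsr() is a hypothetical stand-in for the root-port walk shown in the listing:

    static bool ats_usable(struct pci_dev *pdev, struct intel_iommu *iommu)
    {
        struct dmar_satc_unit *satcu = dmar_find_matched_satc_unit(pdev);

        if (satcu)
            return !(satcu->atc_required && !sm_supported(iommu));

        /* hypothetical helper standing in for the dmar_atsr_units loop */
        return bridge_covered_by_atsr(pdev);
    }
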
2810 rmrr = container_of(rmrru->hdr, in dmar_iommu_notify_scope_dev()
2812 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2814 ((void *)rmrr) + rmrr->header.length, in dmar_iommu_notify_scope_dev()
2815 rmrr->segment, rmrru->devices, in dmar_iommu_notify_scope_dev()
2816 rmrru->devices_cnt); in dmar_iommu_notify_scope_dev()
2819 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2820 dmar_remove_dev_scope(info, rmrr->segment, in dmar_iommu_notify_scope_dev()
2821 rmrru->devices, rmrru->devices_cnt); in dmar_iommu_notify_scope_dev()
2826 if (atsru->include_all) in dmar_iommu_notify_scope_dev()
2829 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); in dmar_iommu_notify_scope_dev()
2830 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2832 (void *)atsr + atsr->header.length, in dmar_iommu_notify_scope_dev()
2833 atsr->segment, atsru->devices, in dmar_iommu_notify_scope_dev()
2834 atsru->devices_cnt); in dmar_iommu_notify_scope_dev()
2839 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2840 if (dmar_remove_dev_scope(info, atsr->segment, in dmar_iommu_notify_scope_dev()
2841 atsru->devices, atsru->devices_cnt)) in dmar_iommu_notify_scope_dev()
2846 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header); in dmar_iommu_notify_scope_dev()
2847 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2849 (void *)satc + satc->header.length, in dmar_iommu_notify_scope_dev()
2850 satc->segment, satcu->devices, in dmar_iommu_notify_scope_dev()
2851 satcu->devices_cnt); in dmar_iommu_notify_scope_dev()
2856 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2857 if (dmar_remove_dev_scope(info, satc->segment, in dmar_iommu_notify_scope_dev()
2858 satcu->devices, satcu->devices_cnt)) in dmar_iommu_notify_scope_dev()
2906 u32 ver = readl(iommu->reg + DMAR_VER_REG); in version_show()
2916 return sysfs_emit(buf, "%llx\n", iommu->reg_phys); in address_show()
2924 return sysfs_emit(buf, "%llx\n", iommu->cap); in cap_show()
2932 return sysfs_emit(buf, "%llx\n", iommu->ecap); in ecap_show()
2940 return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap)); in domains_supported_show()
2949 bitmap_weight(iommu->domain_ids, in domains_used_show()
2950 cap_ndoms(iommu->cap))); in domains_used_show()
2965 .name = "intel-iommu",
2979 if (pdev->external_facing) { in has_external_pci()
2993 pr_info("Intel-IOMMU force enabled due to platform opt in\n"); in platform_optin_force_iommu()
2996 * If Intel-IOMMU is disabled by default, we will apply identity in platform_optin_force_iommu()
3011 /* To avoid a -Wunused-but-set-variable warning. */ in probe_acpi_namespace_devices()
3017 for_each_active_dev_scope(drhd->devices, in probe_acpi_namespace_devices()
3018 drhd->devices_cnt, i, dev) { in probe_acpi_namespace_devices()
3022 if (dev->bus != &acpi_bus_type) in probe_acpi_namespace_devices()
3026 mutex_lock(&adev->physical_node_lock); in probe_acpi_namespace_devices()
3028 &adev->physical_node_list, node) { in probe_acpi_namespace_devices()
3029 ret = iommu_probe_device(pn->dev); in probe_acpi_namespace_devices()
3033 mutex_unlock(&adev->physical_node_lock); in probe_acpi_namespace_devices()
3049 pr_warn("Forcing Intel-IOMMU to enabled\n"); in tboot_force_iommu()
3059 int ret = -ENODEV; in intel_iommu_init()
3145 * page-selective invalidations that are required for efficient in intel_iommu_init()
3148 * the virtual and physical IOMMU page-tables. in intel_iommu_init()
3150 if (cap_caching_mode(iommu->cap) && in intel_iommu_init()
3155 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
3157 "%s", iommu->name); in intel_iommu_init()
3158 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in intel_iommu_init()
3168 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
3196 * NB - intel-iommu lacks any sort of reference counting for the users of
3203 if (!dev_is_pci(info->dev)) { in domain_context_clear()
3204 domain_context_clear_one(info, info->bus, info->devfn); in domain_context_clear()
3208 pci_for_each_dma_alias(to_pci_dev(info->dev), in domain_context_clear()
3220 struct intel_iommu *iommu = info->iommu; in device_block_translation()
3223 if (info->domain) in device_block_translation()
3224 cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID); in device_block_translation()
3235 if (!info->domain) in device_block_translation()
3238 spin_lock_irqsave(&info->domain->lock, flags); in device_block_translation()
3239 list_del(&info->link); in device_block_translation()
3240 spin_unlock_irqrestore(&info->domain->lock, flags); in device_block_translation()
3242 domain_detach_iommu(info->domain, iommu); in device_block_translation()
3243 info->domain = NULL; in device_block_translation()
3266 return cap_fl1gp_support(iommu->cap) ? 2 : 1; in iommu_superpage_capability()
3268 return fls(cap_super_page_val(iommu->cap)); in iommu_superpage_capability()
3274 struct intel_iommu *iommu = info->iommu; in paging_domain_alloc()
3280 return ERR_PTR(-ENOMEM); in paging_domain_alloc()
3282 INIT_LIST_HEAD(&domain->devices); in paging_domain_alloc()
3283 INIT_LIST_HEAD(&domain->dev_pasids); in paging_domain_alloc()
3284 INIT_LIST_HEAD(&domain->cache_tags); in paging_domain_alloc()
3285 spin_lock_init(&domain->lock); in paging_domain_alloc()
3286 spin_lock_init(&domain->cache_lock); in paging_domain_alloc()
3287 xa_init(&domain->iommu_array); in paging_domain_alloc()
3289 domain->nid = dev_to_node(dev); in paging_domain_alloc()
3290 domain->use_first_level = first_stage; in paging_domain_alloc()
3293 addr_width = agaw_to_width(iommu->agaw); in paging_domain_alloc()
3294 if (addr_width > cap_mgaw(iommu->cap)) in paging_domain_alloc()
3295 addr_width = cap_mgaw(iommu->cap); in paging_domain_alloc()
3296 domain->gaw = addr_width; in paging_domain_alloc()
3297 domain->agaw = iommu->agaw; in paging_domain_alloc()
3298 domain->max_addr = __DOMAIN_MAX_ADDR(addr_width); in paging_domain_alloc()
3301 domain->iommu_coherency = iommu_paging_structure_coherency(iommu); in paging_domain_alloc()
3304 domain->domain.pgsize_bitmap = SZ_4K; in paging_domain_alloc()
3305 domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage); in paging_domain_alloc()
3306 domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain); in paging_domain_alloc()
3309 * IOVA aperture: First-level translation restricts the input-address in paging_domain_alloc()
3311 * as address bit [N-1], where N is 48-bits with 4-level paging and in paging_domain_alloc()
3312 * 57-bits with 5-level paging). Hence, skip bit [N-1]. in paging_domain_alloc()
3314 domain->domain.geometry.force_aperture = true; in paging_domain_alloc()
3315 domain->domain.geometry.aperture_start = 0; in paging_domain_alloc()
3317 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1); in paging_domain_alloc()
3319 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); in paging_domain_alloc()
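
The aperture comment above amounts to dropping one address bit for first-stage tables: with a 48-bit guest address width, bit 47 is treated as the canonical/sign bit, so the usable aperture ends at 2^47 - 1 instead of 2^48 - 1. Worked out with the same width macro as earlier (illustrative values only):

    #include <stdint.h>

    #define MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)

    /* 48-bit guest address width, 4-level first-stage paging */
    static const uint64_t first_stage_end  = MAX_ADDR(48 - 1); /* 0x00007fffffffffff */
    static const uint64_t second_stage_end = MAX_ADDR(48);     /* 0x0000ffffffffffff */
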
3322 domain->pgd = iommu_alloc_page_node(domain->nid, GFP_KERNEL); in paging_domain_alloc()
3323 if (!domain->pgd) { in paging_domain_alloc()
3325 return ERR_PTR(-ENOMEM); in paging_domain_alloc()
3327 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); in paging_domain_alloc()
3339 struct intel_iommu *iommu = info->iommu; in intel_iommu_domain_alloc_paging_flags()
3347 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3349 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3351 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3359 if (!sm_supported(iommu) || !ecap_slts(iommu->ecap)) in intel_iommu_domain_alloc_paging_flags()
3360 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3369 domain = &dmar_domain->domain; in intel_iommu_domain_alloc_paging_flags()
3370 domain->type = IOMMU_DOMAIN_UNMANAGED; in intel_iommu_domain_alloc_paging_flags()
3371 domain->owner = &intel_iommu_ops; in intel_iommu_domain_alloc_paging_flags()
3372 domain->ops = intel_iommu_ops.default_domain_ops; in intel_iommu_domain_alloc_paging_flags()
3375 dmar_domain->nested_parent = true; in intel_iommu_domain_alloc_paging_flags()
3376 INIT_LIST_HEAD(&dmar_domain->s1_domains); in intel_iommu_domain_alloc_paging_flags()
3377 spin_lock_init(&dmar_domain->s1_lock); in intel_iommu_domain_alloc_paging_flags()
3381 if (dmar_domain->use_first_level) { in intel_iommu_domain_alloc_paging_flags()
3383 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3385 domain->dirty_ops = &intel_dirty_ops; in intel_iommu_domain_alloc_paging_flags()
3395 WARN_ON(dmar_domain->nested_parent && in intel_iommu_domain_free()
3396 !list_empty(&dmar_domain->s1_domains)); in intel_iommu_domain_free()
3404 struct intel_iommu *iommu = info->iommu; in paging_domain_compatible()
3407 if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING))) in paging_domain_compatible()
3408 return -EPERM; in paging_domain_compatible()
3410 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) in paging_domain_compatible()
3411 return -EINVAL; in paging_domain_compatible()
3413 if (domain->dirty_ops && !ssads_supported(iommu)) in paging_domain_compatible()
3414 return -EINVAL; in paging_domain_compatible()
3416 if (dmar_domain->iommu_coherency != in paging_domain_compatible()
3418 return -EINVAL; in paging_domain_compatible()
3420 if (dmar_domain->iommu_superpage != in paging_domain_compatible()
3421 iommu_superpage_capability(iommu, dmar_domain->use_first_level)) in paging_domain_compatible()
3422 return -EINVAL; in paging_domain_compatible()
3424 if (dmar_domain->use_first_level && in paging_domain_compatible()
3425 (!sm_supported(iommu) || !ecap_flts(iommu->ecap))) in paging_domain_compatible()
3426 return -EINVAL; in paging_domain_compatible()
3429 addr_width = agaw_to_width(iommu->agaw); in paging_domain_compatible()
3430 if (addr_width > cap_mgaw(iommu->cap)) in paging_domain_compatible()
3431 addr_width = cap_mgaw(iommu->cap); in paging_domain_compatible()
3433 if (dmar_domain->gaw > addr_width || dmar_domain->agaw > iommu->agaw) in paging_domain_compatible()
3434 return -EINVAL; in paging_domain_compatible()
3437 context_copied(iommu, info->bus, info->devfn)) in paging_domain_compatible()
3469 if (dmar_domain->set_pte_snp) in intel_iommu_map()
3473 if (dmar_domain->max_addr < max_addr) { in intel_iommu_map()
3477 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1; in intel_iommu_map()
3481 __func__, dmar_domain->gaw, max_addr); in intel_iommu_map()
3482 return -EFAULT; in intel_iommu_map()
3484 dmar_domain->max_addr = max_addr; in intel_iommu_map()
3503 return -EINVAL; in intel_iommu_map_pages()
3506 return -EINVAL; in intel_iommu_map_pages()
3524 size argument if it happens to be a large-page mapping. */ in intel_iommu_unmap()
3533 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT; in intel_iommu_unmap()
3535 domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist); in intel_iommu_unmap()
3537 if (dmar_domain->max_addr == iova + size) in intel_iommu_unmap()
3538 dmar_domain->max_addr = iova; in intel_iommu_unmap()
3541 * We do not use page-selective IOTLB invalidation in flush queue, in intel_iommu_unmap()
3564 cache_tag_flush_range(to_dmar_domain(domain), gather->start, in intel_iommu_tlb_sync()
3565 gather->end, list_empty(&gather->freelist)); in intel_iommu_tlb_sync()
3566 iommu_put_pages_list(&gather->freelist); in intel_iommu_tlb_sync()
3582 VTD_PAGE_SHIFT) - 1)); in intel_iommu_iova_to_phys()
3592 assert_spin_locked(&domain->lock); in domain_support_force_snooping()
3593 list_for_each_entry(info, &domain->devices, link) { in domain_support_force_snooping()
3594 if (!ecap_sc_support(info->iommu->ecap)) { in domain_support_force_snooping()
3607 assert_spin_locked(&domain->lock); in domain_set_force_snooping()
3609 * Second level page table supports per-PTE snoop control. The in domain_set_force_snooping()
3612 if (!domain->use_first_level) { in domain_set_force_snooping()
3613 domain->set_pte_snp = true; in domain_set_force_snooping()
3617 list_for_each_entry(info, &domain->devices, link) in domain_set_force_snooping()
3618 intel_pasid_setup_page_snoop_control(info->iommu, info->dev, in domain_set_force_snooping()
3627 if (dmar_domain->force_snooping) in intel_iommu_enforce_cache_coherency()
3630 spin_lock_irqsave(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
3632 (!dmar_domain->use_first_level && dmar_domain->has_mappings)) { in intel_iommu_enforce_cache_coherency()
3633 spin_unlock_irqrestore(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
3638 dmar_domain->force_snooping = true; in intel_iommu_enforce_cache_coherency()
3639 spin_unlock_irqrestore(&dmar_domain->lock, flags); in intel_iommu_enforce_cache_coherency()
3655 return ecap_sc_support(info->iommu->ecap); in intel_iommu_capable()
3657 return ssads_supported(info->iommu); in intel_iommu_capable()
3672 if (!iommu || !iommu->iommu.ops) in intel_iommu_probe_device()
3673 return ERR_PTR(-ENODEV); in intel_iommu_probe_device()
3677 return ERR_PTR(-ENOMEM); in intel_iommu_probe_device()
3680 info->bus = pdev->bus->number; in intel_iommu_probe_device()
3681 info->devfn = pdev->devfn; in intel_iommu_probe_device()
3682 info->segment = pci_domain_nr(pdev->bus); in intel_iommu_probe_device()
3684 info->bus = bus; in intel_iommu_probe_device()
3685 info->devfn = devfn; in intel_iommu_probe_device()
3686 info->segment = iommu->segment; in intel_iommu_probe_device()
3689 info->dev = dev; in intel_iommu_probe_device()
3690 info->iommu = iommu; in intel_iommu_probe_device()
3692 if (ecap_dev_iotlb_support(iommu->ecap) && in intel_iommu_probe_device()
3695 info->ats_supported = 1; in intel_iommu_probe_device()
3696 info->dtlb_extra_inval = dev_needs_extra_dtlb_flush(pdev); in intel_iommu_probe_device()
3705 if (ecap_dit(iommu->ecap)) in intel_iommu_probe_device()
3706 info->pfsid = pci_dev_id(pci_physfn(pdev)); in intel_iommu_probe_device()
3707 info->ats_qdep = pci_ats_queue_depth(pdev); in intel_iommu_probe_device()
3714 info->pasid_supported = features | 1; in intel_iommu_probe_device()
3717 if (info->ats_supported && ecap_prs(iommu->ecap) && in intel_iommu_probe_device()
3719 info->pri_supported = 1; in intel_iommu_probe_device()
3738 if (!context_copied(iommu, info->bus, info->devfn)) { in intel_iommu_probe_device()
3749 * device is undefined if you enable PASID support after ATS support. in intel_iommu_probe_device()
3753 if (info->pasid_supported && in intel_iommu_probe_device()
3754 !pci_enable_pasid(pdev, info->pasid_supported & ~1)) in intel_iommu_probe_device()
3755 info->pasid_enabled = 1; in intel_iommu_probe_device()
3757 return &iommu->iommu; in intel_iommu_probe_device()
3771 struct intel_iommu *iommu = info->iommu; in intel_iommu_release_device()
3773 if (info->pasid_enabled) { in intel_iommu_release_device()
3775 info->pasid_enabled = 0; in intel_iommu_release_device()
3778 mutex_lock(&iommu->iopf_lock); in intel_iommu_release_device()
3781 mutex_unlock(&iommu->iopf_lock); in intel_iommu_release_device()
3784 !context_copied(iommu, info->bus, info->devfn)) in intel_iommu_release_device()
3804 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, in intel_iommu_get_resv_regions()
3814 length = rmrr->end_address - rmrr->base_address + 1; in intel_iommu_get_resv_regions()
3819 resv = iommu_alloc_resv_region(rmrr->base_address, in intel_iommu_get_resv_regions()
3825 list_add_tail(&resv->list, head); in intel_iommu_get_resv_regions()
3834 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) { in intel_iommu_get_resv_regions()
3839 list_add_tail(&reg->list, head); in intel_iommu_get_resv_regions()
3845 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, in intel_iommu_get_resv_regions()
3849 list_add_tail(&reg->list, head); in intel_iommu_get_resv_regions()
3865 return -EINVAL; in intel_iommu_enable_sva()
3867 iommu = info->iommu; in intel_iommu_enable_sva()
3869 return -EINVAL; in intel_iommu_enable_sva()
3871 if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE)) in intel_iommu_enable_sva()
3872 return -ENODEV; in intel_iommu_enable_sva()
3874 if (!info->pasid_enabled || !info->ats_enabled) in intel_iommu_enable_sva()
3875 return -EINVAL; in intel_iommu_enable_sva()
3878 * Devices having device-specific I/O fault handling should not in intel_iommu_enable_sva()
3880 * capability of device-specific IOPF. Therefore, IOMMU can only in intel_iommu_enable_sva()
3881 * default that if the device driver enables SVA on a non-PRI in intel_iommu_enable_sva()
3884 if (!info->pri_supported) in intel_iommu_enable_sva()
3888 if (!info->pri_enabled) in intel_iommu_enable_sva()
3889 return -EINVAL; in intel_iommu_enable_sva()
3896 struct intel_iommu *iommu = info->iommu; in context_flip_pri()
3897 u8 bus = info->bus, devfn = info->devfn; in context_flip_pri()
3901 spin_lock(&iommu->lock); in context_flip_pri()
3903 spin_unlock(&iommu->lock); in context_flip_pri()
3904 return -EINVAL; in context_flip_pri()
3909 spin_unlock(&iommu->lock); in context_flip_pri()
3910 return -ENODEV; in context_flip_pri()
3919 if (!ecap_coherent(iommu->ecap)) in context_flip_pri()
3922 spin_unlock(&iommu->lock); in context_flip_pri()
3934 if (!pdev || !info || !info->ats_enabled || !info->pri_supported) in intel_iommu_enable_iopf()
3935 return -ENODEV; in intel_iommu_enable_iopf()
3937 if (info->pri_enabled) in intel_iommu_enable_iopf()
3938 return -EBUSY; in intel_iommu_enable_iopf()
3940 iommu = info->iommu; in intel_iommu_enable_iopf()
3942 return -EINVAL; in intel_iommu_enable_iopf()
3945 if (info->pasid_enabled && !pci_prg_resp_pasid_required(pdev)) in intel_iommu_enable_iopf()
3946 return -EINVAL; in intel_iommu_enable_iopf()
3952 ret = iopf_queue_add_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
3964 info->pri_enabled = 1; in intel_iommu_enable_iopf()
3970 iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
3978 struct intel_iommu *iommu = info->iommu; in intel_iommu_disable_iopf()
3980 if (!info->pri_enabled) in intel_iommu_disable_iopf()
3981 return -EINVAL; in intel_iommu_disable_iopf()
3990 iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_disable_iopf()
4001 info->pri_enabled = 0; in intel_iommu_disable_iopf()
4017 return -ENODEV; in intel_iommu_dev_enable_feat()
4032 return -ENODEV; in intel_iommu_dev_disable_feat()
4040 return translation_pre_enabled(info->iommu) && !info->domain; in intel_iommu_is_attach_deferred()
4050 if (pdev->untrusted) { in risky_device()
4053 pdev->vendor, pdev->device); in risky_device()
4063 cache_tag_flush_range_np(to_dmar_domain(domain), iova, iova + size - 1); in intel_iommu_iotlb_sync_map()
4073 struct intel_iommu *iommu = info->iommu; in domain_remove_dev_pasid()
4081 if (domain->type == IOMMU_DOMAIN_IDENTITY) in domain_remove_dev_pasid()
4085 spin_lock_irqsave(&dmar_domain->lock, flags); in domain_remove_dev_pasid()
4086 list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) { in domain_remove_dev_pasid()
4087 if (curr->dev == dev && curr->pasid == pasid) { in domain_remove_dev_pasid()
4088 list_del(&curr->link_domain); in domain_remove_dev_pasid()
4094 spin_unlock_irqrestore(&dmar_domain->lock, flags); in domain_remove_dev_pasid()
4107 intel_pasid_tear_down_entry(info->iommu, dev, pasid, false); in intel_iommu_remove_dev_pasid()
4117 struct intel_iommu *iommu = info->iommu; in domain_add_dev_pasid()
4124 return ERR_PTR(-ENOMEM); in domain_add_dev_pasid()
4134 dev_pasid->dev = dev; in domain_add_dev_pasid()
4135 dev_pasid->pasid = pasid; in domain_add_dev_pasid()
4136 spin_lock_irqsave(&dmar_domain->lock, flags); in domain_add_dev_pasid()
4137 list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids); in domain_add_dev_pasid()
4138 spin_unlock_irqrestore(&dmar_domain->lock, flags); in domain_add_dev_pasid()
4154 struct intel_iommu *iommu = info->iommu; in intel_iommu_set_dev_pasid()
4158 if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING))) in intel_iommu_set_dev_pasid()
4159 return -EINVAL; in intel_iommu_set_dev_pasid()
4162 return -EOPNOTSUPP; in intel_iommu_set_dev_pasid()
4164 if (domain->dirty_ops) in intel_iommu_set_dev_pasid()
4165 return -EINVAL; in intel_iommu_set_dev_pasid()
4167 if (context_copied(iommu, info->bus, info->devfn)) in intel_iommu_set_dev_pasid()
4168 return -EBUSY; in intel_iommu_set_dev_pasid()
4178 if (dmar_domain->use_first_level) in intel_iommu_set_dev_pasid()
4201 struct intel_iommu *iommu = info->iommu; in intel_iommu_hw_info()
4206 return ERR_PTR(-ENOMEM); in intel_iommu_hw_info()
4208 vtd->flags = IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17; in intel_iommu_hw_info()
4209 vtd->cap_reg = iommu->cap; in intel_iommu_hw_info()
4210 vtd->ecap_reg = iommu->ecap; in intel_iommu_hw_info()
4218 * hold the domain->lock when calling it.
4226 ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev, in device_set_dirty_tracking()
4242 spin_lock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4243 list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) { in parent_domain_set_dirty_tracking()
4244 spin_lock_irqsave(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4245 ret = device_set_dirty_tracking(&s1_domain->devices, enable); in parent_domain_set_dirty_tracking()
4246 spin_unlock_irqrestore(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4250 spin_unlock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4254 list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) { in parent_domain_set_dirty_tracking()
4255 spin_lock_irqsave(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4256 device_set_dirty_tracking(&s1_domain->devices, in parent_domain_set_dirty_tracking()
4257 domain->dirty_tracking); in parent_domain_set_dirty_tracking()
4258 spin_unlock_irqrestore(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4260 spin_unlock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4270 spin_lock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4271 if (dmar_domain->dirty_tracking == enable) in intel_iommu_set_dirty_tracking()
4274 ret = device_set_dirty_tracking(&dmar_domain->devices, enable); in intel_iommu_set_dirty_tracking()
4278 if (dmar_domain->nested_parent) { in intel_iommu_set_dirty_tracking()
4284 dmar_domain->dirty_tracking = enable; in intel_iommu_set_dirty_tracking()
4286 spin_unlock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4291 device_set_dirty_tracking(&dmar_domain->devices, in intel_iommu_set_dirty_tracking()
4292 dmar_domain->dirty_tracking); in intel_iommu_set_dirty_tracking()
4293 spin_unlock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4303 unsigned long end = iova + size - 1; in intel_iommu_read_and_clear_dirty()
4312 if (!dmar_domain->dirty_tracking && dirty->bitmap) in intel_iommu_read_and_clear_dirty()
4313 return -EINVAL; in intel_iommu_read_and_clear_dirty()
4343 struct intel_iommu *iommu = info->iommu; in context_setup_pass_through()
4346 spin_lock(&iommu->lock); in context_setup_pass_through()
4349 spin_unlock(&iommu->lock); in context_setup_pass_through()
4350 return -ENOMEM; in context_setup_pass_through()
4354 spin_unlock(&iommu->lock); in context_setup_pass_through()
4364 * AGAW value supported by hardware. And ASR is ignored by hardware. in context_setup_pass_through()
4366 context_set_address_width(context, iommu->msagaw); in context_setup_pass_through()
4370 if (!ecap_coherent(iommu->ecap)) in context_setup_pass_through()
4373 spin_unlock(&iommu->lock); in context_setup_pass_through()
4382 if (dev != &pdev->dev) in context_setup_pass_through_cb()
4393 return context_setup_pass_through(dev, info->bus, info->devfn); in device_setup_pass_through()
4402 struct intel_iommu *iommu = info->iommu; in identity_domain_attach_dev()
4426 struct intel_iommu *iommu = info->iommu; in identity_domain_set_dev_pasid()
4430 return -EOPNOTSUPP; in identity_domain_set_dev_pasid()
4451 struct intel_iommu *iommu = info->iommu; in intel_iommu_domain_alloc_paging()
4460 return &dmar_domain->domain; in intel_iommu_domain_alloc_paging()
4551 pci_info(dev, "Forcing write-buffer flush capability\n"); in quirk_iommu_rwbf()
4604 ver = (dev->device >> 8) & 0xff; in quirk_igfx_skip_te_disable()
4623 message if VT-d is actually disabled.
4644 known-broken BIOSes _don't_ actually hide it, so far. */ in check_tylersburg_isoch()
4661 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */ in check_tylersburg_isoch()
4688 * Here we deal with a device TLB defect where device may inadvertently issue ATS
4719 if (likely(!info->dtlb_extra_inval)) in quirk_extra_dev_tlb_flush()
4722 sid = PCI_DEVID(info->bus, info->devfn); in quirk_extra_dev_tlb_flush()
4724 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
4727 qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
4737 * VT-d spec. The VT-d hardware implementation may support some but not
4742 * - 0: Command successful without any error;
4743 * - Negative: software error value;
4744 * - Nonzero positive: failure status code defined in Table 48.
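
Given the three return classes documented above, a caller would typically branch as below (hypothetical caller-side sketch; the variable names are assumed, not taken from the driver):

    ret = ecmd_submit_sync(iommu, ecmd, oa, ob);
    if (ret < 0)
        pr_err("ecmd: software error %d\n", ret);   /* e.g. -ENODEV, -EBUSY, -ETIMEDOUT */
    else if (ret > 0)
        pr_err("ecmd: failed, status %#x\n", ret);  /* status code per Table 48 */
    else
        pr_debug("ecmd: completed successfully\n");
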
4752 if (!cap_ecmds(iommu->cap)) in ecmd_submit_sync()
4753 return -ENODEV; in ecmd_submit_sync()
4755 raw_spin_lock_irqsave(&iommu->register_lock, flags); in ecmd_submit_sync()
4757 res = dmar_readq(iommu->reg + DMAR_ECRSP_REG); in ecmd_submit_sync()
4759 ret = -EBUSY; in ecmd_submit_sync()
4765 * - There is no side effect if an ecmd doesn't require an in ecmd_submit_sync()
4767 * - It's not invoked in any critical path. The extra MMIO in ecmd_submit_sync()
4770 dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob); in ecmd_submit_sync()
4771 dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT)); in ecmd_submit_sync()
4777 ret = -ETIMEDOUT; in ecmd_submit_sync()
4783 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in ecmd_submit_sync()