Lines Matching +full:pcie +full:- +full:ob

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright © 2006-2014 Intel Corporation.
17 #include <linux/dma-direct.h>
21 #include <linux/pci-ats.h>
28 #include "../dma-iommu.h"
30 #include "../iommu-pages.h"
38 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
39 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
40 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
48 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
49 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
54 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
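A quick worked example of what these width macros evaluate to (a sketch, assuming the usual VTD_PAGE_SHIFT of 12, i.e. 4KiB pages):

/*
 * For a guest address width (gaw) of 48 bits:
 *   __DOMAIN_MAX_PFN(48)  == (1ULL << (48 - 12)) - 1 == 0x0000000fffffffff
 *   __DOMAIN_MAX_ADDR(48) == (1ULL << 48) - 1        == 0x0000ffffffffffff
 * DOMAIN_MAX_PFN() then clamps the PFN to (unsigned long)-1, which only
 * matters on 32-bit builds where unsigned long is narrower than the PFN.
 */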
60 #define rwbf_required(iommu) (rwbf_quirk || cap_rwbf((iommu)->cap))
 63  * set to 1 to panic the kernel if it can't successfully enable VT-d
78 if (!(re->lo & 1)) in root_entry_lctp()
81 return re->lo & VTD_PAGE_MASK; in root_entry_lctp()
90 if (!(re->hi & 1)) in root_entry_uctp()
93 return re->hi & VTD_PAGE_MASK; in root_entry_uctp()
102 if (*rid_lhs < PCI_DEVID(info->bus, info->devfn)) in device_rid_cmp_key()
103 return -1; in device_rid_cmp_key()
105 if (*rid_lhs > PCI_DEVID(info->bus, info->devfn)) in device_rid_cmp_key()
115 u16 key = PCI_DEVID(info->bus, info->devfn); in device_rid_cmp()
121 * Looks up an IOMMU-probed device using its source ID.
137 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_find()
138 node = rb_find(&rid, &iommu->device_rbtree, device_rid_cmp_key); in device_rbtree_find()
141 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_find()
143 return info ? info->dev : NULL; in device_rbtree_find()
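The lookup above is keyed by the 16-bit PCI requester ID; a minimal usage sketch (the helper name is hypothetical, not from this file), assuming only a bus/devfn pair is at hand:

/* Hypothetical caller: build the RID with PCI_DEVID() and look it up. */
static struct device *example_find_by_rid(struct intel_iommu *iommu,
					   u8 bus, u8 devfn)
{
	return device_rbtree_find(iommu, PCI_DEVID(bus, devfn));
}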
152 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_insert()
153 curr = rb_find_add(&info->node, &iommu->device_rbtree, device_rid_cmp); in device_rbtree_insert()
154 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_insert()
156 return -EEXIST; in device_rbtree_insert()
163 struct intel_iommu *iommu = info->iommu; in device_rbtree_remove()
166 spin_lock_irqsave(&iommu->device_rbtree_lock, flags); in device_rbtree_remove()
167 rb_erase(&info->node, &iommu->device_rbtree); in device_rbtree_remove()
168 spin_unlock_irqrestore(&iommu->device_rbtree_lock, flags); in device_rbtree_remove()
224 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED); in translation_pre_enabled()
229 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED; in clear_translation_pre_enabled()
236 gsts = readl(iommu->reg + DMAR_GSTS_REG); in init_translation_status()
238 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED; in init_translation_status()
244 return -EINVAL; in intel_iommu_setup()
273 pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); in intel_iommu_setup()
276 pr_notice("Unknown option - '%s'\n", str); in intel_iommu_setup()
290 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; in domain_pfn_supported()
297 * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
304 fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0); in __iommu_calculate_sagaw()
305 sl_sagaw = cap_sagaw(iommu->cap); in __iommu_calculate_sagaw()
308 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in __iommu_calculate_sagaw()
312 if (!ecap_slts(iommu->ecap)) in __iommu_calculate_sagaw()
324 for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) { in __iommu_calculate_agaw()
353 ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap); in iommu_paging_structure_coherency()
362 * 1-level super page supports page size of 2MiB, 2-level super page in domain_super_pgsize_bitmap()
365 if (domain->iommu_superpage == 1) in domain_super_pgsize_bitmap()
367 else if (domain->iommu_superpage == 2) in domain_super_pgsize_bitmap()
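Pieced together from the fragments above, the level-to-size mapping is roughly the following (a sketch, not necessarily the verbatim function body):

static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
{
	unsigned long bitmap = 0;

	/* 1-level super pages add 2MiB; 2-level adds both 2MiB and 1GiB. */
	if (domain->iommu_superpage == 1)
		bitmap |= SZ_2M;
	else if (domain->iommu_superpage == 2)
		bitmap |= SZ_2M | SZ_1G;

	return bitmap;
}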
376 struct root_entry *root = &iommu->root_entry[bus]; in iommu_context_addr()
387 entry = &root->lo; in iommu_context_addr()
390 devfn -= 0x80; in iommu_context_addr()
391 entry = &root->hi; in iommu_context_addr()
402 context = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, in iommu_context_addr()
416 * is_downstream_to_pci_bridge - test if a device belongs to the PCI
417 * sub-hierarchy of a candidate PCI-PCI bridge
418 * @dev: candidate PCI device belonging to @bridge PCI sub-hierarchy
419 * @bridge: the candidate PCI-PCI bridge
421 * Return: true if @dev belongs to @bridge PCI sub-hierarchy, else false.
434 if (pbridge->subordinate && in is_downstream_to_pci_bridge()
435 pbridge->subordinate->number <= pdev->bus->number && in is_downstream_to_pci_bridge()
436 pbridge->subordinate->busn_res.end >= pdev->bus->number) in is_downstream_to_pci_bridge()
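A minimal usage sketch for the helper documented above (the wrapper name is hypothetical): a device-scope walker can treat a scope entry as covering @dev either on a direct match or when @dev sits below a bridge scope entry.

/* Hypothetical wrapper around is_downstream_to_pci_bridge(). */
static bool example_scope_covers(struct device *scope_dev, struct device *dev)
{
	return scope_dev == dev || is_downstream_to_pci_bridge(dev, scope_dev);
}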
453 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar); in quirk_ioat_snb_local_iommu()
456 dev_info(&pdev->dev, "failed to run vt-d quirk\n"); in quirk_ioat_snb_local_iommu()
463 if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) { in quirk_ioat_snb_local_iommu()
464 …pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"… in quirk_ioat_snb_local_iommu()
474 if (!iommu || iommu->drhd->ignored) in iommu_is_dummy()
480 if (pdev->vendor == PCI_VENDOR_ID_INTEL && in iommu_is_dummy()
481 pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB && in iommu_is_dummy()
509 dev = &pf_pdev->dev; in device_lookup_iommu()
510 segment = pci_domain_nr(pdev->bus); in device_lookup_iommu()
512 dev = &ACPI_COMPANION(dev)->dev; in device_lookup_iommu()
516 if (pdev && segment != drhd->segment) in device_lookup_iommu()
519 for_each_active_dev_scope(drhd->devices, in device_lookup_iommu()
520 drhd->devices_cnt, i, tmp) { in device_lookup_iommu()
526 if (pdev && pdev->is_virtfn) in device_lookup_iommu()
530 *bus = drhd->devices[i].bus; in device_lookup_iommu()
531 *devfn = drhd->devices[i].devfn; in device_lookup_iommu()
540 if (pdev && drhd->include_all) { in device_lookup_iommu()
543 *bus = pdev->bus->number; in device_lookup_iommu()
544 *devfn = pdev->devfn; in device_lookup_iommu()
562 if (!domain->iommu_coherency) in domain_flush_cache()
571 if (!iommu->root_entry) in free_context_table()
587 iommu_free_pages(iommu->root_entry); in free_context_table()
588 iommu->root_entry = NULL; in free_context_table()
602 pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val); in pgtable_walk()
605 pr_info("page table not present at level %d\n", level - 1); in pgtable_walk()
613 level--; in pgtable_walk()
629 pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); in dmar_fault_dump_ptes()
632 if (!iommu->root_entry) { in dmar_fault_dump_ptes()
636 rt_entry = &iommu->root_entry[bus]; in dmar_fault_dump_ptes()
640 rt_entry->hi, rt_entry->lo); in dmar_fault_dump_ptes()
642 pr_info("root entry: 0x%016llx", rt_entry->lo); in dmar_fault_dump_ptes()
652 ctx_entry->hi, ctx_entry->lo); in dmar_fault_dump_ptes()
660 level = agaw_to_level(ctx_entry->hi & 7); in dmar_fault_dump_ptes()
661 pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
671 dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
673 /* For request-without-pasid, get the pasid from context entry */ in dmar_fault_dump_ptes()
679 pr_info("pasid dir entry: 0x%016llx\n", pde->val); in dmar_fault_dump_ptes()
689 for (i = 0; i < ARRAY_SIZE(pte->val); i++) in dmar_fault_dump_ptes()
690 pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]); in dmar_fault_dump_ptes()
698 level = pte->val[2] & BIT_ULL(2) ? 5 : 4; in dmar_fault_dump_ptes()
699 pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
701 level = agaw_to_level((pte->val[0] >> 2) & 0x7); in dmar_fault_dump_ptes()
702 pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK); in dmar_fault_dump_ptes()
715 int level = agaw_to_level(domain->agaw); in pfn_to_dma_pte()
722 parent = domain->pgd; in pfn_to_dma_pte()
737 tmp_page = iommu_alloc_pages_node_sz(domain->nid, gfp, in pfn_to_dma_pte()
746 if (domain->use_first_level) in pfn_to_dma_pte()
750 if (!try_cmpxchg64(&pte->val, &tmp, pteval)) in pfn_to_dma_pte()
760 level--; in pfn_to_dma_pte()
775 int total = agaw_to_level(domain->agaw); in dma_pfn_level_pte()
778 parent = domain->pgd; in dma_pfn_level_pte()
796 total--; in dma_pfn_level_pte()
828 (void *)pte - (void *)first_pte); in dma_pte_clear_range()
852 dma_pte_free_level(domain, level - 1, retain_level, in dma_pte_free_level()
862 last_pfn < level_pfn + level_size(level) - 1)) { in dma_pte_free_level()
884 dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level, in dma_pte_free_pagetable()
885 domain->pgd, 0, start_pfn, last_pfn); in dma_pte_free_pagetable()
888 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in dma_pte_free_pagetable()
889 iommu_free_pages(domain->pgd); in dma_pte_free_pagetable()
890 domain->pgd = NULL; in dma_pte_free_pagetable()
897 know the hardware page-walk will no longer touch them.
913 dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_list_pagetables()
936 last_pfn >= level_pfn + level_size(level) - 1) { in dma_pte_clear_level()
940 dma_pte_list_pagetables(domain, level - 1, pte, freelist); in dma_pte_clear_level()
948 dma_pte_clear_level(domain, level - 1, in dma_pte_clear_level()
959 (void *)++last_pte - (void *)first_pte); in dma_pte_clear_level()
974 dma_pte_clear_level(domain, agaw_to_level(domain->agaw), in domain_unmap()
975 domain->pgd, 0, start_pfn, last_pfn, freelist); in domain_unmap()
978 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { in domain_unmap()
979 iommu_pages_list_add(freelist, domain->pgd); in domain_unmap()
980 domain->pgd = NULL; in domain_unmap()
989 root = iommu_alloc_pages_node_sz(iommu->node, GFP_ATOMIC, SZ_4K); in iommu_alloc_root_entry()
992 iommu->name); in iommu_alloc_root_entry()
993 return -ENOMEM; in iommu_alloc_root_entry()
997 iommu->root_entry = root; in iommu_alloc_root_entry()
1008 addr = virt_to_phys(iommu->root_entry); in iommu_set_root_entry()
1012 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_set_root_entry()
1013 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr); in iommu_set_root_entry()
1015 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); in iommu_set_root_entry()
1021 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_set_root_entry()
1027 if (cap_esrtps(iommu->cap)) in iommu_set_root_entry()
1030 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL); in iommu_set_root_entry()
1033 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in iommu_set_root_entry()
1041 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) in iommu_flush_write_buffer()
1044 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1045 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); in iommu_flush_write_buffer()
1051 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_flush_write_buffer()
1074 pr_warn("%s: Unexpected context-cache invalidation type 0x%llx\n", in __iommu_flush_context()
1075 iommu->name, type); in __iommu_flush_context()
1080 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_context()
1081 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); in __iommu_flush_context()
1087 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_context()
1093 int tlb_offset = ecap_iotlb_offset(iommu->ecap); in __iommu_flush_iotlb()
1112 iommu->name, type); in __iommu_flush_iotlb()
1116 if (cap_write_drain(iommu->cap)) in __iommu_flush_iotlb()
1119 raw_spin_lock_irqsave(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1122 dmar_writeq(iommu->reg + tlb_offset, val_iva); in __iommu_flush_iotlb()
1123 dmar_writeq(iommu->reg + tlb_offset + 8, val); in __iommu_flush_iotlb()
1129 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in __iommu_flush_iotlb()
1147 spin_lock_irqsave(&domain->lock, flags); in domain_lookup_dev_info()
1148 list_for_each_entry(info, &domain->devices, link) { in domain_lookup_dev_info()
1149 if (info->iommu == iommu && info->bus == bus && in domain_lookup_dev_info()
1150 info->devfn == devfn) { in domain_lookup_dev_info()
1151 spin_unlock_irqrestore(&domain->lock, flags); in domain_lookup_dev_info()
1155 spin_unlock_irqrestore(&domain->lock, flags); in domain_lookup_dev_info()
1163 * check because it applies only to the built-in QAT devices and it doesn't
1169 if (pdev->vendor != PCI_VENDOR_ID_INTEL) in dev_needs_extra_dtlb_flush()
1172 if ((pdev->device & 0xfffc) != BUGGY_QAT_DEVID_MASK) in dev_needs_extra_dtlb_flush()
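The mask test above buckets four consecutive PCI device IDs into one compare; an illustration with example IDs only (0x4940 is not asserted to be the real BUGGY_QAT_DEVID_MASK value):

/*
 * Clearing the low two bits groups device IDs in fours, e.g.:
 *   0x4940 & 0xfffc == 0x4940,  0x4941 & 0xfffc == 0x4940,
 *   0x4942 & 0xfffc == 0x4940,  0x4943 & 0xfffc == 0x4940
 * so a single comparison against the mask covers the whole group.
 */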
1182 if (!info->ats_supported) in iommu_enable_pci_ats()
1185 pdev = to_pci_dev(info->dev); in iommu_enable_pci_ats()
1190 info->ats_enabled = 1; in iommu_enable_pci_ats()
1195 if (!info->ats_enabled) in iommu_disable_pci_ats()
1198 pci_disable_ats(to_pci_dev(info->dev)); in iommu_disable_pci_ats()
1199 info->ats_enabled = 0; in iommu_disable_pci_ats()
1206 if (!info->ats_enabled || !info->pri_supported) in iommu_enable_pci_pri()
1209 pdev = to_pci_dev(info->dev); in iommu_enable_pci_pri()
1211 if (info->pasid_enabled && !pci_prg_resp_pasid_required(pdev)) in iommu_enable_pci_pri()
1218 info->pri_enabled = 1; in iommu_enable_pci_pri()
1223 if (!info->pri_enabled) in iommu_disable_pci_pri()
1226 if (WARN_ON(info->iopf_refcount)) in iommu_disable_pci_pri()
1227 iopf_queue_remove_device(info->iommu->iopf_queue, info->dev); in iommu_disable_pci_pri()
1229 pci_disable_pri(to_pci_dev(info->dev)); in iommu_disable_pci_pri()
1230 info->pri_enabled = 0; in iommu_disable_pci_pri()
1243 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap)) in iommu_disable_protect_mem_regions()
1246 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1247 pmen = readl(iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1249 writel(pmen, iommu->reg + DMAR_PMEN_REG); in iommu_disable_protect_mem_regions()
1255 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_disable_protect_mem_regions()
1263 raw_spin_lock_irqsave(&iommu->register_lock, flags); in iommu_enable_translation()
1264 iommu->gcmd |= DMA_GCMD_TE; in iommu_enable_translation()
1265 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_enable_translation()
1271 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in iommu_enable_translation()
1279 if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && in iommu_disable_translation()
1280 (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) in iommu_disable_translation()
1283 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_disable_translation()
1284 iommu->gcmd &= ~DMA_GCMD_TE; in iommu_disable_translation()
1285 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); in iommu_disable_translation()
1291 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_disable_translation()
1300 if (WARN_ON(!ida_is_empty(&iommu->domain_ida))) in disable_dmar_iommu()
1303 if (iommu->gcmd & DMA_GCMD_TE) in disable_dmar_iommu()
1309 if (iommu->copied_tables) { in free_dmar_iommu()
1310 bitmap_free(iommu->copied_tables); in free_dmar_iommu()
1311 iommu->copied_tables = NULL; in free_dmar_iommu()
1317 if (ecap_prs(iommu->ecap)) in free_dmar_iommu()
1332 if (ecap_flts(iommu->ecap) ^ ecap_slts(iommu->ecap)) in first_level_by_default()
1333 return ecap_flts(iommu->ecap); in first_level_by_default()
1341 int num, ret = -ENOSPC; in domain_attach_iommu()
1343 if (domain->domain.type == IOMMU_DOMAIN_SVA) in domain_attach_iommu()
1348 return -ENOMEM; in domain_attach_iommu()
1350 guard(mutex)(&iommu->did_lock); in domain_attach_iommu()
1351 curr = xa_load(&domain->iommu_array, iommu->seq_id); in domain_attach_iommu()
1353 curr->refcnt++; in domain_attach_iommu()
1358 num = ida_alloc_range(&iommu->domain_ida, IDA_START_DID, in domain_attach_iommu()
1359 cap_ndoms(iommu->cap) - 1, GFP_KERNEL); in domain_attach_iommu()
1361 pr_err("%s: No free domain ids\n", iommu->name); in domain_attach_iommu()
1365 info->refcnt = 1; in domain_attach_iommu()
1366 info->did = num; in domain_attach_iommu()
1367 info->iommu = iommu; in domain_attach_iommu()
1368 curr = xa_cmpxchg(&domain->iommu_array, iommu->seq_id, in domain_attach_iommu()
1371 ret = xa_err(curr) ? : -EBUSY; in domain_attach_iommu()
1378 ida_free(&iommu->domain_ida, info->did); in domain_attach_iommu()
1388 if (domain->domain.type == IOMMU_DOMAIN_SVA) in domain_detach_iommu()
1391 guard(mutex)(&iommu->did_lock); in domain_detach_iommu()
1392 info = xa_load(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1393 if (--info->refcnt == 0) { in domain_detach_iommu()
1394 ida_free(&iommu->domain_ida, info->did); in domain_detach_iommu()
1395 xa_erase(&domain->iommu_array, iommu->seq_id); in domain_detach_iommu()
1402 * in-flight DMA and copied pgtable, but there is no unmapping
1404 * the newly-mapped device. For kdump, at this point, the device
1406 * in-flight DMA will exist, and we don't need to worry anymore
1418 assert_spin_locked(&iommu->lock); in copied_context_tear_down()
1423 if (did_old < cap_ndoms(iommu->cap)) { in copied_context_tear_down()
1424 iommu->flush.flush_context(iommu, did_old, in copied_context_tear_down()
1428 iommu->flush.flush_iotlb(iommu, did_old, 0, 0, in copied_context_tear_down()
1436 * It's a non-present to present mapping. If hardware doesn't cache
1437  * non-present entries we only need to flush the write-buffer. If it in context_present_cache_flush()
1438 * _does_ cache non-present entries, then it does so in the special
1444 if (cap_caching_mode(iommu->cap)) { in context_present_cache_flush()
1445 iommu->flush.flush_context(iommu, 0, in context_present_cache_flush()
1449 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in context_present_cache_flush()
1463 struct dma_pte *pgd = domain->pgd; in domain_context_mapping_one()
1468 return -EINVAL; in domain_context_mapping_one()
1473 spin_lock(&iommu->lock); in domain_context_mapping_one()
1474 ret = -ENOMEM; in domain_context_mapping_one()
1487 if (info && info->ats_supported) in domain_context_mapping_one()
1493 context_set_address_width(context, domain->agaw); in domain_context_mapping_one()
1497 if (!ecap_coherent(iommu->ecap)) in domain_context_mapping_one()
1503 spin_unlock(&iommu->lock); in domain_context_mapping_one()
1511 struct device_domain_info *info = dev_iommu_priv_get(&pdev->dev); in domain_context_mapping_cb()
1512 struct intel_iommu *iommu = info->iommu; in domain_context_mapping_cb()
1523 struct intel_iommu *iommu = info->iommu; in domain_context_mapping()
1524 u8 bus = info->bus, devfn = info->devfn; in domain_context_mapping()
1547 support = domain->iommu_superpage; in hardware_largepage_caps()
1561 support--; in hardware_largepage_caps()
1585 start_pfn + lvl_pages - 1, in switch_to_super_page()
1610 if (unlikely(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1))) in __domain_mapping()
1611 return -EINVAL; in __domain_mapping()
1614 return -EINVAL; in __domain_mapping()
1616 if (!(prot & DMA_PTE_WRITE) && domain->nested_parent) { in __domain_mapping()
1617 …pr_err_ratelimited("Read-only mapping is disallowed on the domain which serves as the parent in a … in __domain_mapping()
1618 return -EINVAL; in __domain_mapping()
1622 if (domain->use_first_level) { in __domain_mapping()
1628 domain->has_mappings = true; in __domain_mapping()
1642 return -ENOMEM; in __domain_mapping()
1655 end_pfn = iov_pfn + pages_to_remove - 1; in __domain_mapping()
1666 if (!try_cmpxchg64_local(&pte->val, &tmp, pteval)) { in __domain_mapping()
1671 dumps--; in __domain_mapping()
1677 nr_pages -= lvl_pages; in __domain_mapping()
1698 (void *)pte - (void *)first_pte); in __domain_mapping()
1708 struct intel_iommu *iommu = info->iommu; in domain_context_clear_one()
1712 spin_lock(&iommu->lock); in domain_context_clear_one()
1715 spin_unlock(&iommu->lock); in domain_context_clear_one()
1722 spin_unlock(&iommu->lock); in domain_context_clear_one()
1767 struct dma_pte *pgd = domain->pgd; in domain_setup_first_level()
1770 level = agaw_to_level(domain->agaw); in domain_setup_first_level()
1772 return -EINVAL; in domain_setup_first_level()
1777 if (domain->force_snooping) in domain_setup_first_level()
1789 struct intel_iommu *iommu = info->iommu; in dmar_domain_attach_device()
1797 info->domain = domain; in dmar_domain_attach_device()
1798 info->domain_attached = true; in dmar_domain_attach_device()
1799 spin_lock_irqsave(&domain->lock, flags); in dmar_domain_attach_device()
1800 list_add(&info->link, &domain->devices); in dmar_domain_attach_device()
1801 spin_unlock_irqrestore(&domain->lock, flags); in dmar_domain_attach_device()
1815 ret = -EINVAL; in dmar_domain_attach_device()
1832 * device_rmrr_is_relaxable - Test whether the RMRR of this device
1863 struct intel_iommu *iommu = info->iommu; in device_def_domain_type()
1869 if (!ecap_pass_through(iommu->ecap)) in device_def_domain_type()
1887 * (for example, while enabling interrupt-remapping) then in intel_iommu_init_qi()
1890 if (!iommu->qi) { in intel_iommu_init_qi()
1894 dmar_fault(-1, iommu); in intel_iommu_init_qi()
1906 iommu->flush.flush_context = __iommu_flush_context; in intel_iommu_init_qi()
1907 iommu->flush.flush_iotlb = __iommu_flush_iotlb; in intel_iommu_init_qi()
1909 iommu->name); in intel_iommu_init_qi()
1911 iommu->flush.flush_context = qi_flush_context; in intel_iommu_init_qi()
1912 iommu->flush.flush_iotlb = qi_flush_iotlb; in intel_iommu_init_qi()
1913 pr_info("%s: Using Queued invalidation\n", iommu->name); in intel_iommu_init_qi()
1963 ret = -ENOMEM; in copy_context_table()
1969 new_ce = iommu_alloc_pages_node_sz(iommu->node, in copy_context_table()
1984 if (did >= 0 && did < cap_ndoms(iommu->cap)) in copy_context_table()
1985 ida_alloc_range(&iommu->domain_ida, did, did, GFP_KERNEL); in copy_context_table()
2012 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG); in copy_translation_tables()
2023 return -EINVAL; in copy_translation_tables()
2025 iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL); in copy_translation_tables()
2026 if (!iommu->copied_tables) in copy_translation_tables()
2027 return -ENOMEM; in copy_translation_tables()
2031 return -EINVAL; in copy_translation_tables()
2035 return -ENOMEM; in copy_translation_tables()
2037 /* This is too big for the stack - allocate it from slab */ in copy_translation_tables()
2039 ret = -ENOMEM; in copy_translation_tables()
2049 iommu->name, bus); in copy_translation_tables()
2054 spin_lock(&iommu->lock); in copy_translation_tables()
2063 iommu->root_entry[bus].lo = val; in copy_translation_tables()
2070 iommu->root_entry[bus].hi = val; in copy_translation_tables()
2073 spin_unlock(&iommu->lock); in copy_translation_tables()
2077 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE); in copy_translation_tables()
2094 if (drhd->ignored) { in init_dmars()
2105 u32 temp = 2 << ecap_pss(iommu->ecap); in init_dmars()
2118 iommu->name); in init_dmars()
2131 pr_info("Translation already enabled - trying to copy translation structures\n"); in init_dmars()
2137 * enabled - but failed to copy over the in init_dmars()
2138 * old root-entry table. Try to proceed in init_dmars()
2140 * allocating a clean root-entry table. in init_dmars()
2145 iommu->name); in init_dmars()
2150 iommu->name); in init_dmars()
2177 if (drhd->ignored) { in init_dmars()
2189 if (ecap_prs(iommu->ecap)) { in init_dmars()
2224 if (!drhd->include_all) { in init_no_remapping_devices()
2225 for_each_active_dev_scope(drhd->devices, in init_no_remapping_devices()
2226 drhd->devices_cnt, i, dev) in init_no_remapping_devices()
2229 if (i == drhd->devices_cnt) in init_no_remapping_devices()
2230 drhd->ignored = 1; in init_no_remapping_devices()
2235 if (drhd->include_all) in init_no_remapping_devices()
2238 for_each_active_dev_scope(drhd->devices, in init_no_remapping_devices()
2239 drhd->devices_cnt, i, dev) in init_no_remapping_devices()
2242 if (i < drhd->devices_cnt) in init_no_remapping_devices()
2247 drhd->gfx_dedicated = 1; in init_no_remapping_devices()
2249 drhd->ignored = 1; in init_no_remapping_devices()
2261 if (iommu->qi) { in init_iommu_hw()
2269 if (drhd->ignored) { in init_iommu_hw()
2294 iommu->flush.flush_context(iommu, 0, 0, 0, in iommu_flush_all()
2296 iommu->flush.flush_iotlb(iommu, 0, 0, 0, in iommu_flush_all()
2312 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_suspend()
2314 iommu->iommu_state[SR_DMAR_FECTL_REG] = in iommu_suspend()
2315 readl(iommu->reg + DMAR_FECTL_REG); in iommu_suspend()
2316 iommu->iommu_state[SR_DMAR_FEDATA_REG] = in iommu_suspend()
2317 readl(iommu->reg + DMAR_FEDATA_REG); in iommu_suspend()
2318 iommu->iommu_state[SR_DMAR_FEADDR_REG] = in iommu_suspend()
2319 readl(iommu->reg + DMAR_FEADDR_REG); in iommu_suspend()
2320 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = in iommu_suspend()
2321 readl(iommu->reg + DMAR_FEUADDR_REG); in iommu_suspend()
2323 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_suspend()
2344 raw_spin_lock_irqsave(&iommu->register_lock, flag); in iommu_resume()
2346 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], in iommu_resume()
2347 iommu->reg + DMAR_FECTL_REG); in iommu_resume()
2348 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG], in iommu_resume()
2349 iommu->reg + DMAR_FEDATA_REG); in iommu_resume()
2350 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG], in iommu_resume()
2351 iommu->reg + DMAR_FEADDR_REG); in iommu_resume()
2352 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], in iommu_resume()
2353 iommu->reg + DMAR_FEUADDR_REG); in iommu_resume()
2355 raw_spin_unlock_irqrestore(&iommu->register_lock, flag); in iommu_resume()
2375 if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) || in rmrr_sanity_check()
2376 !IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) || in rmrr_sanity_check()
2377 rmrr->end_address <= rmrr->base_address || in rmrr_sanity_check()
2379 return -EINVAL; in rmrr_sanity_check()
2392 "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n" in dmar_parse_one_rmrr()
2394 rmrr->base_address, rmrr->end_address, in dmar_parse_one_rmrr()
2405 rmrru->hdr = header; in dmar_parse_one_rmrr()
2407 rmrru->base_address = rmrr->base_address; in dmar_parse_one_rmrr()
2408 rmrru->end_address = rmrr->end_address; in dmar_parse_one_rmrr()
2410 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), in dmar_parse_one_rmrr()
2411 ((void *)rmrr) + rmrr->header.length, in dmar_parse_one_rmrr()
2412 &rmrru->devices_cnt); in dmar_parse_one_rmrr()
2413 if (rmrru->devices_cnt && rmrru->devices == NULL) in dmar_parse_one_rmrr()
2416 list_add(&rmrru->list, &dmar_rmrr_units); in dmar_parse_one_rmrr()
2422 return -ENOMEM; in dmar_parse_one_rmrr()
2432 tmp = (struct acpi_dmar_atsr *)atsru->hdr; in dmar_find_atsr()
2433 if (atsr->segment != tmp->segment) in dmar_find_atsr()
2435 if (atsr->header.length != tmp->header.length) in dmar_find_atsr()
2437 if (memcmp(atsr, tmp, atsr->header.length) == 0) in dmar_find_atsr()
2457 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL); in dmar_parse_one_atsr()
2459 return -ENOMEM; in dmar_parse_one_atsr()
2466 atsru->hdr = (void *)(atsru + 1); in dmar_parse_one_atsr()
2467 memcpy(atsru->hdr, hdr, hdr->length); in dmar_parse_one_atsr()
2468 atsru->include_all = atsr->flags & 0x1; in dmar_parse_one_atsr()
2469 if (!atsru->include_all) { in dmar_parse_one_atsr()
2470 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), in dmar_parse_one_atsr()
2471 (void *)atsr + atsr->header.length, in dmar_parse_one_atsr()
2472 &atsru->devices_cnt); in dmar_parse_one_atsr()
2473 if (atsru->devices_cnt && atsru->devices == NULL) { in dmar_parse_one_atsr()
2475 return -ENOMEM; in dmar_parse_one_atsr()
2479 list_add_rcu(&atsru->list, &dmar_atsr_units); in dmar_parse_one_atsr()
2486 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt); in intel_iommu_free_atsr()
2498 list_del_rcu(&atsru->list); in dmar_release_one_atsr()
2518 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) { in dmar_check_one_atsr()
2519 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt, in dmar_check_one_atsr()
2521 return -EBUSY; in dmar_check_one_atsr()
2534 tmp = (struct acpi_dmar_satc *)satcu->hdr; in dmar_find_satc()
2535 if (satc->segment != tmp->segment) in dmar_find_satc()
2537 if (satc->header.length != tmp->header.length) in dmar_find_satc()
2539 if (memcmp(satc, tmp, satc->header.length) == 0) in dmar_find_satc()
2559 satcu = kzalloc(sizeof(*satcu) + hdr->length, GFP_KERNEL); in dmar_parse_one_satc()
2561 return -ENOMEM; in dmar_parse_one_satc()
2563 satcu->hdr = (void *)(satcu + 1); in dmar_parse_one_satc()
2564 memcpy(satcu->hdr, hdr, hdr->length); in dmar_parse_one_satc()
2565 satcu->atc_required = satc->flags & 0x1; in dmar_parse_one_satc()
2566 satcu->devices = dmar_alloc_dev_scope((void *)(satc + 1), in dmar_parse_one_satc()
2567 (void *)satc + satc->header.length, in dmar_parse_one_satc()
2568 &satcu->devices_cnt); in dmar_parse_one_satc()
2569 if (satcu->devices_cnt && !satcu->devices) { in dmar_parse_one_satc()
2571 return -ENOMEM; in dmar_parse_one_satc()
2573 list_add_rcu(&satcu->list, &dmar_satc_units); in dmar_parse_one_satc()
2580 struct intel_iommu *iommu = dmaru->iommu; in intel_iommu_add()
2586 if (iommu->gcmd & DMA_GCMD_TE) in intel_iommu_add()
2595 if (dmaru->ignored) { in intel_iommu_add()
2607 if (ecap_prs(iommu->ecap)) { in intel_iommu_add()
2633 struct intel_iommu *iommu = dmaru->iommu; in dmar_iommu_hotplug()
2638 return -EINVAL; in dmar_iommu_hotplug()
2657 list_del(&rmrru->list); in intel_iommu_free_dmars()
2658 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); in intel_iommu_free_dmars()
2663 list_del(&atsru->list); in intel_iommu_free_dmars()
2667 list_del(&satcu->list); in intel_iommu_free_dmars()
2668 dmar_free_dev_scope(&satcu->devices, &satcu->devices_cnt); in intel_iommu_free_dmars()
2683 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header); in dmar_find_matched_satc_unit()
2684 if (satc->segment != pci_domain_nr(dev->bus)) in dmar_find_matched_satc_unit()
2686 for_each_dev_scope(satcu->devices, satcu->devices_cnt, i, tmp) in dmar_find_matched_satc_unit()
2717 return !(satcu->atc_required && !sm_supported(iommu)); in dmar_ats_supported()
2719 for (bus = dev->bus; bus; bus = bus->parent) { in dmar_ats_supported()
2720 bridge = bus->self; in dmar_ats_supported()
2724 /* Connected via non-PCIe: no ATS */ in dmar_ats_supported()
2735 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); in dmar_ats_supported()
2736 if (atsr->segment != pci_domain_nr(dev->bus)) in dmar_ats_supported()
2739 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp) in dmar_ats_supported()
2740 if (tmp == &bridge->dev) in dmar_ats_supported()
2743 if (atsru->include_all) in dmar_ats_supported()
2767 rmrr = container_of(rmrru->hdr, in dmar_iommu_notify_scope_dev()
2769 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2771 ((void *)rmrr) + rmrr->header.length, in dmar_iommu_notify_scope_dev()
2772 rmrr->segment, rmrru->devices, in dmar_iommu_notify_scope_dev()
2773 rmrru->devices_cnt); in dmar_iommu_notify_scope_dev()
2776 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2777 dmar_remove_dev_scope(info, rmrr->segment, in dmar_iommu_notify_scope_dev()
2778 rmrru->devices, rmrru->devices_cnt); in dmar_iommu_notify_scope_dev()
2783 if (atsru->include_all) in dmar_iommu_notify_scope_dev()
2786 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header); in dmar_iommu_notify_scope_dev()
2787 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2789 (void *)atsr + atsr->header.length, in dmar_iommu_notify_scope_dev()
2790 atsr->segment, atsru->devices, in dmar_iommu_notify_scope_dev()
2791 atsru->devices_cnt); in dmar_iommu_notify_scope_dev()
2796 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2797 if (dmar_remove_dev_scope(info, atsr->segment, in dmar_iommu_notify_scope_dev()
2798 atsru->devices, atsru->devices_cnt)) in dmar_iommu_notify_scope_dev()
2803 satc = container_of(satcu->hdr, struct acpi_dmar_satc, header); in dmar_iommu_notify_scope_dev()
2804 if (info->event == BUS_NOTIFY_ADD_DEVICE) { in dmar_iommu_notify_scope_dev()
2806 (void *)satc + satc->header.length, in dmar_iommu_notify_scope_dev()
2807 satc->segment, satcu->devices, in dmar_iommu_notify_scope_dev()
2808 satcu->devices_cnt); in dmar_iommu_notify_scope_dev()
2813 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) { in dmar_iommu_notify_scope_dev()
2814 if (dmar_remove_dev_scope(info, satc->segment, in dmar_iommu_notify_scope_dev()
2815 satcu->devices, satcu->devices_cnt)) in dmar_iommu_notify_scope_dev()
2845 iommu = drhd->iommu; in intel_iommu_shutdown()
2866 u32 ver = readl(iommu->reg + DMAR_VER_REG); in version_show()
2876 return sysfs_emit(buf, "%llx\n", iommu->reg_phys); in address_show()
2884 return sysfs_emit(buf, "%llx\n", iommu->cap); in cap_show()
2892 return sysfs_emit(buf, "%llx\n", iommu->ecap); in ecap_show()
2900 return sysfs_emit(buf, "%ld\n", cap_ndoms(iommu->cap)); in domains_supported_show()
2911 for (id = 0; id < cap_ndoms(iommu->cap); id++) in domains_used_show()
2912 if (ida_exists(&iommu->domain_ida, id)) in domains_used_show()
2930 .name = "intel-iommu",
2944 if (pdev->external_facing) { in has_external_pci()
2958 pr_info("Intel-IOMMU force enabled due to platform opt in\n"); in platform_optin_force_iommu()
2961 * If Intel-IOMMU is disabled by default, we will apply identity in platform_optin_force_iommu()
2976 /* To avoid a -Wunused-but-set-variable warning. */ in probe_acpi_namespace_devices()
2982 for_each_active_dev_scope(drhd->devices, in probe_acpi_namespace_devices()
2983 drhd->devices_cnt, i, dev) { in probe_acpi_namespace_devices()
2987 if (dev->bus != &acpi_bus_type) in probe_acpi_namespace_devices()
2992 mutex_lock(&adev->physical_node_lock); in probe_acpi_namespace_devices()
2994 &adev->physical_node_list, node) { in probe_acpi_namespace_devices()
2995 ret = iommu_probe_device(pn->dev); in probe_acpi_namespace_devices()
2999 mutex_unlock(&adev->physical_node_lock); in probe_acpi_namespace_devices()
3016 pr_warn("Forcing Intel-IOMMU to enabled\n"); in tboot_force_iommu()
3026 int ret = -ENODEV; in intel_iommu_init()
3112 * page-selective invalidations that are required for efficient in intel_iommu_init()
3115 * the virtual and physical IOMMU page-tables. in intel_iommu_init()
3117 if (cap_caching_mode(iommu->cap) && in intel_iommu_init()
3122 iommu_device_sysfs_add(&iommu->iommu, NULL, in intel_iommu_init()
3124 "%s", iommu->name); in intel_iommu_init()
3131 iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL); in intel_iommu_init()
3142 if (!drhd->ignored && !translation_pre_enabled(iommu)) in intel_iommu_init()
3170 * NB - intel-iommu lacks any sort of reference counting for the users of
3177 if (!dev_is_pci(info->dev)) { in domain_context_clear()
3178 domain_context_clear_one(info, info->bus, info->devfn); in domain_context_clear()
3182 pci_for_each_dma_alias(to_pci_dev(info->dev), in domain_context_clear()
3195 struct intel_iommu *iommu = info->iommu; in device_block_translation()
3199 if (!info->domain_attached) in device_block_translation()
3202 if (info->domain) in device_block_translation()
3203 cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID); in device_block_translation()
3214 info->domain_attached = false; in device_block_translation()
3216 if (!info->domain) in device_block_translation()
3219 spin_lock_irqsave(&info->domain->lock, flags); in device_block_translation()
3220 list_del(&info->link); in device_block_translation()
3221 spin_unlock_irqrestore(&info->domain->lock, flags); in device_block_translation()
3223 domain_detach_iommu(info->domain, iommu); in device_block_translation()
3224 info->domain = NULL; in device_block_translation()
3232 iopf_for_domain_remove(info->domain ? &info->domain->domain : NULL, dev); in blocking_domain_attach_dev()
3255 return cap_fl1gp_support(iommu->cap) ? 2 : 1; in iommu_superpage_capability()
3257 return fls(cap_super_page_val(iommu->cap)); in iommu_superpage_capability()
3263 struct intel_iommu *iommu = info->iommu; in paging_domain_alloc()
3269 return ERR_PTR(-ENOMEM); in paging_domain_alloc()
3271 INIT_LIST_HEAD(&domain->devices); in paging_domain_alloc()
3272 INIT_LIST_HEAD(&domain->dev_pasids); in paging_domain_alloc()
3273 INIT_LIST_HEAD(&domain->cache_tags); in paging_domain_alloc()
3274 spin_lock_init(&domain->lock); in paging_domain_alloc()
3275 spin_lock_init(&domain->cache_lock); in paging_domain_alloc()
3276 xa_init(&domain->iommu_array); in paging_domain_alloc()
3277 INIT_LIST_HEAD(&domain->s1_domains); in paging_domain_alloc()
3278 spin_lock_init(&domain->s1_lock); in paging_domain_alloc()
3280 domain->nid = dev_to_node(dev); in paging_domain_alloc()
3281 domain->use_first_level = first_stage; in paging_domain_alloc()
3283 domain->domain.type = IOMMU_DOMAIN_UNMANAGED; in paging_domain_alloc()
3286 addr_width = agaw_to_width(iommu->agaw); in paging_domain_alloc()
3287 if (addr_width > cap_mgaw(iommu->cap)) in paging_domain_alloc()
3288 addr_width = cap_mgaw(iommu->cap); in paging_domain_alloc()
3289 domain->gaw = addr_width; in paging_domain_alloc()
3290 domain->agaw = iommu->agaw; in paging_domain_alloc()
3291 domain->max_addr = __DOMAIN_MAX_ADDR(addr_width); in paging_domain_alloc()
3294 domain->iommu_coherency = iommu_paging_structure_coherency(iommu); in paging_domain_alloc()
3297 domain->domain.pgsize_bitmap = SZ_4K; in paging_domain_alloc()
3298 domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage); in paging_domain_alloc()
3299 domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain); in paging_domain_alloc()
3302 * IOVA aperture: First-level translation restricts the input-address in paging_domain_alloc()
3304 * as address bit [N-1], where N is 48-bits with 4-level paging and in paging_domain_alloc()
3305 * 57-bits with 5-level paging). Hence, skip bit [N-1]. in paging_domain_alloc()
3307 domain->domain.geometry.force_aperture = true; in paging_domain_alloc()
3308 domain->domain.geometry.aperture_start = 0; in paging_domain_alloc()
3310 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1); in paging_domain_alloc()
3312 domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); in paging_domain_alloc()
3315 domain->pgd = iommu_alloc_pages_node_sz(domain->nid, GFP_KERNEL, SZ_4K); in paging_domain_alloc()
3316 if (!domain->pgd) { in paging_domain_alloc()
3318 return ERR_PTR(-ENOMEM); in paging_domain_alloc()
3320 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); in paging_domain_alloc()
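Worked numbers for the aperture computed above (a sketch, assuming gaw == 48 with 4-level paging, so the canonical-address bit is bit 47):

/*
 * first-stage:  aperture_end = __DOMAIN_MAX_ADDR(gaw - 1)
 *                            = (1ULL << 47) - 1 = 0x00007fffffffffff
 * second-stage: aperture_end = __DOMAIN_MAX_ADDR(gaw)
 *                            = (1ULL << 48) - 1 = 0x0000ffffffffffff
 */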
3332 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_first_stage()
3335 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in intel_iommu_domain_alloc_first_stage()
3336 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_first_stage()
3342 dmar_domain->domain.ops = &intel_fs_paging_domain_ops; in intel_iommu_domain_alloc_first_stage()
3349 dmar_domain->iotlb_sync_map = true; in intel_iommu_domain_alloc_first_stage()
3351 return &dmar_domain->domain; in intel_iommu_domain_alloc_first_stage()
3363 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_second_stage()
3369 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_second_stage()
3372 if (sm_supported(iommu) && !ecap_slts(iommu->ecap)) in intel_iommu_domain_alloc_second_stage()
3373 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_second_stage()
3379 dmar_domain->domain.ops = &intel_ss_paging_domain_ops; in intel_iommu_domain_alloc_second_stage()
3380 dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT; in intel_iommu_domain_alloc_second_stage()
3383 dmar_domain->domain.dirty_ops = &intel_dirty_ops; in intel_iommu_domain_alloc_second_stage()
3390 if (rwbf_required(iommu) || cap_caching_mode(iommu->cap)) in intel_iommu_domain_alloc_second_stage()
3391 dmar_domain->iotlb_sync_map = true; in intel_iommu_domain_alloc_second_stage()
3393 return &dmar_domain->domain; in intel_iommu_domain_alloc_second_stage()
3401 struct intel_iommu *iommu = info->iommu; in intel_iommu_domain_alloc_paging_flags()
3405 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_domain_alloc_paging_flags()
3409 if (domain != ERR_PTR(-EOPNOTSUPP)) in intel_iommu_domain_alloc_paging_flags()
3418 if (WARN_ON(dmar_domain->nested_parent && in intel_iommu_domain_free()
3419 !list_empty(&dmar_domain->s1_domains))) in intel_iommu_domain_free()
3422 if (WARN_ON(!list_empty(&dmar_domain->devices))) in intel_iommu_domain_free()
3425 if (dmar_domain->pgd) { in intel_iommu_domain_free()
3429 domain_unmap(dmar_domain, 0, DOMAIN_MAX_PFN(dmar_domain->gaw), in intel_iommu_domain_free()
3434 kfree(dmar_domain->qi_batch); in intel_iommu_domain_free()
3441 if (WARN_ON(dmar_domain->domain.dirty_ops || in paging_domain_compatible_first_stage()
3442 dmar_domain->nested_parent)) in paging_domain_compatible_first_stage()
3443 return -EINVAL; in paging_domain_compatible_first_stage()
3446 if (!sm_supported(iommu) || !ecap_flts(iommu->ecap)) in paging_domain_compatible_first_stage()
3447 return -EINVAL; in paging_domain_compatible_first_stage()
3450 if (!cap_fl1gp_support(iommu->cap) && in paging_domain_compatible_first_stage()
3451 (dmar_domain->domain.pgsize_bitmap & SZ_1G)) in paging_domain_compatible_first_stage()
3452 return -EINVAL; in paging_domain_compatible_first_stage()
3455 if ((rwbf_required(iommu)) && !dmar_domain->iotlb_sync_map) in paging_domain_compatible_first_stage()
3456 return -EINVAL; in paging_domain_compatible_first_stage()
3465 unsigned int sslps = cap_super_page_val(iommu->cap); in paging_domain_compatible_second_stage()
3467 if (dmar_domain->domain.dirty_ops && !ssads_supported(iommu)) in paging_domain_compatible_second_stage()
3468 return -EINVAL; in paging_domain_compatible_second_stage()
3469 if (dmar_domain->nested_parent && !nested_supported(iommu)) in paging_domain_compatible_second_stage()
3470 return -EINVAL; in paging_domain_compatible_second_stage()
3473 if (sm_supported(iommu) && !ecap_slts(iommu->ecap)) in paging_domain_compatible_second_stage()
3474 return -EINVAL; in paging_domain_compatible_second_stage()
3477 if (!(sslps & BIT(0)) && (dmar_domain->domain.pgsize_bitmap & SZ_2M)) in paging_domain_compatible_second_stage()
3478 return -EINVAL; in paging_domain_compatible_second_stage()
3479 if (!(sslps & BIT(1)) && (dmar_domain->domain.pgsize_bitmap & SZ_1G)) in paging_domain_compatible_second_stage()
3480 return -EINVAL; in paging_domain_compatible_second_stage()
3483 if ((rwbf_required(iommu) || cap_caching_mode(iommu->cap)) && in paging_domain_compatible_second_stage()
3484 !dmar_domain->iotlb_sync_map) in paging_domain_compatible_second_stage()
3485 return -EINVAL; in paging_domain_compatible_second_stage()
3494 struct intel_iommu *iommu = info->iommu; in paging_domain_compatible()
3495 int ret = -EINVAL; in paging_domain_compatible()
3503 ret = -EINVAL; in paging_domain_compatible()
3509 * dmar_domain->lock in paging_domain_compatible()
3511 if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap)) in paging_domain_compatible()
3512 return -EINVAL; in paging_domain_compatible()
3514 if (dmar_domain->iommu_coherency != in paging_domain_compatible()
3516 return -EINVAL; in paging_domain_compatible()
3520 addr_width = agaw_to_width(iommu->agaw); in paging_domain_compatible()
3521 if (addr_width > cap_mgaw(iommu->cap)) in paging_domain_compatible()
3522 addr_width = cap_mgaw(iommu->cap); in paging_domain_compatible()
3524 if (dmar_domain->gaw > addr_width || dmar_domain->agaw > iommu->agaw) in paging_domain_compatible()
3525 return -EINVAL; in paging_domain_compatible()
3528 context_copied(iommu, info->bus, info->devfn)) in paging_domain_compatible()
3568 if (dmar_domain->set_pte_snp) in intel_iommu_map()
3572 if (dmar_domain->max_addr < max_addr) { in intel_iommu_map()
3576 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1; in intel_iommu_map()
3580 __func__, dmar_domain->gaw, max_addr); in intel_iommu_map()
3581 return -EFAULT; in intel_iommu_map()
3583 dmar_domain->max_addr = max_addr; in intel_iommu_map()
3602 return -EINVAL; in intel_iommu_map_pages()
3605 return -EINVAL; in intel_iommu_map_pages()
3623 size argument if it happens to be a large-page mapping. */ in intel_iommu_unmap()
3632 last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT; in intel_iommu_unmap()
3634 domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist); in intel_iommu_unmap()
3636 if (dmar_domain->max_addr == iova + size) in intel_iommu_unmap()
3637 dmar_domain->max_addr = iova; in intel_iommu_unmap()
3640 * We do not use page-selective IOTLB invalidation in flush queue, in intel_iommu_unmap()
3663 cache_tag_flush_range(to_dmar_domain(domain), gather->start, in intel_iommu_tlb_sync()
3664 gather->end, in intel_iommu_tlb_sync()
3665 iommu_pages_list_empty(&gather->freelist)); in intel_iommu_tlb_sync()
3666 iommu_put_pages_list(&gather->freelist); in intel_iommu_tlb_sync()
3682 VTD_PAGE_SHIFT) - 1)); in intel_iommu_iova_to_phys()
3692 assert_spin_locked(&domain->lock); in domain_support_force_snooping()
3693 list_for_each_entry(info, &domain->devices, link) { in domain_support_force_snooping()
3694 if (!ecap_sc_support(info->iommu->ecap)) { in domain_support_force_snooping()
3708 guard(spinlock_irqsave)(&dmar_domain->lock); in intel_iommu_enforce_cache_coherency_fs()
3710 if (dmar_domain->force_snooping) in intel_iommu_enforce_cache_coherency_fs()
3716 dmar_domain->force_snooping = true; in intel_iommu_enforce_cache_coherency_fs()
3717 list_for_each_entry(info, &dmar_domain->devices, link) in intel_iommu_enforce_cache_coherency_fs()
3718 intel_pasid_setup_page_snoop_control(info->iommu, info->dev, in intel_iommu_enforce_cache_coherency_fs()
3727 guard(spinlock_irqsave)(&dmar_domain->lock); in intel_iommu_enforce_cache_coherency_ss()
3729 dmar_domain->has_mappings) in intel_iommu_enforce_cache_coherency_ss()
3733 * Second level page table supports per-PTE snoop control. The in intel_iommu_enforce_cache_coherency_ss()
3736 dmar_domain->set_pte_snp = true; in intel_iommu_enforce_cache_coherency_ss()
3737 dmar_domain->force_snooping = true; in intel_iommu_enforce_cache_coherency_ss()
3752 return ecap_sc_support(info->iommu->ecap); in intel_iommu_capable()
3754 return ssads_supported(info->iommu); in intel_iommu_capable()
3769 if (!iommu || !iommu->iommu.ops) in intel_iommu_probe_device()
3770 return ERR_PTR(-ENODEV); in intel_iommu_probe_device()
3774 return ERR_PTR(-ENOMEM); in intel_iommu_probe_device()
3777 info->bus = pdev->bus->number; in intel_iommu_probe_device()
3778 info->devfn = pdev->devfn; in intel_iommu_probe_device()
3779 info->segment = pci_domain_nr(pdev->bus); in intel_iommu_probe_device()
3781 info->bus = bus; in intel_iommu_probe_device()
3782 info->devfn = devfn; in intel_iommu_probe_device()
3783 info->segment = iommu->segment; in intel_iommu_probe_device()
3786 info->dev = dev; in intel_iommu_probe_device()
3787 info->iommu = iommu; in intel_iommu_probe_device()
3789 if (ecap_dev_iotlb_support(iommu->ecap) && in intel_iommu_probe_device()
3792 info->ats_supported = 1; in intel_iommu_probe_device()
3793 info->dtlb_extra_inval = dev_needs_extra_dtlb_flush(pdev); in intel_iommu_probe_device()
3802 if (ecap_dit(iommu->ecap)) in intel_iommu_probe_device()
3803 info->pfsid = pci_dev_id(pci_physfn(pdev)); in intel_iommu_probe_device()
3804 info->ats_qdep = pci_ats_queue_depth(pdev); in intel_iommu_probe_device()
3811 info->pasid_supported = features | 1; in intel_iommu_probe_device()
3814 if (info->ats_supported && ecap_prs(iommu->ecap) && in intel_iommu_probe_device()
3816 info->pri_supported = 1; in intel_iommu_probe_device()
3835 if (!context_copied(iommu, info->bus, info->devfn)) { in intel_iommu_probe_device()
3844 return &iommu->iommu; in intel_iommu_probe_device()
3858 struct intel_iommu *iommu = info->iommu; in intel_iommu_probe_finalize()
3861 * The PCIe spec, in its wisdom, declares that the behaviour of the in intel_iommu_probe_finalize()
3866 if (info->pasid_supported && in intel_iommu_probe_finalize()
3867 !pci_enable_pasid(to_pci_dev(dev), info->pasid_supported & ~1)) in intel_iommu_probe_finalize()
3868 info->pasid_enabled = 1; in intel_iommu_probe_finalize()
3873 if (info->ats_enabled && info->domain) { in intel_iommu_probe_finalize()
3874 u16 did = domain_id_iommu(info->domain, iommu); in intel_iommu_probe_finalize()
3876 if (cache_tag_assign(info->domain, did, dev, in intel_iommu_probe_finalize()
3887 struct intel_iommu *iommu = info->iommu; in intel_iommu_release_device()
3892 if (info->pasid_enabled) { in intel_iommu_release_device()
3894 info->pasid_enabled = 0; in intel_iommu_release_device()
3897 mutex_lock(&iommu->iopf_lock); in intel_iommu_release_device()
3900 mutex_unlock(&iommu->iopf_lock); in intel_iommu_release_device()
3903 !context_copied(iommu, info->bus, info->devfn)) in intel_iommu_release_device()
3922 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, in intel_iommu_get_resv_regions()
3932 length = rmrr->end_address - rmrr->base_address + 1; in intel_iommu_get_resv_regions()
3937 resv = iommu_alloc_resv_region(rmrr->base_address, in intel_iommu_get_resv_regions()
3943 list_add_tail(&resv->list, head); in intel_iommu_get_resv_regions()
3952 if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) { in intel_iommu_get_resv_regions()
3957 list_add_tail(&reg->list, head); in intel_iommu_get_resv_regions()
3963 IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, in intel_iommu_get_resv_regions()
3967 list_add_tail(&reg->list, head); in intel_iommu_get_resv_regions()
3980 struct intel_iommu *iommu = info->iommu; in intel_iommu_enable_iopf()
3983 if (!info->pri_enabled) in intel_iommu_enable_iopf()
3984 return -ENODEV; in intel_iommu_enable_iopf()
3988 if (info->iopf_refcount) { in intel_iommu_enable_iopf()
3989 info->iopf_refcount++; in intel_iommu_enable_iopf()
3993 ret = iopf_queue_add_device(iommu->iopf_queue, dev); in intel_iommu_enable_iopf()
3997 info->iopf_refcount = 1; in intel_iommu_enable_iopf()
4005 struct intel_iommu *iommu = info->iommu; in intel_iommu_disable_iopf()
4007 if (WARN_ON(!info->pri_enabled || !info->iopf_refcount)) in intel_iommu_disable_iopf()
4011 if (--info->iopf_refcount) in intel_iommu_disable_iopf()
4014 iopf_queue_remove_device(iommu->iopf_queue, dev); in intel_iommu_disable_iopf()
4021 return translation_pre_enabled(info->iommu) && !info->domain; in intel_iommu_is_attach_deferred()
4031 if (pdev->untrusted) { in risky_device()
4034 pdev->vendor, pdev->device); in risky_device()
4046 if (dmar_domain->iotlb_sync_map) in intel_iommu_iotlb_sync_map()
4047 cache_tag_flush_range_np(dmar_domain, iova, iova + size - 1); in intel_iommu_iotlb_sync_map()
4057 struct intel_iommu *iommu = info->iommu; in domain_remove_dev_pasid()
4065 if (domain->type == IOMMU_DOMAIN_IDENTITY) in domain_remove_dev_pasid()
4069 spin_lock_irqsave(&dmar_domain->lock, flags); in domain_remove_dev_pasid()
4070 list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) { in domain_remove_dev_pasid()
4071 if (curr->dev == dev && curr->pasid == pasid) { in domain_remove_dev_pasid()
4072 list_del(&curr->link_domain); in domain_remove_dev_pasid()
4077 spin_unlock_irqrestore(&dmar_domain->lock, flags); in domain_remove_dev_pasid()
4093 intel_pasid_tear_down_entry(info->iommu, dev, pasid, false); in blocking_domain_set_dev_pasid()
4106 struct intel_iommu *iommu = info->iommu; in domain_add_dev_pasid()
4113 return ERR_PTR(-ENOMEM); in domain_add_dev_pasid()
4123 dev_pasid->dev = dev; in domain_add_dev_pasid()
4124 dev_pasid->pasid = pasid; in domain_add_dev_pasid()
4125 spin_lock_irqsave(&dmar_domain->lock, flags); in domain_add_dev_pasid()
4126 list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids); in domain_add_dev_pasid()
4127 spin_unlock_irqrestore(&dmar_domain->lock, flags); in domain_add_dev_pasid()
4143 struct intel_iommu *iommu = info->iommu; in intel_iommu_set_dev_pasid()
4147 if (WARN_ON_ONCE(!(domain->type & __IOMMU_DOMAIN_PAGING))) in intel_iommu_set_dev_pasid()
4148 return -EINVAL; in intel_iommu_set_dev_pasid()
4151 return -EOPNOTSUPP; in intel_iommu_set_dev_pasid()
4153 if (domain->dirty_ops) in intel_iommu_set_dev_pasid()
4154 return -EINVAL; in intel_iommu_set_dev_pasid()
4156 if (context_copied(iommu, info->bus, info->devfn)) in intel_iommu_set_dev_pasid()
4157 return -EBUSY; in intel_iommu_set_dev_pasid()
4178 ret = -EINVAL; in intel_iommu_set_dev_pasid()
4200 struct intel_iommu *iommu = info->iommu; in intel_iommu_hw_info()
4205 return ERR_PTR(-EOPNOTSUPP); in intel_iommu_hw_info()
4209 return ERR_PTR(-ENOMEM); in intel_iommu_hw_info()
4211 vtd->flags = IOMMU_HW_INFO_VTD_ERRATA_772415_SPR17; in intel_iommu_hw_info()
4212 vtd->cap_reg = iommu->cap; in intel_iommu_hw_info()
4213 vtd->ecap_reg = iommu->ecap; in intel_iommu_hw_info()
4221 * hold the domain->lock when calling it.
4229 ret = intel_pasid_setup_dirty_tracking(info->iommu, info->dev, in device_set_dirty_tracking()
4245 spin_lock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4246 list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) { in parent_domain_set_dirty_tracking()
4247 spin_lock_irqsave(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4248 ret = device_set_dirty_tracking(&s1_domain->devices, enable); in parent_domain_set_dirty_tracking()
4249 spin_unlock_irqrestore(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4253 spin_unlock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4257 list_for_each_entry(s1_domain, &domain->s1_domains, s2_link) { in parent_domain_set_dirty_tracking()
4258 spin_lock_irqsave(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4259 device_set_dirty_tracking(&s1_domain->devices, in parent_domain_set_dirty_tracking()
4260 domain->dirty_tracking); in parent_domain_set_dirty_tracking()
4261 spin_unlock_irqrestore(&s1_domain->lock, flags); in parent_domain_set_dirty_tracking()
4263 spin_unlock(&domain->s1_lock); in parent_domain_set_dirty_tracking()
4273 spin_lock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4274 if (dmar_domain->dirty_tracking == enable) in intel_iommu_set_dirty_tracking()
4277 ret = device_set_dirty_tracking(&dmar_domain->devices, enable); in intel_iommu_set_dirty_tracking()
4281 if (dmar_domain->nested_parent) { in intel_iommu_set_dirty_tracking()
4287 dmar_domain->dirty_tracking = enable; in intel_iommu_set_dirty_tracking()
4289 spin_unlock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4294 device_set_dirty_tracking(&dmar_domain->devices, in intel_iommu_set_dirty_tracking()
4295 dmar_domain->dirty_tracking); in intel_iommu_set_dirty_tracking()
4296 spin_unlock(&dmar_domain->lock); in intel_iommu_set_dirty_tracking()
4306 unsigned long end = iova + size - 1; in intel_iommu_read_and_clear_dirty()
4315 if (!dmar_domain->dirty_tracking && dirty->bitmap) in intel_iommu_read_and_clear_dirty()
4316 return -EINVAL; in intel_iommu_read_and_clear_dirty()
4346 struct intel_iommu *iommu = info->iommu; in context_setup_pass_through()
4349 spin_lock(&iommu->lock); in context_setup_pass_through()
4352 spin_unlock(&iommu->lock); in context_setup_pass_through()
4353 return -ENOMEM; in context_setup_pass_through()
4357 spin_unlock(&iommu->lock); in context_setup_pass_through()
4369 context_set_address_width(context, iommu->msagaw); in context_setup_pass_through()
4373 if (!ecap_coherent(iommu->ecap)) in context_setup_pass_through()
4376 spin_unlock(&iommu->lock); in context_setup_pass_through()
4393 return context_setup_pass_through(dev, info->bus, info->devfn); in device_setup_pass_through()
4402 struct intel_iommu *iommu = info->iommu; in identity_domain_attach_dev()
4421 info->domain_attached = true; in identity_domain_attach_dev()
4431 struct intel_iommu *iommu = info->iommu; in identity_domain_set_dev_pasid()
4435 return -EOPNOTSUPP; in identity_domain_set_dev_pasid()
4560 pci_info(dev, "Forcing write-buffer flush capability\n"); in quirk_iommu_rwbf()
4612 ver = (dev->device >> 8) & 0xff; in quirk_igfx_skip_te_disable()
4631 message if VT-d is actually disabled.
4652 known-broken BIOSes _don't_ actually hide it, so far. */ in check_tylersburg_isoch()
4669 /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */ in check_tylersburg_isoch()
4727 if (likely(!info->dtlb_extra_inval)) in quirk_extra_dev_tlb_flush()
4730 sid = PCI_DEVID(info->bus, info->devfn); in quirk_extra_dev_tlb_flush()
4732 qi_flush_dev_iotlb(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
4735 qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid, in quirk_extra_dev_tlb_flush()
4745 * VT-d spec. The VT-d hardware implementation may support some but not
4750 * - 0: Command successful without any error;
4751 * - Negative: software error value;
4752 * - Nonzero positive: failure status code defined in Table 48.
4754 int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob) in ecmd_submit_sync() argument
4760 if (!cap_ecmds(iommu->cap)) in ecmd_submit_sync()
4761 return -ENODEV; in ecmd_submit_sync()
4763 raw_spin_lock_irqsave(&iommu->register_lock, flags); in ecmd_submit_sync()
4765 res = dmar_readq(iommu->reg + DMAR_ECRSP_REG); in ecmd_submit_sync()
4767 ret = -EBUSY; in ecmd_submit_sync()
4773 * - There is no side effect if an ecmd doesn't require an in ecmd_submit_sync()
4775 * - It's not invoked in any critical path. The extra MMIO in ecmd_submit_sync()
4778 dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob); in ecmd_submit_sync()
4779 dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT)); in ecmd_submit_sync()
4785 ret = -ETIMEDOUT; in ecmd_submit_sync()
4791 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); in ecmd_submit_sync()
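Given the return convention documented above (0 on success, negative for software errors, positive for a hardware failure status), a caller might check the result roughly like this (hypothetical sketch; ECMD_EXAMPLE_OP is a placeholder, not a real DMA_ECMD_* opcode):

static int example_issue_ecmd(struct intel_iommu *iommu)
{
	int ret = ecmd_submit_sync(iommu, ECMD_EXAMPLE_OP, 0, 0);

	if (ret < 0)	/* software error, e.g. -ENODEV, -EBUSY, -ETIMEDOUT */
		return ret;
	if (ret > 0)	/* failure status code reported by hardware (Table 48) */
		return -EIO;

	return 0;
}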