Lines matching "ats-supported"
1 // SPDX-License-Identifier: GPL-2.0
3 * intel-pasid.c - PASID idr, table and entry manipulation
18 #include <linux/pci-ats.h>
23 #include "../iommu-pages.h"
36 * single-thread context.
49 return -ENODEV; in intel_pasid_alloc_table()
50 if (WARN_ON(info->pasid_table)) in intel_pasid_alloc_table()
51 return -EEXIST; in intel_pasid_alloc_table()
55 return -ENOMEM; in intel_pasid_alloc_table()
57 if (info->pasid_supported) in intel_pasid_alloc_table()
61 size = max_pasid >> (PASID_PDE_SHIFT - 3); in intel_pasid_alloc_table()
63 dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order); in intel_pasid_alloc_table()
66 return -ENOMEM; in intel_pasid_alloc_table()
69 pasid_table->table = dir; in intel_pasid_alloc_table()
70 pasid_table->order = order; in intel_pasid_alloc_table()
71 pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3); in intel_pasid_alloc_table()
72 info->pasid_table = pasid_table; in intel_pasid_alloc_table()
74 if (!ecap_coherent(info->iommu->ecap)) in intel_pasid_alloc_table()
75 clflush_cache_range(pasid_table->table, (1 << order) * PAGE_SIZE); in intel_pasid_alloc_table()
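The shift arithmetic above encodes the two-level PASID table layout: each 8-byte directory entry (hence the "- 3") covers one leaf page of 64-byte PASID entries, i.e. 2^PASID_PDE_SHIFT PASIDs per entry, assuming PASID_PDE_SHIFT is 6. A minimal sketch restating the math, not part of the file:

	/*
	 * Hypothetical helper mirroring intel_pasid_alloc_table(): one 8-byte
	 * directory entry per 2^6 PASIDs, so the directory needs
	 * max_pasid >> (PASID_PDE_SHIFT - 3) bytes.
	 */
	static unsigned long pasid_dir_bytes(u32 max_pasid)
	{
		return (unsigned long)max_pasid >> (6 - 3); /* PASID_PDE_SHIFT - 3 */
	}

Conversely, a directory of 2^order pages holds (1 << (order + PAGE_SHIFT)) / 8 entries, each covering 64 PASIDs, which is exactly the "1 << (order + PAGE_SHIFT + 3)" in the max_pasid assignment above.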
89 if (!info || !dev_is_pci(dev) || !info->pasid_table) in intel_pasid_free_table()
92 pasid_table = info->pasid_table; in intel_pasid_free_table()
93 info->pasid_table = NULL; in intel_pasid_free_table()
96 dir = pasid_table->table; in intel_pasid_free_table()
97 max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT; in intel_pasid_free_table()
103 iommu_free_pages(pasid_table->table, pasid_table->order); in intel_pasid_free_table()
115 return info->pasid_table; in intel_pasid_get_table()
123 if (!info || !info->pasid_table) in intel_pasid_get_dev_max_id()
126 return info->pasid_table->max_pasid; in intel_pasid_get_dev_max_id()
141 dir = pasid_table->table; in intel_pasid_get_entry()
151 entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC); in intel_pasid_get_entry()
167 if (!ecap_coherent(info->iommu->ecap)) { in intel_pasid_get_entry()
217 if (!info || !info->ats_enabled) in devtlb_invalidation_with_pasid()
223 sid = PCI_DEVID(info->bus, info->devfn); in devtlb_invalidation_with_pasid()
224 qdep = info->ats_qdep; in devtlb_invalidation_with_pasid()
225 pfsid = info->pfsid; in devtlb_invalidation_with_pasid()
229 * devTLB flush w/o PASID should be used. For non-zero PASID under in devtlb_invalidation_with_pasid()
234 qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT); in devtlb_invalidation_with_pasid()
236 qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT); in devtlb_invalidation_with_pasid()
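The matched lines show both flush calls but not the branch that selects between them; per the comment above, a plausible shape (an assumption reconstructed from the comment, not quoted from the file) is:

	if (pasid == IOMMU_NO_PASID)
		/* RID2PASID: flush without PASID so non-PASID-tagged
		 * device-TLB entries are covered as well. */
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0,
				   64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0,
					 64 - VTD_PAGE_SHIFT);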
245 spin_lock(&iommu->lock); in intel_pasid_tear_down_entry()
248 spin_unlock(&iommu->lock); in intel_pasid_tear_down_entry()
254 WARN_ON(READ_ONCE(pte->val[0]) != 0); in intel_pasid_tear_down_entry()
255 spin_unlock(&iommu->lock); in intel_pasid_tear_down_entry()
261 * that the pasid entry is non-present with the Fault in intel_pasid_tear_down_entry()
266 spin_unlock(&iommu->lock); in intel_pasid_tear_down_entry()
275 spin_unlock(&iommu->lock); in intel_pasid_tear_down_entry()
277 if (!ecap_coherent(iommu->ecap)) in intel_pasid_tear_down_entry()
283 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); in intel_pasid_tear_down_entry()
285 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in intel_pasid_tear_down_entry()
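Taken together, the teardown flushes above follow the clear-then-invalidate order for a present-to-non-present transition. A sketch of the presumed sequence; the PASID-cache step and the branch condition are elided from the matched lines, and the condition name below is a placeholder:

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	if (first_level_or_passthrough)	/* assumption: branch on the entry's PGTT */
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
	devtlb_invalidation_with_pasid(iommu, dev, pasid);	/* device TLB last */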
294 * Callers must not modify the in-use pasid table entries.
300 if (!ecap_coherent(iommu->ecap)) in pasid_flush_caches()
303 if (cap_caching_mode(iommu->cap)) { in pasid_flush_caches()
305 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); in pasid_flush_caches()
315 * - Flush cacheline if needed
316 * - Flush the caches per Table 28 ”Guidance to Software for Invalidations“
317 * of VT-d spec 5.0.
324 if (!ecap_coherent(iommu->ecap)) in intel_pasid_flush_present()
328 * VT-d spec 5.0 Table 28 gives guidance for cache invalidation: in intel_pasid_flush_present()
330 * - PASID-selective-within-Domain PASID-cache invalidation in intel_pasid_flush_present()
331 * - PASID-selective PASID-based IOTLB invalidation in intel_pasid_flush_present()
332 * - If (pasid is RID_PASID) in intel_pasid_flush_present()
333 * - Global Device-TLB invalidation to affected functions in intel_pasid_flush_present()
335 * - PASID-based Device-TLB invalidation (with S=1 and in intel_pasid_flush_present()
339 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); in intel_pasid_flush_present()
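Only the IOTLB step appears among the matched lines; the sequence the Table 28 comment calls for is presumably three flushes in order. A hedged sketch:

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);	/* PASID cache */
	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);			/* PASID-based IOTLB */
	devtlb_invalidation_with_pasid(iommu, dev, pasid);		/* device TLB */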
352 lockdep_assert_held(&iommu->lock); in pasid_pte_config_first_level()
366 pasid_set_address_width(pte, iommu->agaw); in pasid_pte_config_first_level()
367 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in pasid_pte_config_first_level()
380 if (!ecap_flts(iommu->ecap)) { in intel_pasid_setup_first_level()
382 iommu->name); in intel_pasid_setup_first_level()
383 return -EINVAL; in intel_pasid_setup_first_level()
386 if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) { in intel_pasid_setup_first_level()
387 pr_err("No 5-level paging support for first-level on %s\n", in intel_pasid_setup_first_level()
388 iommu->name); in intel_pasid_setup_first_level()
389 return -EINVAL; in intel_pasid_setup_first_level()
392 spin_lock(&iommu->lock); in intel_pasid_setup_first_level()
395 spin_unlock(&iommu->lock); in intel_pasid_setup_first_level()
396 return -ENODEV; in intel_pasid_setup_first_level()
400 spin_unlock(&iommu->lock); in intel_pasid_setup_first_level()
401 return -EBUSY; in intel_pasid_setup_first_level()
406 spin_unlock(&iommu->lock); in intel_pasid_setup_first_level()
420 if (!ecap_flts(iommu->ecap)) { in intel_pasid_replace_first_level()
422 iommu->name); in intel_pasid_replace_first_level()
423 return -EINVAL; in intel_pasid_replace_first_level()
426 if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) { in intel_pasid_replace_first_level()
427 pr_err("No 5-level paging support for first-level on %s\n", in intel_pasid_replace_first_level()
428 iommu->name); in intel_pasid_replace_first_level()
429 return -EINVAL; in intel_pasid_replace_first_level()
434 spin_lock(&iommu->lock); in intel_pasid_replace_first_level()
437 spin_unlock(&iommu->lock); in intel_pasid_replace_first_level()
438 return -ENODEV; in intel_pasid_replace_first_level()
442 spin_unlock(&iommu->lock); in intel_pasid_replace_first_level()
443 return -EINVAL; in intel_pasid_replace_first_level()
449 spin_unlock(&iommu->lock); in intel_pasid_replace_first_level()
465 lockdep_assert_held(&iommu->lock); in pasid_pte_config_second_level()
473 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in pasid_pte_config_second_level()
493 if (!ecap_slts(iommu->ecap)) { in intel_pasid_setup_second_level()
495 iommu->name); in intel_pasid_setup_second_level()
496 return -EINVAL; in intel_pasid_setup_second_level()
499 pgd = domain->pgd; in intel_pasid_setup_second_level()
503 spin_lock(&iommu->lock); in intel_pasid_setup_second_level()
506 spin_unlock(&iommu->lock); in intel_pasid_setup_second_level()
507 return -ENODEV; in intel_pasid_setup_second_level()
511 spin_unlock(&iommu->lock); in intel_pasid_setup_second_level()
512 return -EBUSY; in intel_pasid_setup_second_level()
515 pasid_pte_config_second_level(iommu, pte, pgd_val, domain->agaw, in intel_pasid_setup_second_level()
516 did, domain->dirty_tracking); in intel_pasid_setup_second_level()
517 spin_unlock(&iommu->lock); in intel_pasid_setup_second_level()
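For orientation, pasid_pte_config_second_level() presumably populates the entry roughly as below. This is a sketch; helpers such as pasid_set_slptr() and pasid_set_ssade() follow the pasid_set_*() convention visible elsewhere in this listing and are assumptions here:

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);		/* second-level page-table root */
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	if (dirty_tracking)
		pasid_set_ssade(pte);		/* enable A/D bit logging */
	pasid_set_present(pte);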
538 if (!ecap_slts(iommu->ecap)) { in intel_pasid_replace_second_level()
540 iommu->name); in intel_pasid_replace_second_level()
541 return -EINVAL; in intel_pasid_replace_second_level()
544 pgd = domain->pgd; in intel_pasid_replace_second_level()
549 domain->agaw, did, in intel_pasid_replace_second_level()
550 domain->dirty_tracking); in intel_pasid_replace_second_level()
552 spin_lock(&iommu->lock); in intel_pasid_replace_second_level()
555 spin_unlock(&iommu->lock); in intel_pasid_replace_second_level()
556 return -ENODEV; in intel_pasid_replace_second_level()
560 spin_unlock(&iommu->lock); in intel_pasid_replace_second_level()
561 return -EINVAL; in intel_pasid_replace_second_level()
567 spin_unlock(&iommu->lock); in intel_pasid_replace_second_level()
585 spin_lock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
589 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
592 return -ENODEV; in intel_pasid_setup_dirty_tracking()
599 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
602 "Dirty tracking not supported on translation type %d\n", in intel_pasid_setup_dirty_tracking()
604 return -EOPNOTSUPP; in intel_pasid_setup_dirty_tracking()
608 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
616 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
618 if (!ecap_coherent(iommu->ecap)) in intel_pasid_setup_dirty_tracking()
622 * From VT-d spec table 25 "Guidance to Software for Invalidations": in intel_pasid_setup_dirty_tracking()
624 * - PASID-selective-within-Domain PASID-cache invalidation in intel_pasid_setup_dirty_tracking()
626 * - Domain-selective IOTLB invalidation in intel_pasid_setup_dirty_tracking()
628 * - PASID-selective PASID-based IOTLB invalidation in intel_pasid_setup_dirty_tracking()
629 * - If (pasid is RID_PASID) in intel_pasid_setup_dirty_tracking()
630 * - Global Device-TLB invalidation to affected functions in intel_pasid_setup_dirty_tracking()
632 * - PASID-based Device-TLB invalidation (with S=1 and in intel_pasid_setup_dirty_tracking()
637 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in intel_pasid_setup_dirty_tracking()
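The toggle itself, performed before the flushes above, is plausibly a read-modify-write of the SSADE bit while holding iommu->lock; a sketch assuming pasid_set_ssade()/pasid_clear_ssade() helpers:

	if (enabled)
		pasid_set_ssade(pte);	/* start recording accessed/dirty bits */
	else
		pasid_clear_ssade(pte);	/* stop recording */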
650 lockdep_assert_held(&iommu->lock); in pasid_pte_config_pass_through()
654 pasid_set_address_width(pte, iommu->agaw); in pasid_pte_config_pass_through()
657 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in pasid_pte_config_pass_through()
667 spin_lock(&iommu->lock); in intel_pasid_setup_pass_through()
670 spin_unlock(&iommu->lock); in intel_pasid_setup_pass_through()
671 return -ENODEV; in intel_pasid_setup_pass_through()
675 spin_unlock(&iommu->lock); in intel_pasid_setup_pass_through()
676 return -EBUSY; in intel_pasid_setup_pass_through()
680 spin_unlock(&iommu->lock); in intel_pasid_setup_pass_through()
696 spin_lock(&iommu->lock); in intel_pasid_replace_pass_through()
699 spin_unlock(&iommu->lock); in intel_pasid_replace_pass_through()
700 return -ENODEV; in intel_pasid_replace_pass_through()
704 spin_unlock(&iommu->lock); in intel_pasid_replace_pass_through()
705 return -EINVAL; in intel_pasid_replace_pass_through()
711 spin_unlock(&iommu->lock); in intel_pasid_replace_pass_through()
728 spin_lock(&iommu->lock); in intel_pasid_setup_page_snoop_control()
731 spin_unlock(&iommu->lock); in intel_pasid_setup_page_snoop_control()
737 spin_unlock(&iommu->lock); in intel_pasid_setup_page_snoop_control()
748 struct dma_pte *pgd = s2_domain->pgd; in pasid_pte_config_nestd()
750 lockdep_assert_held(&iommu->lock); in pasid_pte_config_nestd()
754 if (s1_cfg->addr_width == ADDR_WIDTH_5LEVEL) in pasid_pte_config_nestd()
757 pasid_set_flptr(pte, s1_cfg->pgtbl_addr); in pasid_pte_config_nestd()
759 if (s1_cfg->flags & IOMMU_VTD_S1_SRE) { in pasid_pte_config_nestd()
761 if (s1_cfg->flags & IOMMU_VTD_S1_WPE) in pasid_pte_config_nestd()
765 if (s1_cfg->flags & IOMMU_VTD_S1_EAFE) in pasid_pte_config_nestd()
768 if (s2_domain->force_snooping) in pasid_pte_config_nestd()
774 pasid_set_address_width(pte, s2_domain->agaw); in pasid_pte_config_nestd()
775 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in pasid_pte_config_nestd()
776 if (s2_domain->dirty_tracking) in pasid_pte_config_nestd()
783 * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
787 * @domain: User stage-1 domain nested on a stage-2 domain
796 struct iommu_hwpt_vtd_s1 *s1_cfg = &domain->s1_cfg; in intel_pasid_setup_nested()
797 struct dmar_domain *s2_domain = domain->s2_domain; in intel_pasid_setup_nested()
801 /* Address width should match the address width supported by hardware */ in intel_pasid_setup_nested()
802 switch (s1_cfg->addr_width) { in intel_pasid_setup_nested()
806 if (!cap_fl5lp_support(iommu->cap)) { in intel_pasid_setup_nested()
808 "5-level paging not supported\n"); in intel_pasid_setup_nested()
809 return -EINVAL; in intel_pasid_setup_nested()
813 dev_err_ratelimited(dev, "Invalid stage-1 address width %d\n", in intel_pasid_setup_nested()
814 s1_cfg->addr_width); in intel_pasid_setup_nested()
815 return -EINVAL; in intel_pasid_setup_nested()
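The constants being switched on map directly to first-stage paging depths. As a worked note (values assumed, not shown in the matched lines):

	/*
	 * Assumed values, for orientation:
	 *   ADDR_WIDTH_4LEVEL == 48  -> 4-level first-stage paging, always accepted
	 *   ADDR_WIDTH_5LEVEL == 57  -> 5-level paging, gated on cap_fl5lp_support()
	 */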
818 if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) { in intel_pasid_setup_nested()
820 iommu->name); in intel_pasid_setup_nested()
821 return -EINVAL; in intel_pasid_setup_nested()
824 if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) { in intel_pasid_setup_nested()
826 iommu->name); in intel_pasid_setup_nested()
827 return -EINVAL; in intel_pasid_setup_nested()
830 spin_lock(&iommu->lock); in intel_pasid_setup_nested()
833 spin_unlock(&iommu->lock); in intel_pasid_setup_nested()
834 return -ENODEV; in intel_pasid_setup_nested()
837 spin_unlock(&iommu->lock); in intel_pasid_setup_nested()
838 return -EBUSY; in intel_pasid_setup_nested()
842 spin_unlock(&iommu->lock); in intel_pasid_setup_nested()
853 struct iommu_hwpt_vtd_s1 *s1_cfg = &domain->s1_cfg; in intel_pasid_replace_nested()
854 struct dmar_domain *s2_domain = domain->s2_domain; in intel_pasid_replace_nested()
858 /* Address width should match the address width supported by hardware */ in intel_pasid_replace_nested()
859 switch (s1_cfg->addr_width) { in intel_pasid_replace_nested()
863 if (!cap_fl5lp_support(iommu->cap)) { in intel_pasid_replace_nested()
865 "5-level paging not supported\n"); in intel_pasid_replace_nested()
866 return -EINVAL; in intel_pasid_replace_nested()
870 dev_err_ratelimited(dev, "Invalid stage-1 address width %d\n", in intel_pasid_replace_nested()
871 s1_cfg->addr_width); in intel_pasid_replace_nested()
872 return -EINVAL; in intel_pasid_replace_nested()
875 if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) { in intel_pasid_replace_nested()
877 iommu->name); in intel_pasid_replace_nested()
878 return -EINVAL; in intel_pasid_replace_nested()
881 if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) { in intel_pasid_replace_nested()
883 iommu->name); in intel_pasid_replace_nested()
884 return -EINVAL; in intel_pasid_replace_nested()
889 spin_lock(&iommu->lock); in intel_pasid_replace_nested()
892 spin_unlock(&iommu->lock); in intel_pasid_replace_nested()
893 return -ENODEV; in intel_pasid_replace_nested()
897 spin_unlock(&iommu->lock); in intel_pasid_replace_nested()
898 return -EINVAL; in intel_pasid_replace_nested()
904 spin_unlock(&iommu->lock); in intel_pasid_replace_nested()
913 * Interfaces to set up or tear down a pasid table for the scalable-mode
920 struct intel_iommu *iommu = info->iommu; in device_pasid_table_teardown()
924 spin_lock(&iommu->lock); in device_pasid_table_teardown()
927 spin_unlock(&iommu->lock); in device_pasid_table_teardown()
934 spin_unlock(&iommu->lock); in device_pasid_table_teardown()
942 if (dev == &pdev->dev) in pci_pasid_table_teardown()
953 device_pasid_table_teardown(dev, info->bus, info->devfn); in intel_pasid_teardown_sm_context()
969 max_pde = table->max_pasid >> PASID_PDE_SHIFT; in context_get_sm_pds()
974 return pds - 7; in context_get_sm_pds()
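The "- 7" reflects how the scalable-mode context entry encodes the PASID directory size. A worked example, assuming PASID_PDE_SHIFT is 6 and that a PDS field value of v advertises 2^(v + 7) directory entries:

	/*
	 * max_pasid = 1 << 20  ->  max_pde = max_pasid >> 6 = 1 << 14
	 * pds (bit position)   = 14
	 * returned PDS field   = 14 - 7 = 7
	 * capacity advertised  = 2^(7 + 7) = 2^14 directory entries,
	 *                        each covering 64 PASIDs = 2^20 PASIDs total
	 */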
981 struct pasid_table *table = info->pasid_table; in context_entry_set_pasid_table()
982 struct intel_iommu *iommu = info->iommu; in context_entry_set_pasid_table()
988 context->lo = (u64)virt_to_phys(table->table) | context_pdts(pds); in context_entry_set_pasid_table()
991 if (info->ats_supported) in context_entry_set_pasid_table()
993 if (info->pasid_supported) in context_entry_set_pasid_table()
995 if (info->pri_supported) in context_entry_set_pasid_table()
1008 struct intel_iommu *iommu = info->iommu; in device_pasid_table_setup()
1011 spin_lock(&iommu->lock); in device_pasid_table_setup()
1014 spin_unlock(&iommu->lock); in device_pasid_table_setup()
1015 return -ENOMEM; in device_pasid_table_setup()
1019 spin_unlock(&iommu->lock); in device_pasid_table_setup()
1029 * the in-flight DMA and copied pgtable, but there is no in device_pasid_table_setup()
1036 iommu->flush.flush_context(iommu, 0, in device_pasid_table_setup()
1041 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in device_pasid_table_setup()
1046 * its driver probe stage, so no in-flight DMA will exist, in device_pasid_table_setup()
1053 spin_unlock(&iommu->lock); in device_pasid_table_setup()
1056 * It's a non-present to present mapping. If hardware doesn't cache in device_pasid_table_setup()
1057 * non-present entry we don't need to flush the caches. If it does in device_pasid_table_setup()
1058 * cache non-present entries, then it does so in the special in device_pasid_table_setup()
1061 if (cap_caching_mode(iommu->cap)) { in device_pasid_table_setup()
1062 iommu->flush.flush_context(iommu, 0, in device_pasid_table_setup()
1066 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH); in device_pasid_table_setup()
1076 if (dev != &pdev->dev) in pci_pasid_table_setup()
1093 return device_pasid_table_setup(dev, info->bus, info->devfn); in intel_pasid_setup_sm_context()
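The pci_for_each_dma_alias() call that drives pci_pasid_table_setup() is not among the matched lines; a minimal sketch of the presumed dispatch (function body reconstructed as an assumption, renamed to make that clear):

	static int intel_pasid_setup_sm_context_sketch(struct device *dev)
	{
		struct device_domain_info *info = dev_iommu_priv_get(dev);

		if (!dev_is_pci(dev))
			return device_pasid_table_setup(dev, info->bus, info->devfn);

		/* Program a context entry for every DMA alias of the device. */
		return pci_for_each_dma_alias(to_pci_dev(dev),
					      pci_pasid_table_setup, dev);
	}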
1099 * Global Device-TLB invalidation following changes in a context entry which
1104 if (!info->ats_enabled) in __context_flush_dev_iotlb()
1107 qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn), in __context_flush_dev_iotlb()
1108 info->pfsid, info->ats_qdep, 0, MAX_AGAW_PFN_WIDTH); in __context_flush_dev_iotlb()
1114 * performance-critical path. in __context_flush_dev_iotlb()
1117 info->ats_qdep); in __context_flush_dev_iotlb()
1125 * non-present.
1130 struct intel_iommu *iommu = info->iommu; in intel_context_flush_no_pasid()
1133 * Device-selective context-cache invalidation. The Domain-ID field in intel_context_flush_no_pasid()
1134 * of the Context-cache Invalidate Descriptor is ignored by hardware in intel_context_flush_no_pasid()
1138 iommu->flush.flush_context(iommu, did, PCI_DEVID(info->bus, info->devfn), in intel_context_flush_no_pasid()
1143 * - Domain-selective IOTLB invalidation in intel_context_flush_no_pasid()
1144 * - Global Device-TLB invalidation to all affected functions in intel_context_flush_no_pasid()
1147 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in intel_context_flush_no_pasid()