Lines matching full:iommu (each hit shows its source line number and, where applicable, the enclosing function)
15 #include <linux/iommu.h>
21 #include "iommu.h"
23 #include "../iommu-pages.h"
26 * Intel IOMMU system wide PASID name space:
63 dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order); in intel_pasid_alloc_table()
74 if (!ecap_coherent(info->iommu->ecap)) in intel_pasid_alloc_table()
151 entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC); in intel_pasid_get_entry()
167 if (!ecap_coherent(info->iommu->ecap)) { in intel_pasid_get_entry()
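The two ecap_coherent() tests above (lines 74 and 167) guard explicit cache maintenance: when the IOMMU's table walk is not coherent with the CPU caches, freshly written table memory has to be flushed before the hardware can observe it. A minimal sketch of that pattern around the lazy allocation in intel_pasid_get_entry(); error handling and the atomic installation of the directory entry are omitted, and dir/dir_index stand in for the directory slot being populated (names assumed, not taken from the matched lines):

	/* Allocate the leaf PASID table page on first use of this range. */
	entries = iommu_alloc_page_node(info->iommu->node, GFP_ATOMIC);
	if (!entries)
		return NULL;

	/*
	 * Non-coherent IOMMUs (ecap_coherent() == 0) read the tables
	 * straight from memory, so push the new page and the directory
	 * entry that points to it out of the CPU caches.
	 */
	if (!ecap_coherent(info->iommu->ecap)) {
		clflush_cache_range(entries, VTD_PAGE_SIZE);
		clflush_cache_range(&dir[dir_index], sizeof(dir[dir_index]));
	}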
195 pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu, in pasid_cache_invalidation_with_pasid() argument
206 qi_submit_sync(iommu, &desc, 1, 0); in pasid_cache_invalidation_with_pasid()
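qi_submit_sync() at line 206 pushes a single invalidation descriptor through the invalidation queue and waits for it to complete. A sketch of how the PASID-cache invalidation descriptor for that call can be built, assuming the QI_PC_* helpers from the Intel IOMMU headers (the function in pasid.c may differ in detail):

static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	/* PASID-selective-within-domain PASID-cache invalidation. */
	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		   QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	/* One descriptor, no submit options, wait for completion. */
	qi_submit_sync(iommu, &desc, 1, 0);
}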
210 devtlb_invalidation_with_pasid(struct intel_iommu *iommu, in devtlb_invalidation_with_pasid() argument
234 qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT); in devtlb_invalidation_with_pasid()
236 qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT); in devtlb_invalidation_with_pasid()
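Lines 234 and 236 are the two arms of the device-TLB flush: the plain flush is used for requests without PASID, the PASID-qualified flush otherwise, both covering the whole address space (64 - VTD_PAGE_SHIFT address bits). A sketch of that selection, assuming the device_domain_info fields used elsewhere in the driver:

	struct device_domain_info *info = dev_iommu_priv_get(dev);
	u16 sid, qdep, pfsid;

	/* Nothing to flush unless ATS is enabled on the device. */
	if (!info || !info->ats_enabled)
		return;

	sid = PCI_DEVID(info->bus, info->devfn);
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	if (pasid == IOMMU_NO_PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep,
				   0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep,
					 0, 64 - VTD_PAGE_SHIFT);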
239 void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, in intel_pasid_tear_down_entry() argument
245 spin_lock(&iommu->lock); in intel_pasid_tear_down_entry()
248 spin_unlock(&iommu->lock); in intel_pasid_tear_down_entry()
255 spin_unlock(&iommu->lock); in intel_pasid_tear_down_entry()
257 if (!ecap_coherent(iommu->ecap)) in intel_pasid_tear_down_entry()
260 pasid_cache_invalidation_with_pasid(iommu, did, pasid); in intel_pasid_tear_down_entry()
263 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); in intel_pasid_tear_down_entry()
265 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in intel_pasid_tear_down_entry()
267 devtlb_invalidation_with_pasid(iommu, dev, pasid); in intel_pasid_tear_down_entry()
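The teardown path above follows the VT-d "modify a present PASID entry" guidance: clear the entry under iommu->lock, flush the cacheline if the IOMMU is not coherent, then invalidate the PASID cache, the (PASID-)IOTLB and finally the device TLB. A condensed sketch of that ordering; fault_ignore is the function's trailing parameter in the driver, and the pasid_pte_get_pgtt()/pasid_get_domain_id() accessors are the pasid.h helpers assumed here:

	struct pasid_entry *pte;
	u16 did, pgtt;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte || !pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return;
	}
	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	/* First-level/pass-through entries use the PASID-based IOTLB flush. */
	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	devtlb_invalidation_with_pasid(iommu, dev, pasid);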
274 static void pasid_flush_caches(struct intel_iommu *iommu, in pasid_flush_caches() argument
278 if (!ecap_coherent(iommu->ecap)) in pasid_flush_caches()
281 if (cap_caching_mode(iommu->cap)) { in pasid_flush_caches()
282 pasid_cache_invalidation_with_pasid(iommu, did, pasid); in pasid_flush_caches()
283 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); in pasid_flush_caches()
285 iommu_flush_write_buffer(iommu); in pasid_flush_caches()
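pasid_flush_caches() is the common tail of every setup path below: flush the modified entry out of the CPU caches if needed, then either invalidate explicitly (when caching mode is set, typically under a virtual IOMMU) or just flush the write buffer. A sketch assembled from the matched lines:

static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		/* CM=1: hardware may cache not-present entries, so flush. */
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}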
293 int intel_pasid_setup_first_level(struct intel_iommu *iommu, in intel_pasid_setup_first_level() argument
299 if (!ecap_flts(iommu->ecap)) { in intel_pasid_setup_first_level()
301 iommu->name); in intel_pasid_setup_first_level()
305 if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) { in intel_pasid_setup_first_level()
307 iommu->name); in intel_pasid_setup_first_level()
311 spin_lock(&iommu->lock); in intel_pasid_setup_first_level()
314 spin_unlock(&iommu->lock); in intel_pasid_setup_first_level()
319 spin_unlock(&iommu->lock); in intel_pasid_setup_first_level()
335 pasid_set_address_width(pte, iommu->agaw); in intel_pasid_setup_first_level()
336 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in intel_pasid_setup_first_level()
341 spin_unlock(&iommu->lock); in intel_pasid_setup_first_level()
343 pasid_flush_caches(iommu, pte, pasid, did); in intel_pasid_setup_first_level()
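The first-level setup above follows the skeleton shared by all intel_pasid_setup_*() variants: reject unsupported capabilities up front, take iommu->lock, look up the entry, refuse to overwrite a present one, program the fields, unlock and flush. A compressed sketch of that flow, assuming the pasid_set_*() helpers from pasid.h and showing only a subset of the programmed fields:

	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap))
		return -EINVAL;	/* no first-stage translation support */
	if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap))
		return -EINVAL;	/* 5-level paging requested but unsupported */

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}
	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_flptr(pte, (u64)__pa(pgd));
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);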
349 * Skip top levels of page tables for an iommu which has less agaw
353 struct intel_iommu *iommu, in iommu_skip_agaw() argument
358 for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) { in iommu_skip_agaw()
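The loop at line 358 walks down the domain's page table until the domain's address width matches what this IOMMU supports, so a wider domain can still be attached to a narrower IOMMU. A sketch of the whole helper, assuming the dma_pte accessors from the driver's headers:

static int iommu_skip_agaw(struct dmar_domain *domain,
			   struct intel_iommu *iommu,
			   struct dma_pte **pgd)
{
	int agaw;

	/* Descend one level per excess AGAW step of the domain. */
	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}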
370 int intel_pasid_setup_second_level(struct intel_iommu *iommu, in intel_pasid_setup_second_level() argument
384 if (!ecap_slts(iommu->ecap)) { in intel_pasid_setup_second_level()
386 iommu->name); in intel_pasid_setup_second_level()
391 agaw = iommu_skip_agaw(domain, iommu, &pgd); in intel_pasid_setup_second_level()
398 did = domain_id_iommu(domain, iommu); in intel_pasid_setup_second_level()
400 spin_lock(&iommu->lock); in intel_pasid_setup_second_level()
403 spin_unlock(&iommu->lock); in intel_pasid_setup_second_level()
408 spin_unlock(&iommu->lock); in intel_pasid_setup_second_level()
418 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in intel_pasid_setup_second_level()
423 spin_unlock(&iommu->lock); in intel_pasid_setup_second_level()
425 pasid_flush_caches(iommu, pte, pasid, did); in intel_pasid_setup_second_level()
433 int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu, in intel_pasid_setup_dirty_tracking() argument
440 spin_lock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
444 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
454 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
463 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
471 spin_unlock(&iommu->lock); in intel_pasid_setup_dirty_tracking()
473 if (!ecap_coherent(iommu->ecap)) in intel_pasid_setup_dirty_tracking()
490 pasid_cache_invalidation_with_pasid(iommu, did, pasid); in intel_pasid_setup_dirty_tracking()
492 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in intel_pasid_setup_dirty_tracking()
494 devtlb_invalidation_with_pasid(iommu, dev, pasid); in intel_pasid_setup_dirty_tracking()
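Toggling dirty tracking modifies a live (present) second-stage or nested PASID entry, so after setting or clearing the SSADE bit under iommu->lock the code replays the "modify present entry" invalidation sequence: PASID cache, a domain-selective IOTLB flush, then the device TLB. A condensed sketch of the tail of this path, with the earlier sanity checks omitted and pte/did assumed to come from the lookup done under the lock:

	if (enabled)
		pasid_set_ssade(pte);	/* second-stage access/dirty enable */
	else
		pasid_clear_ssade(pte);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/* VT-d "modify present PASID entry" sequence. */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
	devtlb_invalidation_with_pasid(iommu, dev, pasid);

	return 0;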
502 int intel_pasid_setup_pass_through(struct intel_iommu *iommu, in intel_pasid_setup_pass_through() argument
508 spin_lock(&iommu->lock); in intel_pasid_setup_pass_through()
511 spin_unlock(&iommu->lock); in intel_pasid_setup_pass_through()
516 spin_unlock(&iommu->lock); in intel_pasid_setup_pass_through()
522 pasid_set_address_width(pte, iommu->agaw); in intel_pasid_setup_pass_through()
525 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in intel_pasid_setup_pass_through()
527 spin_unlock(&iommu->lock); in intel_pasid_setup_pass_through()
529 pasid_flush_caches(iommu, pte, pasid, did); in intel_pasid_setup_pass_through()
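Pass-through setup reuses the same skeleton; the substantive difference from the first-level case is that the translation type is set to pass-through, so DMA tagged with this PASID bypasses address translation. A short sketch of the fields programmed between the lock/unlock pair above (the choice of domain id is an assumption carried over from the driver):

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	pasid_set_present(pte);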
537 void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu, in intel_pasid_setup_page_snoop_control() argument
543 spin_lock(&iommu->lock); in intel_pasid_setup_page_snoop_control()
546 spin_unlock(&iommu->lock); in intel_pasid_setup_page_snoop_control()
552 spin_unlock(&iommu->lock); in intel_pasid_setup_page_snoop_control()
554 if (!ecap_coherent(iommu->ecap)) in intel_pasid_setup_page_snoop_control()
568 pasid_cache_invalidation_with_pasid(iommu, did, pasid); in intel_pasid_setup_page_snoop_control()
569 qi_flush_piotlb(iommu, did, pasid, 0, -1, 0); in intel_pasid_setup_page_snoop_control()
571 devtlb_invalidation_with_pasid(iommu, dev, pasid); in intel_pasid_setup_page_snoop_control()
576 * @iommu: IOMMU which the device belongs to
585 int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev, in intel_pasid_setup_nested() argument
591 u16 did = domain_id_iommu(domain, iommu); in intel_pasid_setup_nested()
600 if (!cap_fl5lp_support(iommu->cap)) { in intel_pasid_setup_nested()
612 if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) { in intel_pasid_setup_nested()
614 iommu->name); in intel_pasid_setup_nested()
618 if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) { in intel_pasid_setup_nested()
620 iommu->name); in intel_pasid_setup_nested()
624 spin_lock(&iommu->lock); in intel_pasid_setup_nested()
627 spin_unlock(&iommu->lock); in intel_pasid_setup_nested()
631 spin_unlock(&iommu->lock); in intel_pasid_setup_nested()
658 pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); in intel_pasid_setup_nested()
663 spin_unlock(&iommu->lock); in intel_pasid_setup_nested()
665 pasid_flush_caches(iommu, pte, pasid, did); in intel_pasid_setup_nested()
678 struct intel_iommu *iommu = info->iommu; in device_pasid_table_teardown() local
682 spin_lock(&iommu->lock); in device_pasid_table_teardown()
683 context = iommu_context_addr(iommu, bus, devfn, false); in device_pasid_table_teardown()
685 spin_unlock(&iommu->lock); in device_pasid_table_teardown()
691 __iommu_flush_cache(iommu, context, sizeof(*context)); in device_pasid_table_teardown()
692 spin_unlock(&iommu->lock); in device_pasid_table_teardown()
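Tearing the PASID-table reference out of a context entry is done under iommu->lock: clear the entry, write it back with __iommu_flush_cache() in case the IOMMU is not coherent, then invalidate the caches that may still reference it. A sketch of that teardown around the matched lines; the did handling and the trailing intel_context_flush_present() call are assumptions based on nearby code in the driver:

	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	struct context_entry *context;
	u16 did;

	spin_lock(&iommu->lock);
	context = iommu_context_addr(iommu, bus, devfn, false);
	if (!context) {
		spin_unlock(&iommu->lock);
		return;
	}

	did = context_domain_id(context);
	context_clear_entry(context);
	__iommu_flush_cache(iommu, context, sizeof(*context));
	spin_unlock(&iommu->lock);

	/* Flush the caches for the entry that was just cleared. */
	intel_context_flush_present(info, context, did, false);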
740 struct intel_iommu *iommu = info->iommu; in context_entry_set_pasid_table() local
756 __iommu_flush_cache(iommu, context, sizeof(*context)); in context_entry_set_pasid_table()
764 struct intel_iommu *iommu = info->iommu; in device_pasid_table_setup() local
767 spin_lock(&iommu->lock); in device_pasid_table_setup()
768 context = iommu_context_addr(iommu, bus, devfn, true); in device_pasid_table_setup()
770 spin_unlock(&iommu->lock); in device_pasid_table_setup()
774 if (context_present(context) && !context_copied(iommu, bus, devfn)) { in device_pasid_table_setup()
775 spin_unlock(&iommu->lock); in device_pasid_table_setup()
779 if (context_copied(iommu, bus, devfn)) { in device_pasid_table_setup()
781 __iommu_flush_cache(iommu, context, sizeof(*context)); in device_pasid_table_setup()
792 iommu->flush.flush_context(iommu, 0, in device_pasid_table_setup()
796 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0); in device_pasid_table_setup()
797 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); in device_pasid_table_setup()
798 devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID); in device_pasid_table_setup()
805 clear_context_copied(iommu, bus, devfn); in device_pasid_table_setup()
809 spin_unlock(&iommu->lock); in device_pasid_table_setup()
817 if (cap_caching_mode(iommu->cap)) { in device_pasid_table_setup()
818 iommu->flush.flush_context(iommu, 0, in device_pasid_table_setup()
822 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH); in device_pasid_table_setup()
863 qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn), in __context_flush_dev_iotlb()
879 * IOMMU is in scalable mode and all PASID table entries of the device were
886 struct intel_iommu *iommu = info->iommu; in intel_context_flush_present() local
896 iommu->flush.flush_context(iommu, did, PCI_DEVID(info->bus, info->devfn), in intel_context_flush_present()
904 if (!sm_supported(iommu)) { in intel_context_flush_present()
905 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in intel_context_flush_present()
919 * If the IOMMU is running in scalable mode and there might in intel_context_flush_present()
924 assert_spin_locked(&iommu->lock); in intel_context_flush_present()
931 qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0); in intel_context_flush_present()
932 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); in intel_context_flush_present()
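The final block is the cache flushing performed after a present context entry has changed. In legacy mode a domain-selective IOTLB flush suffices (line 905); in scalable mode per-PASID state may also be stale, which is why the code walks the device's PASID table under iommu->lock and issues a PASID-cache plus IOTLB invalidation per present entry (lines 931-932). A condensed sketch of that scalable-mode loop, assuming info->pasid_table and the pasid.h accessors:

	struct pasid_entry *pte;
	u16 did;
	int i;

	assert_spin_locked(&iommu->lock);

	/* Flush per-domain state for every PASID still present on the device. */
	for (i = 0; i < info->pasid_table->max_pasid; i++) {
		pte = intel_pasid_get_entry(info->dev, i);
		if (!pte || !pasid_pte_is_present(pte))
			continue;

		did = pasid_get_domain_id(pte);
		qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
	}

	__context_flush_dev_iotlb(info);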