Lines Matching +full:iommu +full:- +full:map +full:- +full:mask
1 // SPDX-License-Identifier: GPL-2.0-only
3 * A fairly generic DMA-API to IOMMU-API glue layer.
5 * Copyright (C) 2014-2015 ARM Ltd.
7 * based in part on arch/arm/mm/dma-mapping.c:
8 * Copyright (C) 2000-2004 Russell King
15 #include <linux/dma-direct.h>
16 #include <linux/dma-map-ops.h>
19 #include <linux/iommu.h>
20 #include <linux/iommu-dma.h>
30 #include <linux/pci-p2pdma.h>
37 #include "dma-iommu.h"
38 #include "iommu-pages.h"
75 /* Options for dma-iommu use */
95 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
113 /* Per-CPU flush queue structure */
122 for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
126 assert_spin_locked(&fq->lock); in fq_full()
127 return (((fq->tail + 1) & fq->mod_mask) == fq->head); in fq_full()
132 unsigned int idx = fq->tail; in fq_ring_add()
134 assert_spin_locked(&fq->lock); in fq_ring_add()
136 fq->tail = (idx + 1) & fq->mod_mask; in fq_ring_add()
143 u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt); in fq_ring_free_locked()
146 assert_spin_locked(&fq->lock); in fq_ring_free_locked()
150 if (fq->entries[idx].counter >= counter) in fq_ring_free_locked()
153 iommu_put_pages_list(&fq->entries[idx].freelist); in fq_ring_free_locked()
154 free_iova_fast(&cookie->iovad, in fq_ring_free_locked()
155 fq->entries[idx].iova_pfn, in fq_ring_free_locked()
156 fq->entries[idx].pages); in fq_ring_free_locked()
158 fq->entries[idx].freelist = in fq_ring_free_locked()
159 IOMMU_PAGES_LIST_INIT(fq->entries[idx].freelist); in fq_ring_free_locked()
160 fq->head = (fq->head + 1) & fq->mod_mask; in fq_ring_free_locked()
168 spin_lock_irqsave(&fq->lock, flags); in fq_ring_free()
170 spin_unlock_irqrestore(&fq->lock, flags); in fq_ring_free()
175 atomic64_inc(&cookie->fq_flush_start_cnt); in fq_flush_iotlb()
176 cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain); in fq_flush_iotlb()
177 atomic64_inc(&cookie->fq_flush_finish_cnt); in fq_flush_iotlb()
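The flush queue shown here is a power-of-two ring: head and tail wrap with a bitwise AND against mod_mask (fq_size - 1), and each queued entry is stamped with fq_flush_start_cnt so it can only be reaped once fq_flush_finish_cnt has advanced past that stamp, i.e. a full IOTLB flush started after the entry was queued has completed. A minimal, self-contained sketch of that ring/counter protocol (plain C, locking and the freelists omitted; the 8-entry size is arbitrary):

#include <stdbool.h>

#define RING_SIZE 8				/* must be a power of two */

struct ring {
	unsigned int head, tail, mod_mask;	/* mod_mask = RING_SIZE - 1 */
	unsigned long long counter[RING_SIZE];	/* flush-count stamp per entry */
};

static bool ring_full(const struct ring *r)
{
	return ((r->tail + 1) & r->mod_mask) == r->head;
}

static unsigned int ring_push(struct ring *r, unsigned long long flush_start_cnt)
{
	unsigned int idx = r->tail;

	r->counter[idx] = flush_start_cnt;	/* stamp with the running flush count */
	r->tail = (idx + 1) & r->mod_mask;	/* wrap without a division */
	return idx;
}

/* Reap entries whose stamped flush has since completed. */
static void ring_reap(struct ring *r, unsigned long long flush_finish_cnt)
{
	unsigned int i;

	for (i = r->head; i != r->tail; i = (i + 1) & r->mod_mask) {
		if (r->counter[i] >= flush_finish_cnt)
			break;
		r->head = (r->head + 1) & r->mod_mask;
	}
}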
186 atomic_set(&cookie->fq_timer_on, 0); in fq_flush_timeout()
189 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) { in fq_flush_timeout()
190 fq_ring_free(cookie, cookie->single_fq); in fq_flush_timeout()
193 fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu)); in fq_flush_timeout()
206 * Order against the IOMMU driver's pagetable update from unmapping in queue_iova()
214 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) in queue_iova()
215 fq = cookie->single_fq; in queue_iova()
217 fq = raw_cpu_ptr(cookie->percpu_fq); in queue_iova()
219 spin_lock_irqsave(&fq->lock, flags); in queue_iova()
235 fq->entries[idx].iova_pfn = pfn; in queue_iova()
236 fq->entries[idx].pages = pages; in queue_iova()
237 fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt); in queue_iova()
238 iommu_pages_list_splice(freelist, &fq->entries[idx].freelist); in queue_iova()
240 spin_unlock_irqrestore(&fq->lock, flags); in queue_iova()
243 if (!atomic_read(&cookie->fq_timer_on) && in queue_iova()
244 !atomic_xchg(&cookie->fq_timer_on, 1)) in queue_iova()
245 mod_timer(&cookie->fq_timer, in queue_iova()
246 jiffies + msecs_to_jiffies(cookie->options.fq_timeout)); in queue_iova()
254 iommu_put_pages_list(&fq->entries[idx].freelist); in iommu_dma_free_fq_single()
267 iommu_put_pages_list(&fq->entries[idx].freelist); in iommu_dma_free_fq_percpu()
275 if (!cookie->fq_domain) in iommu_dma_free_fq()
278 timer_delete_sync(&cookie->fq_timer); in iommu_dma_free_fq()
279 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) in iommu_dma_free_fq()
280 iommu_dma_free_fq_single(cookie->single_fq); in iommu_dma_free_fq()
282 iommu_dma_free_fq_percpu(cookie->percpu_fq); in iommu_dma_free_fq()
289 fq->head = 0; in iommu_dma_init_one_fq()
290 fq->tail = 0; in iommu_dma_init_one_fq()
291 fq->mod_mask = fq_size - 1; in iommu_dma_init_one_fq()
293 spin_lock_init(&fq->lock); in iommu_dma_init_one_fq()
296 fq->entries[i].freelist = in iommu_dma_init_one_fq()
297 IOMMU_PAGES_LIST_INIT(fq->entries[i].freelist); in iommu_dma_init_one_fq()
302 size_t fq_size = cookie->options.fq_size; in iommu_dma_init_fq_single()
307 return -ENOMEM; in iommu_dma_init_fq_single()
309 cookie->single_fq = queue; in iommu_dma_init_fq_single()
316 size_t fq_size = cookie->options.fq_size; in iommu_dma_init_fq_percpu()
323 return -ENOMEM; in iommu_dma_init_fq_percpu()
327 cookie->percpu_fq = queue; in iommu_dma_init_fq_percpu()
334 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_init_fq()
337 if (cookie->fq_domain) in iommu_dma_init_fq()
340 atomic64_set(&cookie->fq_flush_start_cnt, 0); in iommu_dma_init_fq()
341 atomic64_set(&cookie->fq_flush_finish_cnt, 0); in iommu_dma_init_fq()
343 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) in iommu_dma_init_fq()
350 return -ENOMEM; in iommu_dma_init_fq()
353 timer_setup(&cookie->fq_timer, fq_flush_timeout, 0); in iommu_dma_init_fq()
354 atomic_set(&cookie->fq_timer_on, 0); in iommu_dma_init_fq()
360 WRITE_ONCE(cookie->fq_domain, domain); in iommu_dma_init_fq()
365 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
366 * @domain: IOMMU domain to prepare for DMA-API usage
372 if (domain->cookie_type != IOMMU_COOKIE_NONE) in iommu_get_dma_cookie()
373 return -EEXIST; in iommu_get_dma_cookie()
377 return -ENOMEM; in iommu_get_dma_cookie()
379 INIT_LIST_HEAD(&cookie->msi_page_list); in iommu_get_dma_cookie()
380 domain->cookie_type = IOMMU_COOKIE_DMA_IOVA; in iommu_get_dma_cookie()
381 domain->iova_cookie = cookie; in iommu_get_dma_cookie()
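iommu_get_dma_cookie() attaches the IOVA-tracking cookie to a paging domain and must be balanced by iommu_put_dma_cookie() before the domain goes away. A hedged sketch of that pairing; the real call sites live in the IOMMU core, and example_alloc_dma_domain() is purely illustrative:

#include <linux/err.h>
#include <linux/iommu.h>
#include "dma-iommu.h"	/* core-internal declarations of the cookie helpers */

/* Illustrative only: acquire the DMA cookie for a fresh paging domain. */
static struct iommu_domain *example_alloc_dma_domain(struct device *dev)
{
	struct iommu_domain *domain = iommu_paging_domain_alloc(dev);

	if (IS_ERR(domain))
		return domain;

	if (iommu_get_dma_cookie(domain)) {	/* -EEXIST or -ENOMEM */
		iommu_domain_free(domain);
		return ERR_PTR(-ENOMEM);
	}
	return domain;	/* iommu_put_dma_cookie() balances this at teardown */
}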
386 * iommu_get_msi_cookie - Acquire just MSI remapping resources
387 * @domain: IOMMU domain to prepare
401 if (domain->type != IOMMU_DOMAIN_UNMANAGED) in iommu_get_msi_cookie()
402 return -EINVAL; in iommu_get_msi_cookie()
404 if (domain->cookie_type != IOMMU_COOKIE_NONE) in iommu_get_msi_cookie()
405 return -EEXIST; in iommu_get_msi_cookie()
409 return -ENOMEM; in iommu_get_msi_cookie()
411 cookie->msi_iova = base; in iommu_get_msi_cookie()
412 INIT_LIST_HEAD(&cookie->msi_page_list); in iommu_get_msi_cookie()
413 domain->cookie_type = IOMMU_COOKIE_DMA_MSI; in iommu_get_msi_cookie()
414 domain->msi_cookie = cookie; in iommu_get_msi_cookie()
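iommu_get_msi_cookie() provides only the MSI-page bookkeeping for a caller-managed (IOMMU_DOMAIN_UNMANAGED) domain, with the caller choosing the IOVA window base. A hedged sketch, loosely modelled on how VFIO-style users set this up; the base address is a placeholder:

#include <linux/errno.h>
#include <linux/iommu.h>

#define EXAMPLE_MSI_IOVA_BASE	0x08000000UL	/* placeholder window base */

static int example_enable_msi_remap(struct iommu_domain *domain)
{
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	/* Fails with -EEXIST if the domain already carries a cookie. */
	return iommu_get_msi_cookie(domain, EXAMPLE_MSI_IOVA_BASE);
}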
420 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
421 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
425 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_put_dma_cookie()
428 if (cookie->iovad.granule) { in iommu_put_dma_cookie()
430 put_iova_domain(&cookie->iovad); in iommu_put_dma_cookie()
432 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) in iommu_put_dma_cookie()
438 * iommu_put_msi_cookie - Release a domain's MSI mapping resources
439 * @domain: IOMMU domain previously prepared by iommu_get_msi_cookie()
443 struct iommu_dma_msi_cookie *cookie = domain->msi_cookie; in iommu_put_msi_cookie()
446 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) in iommu_put_msi_cookie()
452 * iommu_dma_get_resv_regions - Reserved region driver helper
456 * IOMMU drivers can use this to implement their .get_resv_regions callback
457 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
464 if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode)) in iommu_dma_get_resv_regions()
467 if (dev->of_node) in iommu_dma_get_resv_regions()
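An IOMMU driver typically chains iommu_dma_get_resv_regions() from its own .get_resv_regions callback after adding hardware-specific regions. A hedged sketch; the software MSI window base/size are placeholders, and the protection flags follow the SW_MSI convention used by the Arm SMMU drivers:

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/slab.h>

#define EXAMPLE_SW_MSI_BASE	0x08000000UL	/* placeholder */
#define EXAMPLE_SW_MSI_SIZE	0x00100000UL	/* placeholder, 1 MiB */

static void example_get_resv_regions(struct device *dev, struct list_head *head)
{
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(EXAMPLE_SW_MSI_BASE, EXAMPLE_SW_MSI_SIZE,
					 prot, IOMMU_RESV_SW_MSI, GFP_KERNEL);
	if (!region)
		return;
	list_add_tail(&region->list, head);

	/* Generic, non-IOMMU-specific reservations (e.g. GICv3 ITS ranges). */
	iommu_dma_get_resv_regions(dev, head);
}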
475 struct iova_domain *iovad = &cookie->iovad; in cookie_init_hw_msi_region()
479 start -= iova_offset(iovad, start); in cookie_init_hw_msi_region()
480 num_pages = iova_align(iovad, end - start) >> iova_shift(iovad); in cookie_init_hw_msi_region()
485 return -ENOMEM; in cookie_init_hw_msi_region()
487 msi_page->phys = start; in cookie_init_hw_msi_region()
488 msi_page->iova = start; in cookie_init_hw_msi_region()
489 INIT_LIST_HEAD(&msi_page->list); in cookie_init_hw_msi_region()
490 list_add(&msi_page->list, &cookie->msi_page_list); in cookie_init_hw_msi_region()
491 start += iovad->granule; in cookie_init_hw_msi_region()
503 return res_a->res->start > res_b->res->start; in iommu_dma_ranges_sort()
509 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); in iova_reserve_pci_windows()
514 resource_list_for_each_entry(window, &bridge->windows) { in iova_reserve_pci_windows()
515 if (resource_type(window->res) != IORESOURCE_MEM) in iova_reserve_pci_windows()
518 lo = iova_pfn(iovad, window->res->start - window->offset); in iova_reserve_pci_windows()
519 hi = iova_pfn(iovad, window->res->end - window->offset); in iova_reserve_pci_windows()
524 list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort); in iova_reserve_pci_windows()
525 resource_list_for_each_entry(window, &bridge->dma_ranges) { in iova_reserve_pci_windows()
526 end = window->res->start - window->offset; in iova_reserve_pci_windows()
533 /* DMA ranges should be non-overlapping */ in iova_reserve_pci_windows()
534 dev_err(&dev->dev, in iova_reserve_pci_windows()
535 "Failed to reserve IOVA [%pa-%pa]\n", in iova_reserve_pci_windows()
537 return -EINVAL; in iova_reserve_pci_windows()
540 start = window->res->end - window->offset + 1; in iova_reserve_pci_windows()
542 if (window->node.next == &bridge->dma_ranges && in iova_reserve_pci_windows()
555 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iova_reserve_iommu_regions()
556 struct iova_domain *iovad = &cookie->iovad; in iova_reserve_iommu_regions()
572 if (region->type == IOMMU_RESV_SW_MSI) in iova_reserve_iommu_regions()
575 lo = iova_pfn(iovad, region->start); in iova_reserve_iommu_regions()
576 hi = iova_pfn(iovad, region->start + region->length - 1); in iova_reserve_iommu_regions()
579 if (region->type == IOMMU_RESV_MSI) in iova_reserve_iommu_regions()
580 ret = cookie_init_hw_msi_region(cookie, region->start, in iova_reserve_iommu_regions()
581 region->start + region->length); in iova_reserve_iommu_regions()
592 return dev_is_pci(dev) && to_pci_dev(dev)->untrusted; in dev_is_untrusted()
616 * If kmalloc() buffers are not DMA-safe for this device and in dev_use_sg_swiotlb()
622 if (!dma_kmalloc_size_aligned(s->length)) in dev_use_sg_swiotlb()
630 * iommu_dma_init_options - Initialize dma-iommu options
634 * This allows tuning dma-iommu behaviour based on device properties
640 if (dev->iommu->shadow_on_flush) { in iommu_dma_init_options()
641 options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE; in iommu_dma_init_options()
642 options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT; in iommu_dma_init_options()
643 options->fq_size = IOVA_SINGLE_FQ_SIZE; in iommu_dma_init_options()
645 options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE; in iommu_dma_init_options()
646 options->fq_size = IOVA_DEFAULT_FQ_SIZE; in iommu_dma_init_options()
647 options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT; in iommu_dma_init_options()
652 * iommu_dma_init_domain - Initialise a DMA mapping domain
653 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
662 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_init_domain()
663 const struct bus_dma_region *map = dev->dma_range_map; in iommu_dma_init_domain() local
668 if (!cookie || domain->cookie_type != IOMMU_COOKIE_DMA_IOVA) in iommu_dma_init_domain()
669 return -EINVAL; in iommu_dma_init_domain()
671 iovad = &cookie->iovad; in iommu_dma_init_domain()
674 order = __ffs(domain->pgsize_bitmap); in iommu_dma_init_domain()
678 if (map) { in iommu_dma_init_domain()
679 if (dma_range_map_min(map) > domain->geometry.aperture_end || in iommu_dma_init_domain()
680 dma_range_map_max(map) < domain->geometry.aperture_start) { in iommu_dma_init_domain()
681 pr_warn("specified DMA range outside IOMMU capability\n"); in iommu_dma_init_domain()
682 return -EFAULT; in iommu_dma_init_domain()
687 domain->geometry.aperture_start >> order); in iommu_dma_init_domain()
689 /* start_pfn is always nonzero for an already-initialised domain */ in iommu_dma_init_domain()
690 if (iovad->start_pfn) { in iommu_dma_init_domain()
691 if (1UL << order != iovad->granule || in iommu_dma_init_domain()
692 base_pfn != iovad->start_pfn) { in iommu_dma_init_domain()
694 return -EFAULT; in iommu_dma_init_domain()
705 iommu_dma_init_options(&cookie->options, dev); in iommu_dma_init_domain()
708 if (domain->type == IOMMU_DOMAIN_DMA_FQ && in iommu_dma_init_domain()
710 domain->type = IOMMU_DOMAIN_DMA; in iommu_dma_init_domain()
716 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
719 * @coherent: Is the DMA master cache-coherent?
722 * Return: corresponding IOMMU API page protection flags
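Concretely, the translation gives cache-coherent masters IOMMU_CACHE, adds IOMMU_PRIV for DMA_ATTR_PRIVILEGED, and derives read/write permission from the direction. A sketch closely following the upstream helper, reproduced for clarity:

#include <linux/dma-mapping.h>
#include <linux/iommu.h>

static int example_dma_info_to_prot(enum dma_data_direction dir, bool coherent,
				    unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;	/* device reads from memory */
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;	/* device writes to memory */
	default:
		return 0;
	}
}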
747 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_alloc_iova()
748 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_alloc_iova()
751 if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI) { in iommu_dma_alloc_iova()
752 domain->msi_cookie->msi_iova += size; in iommu_dma_alloc_iova()
753 return domain->msi_cookie->msi_iova - size; in iommu_dma_alloc_iova()
759 dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit); in iommu_dma_alloc_iova()
761 if (domain->geometry.force_aperture) in iommu_dma_alloc_iova()
762 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end); in iommu_dma_alloc_iova()
765 * Try to use all the 32-bit PCI addresses first. The original SAC vs. in iommu_dma_alloc_iova()
768 * venture into the 64-bit space until necessary. in iommu_dma_alloc_iova()
772 * some inherent bug in handling >32-bit addresses, or not all the in iommu_dma_alloc_iova()
773 * expected address bits are wired up between the device and the IOMMU. in iommu_dma_alloc_iova()
775 if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) { in iommu_dma_alloc_iova()
781 dev->iommu->pci_32bit_workaround = false; in iommu_dma_alloc_iova()
782 dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit)); in iommu_dma_alloc_iova()
793 struct iova_domain *iovad = &domain->iova_cookie->iovad; in iommu_dma_free_iova()
796 if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI) in iommu_dma_free_iova()
797 domain->msi_cookie->msi_iova -= size; in iommu_dma_free_iova()
798 else if (gather && gather->queued) in iommu_dma_free_iova()
799 queue_iova(domain->iova_cookie, iova_pfn(iovad, iova), in iommu_dma_free_iova()
801 &gather->freelist); in iommu_dma_free_iova()
811 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_unmap()
812 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_unmap()
817 dma_addr -= iova_off; in __iommu_dma_unmap()
820 iotlb_gather.queued = READ_ONCE(cookie->fq_domain); in __iommu_dma_unmap()
834 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_map()
835 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_map()
854 if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) { in __iommu_dma_map()
863 while (count--) in __iommu_dma_free_pages()
882 /* IOMMU can map any pages, so highmem can also be used here */ in __iommu_dma_alloc_pages()
890 * Higher-order allocations are a convenience rather in __iommu_dma_alloc_pages()
892 * falling back to minimum-order allocations. in __iommu_dma_alloc_pages()
913 count -= order_size; in __iommu_dma_alloc_pages()
914 while (order_size--) in __iommu_dma_alloc_pages()
922 * but an IOMMU which supports smaller pages might not map the whole thing.
928 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_alloc_noncontiguous()
929 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_alloc_noncontiguous()
932 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; in __iommu_dma_alloc_noncontiguous()
941 min_size = alloc_sizes & -alloc_sizes; in __iommu_dma_alloc_noncontiguous()
958 iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev); in __iommu_dma_alloc_noncontiguous()
963 * Remove the zone/policy flags from the GFP - these are applied to the in __iommu_dma_alloc_noncontiguous()
976 for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) in __iommu_dma_alloc_noncontiguous()
977 arch_dma_prep_coherent(sg_page(sg), sg->length); in __iommu_dma_alloc_noncontiguous()
980 ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot, in __iommu_dma_alloc_noncontiguous()
985 sgt->sgl->dma_address = iova; in __iommu_dma_alloc_noncontiguous()
986 sgt->sgl->dma_length = size; in __iommu_dma_alloc_noncontiguous()
1009 *dma_handle = sgt.sgl->dma_address; in iommu_dma_alloc_remap()
1027 * the DMA-API internal vmapping and freeing easier we stash away the page
1029 * e.g. when a vmap-variant that takes a scatterlist comes along.
1047 sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp, attrs); in iommu_dma_alloc_noncontiguous()
1048 if (!sh->pages) { in iommu_dma_alloc_noncontiguous()
1052 return &sh->sgt; in iommu_dma_alloc_noncontiguous()
1060 __iommu_dma_unmap(dev, sgt->sgl->dma_address, size); in iommu_dma_free_noncontiguous()
1061 __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT); in iommu_dma_free_noncontiguous()
1062 sg_free_table(&sh->sgt); in iommu_dma_free_noncontiguous()
1071 return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL); in iommu_dma_vmap_noncontiguous()
1079 if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff) in iommu_dma_mmap_noncontiguous()
1080 return -ENXIO; in iommu_dma_mmap_noncontiguous()
1081 return vm_map_pages(vma, sgt_handle(sgt)->pages, count); in iommu_dma_mmap_noncontiguous()
1123 sg->length, dir); in iommu_dma_sync_sg_for_cpu()
1126 arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_cpu()
1139 sg->length, dir); in iommu_dma_sync_sg_for_device()
1142 arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); in iommu_dma_sync_sg_for_device()
1149 struct iova_domain *iovad = &domain->iova_cookie->iovad; in iommu_dma_map_swiotlb()
1152 dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n"); in iommu_dma_map_swiotlb()
1163 * kernel data, so zero the pre- and post-padding. in iommu_dma_map_swiotlb()
1170 /* Pre-padding */ in iommu_dma_map_swiotlb()
1172 memset((void *)start, 0, virt - start); in iommu_dma_map_swiotlb()
1174 /* Post-padding */ in iommu_dma_map_swiotlb()
1176 memset((void *)start, 0, iova_align(iovad, start) - start); in iommu_dma_map_swiotlb()
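As a worked example, assume a 4 KiB IOMMU granule: bouncing a 100-byte buffer that sits 0x234 bytes into a granule copies it into a granule-aligned swiotlb slot at the same intra-granule offset, then zeroes the 0x234 (564) bytes of pre-padding and the 4096 - 564 - 100 = 3432 bytes of post-padding so the untrusted device never sees stale slot contents.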
1184 * the IOMMU granule. Returns non-zero if either the start or end
1201 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_map_page()
1202 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_map_page()
1244 * Prepare a successfully-mapped scatterlist to give back to the caller.
1263 unsigned int s_iova_len = s->length; in __finalise_sg()
1281 s->offset += s_iova_off; in __finalise_sg()
1282 s->length = s_length; in __finalise_sg()
1286 * - there is a valid output segment to append to in __finalise_sg()
1287 * - and this segment starts on an IOVA page boundary in __finalise_sg()
1288 * - but doesn't fall at a segment boundary in __finalise_sg()
1289 * - and wouldn't make the resulting output segment too long in __finalise_sg()
1292 (max_len - cur_len >= s_length)) { in __finalise_sg()
1328 s->offset += sg_dma_address(s); in __invalidate_sg()
1330 s->length = sg_dma_len(s); in __invalidate_sg()
1358 s->offset, s->length, dir, attrs); in iommu_dma_map_sg_swiotlb()
1361 sg_dma_len(s) = s->length; in iommu_dma_map_sg_swiotlb()
1368 return -EIO; in iommu_dma_map_sg_swiotlb()
1373 * any old buffer layout, but the IOMMU API requires everything to be
1374 * aligned to IOMMU pages. Hence the need for this complicated bit of
1375 * impedance-matching, to be able to hand off a suitably-aligned list,
1382 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_map_sg()
1383 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_map_sg()
1389 unsigned long mask = dma_get_seg_boundary(dev); in iommu_dma_map_sg() local
1407 * IOVA granules for the IOMMU driver to handle. With some clever in iommu_dma_map_sg()
1408 * trickery we can modify the list in-place, but reversibly, by in iommu_dma_map_sg()
1409 * stashing the unaligned parts in the as-yet-unused DMA fields. in iommu_dma_map_sg()
1412 size_t s_iova_off = iova_offset(iovad, s->offset); in iommu_dma_map_sg()
1413 size_t s_length = s->length; in iommu_dma_map_sg()
1414 size_t pad_len = (mask - iova_len + 1) & mask; in iommu_dma_map_sg()
1432 s->dma_address = pci_p2pdma_bus_addr_map(&p2pdma_state, in iommu_dma_map_sg()
1434 sg_dma_len(s) = sg->length; in iommu_dma_map_sg()
1438 ret = -EREMOTEIO; in iommu_dma_map_sg()
1444 s->offset -= s_iova_off; in iommu_dma_map_sg()
1446 s->length = s_length; in iommu_dma_map_sg()
1450 * depend on these assumptions about the segment boundary mask: in iommu_dma_map_sg()
1451 * - If mask size >= IOVA size, then the IOVA range cannot in iommu_dma_map_sg()
1453 * - If mask size < IOVA size, then the IOVA range must start in iommu_dma_map_sg()
1457 * - The mask must be a power of 2, so pad_len == 0 if in iommu_dma_map_sg()
1461 if (pad_len && pad_len < s_length - 1) { in iommu_dma_map_sg()
1462 prev->length += pad_len; in iommu_dma_map_sg()
1475 ret = -ENOMEM; in iommu_dma_map_sg()
1480 * We'll leave any physical concatenation to the IOMMU driver's in iommu_dma_map_sg()
1481 * implementation - it knows better than we do. in iommu_dma_map_sg()
1494 if (ret != -ENOMEM && ret != -EREMOTEIO) in iommu_dma_map_sg()
1495 return -EINVAL; in iommu_dma_map_sg()
1532 nents -= i; in iommu_dma_unmap_sg()
1546 __iommu_dma_unmap(dev, start, end - start); in iommu_dma_unmap_sg()
1569 /* Non-coherent atomic allocation? Easy */ in __iommu_dma_free()
1576 * If the address is remapped, then it's either non-coherent in __iommu_dma_free()
1663 dev->coherent_dma_mask); in iommu_dma_alloc()
1677 unsigned long pfn, off = vma->vm_pgoff; in iommu_dma_mmap()
1680 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in iommu_dma_mmap()
1685 if (off >= nr_pages || vma_pages(vma) > nr_pages - off) in iommu_dma_mmap()
1686 return -ENXIO; in iommu_dma_mmap()
1698 return remap_pfn_range(vma, vma->vm_start, pfn + off, in iommu_dma_mmap()
1699 vma->vm_end - vma->vm_start, in iommu_dma_mmap()
1700 vma->vm_page_prot); in iommu_dma_mmap()
1726 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in iommu_dma_get_sgtable()
1734 return (1UL << __ffs(domain->pgsize_bitmap)) - 1; in iommu_dma_get_merge_boundary()
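For example, with a minimum supported IOMMU page size of 4 KiB (bit 12 is the lowest set bit of pgsize_bitmap), the helper returns 0xfff: physically discontiguous but 4 KiB-aligned chunks may be merged into a single DMA segment, since the IOMMU can still map them contiguously in IOVA space.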
1751 * dma_iova_try_alloc - Try to allocate an IOVA space
1757 * Check if @dev supports the IOVA-based DMA API, and if so allocate IOVA space
1763 * Returns %true if the IOVA-based DMA API can be used and IOVA space has been
1780 cookie = domain->iova_cookie; in dma_iova_try_alloc()
1781 iovad = &cookie->iovad; in dma_iova_try_alloc()
1792 * DMA_IOVA_USE_SWIOTLB is a flag which is set by dma-iommu in dma_iova_try_alloc()
1794 * didn't use this interface to map SIZE_MAX. in dma_iova_try_alloc()
1805 state->addr = addr + iova_off; in dma_iova_try_alloc()
1806 state->__size = size; in dma_iova_try_alloc()
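dma_iova_try_alloc() only succeeds for devices behind a DMA-API IOMMU domain, so callers keep the conventional page-based DMA API as a fallback and release the space with dma_iova_free() if nothing ends up linked. A hedged sketch (names and error handling are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_probe_iova(struct device *dev, phys_addr_t phys, size_t size)
{
	struct dma_iova_state state = {};

	if (!dma_iova_try_alloc(dev, &state, phys, size))
		return -EOPNOTSUPP;	/* fall back to dma_map_page() and friends */

	/* ... dma_iova_link() / dma_iova_sync() would follow here ... */

	dma_iova_free(dev, &state);	/* nothing linked yet, so a plain free */
	return 0;
}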
1812 * dma_iova_free - Free an IOVA space
1826 struct iommu_dma_cookie *cookie = domain->iova_cookie; in dma_iova_free()
1827 struct iova_domain *iovad = &cookie->iovad; in dma_iova_free()
1828 size_t iova_start_pad = iova_offset(iovad, state->addr); in dma_iova_free()
1831 iommu_dma_free_iova(domain, state->addr - iova_start_pad, in dma_iova_free()
1855 struct iova_domain *iovad = &domain->iova_cookie->iovad; in iommu_dma_iova_bounce_and_link()
1861 return -ENOMEM; in iommu_dma_iova_bounce_and_link()
1863 error = __dma_iova_link(dev, addr - iova_start_pad, in iommu_dma_iova_bounce_and_link()
1864 bounce_phys - iova_start_pad, in iommu_dma_iova_bounce_and_link()
1877 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_iova_link_swiotlb()
1878 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_iova_link_swiotlb()
1881 dma_addr_t addr = state->addr + offset; in iommu_dma_iova_link_swiotlb()
1886 size_t bounce_len = min(size, iovad->granule - iova_start_pad); in iommu_dma_iova_link_swiotlb()
1892 state->__size |= DMA_IOVA_USE_SWIOTLB; in iommu_dma_iova_link_swiotlb()
1895 size -= bounce_len; in iommu_dma_iova_link_swiotlb()
1900 size -= iova_end_pad; in iommu_dma_iova_link_swiotlb()
1912 state->__size |= DMA_IOVA_USE_SWIOTLB; in iommu_dma_iova_link_swiotlb()
1923 * dma_iova_link - Link a range of IOVA space
1927 * @offset: offset into the IOVA state to map into
1944 struct iommu_dma_cookie *cookie = domain->iova_cookie; in dma_iova_link()
1945 struct iova_domain *iovad = &cookie->iovad; in dma_iova_link()
1949 return -EIO; in dma_iova_link()
1956 return __dma_iova_link(dev, state->addr + offset - iova_start_pad, in dma_iova_link()
1957 phys - iova_start_pad, in dma_iova_link()
1963 * dma_iova_sync - Sync IOTLB
1970 * the IOVA-contiguous range created by one or more dma_iova_link() calls
1977 struct iommu_dma_cookie *cookie = domain->iova_cookie; in dma_iova_sync()
1978 struct iova_domain *iovad = &cookie->iovad; in dma_iova_sync()
1979 dma_addr_t addr = state->addr + offset; in dma_iova_sync()
1982 return iommu_sync_map(domain, addr - iova_start_pad, in dma_iova_sync()
1992 struct iommu_dma_cookie *cookie = domain->iova_cookie; in iommu_dma_iova_unlink_range_slow()
1993 struct iova_domain *iovad = &cookie->iovad; in iommu_dma_iova_unlink_range_slow()
2007 end - addr, iovad->granule - iova_start_pad); in iommu_dma_iova_unlink_range_slow()
2026 struct iommu_dma_cookie *cookie = domain->iova_cookie; in __iommu_dma_iova_unlink()
2027 struct iova_domain *iovad = &cookie->iovad; in __iommu_dma_iova_unlink()
2028 dma_addr_t addr = state->addr + offset; in __iommu_dma_iova_unlink()
2033 if ((state->__size & DMA_IOVA_USE_SWIOTLB) || in __iommu_dma_iova_unlink()
2038 iotlb_gather.queued = free_iova && READ_ONCE(cookie->fq_domain); in __iommu_dma_iova_unlink()
2041 addr -= iova_start_pad; in __iommu_dma_iova_unlink()
2052 * dma_iova_unlink - Unlink a range of IOVA space
2071 * dma_iova_destroy - Finish a DMA mapping transaction
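Taken together, dma_iova_try_alloc(), dma_iova_link(), dma_iova_sync() and dma_iova_destroy() form a two-step mapping transaction: reserve IOVA space once, link one or more physical ranges into it, sync the IOTLB, then tear everything down when the I/O completes. A hedged end-to-end sketch for a single range (error handling abbreviated):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_iova_transaction(struct device *dev, phys_addr_t phys, size_t size)
{
	struct dma_iova_state state = {};
	int ret;

	if (!dma_iova_try_alloc(dev, &state, phys, size))
		return -EOPNOTSUPP;

	ret = dma_iova_link(dev, &state, phys, 0, size, DMA_TO_DEVICE, 0);
	if (ret) {
		dma_iova_free(dev, &state);	/* nothing linked, plain free */
		return ret;
	}

	ret = dma_iova_sync(dev, &state, 0, size);
	if (!ret) {
		/* hand state.addr (the device-visible address) to the hardware ... */
	}

	/* Unlink the mapped length and free the IOVA space in one go. */
	dma_iova_destroy(dev, &state, size, DMA_TO_DEVICE, 0);
	return ret;
}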
2103 dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac; in iommu_setup_dma_ops()
2105 dev->dma_iommu = iommu_is_dma_domain(domain); in iommu_setup_dma_ops()
2106 if (dev->dma_iommu && iommu_dma_init_domain(domain, dev)) in iommu_setup_dma_ops()
2111 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", in iommu_setup_dma_ops()
2113 dev->dma_iommu = false; in iommu_setup_dma_ops()
2118 return domain && (domain->cookie_type == IOMMU_COOKIE_DMA_IOVA || in has_msi_cookie()
2119 domain->cookie_type == IOMMU_COOKIE_DMA_MSI); in has_msi_cookie()
2124 switch (domain->cookie_type) { in cookie_msi_granule()
2126 return domain->iova_cookie->iovad.granule; in cookie_msi_granule()
2136 switch (domain->cookie_type) { in cookie_msi_pages()
2138 return &domain->iova_cookie->msi_page_list; in cookie_msi_pages()
2140 return &domain->msi_cookie->msi_page_list; in cookie_msi_pages()
2155 msi_addr &= ~(phys_addr_t)(size - 1); in iommu_dma_get_msi_page()
2157 if (msi_page->phys == msi_addr) in iommu_dma_get_msi_page()
2171 INIT_LIST_HEAD(&msi_page->list); in iommu_dma_get_msi_page()
2172 msi_page->phys = msi_addr; in iommu_dma_get_msi_page()
2173 msi_page->iova = iova; in iommu_dma_get_msi_page()
2174 list_add(&msi_page->list, msi_page_list); in iommu_dma_get_msi_page()
2198 return -ENOMEM; in iommu_dma_sw_msi()
2200 msi_desc_set_iommu_msi_iova(desc, msi_page->iova, in iommu_dma_sw_msi()