Lines Matching +full:iommu +full:- +full:map +full:- +full:mask

1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/arch/arm/mm/dma-mapping.c
5 * Copyright (C) 2000-2004 Russell King
17 #include <linux/dma-direct.h>
18 #include <linux/dma-map-ops.h>
22 #include <linux/iommu.h>
33 #include <asm/dma-iommu.h>
34 #include <asm/mach/map.h>
36 #include <asm/xen/xen-ops.h>
84 if (buf->virt == virt) { in arm_dma_buffer_find()
85 list_del(&buf->list); in arm_dma_buffer_find()
111 * lurking in the kernel direct-mapped region is invalidated. in __dma_clear_buffer()
123 size -= PAGE_SIZE; in __dma_clear_buffer()
139 * specified gfp mask. Note that 'size' must be page aligned.
207 atomic_pool = gen_pool_create(PAGE_SHIFT, -1); in atomic_pool_init()
211 * The atomic pool is only used for non-coherent allocations in atomic_pool_init()
226 atomic_pool_size, -1); in atomic_pool_init()
244 return -ENOMEM; in atomic_pool_init()
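
The atomic_pool_init() lines above build the pool used for atomic, non-coherent allocations on top of the genalloc API. A minimal sketch of the same pattern, not taken from dma-mapping.c (my_pool, my_pool_init and the backing region are hypothetical):

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/mm.h>

static struct gen_pool *my_pool;	/* illustrative, not the kernel's atomic_pool */

static int my_pool_init(void *vaddr, phys_addr_t phys, size_t size)
{
	int ret;

	/* Page-granular pool on any node (-1), as in atomic_pool_init(). */
	my_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!my_pool)
		return -ENOMEM;

	/* Register the backing region; gen_pool_add_virt() records the
	 * virt/phys pair so gen_pool_virt_to_phys() works later. */
	ret = gen_pool_add_virt(my_pool, (unsigned long)vaddr, phys, size, -1);
	if (ret) {
		gen_pool_destroy(my_pool);
		my_pool = NULL;
	}
	return ret;
}

static void *my_pool_alloc(size_t size, phys_addr_t *phys)
{
	unsigned long va = gen_pool_alloc(my_pool, size);

	if (!va)
		return NULL;
	*phys = gen_pool_virt_to_phys(my_pool, va);
	return (void *)va;
}
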
276 struct map_desc map; in dma_contiguous_remap() local
284 map.pfn = __phys_to_pfn(start); in dma_contiguous_remap()
285 map.virtual = __phys_to_virt(start); in dma_contiguous_remap()
286 map.length = end - start; in dma_contiguous_remap()
287 map.type = MT_MEMORY_DMA_READY; in dma_contiguous_remap()
290 * Clear previous low-memory mapping to ensure that the in dma_contiguous_remap()
305 iotable_init(&map, 1); in dma_contiguous_remap()
336 * non-coherent in __alloc_remap_buffer()
462 return __alloc_simple_buffer(args->dev, args->size, args->gfp, in simple_allocator_alloc()
468 __dma_free_buffer(args->page, args->size); in simple_allocator_free()
479 return __alloc_from_contiguous(args->dev, args->size, args->prot, in cma_allocator_alloc()
480 ret_page, args->caller, in cma_allocator_alloc()
481 args->want_vaddr, args->coherent_flag, in cma_allocator_alloc()
482 args->gfp); in cma_allocator_alloc()
487 __free_from_contiguous(args->dev, args->page, args->cpu_addr, in cma_allocator_free()
488 args->size, args->want_vaddr); in cma_allocator_free()
499 return __alloc_from_pool(args->size, ret_page); in pool_allocator_alloc()
504 __free_from_pool(args->cpu_addr, args->size); in pool_allocator_free()
515 return __alloc_remap_buffer(args->dev, args->size, args->gfp, in remap_allocator_alloc()
516 args->prot, ret_page, args->caller, in remap_allocator_alloc()
517 args->want_vaddr); in remap_allocator_alloc()
522 if (args->want_vaddr) in remap_allocator_free()
523 dma_common_free_remap(args->cpu_addr, args->size); in remap_allocator_free()
525 __dma_free_buffer(args->page, args->size); in remap_allocator_free()
537 u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit); in __dma_alloc() local
553 u64 limit = (mask + 1) & ~mask; in __dma_alloc()
555 dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n", in __dma_alloc()
556 size, mask); in __dma_alloc()
566 if (mask < 0xffffffffULL) in __dma_alloc()
576 buf->allocator = &cma_allocator; in __dma_alloc()
578 buf->allocator = &simple_allocator; in __dma_alloc()
580 buf->allocator = &remap_allocator; in __dma_alloc()
582 buf->allocator = &pool_allocator; in __dma_alloc()
584 addr = buf->allocator->alloc(&args, &page); in __dma_alloc()
590 buf->virt = args.want_vaddr ? addr : page; in __dma_alloc()
593 list_add(&buf->list, &arm_dma_bufs); in __dma_alloc()
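
__dma_alloc() derives an allocation limit from the coherent DMA mask with (mask + 1) & ~mask (file lines 553-556 above): for a mask of the form 2^n - 1 the result is 2^n, the size of the addressable window, and for an all-ones 64-bit mask it is 0, meaning "no limit". A standalone illustration of that arithmetic; the mask values are examples only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t masks[] = { 0x00ffffffULL, 0xffffffffULL, ~0ULL };

	for (int i = 0; i < 3; i++) {
		uint64_t mask = masks[i];
		uint64_t limit = (mask + 1) & ~mask;	/* as in __dma_alloc() */

		printf("mask %#llx -> limit %#llx%s\n",
		       (unsigned long long)mask, (unsigned long long)limit,
		       limit ? "" : " (no limit)");
	}
	return 0;
}
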
623 buf->allocator->free(&args); in __arm_dma_free()
651 len = PAGE_SIZE - offset; in dma_cache_maint_page()
670 left -= len; in dma_cache_maint_page()
677 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
692 /* FIXME: non-speculating: flush on bidirectional mappings? */ in __dma_page_cpu_to_dev()
700 /* FIXME: non-speculating: not required */ in __dma_page_dev_to_cpu()
709 * Mark the D-cache clean for these pages to avoid extra flushing. in __dma_page_dev_to_cpu()
716 size_t sz = folio_size(folio) - offset; in __dma_page_dev_to_cpu()
721 set_bit(PG_dcache_clean, &folio->flags); in __dma_page_dev_to_cpu()
723 size -= sz; in __dma_page_dev_to_cpu()
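
__dma_page_cpu_to_dev() and __dma_page_dev_to_cpu() are the ARM back ends of the dma_sync_* driver interface named in the comment at file line 677. A hedged driver-side sketch of that interface; dev, buf, len and rx_one_buffer are illustrative and not from this file:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int rx_one_buffer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... the device DMAs into the buffer ... */

	/* Hand ownership back to the CPU before touching the data; on ARM
	 * this reaches __dma_page_dev_to_cpu() above. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... CPU reads buf ... */

	/* Return the buffer to the device for the next transfer; this
	 * reaches __dma_page_cpu_to_dev(). */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}
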
752 /* IOMMU */
762 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova()
771 align = (1 << order) - 1; in __alloc_iova()
773 spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova()
774 for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova()
775 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
776 mapping->bits, 0, count, align); in __alloc_iova()
778 if (start > mapping->bits) in __alloc_iova()
781 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
790 if (i == mapping->nr_bitmaps) { in __alloc_iova()
792 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
796 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova()
797 mapping->bits, 0, count, align); in __alloc_iova()
799 if (start > mapping->bits) { in __alloc_iova()
800 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
804 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova()
806 spin_unlock_irqrestore(&mapping->lock, flags); in __alloc_iova()
808 iova = mapping->base + (mapping_size * i); in __alloc_iova()
818 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __free_iova()
826 bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; in __free_iova()
827 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); in __free_iova()
829 bitmap_base = mapping->base + mapping_size * bitmap_index; in __free_iova()
831 start = (addr - bitmap_base) >> PAGE_SHIFT; in __free_iova()
844 spin_lock_irqsave(&mapping->lock, flags); in __free_iova()
845 bitmap_clear(mapping->bitmaps[bitmap_index], start, count); in __free_iova()
846 spin_unlock_irqrestore(&mapping->lock, flags); in __free_iova()
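
__alloc_iova() and __free_iova() manage the IO address space as an array of per-extension bitmaps, each covering mapping->bits pages. A standalone illustration of the index arithmetic used at file lines 808 and 826-831; the base, extension index and allocated bit are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t base = 0x80000000u;		/* hypothetical mapping->base      */
	uint32_t bits = 32768;			/* pages per bitmap = 128 MiB each */
	uint32_t mapping_size = bits << PAGE_SHIFT;

	/* Forward direction (__alloc_iova): extension i, bit 'start'. */
	uint32_t i = 2, start = 10;
	uint32_t iova = base + mapping_size * i + (start << PAGE_SHIFT);

	/* Reverse direction (__free_iova): recover extension and bit. */
	uint32_t bitmap_index = (iova - base) / mapping_size;
	uint32_t bitmap_base = base + mapping_size * bitmap_index;
	uint32_t bit = (iova - bitmap_base) >> PAGE_SHIFT;

	printf("iova %#x -> extension %u, bit %u\n",
	       (unsigned)iova, (unsigned)bitmap_index, (unsigned)bit);
	return 0;
}
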
886 order_idx = ARRAY_SIZE(iommu_order_array) - 1; in __iommu_alloc_buffer()
889 * IOMMU can map any pages, so himem can also be used here in __iommu_alloc_buffer()
905 /* See if it's easy to allocate a high-order chunk */ in __iommu_alloc_buffer()
922 while (--j) in __iommu_alloc_buffer()
928 count -= 1 << order; in __iommu_alloc_buffer()
933 while (i--) in __iommu_alloc_buffer()
986 len = (j - i) << PAGE_SHIFT; in __iommu_create_mapping()
987 ret = iommu_map(mapping->domain, iova, phys, len, in __iommu_create_mapping()
997 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); in __iommu_create_mapping()
1007 * add optional in-page offset from iova to size and align in __iommu_remove_mapping()
1013 iommu_unmap(mapping->domain, iova, size); in __iommu_remove_mapping()
1081 int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL; in arm_iommu_alloc_attrs()
1124 return -ENXIO; in arm_iommu_mmap_attrs()
1126 if (vma->vm_pgoff >= nr_pages) in arm_iommu_mmap_attrs()
1127 return -ENXIO; in arm_iommu_mmap_attrs()
1129 if (!dev->dma_coherent) in arm_iommu_mmap_attrs()
1130 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); in arm_iommu_mmap_attrs()
1146 int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL; in arm_iommu_free_attrs()
1176 return -ENXIO; in arm_iommu_get_sgtable()
1183 * Map a part of the scatter-gather list into contiguous io address space
1201 return -ENOMEM; in __map_sg_chunk()
1205 unsigned int len = PAGE_ALIGN(s->offset + s->length); in __map_sg_chunk()
1207 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) in __map_sg_chunk()
1208 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); in __map_sg_chunk()
1212 ret = iommu_map(mapping->domain, iova, phys, len, prot, in __map_sg_chunk()
1223 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); in __map_sg_chunk()
1229 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
1232 * @nents: number of buffers to map
1235 * Map a set of buffers described by scatterlist in streaming mode for DMA.
1245 unsigned int offset = s->offset; in arm_iommu_map_sg()
1246 unsigned int size = s->offset + s->length; in arm_iommu_map_sg()
1252 s->dma_length = 0; in arm_iommu_map_sg()
1254 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { in arm_iommu_map_sg()
1256 &dma->dma_address, dir, attrs); in arm_iommu_map_sg()
1260 dma->dma_address += offset; in arm_iommu_map_sg()
1261 dma->dma_length = size - offset; in arm_iommu_map_sg()
1263 size = offset = s->offset; in arm_iommu_map_sg()
1268 size += s->length; in arm_iommu_map_sg()
1270 ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs); in arm_iommu_map_sg()
1274 dma->dma_address += offset; in arm_iommu_map_sg()
1275 dma->dma_length = size - offset; in arm_iommu_map_sg()
1282 if (ret == -ENOMEM) in arm_iommu_map_sg()
1284 return -EINVAL; in arm_iommu_map_sg()
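
arm_iommu_map_sg() is the ARM IOMMU back end of dma_map_sg() and tries to fold page-aligned segments into a single IO-contiguous run. A hedged driver-side sketch of the generic interface it serves; dev, pages, npages and map_pages_for_device are illustrative:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int map_pages_for_device(struct device *dev, struct page **pages,
				int npages, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int i, nents;

	sg_init_table(sgl, npages);
	for_each_sg(sgl, sg, npages, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* With a dma_iommu_mapping attached, this ends up in
	 * arm_iommu_map_sg(), which may merge the entries into one
	 * contiguous IOVA range. */
	nents = dma_map_sg(dev, sgl, npages, DMA_TO_DEVICE);
	if (nents == 0)
		return -ENOMEM;

	/* ... program the device with sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(dev, sgl, npages, DMA_TO_DEVICE);
	return 0;
}
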
1288 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1309 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) in arm_iommu_unmap_sg()
1310 __dma_page_dev_to_cpu(sg_page(s), s->offset, in arm_iommu_unmap_sg()
1311 s->length, dir); in arm_iommu_unmap_sg()
1319 * @nents: number of buffers to map (returned from dma_map_sg)
1329 if (dev->dma_coherent) in arm_iommu_sync_sg_for_cpu()
1333 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); in arm_iommu_sync_sg_for_cpu()
1341 * @nents: number of buffers to map (returned from dma_map_sg)
1351 if (dev->dma_coherent) in arm_iommu_sync_sg_for_device()
1355 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); in arm_iommu_sync_sg_for_device()
1363 * @size: size of buffer to map
1366 * IOMMU aware version of arm_dma_map_page()
1376 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) in arm_iommu_map_page()
1385 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, in arm_iommu_map_page()
1403 * IOMMU aware version of arm_dma_unmap_page()
1417 if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { in arm_iommu_unmap_page()
1418 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_unmap_page()
1422 iommu_unmap(mapping->domain, iova, len); in arm_iommu_unmap_page()
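
arm_iommu_map_page() skips the CPU cache maintenance when the caller passes DMA_ATTR_SKIP_CPU_SYNC (file line 1376 above). A hedged sketch of the corresponding driver call; map_one_page and its parameters are hypothetical:

#include <linux/dma-mapping.h>

static dma_addr_t map_one_page(struct device *dev, struct page *page)
{
	dma_addr_t handle;

	/* Skip the implicit __dma_page_cpu_to_dev(); only safe if the
	 * driver issues dma_sync_single_for_device() itself before the
	 * hardware touches the page. */
	handle = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE,
				    DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, handle))
		return DMA_MAPPING_ERROR;

	return handle;	/* release later with dma_unmap_page_attrs() */
}
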
1427 * arm_iommu_map_resource - map a device resource for DMA
1430 * @size: size of resource to map
1450 ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL); in arm_iommu_map_resource()
1461 * arm_iommu_unmap_resource - unmap a device DMA resource
1464 * @size: size of resource to map
1479 iommu_unmap(mapping->domain, iova, len); in arm_iommu_unmap_resource()
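
arm_iommu_map_resource() backs dma_map_resource(), which makes a physical MMIO window (for example a peer device's FIFO) reachable through the IOMMU without any cache maintenance. A hedged sketch; map_peer_fifo, dma_dev, fifo_phys and fifo_size are illustrative:

#include <linux/dma-mapping.h>

static dma_addr_t map_peer_fifo(struct device *dma_dev, phys_addr_t fifo_phys,
				size_t fifo_size)
{
	dma_addr_t addr;

	addr = dma_map_resource(dma_dev, fifo_phys, fifo_size,
				DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dma_dev, addr))
		return DMA_MAPPING_ERROR;

	return addr;	/* undo later with dma_unmap_resource() */
}
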
1491 if (dev->dma_coherent || !iova) in arm_iommu_sync_single_for_cpu()
1494 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_cpu()
1506 if (dev->dma_coherent || !iova) in arm_iommu_sync_single_for_device()
1509 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_device()
1535 * @dev: pointer to the client device (for IOMMU calls)
1541 * mapping with IOMMU aware functions.
1553 int err = -ENOMEM; in arm_iommu_create_mapping()
1555 /* currently only 32-bit DMA address space is supported */ in arm_iommu_create_mapping()
1557 return ERR_PTR(-ERANGE); in arm_iommu_create_mapping()
1560 return ERR_PTR(-EINVAL); in arm_iommu_create_mapping()
1571 mapping->bitmap_size = bitmap_size; in arm_iommu_create_mapping()
1572 mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), in arm_iommu_create_mapping()
1574 if (!mapping->bitmaps) in arm_iommu_create_mapping()
1577 mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); in arm_iommu_create_mapping()
1578 if (!mapping->bitmaps[0]) in arm_iommu_create_mapping()
1581 mapping->nr_bitmaps = 1; in arm_iommu_create_mapping()
1582 mapping->extensions = extensions; in arm_iommu_create_mapping()
1583 mapping->base = base; in arm_iommu_create_mapping()
1584 mapping->bits = BITS_PER_BYTE * bitmap_size; in arm_iommu_create_mapping()
1586 spin_lock_init(&mapping->lock); in arm_iommu_create_mapping()
1588 mapping->domain = iommu_paging_domain_alloc(dev); in arm_iommu_create_mapping()
1589 if (IS_ERR(mapping->domain)) { in arm_iommu_create_mapping()
1590 err = PTR_ERR(mapping->domain); in arm_iommu_create_mapping()
1594 kref_init(&mapping->kref); in arm_iommu_create_mapping()
1597 kfree(mapping->bitmaps[0]); in arm_iommu_create_mapping()
1599 kfree(mapping->bitmaps); in arm_iommu_create_mapping()
1613 iommu_domain_free(mapping->domain); in release_iommu_mapping()
1614 for (i = 0; i < mapping->nr_bitmaps; i++) in release_iommu_mapping()
1615 kfree(mapping->bitmaps[i]); in release_iommu_mapping()
1616 kfree(mapping->bitmaps); in release_iommu_mapping()
1624 if (mapping->nr_bitmaps >= mapping->extensions) in extend_iommu_mapping()
1625 return -EINVAL; in extend_iommu_mapping()
1627 next_bitmap = mapping->nr_bitmaps; in extend_iommu_mapping()
1628 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, in extend_iommu_mapping()
1630 if (!mapping->bitmaps[next_bitmap]) in extend_iommu_mapping()
1631 return -ENOMEM; in extend_iommu_mapping()
1633 mapping->nr_bitmaps++; in extend_iommu_mapping()
1641 kref_put(&mapping->kref, release_iommu_mapping); in arm_iommu_release_mapping()
1650 err = iommu_attach_device(mapping->domain, dev); in __arm_iommu_attach_device()
1654 kref_get(&mapping->kref); in __arm_iommu_attach_device()
1657 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); in __arm_iommu_attach_device()
1669 * IOMMU aware version.
1692 * Detaches the provided device from a previously attached map.
1693 * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
1705 iommu_detach_device(mapping->domain, dev); in arm_iommu_detach_device()
1706 kref_put(&mapping->kref, release_iommu_mapping); in arm_iommu_detach_device()
1710 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); in arm_iommu_detach_device()
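
arm_iommu_create_mapping(), arm_iommu_attach_device(), arm_iommu_detach_device() and arm_iommu_release_mapping() form the lifecycle documented above for giving one DMA master a private 32-bit IOVA window. A hedged sketch of that lifecycle, assuming the asm/dma-iommu.h declarations in this kernel version (where the create call takes the client device, see @dev above); the base, size and helper names are illustrative:

#include <linux/err.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static struct dma_iommu_mapping *my_mapping;	/* illustrative */

static int my_master_enable_iommu(struct device *dev)
{
	int err;

	/* 64 MiB of IOVA space starting at 0x10000000 (example values). */
	my_mapping = arm_iommu_create_mapping(dev, 0x10000000, SZ_64M);
	if (IS_ERR(my_mapping))
		return PTR_ERR(my_mapping);

	err = arm_iommu_attach_device(dev, my_mapping);
	if (err) {
		arm_iommu_release_mapping(my_mapping);
		return err;
	}
	return 0;	/* dma_map_*() on 'dev' now goes through the IOMMU */
}

static void my_master_disable_iommu(struct device *dev)
{
	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(my_mapping);
}
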
1719 if (dev->dma_range_map) { in arm_setup_iommu_dma_ops()
1720 dma_base = dma_range_map_min(dev->dma_range_map); in arm_setup_iommu_dma_ops()
1721 size = dma_range_map_max(dev->dma_range_map) - dma_base; in arm_setup_iommu_dma_ops()
1725 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", in arm_setup_iommu_dma_ops()
1764 * Due to legacy code that sets the ->dma_coherent flag from a bus in arch_setup_dma_ops()
1765 * notifier we can't just assign coherent to the ->dma_coherent flag in arch_setup_dma_ops()
1770 dev->dma_coherent = true; in arch_setup_dma_ops()
1777 if (dev->dma_ops) in arch_setup_dma_ops()
1784 dev->archdata.dma_ops_setup = true; in arch_setup_dma_ops()
1789 if (!dev->archdata.dma_ops_setup) in arch_teardown_dma_ops()
1793 /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ in arch_teardown_dma_ops()
1800 __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), in arch_sync_dma_for_device()
1807 __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1), in arch_sync_dma_for_cpu()