Lines matching full:size
43 size_t size; member
53 size_t size; member
107 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) in __dma_clear_buffer() argument
115 phys_addr_t end = base + size; in __dma_clear_buffer()
116 while (size > 0) { in __dma_clear_buffer()
123 size -= PAGE_SIZE; in __dma_clear_buffer()
129 memset(ptr, 0, size); in __dma_clear_buffer()
131 dmac_flush_range(ptr, ptr + size); in __dma_clear_buffer()
132 outer_flush_range(__pa(ptr), __pa(ptr) + size); in __dma_clear_buffer()
138 * Allocate a DMA buffer for 'dev' of size 'size' using the
139 * specified gfp mask. Note that 'size' must be page aligned.
141 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, in __dma_alloc_buffer() argument
144 unsigned long order = get_order(size); in __dma_alloc_buffer()
155 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) in __dma_alloc_buffer()
158 __dma_clear_buffer(page, size, coherent_flag); in __dma_alloc_buffer()
164 * Free a DMA buffer. 'size' must be page aligned.
166 static void __dma_free_buffer(struct page *page, size_t size) in __dma_free_buffer() argument
168 struct page *e = page + (size >> PAGE_SHIFT); in __dma_free_buffer()
176 static void *__alloc_from_contiguous(struct device *dev, size_t size,
181 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
254 unsigned long size; member
262 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) in dma_contiguous_early_fixup() argument
265 dma_mmu_remap[dma_mmu_remap_num].size = size; in dma_contiguous_early_fixup()
275 phys_addr_t end = start + dma_mmu_remap[i].size; in dma_contiguous_remap()
319 static void __dma_remap(struct page *page, size_t size, pgprot_t prot) in __dma_remap() argument
322 unsigned end = start + size; in __dma_remap()
324 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); in __dma_remap()
328 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, in __alloc_remap_buffer() argument
338 page = __dma_alloc_buffer(dev, size, gfp, NORMAL); in __alloc_remap_buffer()
344 ptr = dma_common_contiguous_remap(page, size, prot, caller); in __alloc_remap_buffer()
346 __dma_free_buffer(page, size); in __alloc_remap_buffer()
355 static void *__alloc_from_pool(size_t size, struct page **ret_page) in __alloc_from_pool() argument
365 val = gen_pool_alloc(atomic_pool, size); in __alloc_from_pool()
376 static bool __in_atomic_pool(void *start, size_t size) in __in_atomic_pool() argument
378 return gen_pool_has_addr(atomic_pool, (unsigned long)start, size); in __in_atomic_pool()
381 static int __free_from_pool(void *start, size_t size) in __free_from_pool() argument
383 if (!__in_atomic_pool(start, size)) in __free_from_pool()
386 gen_pool_free(atomic_pool, (unsigned long)start, size); in __free_from_pool()
391 static void *__alloc_from_contiguous(struct device *dev, size_t size, in __alloc_from_contiguous() argument
396 unsigned long order = get_order(size); in __alloc_from_contiguous()
397 size_t count = size >> PAGE_SHIFT; in __alloc_from_contiguous()
405 __dma_clear_buffer(page, size, coherent_flag); in __alloc_from_contiguous()
411 ptr = dma_common_contiguous_remap(page, size, prot, caller); in __alloc_from_contiguous()
417 __dma_remap(page, size, prot); in __alloc_from_contiguous()
427 void *cpu_addr, size_t size, bool want_vaddr) in __free_from_contiguous() argument
431 dma_common_free_remap(cpu_addr, size); in __free_from_contiguous()
433 __dma_remap(page, size, PAGE_KERNEL); in __free_from_contiguous()
435 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); in __free_from_contiguous()
446 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, in __alloc_simple_buffer() argument
451 page = __dma_alloc_buffer(dev, size, gfp, COHERENT); in __alloc_simple_buffer()
462 return __alloc_simple_buffer(args->dev, args->size, args->gfp, in simple_allocator_alloc()
468 __dma_free_buffer(args->page, args->size); in simple_allocator_free()
479 return __alloc_from_contiguous(args->dev, args->size, args->prot, in cma_allocator_alloc()
488 args->size, args->want_vaddr); in cma_allocator_free()
499 return __alloc_from_pool(args->size, ret_page); in pool_allocator_alloc()
504 __free_from_pool(args->cpu_addr, args->size); in pool_allocator_free()
515 return __alloc_remap_buffer(args->dev, args->size, args->gfp, in remap_allocator_alloc()
523 dma_common_free_remap(args->cpu_addr, args->size); in remap_allocator_free()
525 __dma_free_buffer(args->page, args->size); in remap_allocator_free()
533 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, in __dma_alloc() argument
544 .size = PAGE_ALIGN(size), in __dma_alloc()
554 if (limit && size >= limit) { in __dma_alloc()
556 size, mask); in __dma_alloc()
605 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, in __arm_dma_free() argument
613 .size = PAGE_ALIGN(size), in __arm_dma_free()
628 size_t size, enum dma_data_direction dir, in dma_cache_maint_page() argument
632 size_t left = size; in dma_cache_maint_page()
680 size_t size, enum dma_data_direction dir) in __dma_page_cpu_to_dev() argument
684 dma_cache_maint_page(page, off, size, dir, dmac_map_area); in __dma_page_cpu_to_dev()
688 outer_inv_range(paddr, paddr + size); in __dma_page_cpu_to_dev()
690 outer_clean_range(paddr, paddr + size); in __dma_page_cpu_to_dev()
696 size_t size, enum dma_data_direction dir) in __dma_page_dev_to_cpu() argument
703 outer_inv_range(paddr, paddr + size); in __dma_page_dev_to_cpu()
705 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); in __dma_page_dev_to_cpu()
711 if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) { in __dma_page_dev_to_cpu()
718 if (size < sz) in __dma_page_dev_to_cpu()
723 size -= sz; in __dma_page_dev_to_cpu()
724 if (!size) in __dma_page_dev_to_cpu()
757 size_t size) in __alloc_iova() argument
759 unsigned int order = get_order(size); in __alloc_iova()
770 count = PAGE_ALIGN(size) >> PAGE_SHIFT; in __alloc_iova()
788 * address range of size bytes. in __alloc_iova()
815 dma_addr_t addr, size_t size) in __free_iova() argument
823 if (!size) in __free_iova()
833 if (addr + size > bitmap_base + mapping_size) { in __free_iova()
842 count = size >> PAGE_SHIFT; in __free_iova()
852 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, in __iommu_alloc_buffer() argument
857 int count = size >> PAGE_SHIFT; in __iommu_alloc_buffer()
868 unsigned long order = get_order(size); in __iommu_alloc_buffer()
876 __dma_clear_buffer(page, size, coherent_flag); in __iommu_alloc_buffer()
941 size_t size, unsigned long attrs) in __iommu_free_buffer() argument
943 int count = size >> PAGE_SHIFT; in __iommu_free_buffer()
962 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, in __iommu_create_mapping() argument
966 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in __iommu_create_mapping()
970 dma_addr = __alloc_iova(mapping, size); in __iommu_create_mapping()
998 __free_iova(mapping, dma_addr, size); in __iommu_create_mapping()
1002 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) in __iommu_remove_mapping() argument
1007 * add optional in-page offset from iova to size and align in __iommu_remove_mapping()
1008 * result to page size in __iommu_remove_mapping()
1010 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); in __iommu_remove_mapping()
1013 iommu_unmap(mapping->domain, iova, size); in __iommu_remove_mapping()
1014 __free_iova(mapping, iova, size); in __iommu_remove_mapping()
1040 static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, in __iommu_alloc_simple() argument
1048 addr = __alloc_simple_buffer(dev, size, gfp, &page); in __iommu_alloc_simple()
1050 addr = __alloc_from_pool(size, &page); in __iommu_alloc_simple()
1054 *handle = __iommu_create_mapping(dev, &page, size, attrs); in __iommu_alloc_simple()
1061 __free_from_pool(addr, size); in __iommu_alloc_simple()
1066 dma_addr_t handle, size_t size, int coherent_flag) in __iommu_free_atomic() argument
1068 __iommu_remove_mapping(dev, handle, size); in __iommu_free_atomic()
1070 __dma_free_buffer(virt_to_page(cpu_addr), size); in __iommu_free_atomic()
1072 __free_from_pool(cpu_addr, size); in __iommu_free_atomic()
1075 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, in arm_iommu_alloc_attrs() argument
1084 size = PAGE_ALIGN(size); in arm_iommu_alloc_attrs()
1087 return __iommu_alloc_simple(dev, size, gfp, handle, in arm_iommu_alloc_attrs()
1090 pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag); in arm_iommu_alloc_attrs()
1094 *handle = __iommu_create_mapping(dev, pages, size, attrs); in arm_iommu_alloc_attrs()
1101 addr = dma_common_pages_remap(pages, size, prot, in arm_iommu_alloc_attrs()
1109 __iommu_remove_mapping(dev, *handle, size); in arm_iommu_alloc_attrs()
1111 __iommu_free_buffer(dev, pages, size, attrs); in arm_iommu_alloc_attrs()
1116 void *cpu_addr, dma_addr_t dma_addr, size_t size, in arm_iommu_mmap_attrs() argument
1120 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in arm_iommu_mmap_attrs()
1143 static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, in arm_iommu_free_attrs() argument
1148 size = PAGE_ALIGN(size); in arm_iommu_free_attrs()
1150 if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) { in arm_iommu_free_attrs()
1151 __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag); in arm_iommu_free_attrs()
1162 dma_common_free_remap(cpu_addr, size); in arm_iommu_free_attrs()
1164 __iommu_remove_mapping(dev, handle, size); in arm_iommu_free_attrs()
1165 __iommu_free_buffer(dev, pages, size, attrs); in arm_iommu_free_attrs()
1170 size_t size, unsigned long attrs) in arm_iommu_get_sgtable() argument
1172 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in arm_iommu_get_sgtable()
1178 return sg_alloc_table_from_pages(sgt, pages, count, 0, size, in arm_iommu_get_sgtable()
1186 size_t size, dma_addr_t *handle, in __map_sg_chunk() argument
1196 size = PAGE_ALIGN(size); in __map_sg_chunk()
1199 iova_base = iova = __alloc_iova(mapping, size); in __map_sg_chunk()
1203 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { in __map_sg_chunk()
1224 __free_iova(mapping, iova_base, size); in __map_sg_chunk()
1246 unsigned int size = s->offset + s->length; in arm_iommu_map_sg() local
1254 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { in arm_iommu_map_sg()
1255 ret = __map_sg_chunk(dev, start, size, in arm_iommu_map_sg()
1261 dma->dma_length = size - offset; in arm_iommu_map_sg()
1263 size = offset = s->offset; in arm_iommu_map_sg()
1268 size += s->length; in arm_iommu_map_sg()
1270 ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs); in arm_iommu_map_sg()
1275 dma->dma_length = size - offset; in arm_iommu_map_sg()
1363 * @size: size of buffer to map
1369 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_iommu_map_page() argument
1374 int ret, prot, len = PAGE_ALIGN(size + offset); in arm_iommu_map_page()
1377 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_map_page()
1400 * @size: size of buffer (same as passed to dma_map_page)
1406 size_t size, enum dma_data_direction dir, unsigned long attrs) in arm_iommu_unmap_page() argument
1412 int len = PAGE_ALIGN(size + offset); in arm_iommu_unmap_page()
1419 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_unmap_page()
1430 * @size: size of resource to map
1434 phys_addr_t phys_addr, size_t size, in arm_iommu_map_resource() argument
1442 size_t len = PAGE_ALIGN(size + offset); in arm_iommu_map_resource()
1464 * @size: size of resource to map
1468 size_t size, enum dma_data_direction dir, in arm_iommu_unmap_resource() argument
1474 size_t len = PAGE_ALIGN(size + offset); in arm_iommu_unmap_resource()
1484 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_iommu_sync_single_for_cpu() argument
1495 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_sync_single_for_cpu()
1499 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_iommu_sync_single_for_device() argument
1510 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_sync_single_for_device()
1537 * @size: maximum size of the valid IO address space
1547 arm_iommu_create_mapping(struct device *dev, dma_addr_t base, u64 size) in arm_iommu_create_mapping() argument
1549 unsigned int bits = size >> PAGE_SHIFT; in arm_iommu_create_mapping()
1556 if (size > DMA_BIT_MASK(32) + 1) in arm_iommu_create_mapping()
1717 u64 dma_base = 0, size = 1ULL << 32; in arm_setup_iommu_dma_ops() local
1721 size = dma_range_map_max(dev->dma_range_map) - dma_base; in arm_setup_iommu_dma_ops()
1723 mapping = arm_iommu_create_mapping(dev, dma_base, size); in arm_setup_iommu_dma_ops()
1726 size, dev_name(dev)); in arm_setup_iommu_dma_ops()
1797 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, in arch_sync_dma_for_device() argument
1801 size, dir); in arch_sync_dma_for_device()
1804 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, in arch_sync_dma_for_cpu() argument
1808 size, dir); in arch_sync_dma_for_cpu()
1811 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, in arch_dma_alloc() argument
1814 return __dma_alloc(dev, size, dma_handle, gfp, in arch_dma_alloc()
1819 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr, in arch_dma_free() argument
1822 __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false); in arch_dma_free()
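
The matches above all come from the ARM DMA-mapping code (the function names suggest arch/arm/mm/dma-mapping.c). One pattern recurs throughout the listing: a byte count is rounded up with PAGE_ALIGN(), turned into a page count with size >> PAGE_SHIFT, and turned into a buddy-allocator order with get_order(); __iommu_remove_mapping() additionally folds the IOVA's in-page offset into the size before aligning, so the unmap covers every touched page. The stand-alone sketch below reproduces that arithmetic in user space; the PAGE_* macros and get_order() here are simplified stand-ins for the kernel versions, assuming 4 KiB pages, and the input values are made up for illustration.

/* size_demo.c: user-space sketch of the size arithmetic seen above.
 * PAGE_SHIFT, PAGE_SIZE, PAGE_MASK, PAGE_ALIGN and get_order() are
 * simplified stand-ins for the kernel macros, assuming 4 KiB pages. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Smallest order such that (PAGE_SIZE << order) >= size, mirroring the
 * kernel get_order() used by __dma_alloc_buffer() and __alloc_iova(). */
static unsigned int get_order(size_t size)
{
    unsigned int order = 0;

    size = (size - 1) >> PAGE_SHIFT;
    while (size) {
        order++;
        size >>= 1;
    }
    return order;
}

int main(void)
{
    size_t size = 4000;          /* arbitrary byte count, for illustration */
    uint32_t iova = 0x10001200;  /* hypothetical IOVA with an in-page offset */

    /* __dma_alloc() and arm_iommu_alloc_attrs() round the request up to
     * whole pages before doing anything else. */
    size_t aligned = PAGE_ALIGN(size);
    size_t pages = aligned >> PAGE_SHIFT;     /* 'count' in __alloc_from_contiguous() */
    unsigned int order = get_order(aligned);  /* buddy order in __dma_alloc_buffer() */

    /* __iommu_remove_mapping(): fold the IOVA's in-page offset into the
     * size, then align, so the unmap covers every touched page. */
    size_t unmap = PAGE_ALIGN((iova & ~PAGE_MASK) + size);

    printf("size=%zu aligned=%zu pages=%zu order=%u unmap=%zu\n",
           size, aligned, pages, order, unmap);
    return 0;
}

Note how the two alignments differ on the same request: PAGE_ALIGN(4000) is a single page, but once the IOVA's in-page offset (0x200 here) is folded in, the region to unmap spans two pages. This is also why __dma_alloc_buffer() can hand back the tail of an over-sized buddy allocation (the loop over pages beyond size >> PAGE_SHIFT at source line 155): the order rounds up to a power of two, while the caller only needs the page-aligned size.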