// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
#include <xen/swiotlb-xen.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL		0
#define COHERENT	1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

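/*
 * Illustrative sketch (not part of the original file): how a driver is
 * expected to observe the ownership rules described above when it uses the
 * streaming API that lands in arm_dma_map_page()/arm_dma_unmap_page() below.
 * "mydev", "page", "offset" and "len" are hypothetical.
 *
 *	dma_addr_t handle = dma_map_page(mydev, page, offset, len,
 *					 DMA_FROM_DEVICE);
 *	if (dma_mapping_error(mydev, handle))
 *		return -ENOMEM;
 *	// the device owns the buffer now; start the transfer
 *	dma_unmap_page(mydev, handle, len, DMA_FROM_DEVICE);
 *	// the CPU owns the buffer again and sees what the device wrote
 */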
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
static int arm_dma_supported(struct device *dev, u64 mask)
{
	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	return dma_to_pfn(dev, mask) >= max_dma_pfn;
}

const struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.map_resource		= dma_direct_map_resource,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs);

const struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.map_resource		= dma_direct_map_resource,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}

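/*
 * Worked example for __dma_alloc_buffer() above (illustrative, assuming
 * 4 KiB pages): a 12 KiB request gives get_order(12K) == 2, so a 16 KiB
 * (order-2) block is allocated, split_page() turns it into four order-0
 * pages, and the loop frees the fourth page so that exactly 12 KiB remain.
 */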
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr);

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL,
				      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
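
/*
 * Note (illustrative, not from the original file): the size of this atomic
 * pool can be changed on the kernel command line, e.g.
 *
 *	coherent_pool=2M
 *
 * which is parsed by early_coherent_pool() above via memparse().
 */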
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = dma_common_contiguous_remap(page, size, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = dma_common_contiguous_remap(page, size, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			dma_common_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		dma_common_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}

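/*
 * Illustrative sketch (not part of the original file): how the allocator
 * choice in __dma_alloc() above plays out for a typical driver call.
 * "mydev", "handle" and "handle2" are hypothetical.
 *
 *	// May block: CMA (if configured) or the remapping allocator is used.
 *	void *va = dma_alloc_coherent(mydev, SZ_64K, &handle, GFP_KERNEL);
 *
 *	// In atomic context blocking is not allowed, so the request is
 *	// served from the pre-allocated atomic pool instead.
 *	void *va2 = dma_alloc_coherent(mydev, SZ_4K, &handle2, GFP_ATOMIC);
 */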
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 unsigned long attrs)
{
	unsigned long pfn = dma_to_pfn(dev, handle);
	struct page *page;
	int ret;

	/* If the PFN is not valid, we do not have a struct page */
	if (!pfn_valid(pfn))
		return -ENXIO;

	page = pfn_to_page(pfn);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

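/*
 * Illustrative sketch (not part of the original file): typical use of the
 * scatter-gather mapping implemented by arm_dma_map_sg() above. "mydev",
 * "sg", "nents" and write_desc() are hypothetical.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(mydev, sg, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sg, s, count, i) {
 *		// program one hardware descriptor per mapped segment
 *		write_desc(i, sg_dma_address(s), sg_dma_len(s));
 *	}
 *	...
 *	dma_unmap_sg(mydev, sg, nents, DMA_TO_DEVICE);
 */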
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;

	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	/*
	 * When CONFIG_ARM_LPAE is set, physical address can extend above
	 * 32-bits, which then can't be addressed by devices that only support
	 * 32-bit DMA.
	 * Use the generic dma-direct / swiotlb ops code in that case, as that
	 * handles bounce buffering for us.
	 */
	if (IS_ENABLED(CONFIG_ARM_LPAE))
		return NULL;
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}

#ifdef CONFIG_ARM_DMA_USE_IOMMU

static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
	int prot = 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return prot;
	}
}

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

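/*
 * Worked example for __alloc_iova() above (illustrative, 4 KiB pages,
 * CONFIG_ARM_DMA_IOMMU_ALIGNMENT >= 8): a 1 MiB request has
 * get_order(1M) == 8, so count == 256 pages and align == (1 << 8) - 1,
 * i.e. the bitmap search only returns ranges whose start index is a
 * multiple of 256 pages, giving a 1 MiB aligned IO virtual address.
 */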
/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, unsigned long attrs,
					  int coherent_flag)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
	{
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order,
						 gfp & __GFP_NOWARN);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size, coherent_flag);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, unsigned long attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	kvfree(pages);
	return 0;
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_MAPPING_ERROR;
}

static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return cpu_addr;

	return dma_common_find_pages(cpu_addr);
}

static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
				  dma_addr_t *handle, int coherent_flag,
				  unsigned long attrs)
{
	struct page *page;
	void *addr;

	if (coherent_flag == COHERENT)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else
		addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size, int coherent_flag)
{
	__iommu_remove_mapping(dev, handle, size);
	if (coherent_flag == COHERENT)
		__dma_free_buffer(virt_to_page(cpu_addr), size);
	else
		__free_from_pool(cpu_addr, size);
}

static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
	    int coherent_flag)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_MAPPING_ERROR;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag, attrs);

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_buffer;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return pages;

	addr = dma_common_pages_remap(pages, size, prot,
				      __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
}

static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
}

static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    unsigned long attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int err;

	if (!pages)
		return -ENXIO;

	if (vma->vm_pgoff >= nr_pages)
		return -ENXIO;

	err = vm_map_pages(vma, pages, nr_pages);
	if (err)
		pr_err("Remapping memory failed: %d\n", err);

	return err;
}
static int arm_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

static int arm_coherent_iommu_mmap_attrs(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, unsigned long attrs, int coherent_flag)
{
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		dma_common_free_remap(cpu_addr, size);

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

static void arm_iommu_free_attrs(struct device *dev, size_t size,
				 void *cpu_addr, dma_addr_t handle,
				 unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
}

static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
		    void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
}

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, unsigned long attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_MAPPING_ERROR;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_MAPPING_ERROR)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_info_to_prot(dir, attrs);

		ret = iommu_map(mapping->domain, iova, phys, len, prot);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, unsigned long attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_MAPPING_ERROR;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}

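/*
 * Illustrative note (not part of the original file): __iommu_map_sg() above
 * merges adjacent entries into one IO virtual range where it can.  For
 * example, four page-sized entries with zero in-page offsets and a large
 * enough segment size limit come back as a single chunk: one
 * sg_dma_address() with a dma_length of 16 KiB.  An entry with a non-zero
 * in-page offset closes the current chunk and starts a new IOVA chunk.
 */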
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs, bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
static void arm_coherent_iommu_unmap_sg(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
static void arm_iommu_unmap_sg(struct device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_cpu(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);

}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}


/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_map_resource - map a device resource for DMA
 * @dev: valid struct device pointer
 * @phys_addr: physical address of resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static dma_addr_t arm_iommu_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot;
	phys_addr_t addr = phys_addr & PAGE_MASK;
	unsigned int offset = phys_addr & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;

	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_unmap_resource - unmap a device DMA resource
 * @dev: valid struct device pointer
 * @dma_handle: DMA address to resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

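/*
 * Illustrative sketch (not built as part of this file): the single-buffer
 * sync hooks above let a driver sync only the part of a streaming mapping
 * the CPU actually needs to look at, e.g. a status word the device updates
 * inside a larger DMA_FROM_DEVICE buffer.  Names and offsets are
 * hypothetical.
 */
#if 0
static u32 example_peek_status(struct device *dev, dma_addr_t buf_dma,
			       void *buf_cpu, size_t status_off)
{
	u32 status;

	/* Hand just the status word back to the CPU ... */
	dma_sync_single_for_cpu(dev, buf_dma + status_off, sizeof(u32),
				DMA_FROM_DEVICE);
	status = *(u32 *)(buf_cpu + status_off);
	/* ... and return it to the device before DMA continues */
	dma_sync_single_for_device(dev, buf_dma + status_off, sizeof(u32),
				   DMA_FROM_DEVICE);

	return status;
}
#endif
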
static const struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.map_resource		= arm_iommu_map_resource,
	.unmap_resource		= arm_iommu_unmap_resource,

	.dma_supported		= arm_dma_supported,
};

static const struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_coherent_iommu_alloc_attrs,
	.free		= arm_coherent_iommu_free_attrs,
	.mmap		= arm_coherent_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.map_resource	= arm_iommu_map_resource,
	.unmap_resource	= arm_iommu_unmap_resource,

	.dma_supported	= arm_dma_supported,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device() function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
				   GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);

static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps >= mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

static int __arm_iommu_attach_device(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	to_dma_iommu_mapping(dev) = mapping;

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches the specified io address space mapping to the provided device.
 * This replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * More than one client might be attached to the same io address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = __arm_iommu_attach_device(dev, mapping);
	if (err)
		return err;

	set_dma_ops(dev, &iommu_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);

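/*
 * Illustrative sketch (not built as part of this file): the usual life
 * cycle of the exported mapping API as a bus or driver might use it.  The
 * IOVA base, window size and function names are hypothetical.
 */
#if 0
static int example_enable_iommu_dma(struct device *dev,
				    struct dma_iommu_mapping **out)
{
	struct dma_iommu_mapping *mapping;
	int err;

	/* Reserve a 64MB IO virtual address window for this device */
	mapping = arm_iommu_create_mapping(dev->bus, 0x10000000, SZ_64M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Switch the device over to the IOMMU-aware dma_map_ops */
	err = arm_iommu_attach_device(dev, mapping);
	if (err) {
		arm_iommu_release_mapping(mapping);
		return err;
	}

	*out = mapping;
	return 0;
}

static void example_disable_iommu_dma(struct device *dev,
				      struct dma_iommu_mapping *mapping)
{
	/* Detach first, then drop the creation reference */
	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}
#endif
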
/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	to_dma_iommu_mapping(dev) = NULL;
	set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
{
	return coherent ? &iommu_coherent_ops : &iommu_ops;
}

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
			size, dev_name(dev));
		return false;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
			dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif	/* CONFIG_ARM_DMA_USE_IOMMU */

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	dev->archdata.dma_coherent = coherent;
#ifdef CONFIG_SWIOTLB
	dev->dma_coherent = coherent;
#endif

	/*
	 * Don't override the dma_ops if they have already been set. Ideally
	 * this should be the only location where dma_ops are set; remove this
	 * check once all other callers of set_dma_ops() have disappeared.
	 */
	if (dev->dma_ops)
		return;

	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
		dma_ops = arm_get_iommu_dma_map_ops(coherent);
	else
		dma_ops = arm_get_dma_map_ops(coherent);

	set_dma_ops(dev, dma_ops);

#ifdef CONFIG_XEN
	if (xen_initial_domain())
		dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
	dev->archdata.dma_ops_setup = true;
}

void arch_teardown_dma_ops(struct device *dev)
{
	if (!dev->archdata.dma_ops_setup)
		return;

	arm_teardown_iommu_dma_ops(dev);
	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
	set_dma_ops(dev, NULL);
}

#ifdef CONFIG_SWIOTLB
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, dma_handle, gfp,
			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
			   attrs, __builtin_return_address(0));
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
}
#endif /* CONFIG_SWIOTLB */
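
/*
 * Illustrative sketch (not built as part of this file): coherent
 * allocations from a driver go through dma_alloc_coherent() and, for
 * devices on the non-IOMMU/SWIOTLB path, may end up in arch_dma_alloc()
 * above; no explicit cache maintenance is needed on such buffers.  The
 * function name and ring size are hypothetical.
 */
#if 0
static int example_alloc_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... give 'ring_dma' to the device and use 'ring' from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}
#endif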