/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>

#include "mm.h"

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
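/*
 * Illustrative sketch (not part of this file): how a driver typically
 * drives the ownership transitions above for a streaming buffer.  The
 * device and buffer names are hypothetical; error handling is minimal.
 *
 *	dma_addr_t handle = dma_map_page(dev, page, offset, len,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...			(device owns the buffer; run the transfer)
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 *	...			(CPU owns the buffer again)
 */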
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);

struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)arm_dma_limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & (u64)arm_dma_limit) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (u64)arm_dma_limit);
			return 0;
		}
	}

	return mask;
}
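/*
 * Illustrative sketch (not part of this file): a driver is expected to
 * declare its addressing capability before allocating, so that the mask
 * check above can succeed.  Typically done once at probe time:
 *
 *	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */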
static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}
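/*
 * Worked example for the split-and-trim above (illustrative, 4 KiB pages):
 * a 20 KiB request gives get_order(20K) == 3, i.e. a 32 KiB (8-page)
 * allocation.  split_page() turns it into 8 order-0 pages, and the loop
 * frees the trailing 3 pages, leaving exactly the 5 pages (20 KiB) needed.
 */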
#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so let's
	 * set the VM_USERMAP flag too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K

struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_pages;
	void *vaddr;
	struct page **pages;
};

static struct dma_pool atomic_pool = {
	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};

static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool.vaddr);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool.size = size;
}
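/*
 * Illustrative usage (not part of this file): the pool size can be raised
 * from the kernel command line, e.g.
 *
 *	coherent_pool=4M
 *
 * or, before the pool is initialised, from platform init code:
 *
 *	init_dma_coherent_pool_size(SZ_1M);
 */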
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	struct dma_pool *pool = &atomic_pool;
	pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
	struct page **pages;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;

	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto no_pages;

	if (IS_ENABLED(CONFIG_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
					      atomic_pool_init);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
					   atomic_pool_init);
	if (ptr) {
		int i;

		for (i = 0; i < nr_pages; i++)
			pages[i] = page + i;

		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
		pool->pages = pages;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
			(unsigned)pool->size / 1024);
		return 0;
	}

	kfree(pages);
no_pages:
	kfree(bitmap);
no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)pool->size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size.  This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
					    0, count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->pages[pageno];
	} else {
		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
			    "Please increase it with coherent_pool= kernel parameter!\n",
			    (unsigned)pool->size / 1024);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}
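/*
 * Worked example for the alignment above (illustrative, 4 KiB pages): a
 * 6 KiB request has count = 2 and get_order(6K) == 1, so align_mask = 1
 * and the bitmap search only returns even page indices.  A 9 KiB request
 * rounds up to order 2 and is aligned to a 4-page boundary instead.
 */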
static bool __in_atomic_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	void *end = start + size;
	void *pool_start = pool->vaddr;
	void *pool_end = pool->vaddr + pool->size;

	if (start < pool_start || start >= pool_end)
		return false;

	if (end <= pool_end)
		return true;

	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
	     start, end - 1, pool_start, pool_end - 1);

	return false;
}

static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (!__in_atomic_pool(start, size))
		return 0;

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size)
{
	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size);
	else
		__dma_remap(page, size, pgprot_kernel);
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
	prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
			    pgprot_writecombine(prot) :
			    pgprot_dmacoherent(prot);
	return prot;
}
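/*
 * Illustrative sketch (not part of this file): how a caller selects the
 * write-combining variant handled by __get_dma_pgprot() above, via the
 * DMA attributes interface:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
 *	buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, &attrs);
 */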
#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __get_dma_pgprot(attrs, prot)				__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(size, ret_page)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			 size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0));
}
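/*
 * Illustrative sketch (not part of this file): the usual entry point into
 * the allocation path above is dma_alloc_coherent(); the names are
 * hypothetical and error handling is minimal.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */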
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
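/*
 * Illustrative sketch (not part of this file): a driver exposing a
 * coherent buffer to userspace from its fops->mmap handler would
 * typically call the helper above through dma_mmap_coherent().  The
 * foo_dev structure and its fields are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 */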
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!IS_ENABLED(CONFIG_CMA)) {
		__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size);
	}
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}
/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}
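/*
 * Illustrative sketch (not part of this file): typical driver-side use of
 * the scatter-gather path above.  Table setup and declarations are
 * elided; program_hw_descriptor() is a stand-in for device-specific code.
 *
 *	int n = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!n)
 *		return -ENOMEM;
 *	for_each_sg(sgt->sgl, s, n, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */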
/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	if (mask < (u64)arm_dma_limit)
		return 0;
	return 1;
}
EXPORT_SYMBOL(dma_supported);

int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	unsigned long flags;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
		 (1 << mapping->order) - 1) >> mapping->order;

	if (order > mapping->order)
		align = (1 << (order - mapping->order)) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
					   count, align);
	if (start > mapping->bits) {
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);

	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start = (addr - mapping->base) >>
			     (mapping->order + PAGE_SHIFT);
	unsigned int count = ((size >> PAGE_SHIFT) +
			      (1 << mapping->order) - 1) >> mapping->order;
	unsigned long flags;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}
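/*
 * Worked example for the IOVA accounting above (illustrative, 4 KiB
 * pages): with mapping->order == 1 each bitmap bit covers 2 pages, so a
 * 24 KiB request (6 pages) needs count = (6 + 1) >> 1 = 3 bits, and the
 * returned address advances in units of 1 << (order + PAGE_SHIFT),
 * i.e. 8 KiB per bit.
 */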
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order = __fls(count);

		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, struct dma_attrs *attrs)
{
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return 0;
}

/*
 * Create a CPU mapping for the specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long p;

	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;

	area->pages = pages;
	area->nr_pages = nr_pages;
	p = (unsigned long)area->addr;

	for (i = 0; i < nr_pages; i++) {
		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
			goto err;
		p += PAGE_SIZE;
	}
	return area->addr;
err:
	unmap_kernel_range((unsigned long)area->addr, size);
	vunmap(area->addr);
	return NULL;
}

/*
 * Create a mapping in device IO address space for the specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}

static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}
static struct page **__atomic_get_pages(void *addr)
{
	struct dma_pool *pool = &atomic_pool;
	struct page **pages = pool->pages;
	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;

	return pages + offs;
}

static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}

static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size)
{
	__iommu_remove_mapping(dev, handle, size);
	__free_from_pool(cpu_addr, size);
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (gfp & GFP_ATOMIC)
		return __iommu_alloc_atomic(dev, size, handle);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}

/*
 * Free a buffer as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	size = PAGE_ALIGN(size);

	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if (__in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size);
		return;
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		unmap_kernel_range((unsigned long)cpu_addr, size);
		vunmap(cpu_addr);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA.  The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length.  They
 * are obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length.  They are obtained
 * via sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}

static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
		bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.set_dma_mask		= arm_dma_set_mask,
};

struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.set_dma_mask	= arm_dma_set_mask,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: size of the valid IO address space
 * @order: granularity of the IO address allocations; each bitmap bit
 *	covers (1 << order) pages
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device() function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
			 int order)
{
	unsigned int count = size >> (PAGE_SHIFT + order);
	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int err = -ENOMEM;

	if (!count)
		return ERR_PTR(-EINVAL);

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmap)
		goto err2;

	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;
	mapping->order = order;
	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err3;

	kref_init(&mapping->kref);
	return mapping;
err3:
	kfree(mapping->bitmap);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);

static void release_iommu_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	kfree(mapping->bitmap);
	kfree(mapping);
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches the specified io address space mapping to the provided device.
 * This replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.  More than one client might be attached to
 * the same io address space mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;
	set_dma_ops(dev, &iommu_ops);

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
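/*
 * Illustrative sketch (not part of this file): typical platform code that
 * puts a device behind an IOMMU using the two calls above.  The base
 * address, window size and bus type are hypothetical.
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M, 0);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 */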
/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This restores the default dma operations (dma_map_ops pointer).
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	mapping = NULL;
	set_dma_ops(dev, NULL);

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

#endif