xref: /linux/arch/arm/mm/dma-mapping.c (revision 98f4a2c27c76e7eaf75c2f3f25487fabca62ef3d)
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = ISA_DMA_THRESHOLD;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			return 0;
		}
	}

	return mask;
}
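
/*
 * Illustration (hypothetical values, not taken from this file): on a
 * platform where ISA_DMA_THRESHOLD is 0xffffffff, a device advertising
 * coherent_dma_mask = DMA_BIT_MASK(24) gives (~mask) & ISA_DMA_THRESHOLD
 * == 0xff000000, so the check above refuses the request rather than hand
 * back GFP_DMA memory the device might not be able to address.
 */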

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}
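
/*
 * Worked example (illustrative sizes): a page-aligned request of 12KiB
 * (three 4KiB pages) gives get_order(12KiB) == 2, so alloc_pages()
 * returns a naturally aligned block of four pages.  split_page() turns
 * that block into four independent pages and the loop above returns the
 * unused fourth page to the page allocator, so only 12KiB stays pinned.
 */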

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
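
/*
 * Sizing example (hypothetical configuration): each entry above covers one
 * PGDIR span of 2MiB (PGDIR_SHIFT == 21 with the classic ARM 2-level page
 * tables), so a platform that sets CONSISTENT_DMA_SIZE to 8MiB needs
 * NUM_CONSISTENT_PTES == 4 pre-allocated PTE tables, and the #error above
 * rejects any size that is not a whole number of 2MiB sections.
 */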

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);

		pud = pud_alloc(&init_mm, pgd, base);
		if (!pud) {
			printk(KERN_ERR "%s: no pud tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		pmd = pmd_alloc(&init_mm, pud, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;
	size_t align;
	int bit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Align the virtual region allocation - maximum alignment is
	 * a section size, minimum is a page size.  This helps reduce
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
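	/*
	 * For instance (illustrative numbers): a 40KiB request gives
	 * fls(40KiB - 1) == 16, i.e. a 64KiB-aligned virtual region, while
	 * anything of a section size (1MiB) or more is capped at section
	 * alignment by the SECTION_SHIFT clamp below.
	 */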
	bit = fls(size - 1);
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, align, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		dsb();

		return (void *)c->vm_start;
	}
	return NULL;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			printk(KERN_CRIT "%s: bad page in kernel page table\n",
			       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
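
/*
 * A minimal usage sketch (illustrative only; 'mydev' and the SZ_4K size are
 * hypothetical, not taken from this file):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(mydev, SZ_4K, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device; the CPU accesses 'ring'
 *	... directly, with no dma_sync_* calls needed for coherent memory
 *	dma_free_coherent(mydev, SZ_4K, ring, ring_dma);
 */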

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
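
/*
 * Typical call site (hypothetical driver, for illustration only): a char
 * device that previously allocated 'buf'/'buf_dma' with
 * dma_alloc_writecombine() can hand the buffer to userspace from its mmap
 * file operation:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_writecombine(mydrv->dev, vma,
 *					     mydrv->buf, mydrv->buf_dma,
 *					     mydrv->buf_size);
 *	}
 *
 * dma_mmap() above refuses ranges (vm_pgoff plus the VMA size) that fall
 * outside the original allocation, so the handler does not need its own
 * bounds check.
 */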

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);

void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);

void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	BUG_ON(!valid_dma_direction(dir));

	for_each_sg(sg, s, nents, i) {
		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	debug_dma_map_sg(dev, sg, nents, nents, dir);
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);
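
/*
 * Usage sketch (hypothetical driver code, shown only to illustrate the
 * calling convention documented above; program_descriptor() is made up):
 *
 *	int count, i;
 *	struct scatterlist *s;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		program_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	... after the transfer completes ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original 'nents', not the value
 * returned by dma_map_sg().
 */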

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	debug_dma_unmap_sg(dev, sg, nents, dir);

	for_each_sg(sg, s, nents, i)
		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
					    sg_dma_len(s), dir))
			continue;

		__dma_page_dev_to_cpu(sg_page(s), s->offset,
				      s->length, dir);
	}

	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					sg_dma_len(s), dir))
			continue;

		__dma_page_cpu_to_dev(sg_page(s), s->offset,
				      s->length, dir);
	}

	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
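
/*
 * Ownership sketch (illustrative; 'dev', 'sglist' and 'nents' are assumed
 * to come from an earlier dma_map_sg() call in a hypothetical driver): a
 * mapping that stays live across several transfers hands the buffers back
 * and forth with the two helpers above instead of remapping each time:
 *
 *	dma_sync_sg_for_cpu(dev, sglist, nents, DMA_FROM_DEVICE);
 *	... the CPU may now read the received data ...
 *	dma_sync_sg_for_device(dev, sglist, nents, DMA_FROM_DEVICE);
 *	... the device may now DMA into the buffers again ...
 */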

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);