xref: /linux/arch/powerpc/kernel/iommu.c (revision 6b2d2cec1081a979e0efd6a1e9559e5a01a3c10e)
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

static int protect4gb = 1;

static inline unsigned long iommu_num_pages(unsigned long vaddr,
					    unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}
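
/*
 * Example: with 4K IOMMU pages (IOMMU_PAGE_SHIFT == 12), a buffer at
 * vaddr 0x1001 of length 0x2000 touches three IOMMU pages:
 * IOMMU_PAGE_ALIGN(0x3001) - (0x1001 & IOMMU_PAGE_MASK) == 0x4000 - 0x1000,
 * and 0x3000 >> 12 == 3.
 */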

static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

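/*
 * Boot-time options registered below:
 *   protect4gb=on|off      reserve the last TCE entry before each 4 GB
 *                          boundary (see iommu_init_table())
 *   iommu=vmerge|novmerge  force virtual merging of scatterlist segments
 *                          on or off (see iommu_map_sg())
 */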
__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);

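/*
 * Allocate a run of @npages free entries in the table's allocation bitmap.
 * @handle, when non-NULL, carries the search hint between successive
 * scatterlist allocations; @mask limits the highest usable DMA address and
 * @align_order requests a power-of-two alignment of the starting entry.
 * Returns the table-relative entry index, or DMA_ERROR_CODE on failure.
 * Caller must hold tbl->it_lock.
 */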
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
	unsigned long n, end, i, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;

	/* align_mask has the low align_order bits set; written this way
	 * so that align_order == 0 does not produce an undefined 64-bit
	 * shift */
	align_mask = (1ull << align_order) - 1;

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Small allocs (15 pages or less) are limited to the lower 3/4
	 * of the table; see the it_halfpoint setup in iommu_init_table().
	 */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;

	/* The case below can happen if we have a small segment appended
	 * to a large, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	n = find_next_zero_bit(tbl->it_map, limit, start);

	/* Align allocation */
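	/* e.g. align_order == 2 -> align_mask == 3: n = 5 rounds up to 8 */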
	n = (n + align_mask) & ~align_mask;

	end = n + npages;

	if (unlikely(end >= limit)) {
		if (likely(pass < 2)) {
			/* First failure: rescan our half of the table.
			 * Second failure: rescan the other half.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	for (i = n; i < end; i++)
		if (test_bit(i, tbl->it_map)) {
			start = i+1;
			goto again;
		}

	for (i = n; i < end; i++)
		__set_bit(i, tbl->it_map);

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}

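/*
 * Allocate @npages entries and point them at the buffer starting at
 * @page.  Returns the DMA address of the first mapped IOMMU page (the
 * intra-page offset is applied by the callers), or DMA_ERROR_CODE.
 */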
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
		       unsigned int npages, enum dma_data_direction direction,
		       unsigned long mask, unsigned int align_order)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & IOMMU_PAGE_MASK,
			 direction);

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

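/*
 * Release the table entries backing @dma_addr and clear the matching
 * hardware TCEs.  Rejects (and logs) addresses that fall outside the
 * table.  Caller must hold tbl->it_lock; the TLB flush is left to the
 * caller so that scatterlist frees can batch it.
 */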
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;
	unsigned long i;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr  = 0x%lx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%lx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%lx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%lx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%lx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%lx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);

	for (i = 0; i < npages; i++)
		__clear_bit(free_entry+i, tbl->it_map);
}

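/* Locked wrapper around __iommu_free() that also flushes the TCE cache */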
253 
254 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
255 		unsigned int npages)
256 {
257 	unsigned long flags;
258 
259 	spin_lock_irqsave(&(tbl->it_lock), flags);
260 
261 	__iommu_free(tbl, dma_addr, npages);
262 
263 	/* Make sure TLB cache is flushed if the HW needs it. We do
264 	 * not do an mb() here on purpose, it is not needed on any of
265 	 * the current platforms.
266 	 */
267 	if (ppc_md.tce_flush)
268 		ppc_md.tce_flush(tbl);
269 
270 	spin_unlock_irqrestore(&(tbl->it_lock), flags);
271 }
272 
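/*
 * Map a scatterlist of @nelems entries.  Consecutive entries whose DMA
 * addresses come out contiguous are merged into a single DMA segment
 * unless "iommu=novmerge" was given.  Returns the number of DMA segments
 * produced, or 0 on failure (with any partial mappings backed out).
 */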
int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		 int nelems, unsigned long mask,
		 enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i;
	unsigned int align;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & IOMMU_PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %u\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
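/* Undo iommu_map_sg(): free the IOMMU entries behind each mapped segment */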
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}


/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	unsigned long start_index, end_index;
	unsigned long entries_per_4g;
	unsigned long index;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long tceval;
		unsigned long tcecount = 0;

		/*
		 * Reserve the existing mappings left by the first kernel.
		 */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}
		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; ");
			printk(KERN_WARNING "freeing %d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#else
	/* Clear the hardware table in case firmware left allocations in it */
	ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
#endif

	/*
	 * DMA cannot cross a 4 GB boundary.  Mark the last entry of each
	 * 4 GB chunk as reserved.
	 */
	if (protect4gb) {
		entries_per_4g = 0x100000000UL >> IOMMU_PAGE_SHIFT;

		/* Mark the last bit before a 4GB boundary as used */
		start_index = tbl->it_offset | (entries_per_4g - 1);
		start_index -= tbl->it_offset;

		end_index = tbl->it_size;

		for (index = start_index; index < end_index - 1; index += entries_per_4g)
			__set_bit(index, tbl->it_map);
	}

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

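/*
 * Tear down the iommu_table attached to device node @dn: warn if any
 * TCEs are still mapped, then free the allocation bitmap and the table.
 */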
void iommu_free_table(struct device_node *dn)
{
	struct pci_dn *pdn = dn->data;
	struct iommu_table *tbl = pdn->iommu_table;
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
				dn->full_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__FUNCTION__, dn->full_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user-provided buffer.  The buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * is the kernel (virtual) address of the buffer.  The buffer need not be
 * page aligned; the dma_addr_t returned will point to the same byte
 * within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
		size_t size, unsigned long mask,
		enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit())  {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}
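
/*
 * A minimal usage sketch (hypothetical: "tbl", "buf" and the mask below
 * are illustrative, not names defined in this file):
 *
 *	dma_addr_t handle;
 *
 *	handle = iommu_map_single(tbl, buf, len, DMA_32BIT_MASK,
 *				  DMA_TO_DEVICE);
 *	if (handle == DMA_ERROR_CODE)
 *		return -EIO;	// table full or range exhausted
 *	...
 *	iommu_unmap_single(tbl, handle, len, DMA_TO_DEVICE);
 */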

void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk(KERN_ERR "iommu_alloc_coherent: size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}
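
/*
 * Sketch of a matched allocate/free pair (the names "tbl", "dev_node"
 * and the mask are illustrative assumptions, not defined in this file):
 *
 *	dma_addr_t bus;
 *	void *desc = iommu_alloc_coherent(tbl, 4096, &bus, DMA_32BIT_MASK,
 *					  GFP_KERNEL, dev_node);
 *	if (!desc)
 *		return -ENOMEM;
 *	...
 *	iommu_free_coherent(tbl, 4096, desc, bus);
 */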

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}
684