xref: /linux/arch/powerpc/kernel/iommu.c (revision 4dc7ccf7e9d9bca1989b840be9e8e84911387cf2)
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/kdump.h>

#define DBG(...)

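/*
 * Boot-time tunables, parsed by the __setup() handlers below.
 * "iommu=novmerge" disables the virtual merging done in iommu_map_sg();
 * "protect4gb=" sets protect4gb, which is only assigned here and is not
 * read anywhere else in this file.
 */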
static int novmerge;
static int protect4gb = 1;

static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);

static int __init setup_protect4gb(char *str)
{
	if (strcmp(str, "on") == 0)
		protect4gb = 1;
	else if (strcmp(str, "off") == 0)
		protect4gb = 0;

	return 1;
}

static int __init setup_iommu(char *str)
{
	if (!strcmp(str, "novmerge"))
		novmerge = 1;
	else if (!strcmp(str, "vmerge"))
		novmerge = 0;
	return 1;
}

__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu);
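
/* Example: booting with "iommu=novmerge protect4gb=off" flips both knobs. */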

static unsigned long iommu_range_alloc(struct device *dev,
				       struct iommu_table *tbl,
				       unsigned long npages,
				       unsigned long *handle,
				       unsigned long mask,
				       unsigned int align_order)
{
	unsigned long n, end, start;
	unsigned long limit;
	int largealloc = npages > 15;
	int pass = 0;
	unsigned long align_mask;
	unsigned long boundary_size;

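	/* Build a mask of the low align_order bits; iommu_area_alloc()
	 * below uses it to align the returned entry to a 2^align_order
	 * IOMMU-page boundary.
	 */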
	align_mask = 0xffffffffffffffffl >> (64 - align_order);

	/* This allocator was derived from x86_64's bit string search */

	/* Sanity check */
	if (unlikely(npages == 0)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return DMA_ERROR_CODE;
	}

	if (handle && *handle)
		start = *handle;
	else
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

	/* Use only half of the table for small allocs (15 pages or less) */
	limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

	if (largealloc && start < tbl->it_halfpoint)
		start = tbl->it_halfpoint;
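	/* Large allocations are steered into the region above it_halfpoint
	 * so that frequent small allocations do not fragment the space
	 * needed for large contiguous mappings; the retry logic below falls
	 * back to the other region if this one is exhausted.
	 */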

	/* The case below can happen if we have a small segment appended
	 * to a large one, or when the previous alloc was at the very end of
	 * the available space. If so, go back to the initial start.
	 */
	if (start >= limit)
		start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

	if (limit + tbl->it_offset > mask) {
		limit = mask - tbl->it_offset + 1;
		/* If we're constrained on address range, first try
		 * at the masked hint to avoid O(n) search complexity,
		 * but on second pass, start at 0.
		 */
		if ((start & mask) >= limit || pass > 0)
			start = 0;
		else
			start &= mask;
	}

	if (dev)
		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				      1 << IOMMU_PAGE_SHIFT);
	else
		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
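	/* dma_get_seg_boundary() returns a mask, hence the "+ 1"; the
	 * boundary is converted to IOMMU pages so that iommu_area_alloc()
	 * never returns a range that crosses it.
	 */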

	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
			     align_mask);
	if (n == -1) {
		if (likely(pass < 2)) {
			/* First failure: rescan this half of the table.
			 * Second failure: rescan the other half.
			 */
			start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
			limit = pass ? tbl->it_size : limit;
			pass++;
			goto again;
		} else {
			/* Third failure, give up */
			return DMA_ERROR_CODE;
		}
	}

	end = n + npages;

	/* Bump the hint to a new block for small allocs. */
	if (largealloc) {
		/* Don't bump to new block to avoid fragmentation */
		tbl->it_largehint = end;
	} else {
		/* Overflow will be taken care of at the next allocation */
		tbl->it_hint = (end + tbl->it_blocksize - 1) &
		                ~(tbl->it_blocksize - 1);
	}

	/* Update handle for SG allocations */
	if (handle)
		*handle = end;

	return n;
}
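
/*
 * The value returned above is a table entry index, not a bus address;
 * iommu_alloc() below converts it.  Illustrative numbers, assuming 4K
 * IOMMU pages: entry 0x100 with it_offset 0 becomes bus address
 * 0x100 << 12 == 0x100000, and the callers OR the low 12 bits of the
 * original buffer address back in.
 */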

static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
	                              (unsigned long)page & IOMMU_PAGE_MASK,
	                              direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);

		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}

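/*
 * Caller must hold tbl->it_lock; any TCE flush the hardware may need is
 * left to the caller (see iommu_free() and the error paths in the
 * mapping functions).
 */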
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	if (((free_entry + npages) > tbl->it_size) ||
	    (entry < tbl->it_offset)) {
		if (printk_ratelimit()) {
			printk(KERN_INFO "iommu_free: invalid entry\n");
			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
			WARN_ON(1);
		}
		return;
	}

	ppc_md.tce_free(tbl, entry, npages);
	bitmap_clear(tbl->it_map, free_entry, npages);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

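/*
 * Map a scatterlist for DMA.  Each element gets its own range of IOMMU
 * pages, and consecutive elements whose bus addresses turn out to be
 * contiguous are merged into a single DMA segment ("virtual merging"),
 * provided the combined length fits the device's maximum segment size
 * and the user has not booted with iommu=novmerge.  Returns the number
 * of DMA segments produced, or 0 on failure.
 */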
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
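		/* When the kernel page size is larger than the IOMMU page
		 * size (e.g. 64K kernel pages over 4K IOMMU pages), give a
		 * page-aligned buffer of at least PAGE_SIZE a DMA address
		 * that is aligned to the kernel page size as well.
		 */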
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
		                              vaddr & IOMMU_PAGE_MASK,
		                              direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IOMMU_PAGE_SIZE);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}


void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	sg = sglist;
	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sg->dma_address;

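		/* iommu_map_sg() zeroes dma_length on the entry after the
		 * last mapped segment, so a zero length marks the end of
		 * the mapped part of the list.
		 */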
		if (sg->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sg->dma_length,
					 IOMMU_PAGE_SIZE);
		__iommu_free(tbl, dma_handle, npages);
		sg = sg_next(sg);
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

static void iommu_table_clear(struct iommu_table *tbl)
{
	if (!is_kdump_kernel()) {
		/* Clear the table in case firmware left allocations in it */
		ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
		return;
	}

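	/* In a kdump kernel the TCEs installed by the crashed kernel may
	 * still be the target of in-flight DMA, so rather than clearing
	 * them we mark the corresponding entries as in use in the bitmap.
	 */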
#ifdef CONFIG_CRASH_DUMP
	if (ppc_md.tce_get) {
		unsigned long index, tceval, tcecount = 0;

		/* Reserve the existing mappings left by the first kernel. */
		for (index = 0; index < tbl->it_size; index++) {
			tceval = ppc_md.tce_get(tbl, index + tbl->it_offset);
			/*
			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
			 */
			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
				__set_bit(index, tbl->it_map);
				tcecount++;
			}
		}

		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
			printk(KERN_WARNING "TCE table is full; freeing ");
			printk(KERN_WARNING "%d entries for the kdump boot\n",
				KDUMP_MIN_TCE_ENTRIES);
			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
				index < tbl->it_size; index++)
				__clear_bit(index, tbl->it_map);
		}
	}
#endif
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the TCE space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
{
	unsigned long sz;
	static int welcomed = 0;
	struct page *page;

	/* Set aside 1/4 of the table for large allocations. */
	tbl->it_halfpoint = tbl->it_size * 3 / 4;
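	/* Despite its name, it_halfpoint sits at the 3/4 mark: small
	 * allocations search below it, large allocations start above it
	 * (see iommu_range_alloc()).
	 */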

	/* number of bytes needed for the bitmap */
	sz = (tbl->it_size + 7) >> 3;

	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
	tbl->it_map = page_address(page);
	memset(tbl->it_map, 0, sz);

	tbl->it_hint = 0;
	tbl->it_largehint = tbl->it_halfpoint;
	spin_lock_init(&tbl->it_lock);

	iommu_table_clear(tbl);

	if (!welcomed) {
		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
		       novmerge ? "disabled" : "enabled");
		welcomed = 1;
	}

	return tbl;
}

void iommu_free_table(struct iommu_table *tbl, const char *node_name)
{
	unsigned long bitmap_sz, i;
	unsigned int order;

	if (!tbl || !tbl->it_map) {
		printk(KERN_ERR "%s: expected TCE map for %s\n", __func__,
				node_name);
		return;
	}

	/* verify that table contains no entries */
	/* it_size is in entries, and we're examining 64 at a time */
	for (i = 0; i < (tbl->it_size/64); i++) {
		if (tbl->it_map[i] != 0) {
			printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
				__func__, node_name);
			break;
		}
	}

	/* calculate bitmap size in bytes */
	bitmap_sz = (tbl->it_size + 7) / 8;

	/* free bitmap */
	order = get_order(bitmap_sz);
	free_pages((unsigned long) tbl->it_map, order);

	/* free table */
	kfree(tbl);
}

/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address passed here
 * comprises a page address and offset into that page. The dma_addr_t
 * returned will point to the same byte within the page as was passed in.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset, size_t size,
			  unsigned long mask, enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	void *vaddr;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	vaddr = page_address(page) + offset;
	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align,
					 attrs);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit())  {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}
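
/*
 * Usage sketch (illustrative only, not part of this file): a dma_map_ops
 * implementation typically feeds these helpers the device's iommu_table,
 * along the lines of the hypothetical wrapper below; the in-tree wrappers
 * live in arch/powerpc/kernel/dma-iommu.c.
 *
 *	static dma_addr_t example_map_page(struct device *dev,
 *					   struct page *page,
 *					   unsigned long offset, size_t size,
 *					   enum dma_data_direction dir,
 *					   struct dma_attrs *attrs)
 *	{
 *		// example_table()/example_mask() stand in for however the
 *		// platform looks up the device's table and DMA mask.
 *		return iommu_map_page(dev, example_table(dev), page, offset,
 *				      size, example_mask(dev), dir, attrs);
 *	}
 */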

void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
		      size_t size, enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
		iommu_free(tbl, dma_handle, npages);
	}
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
			   size_t size, dma_addr_t *dma_handle,
			   unsigned long mask, gfp_t flag, int node)
{
	void *ret = NULL;
	dma_addr_t mapping;
	unsigned int order;
	unsigned int nio_pages, io_order;
	struct page *page;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	/*
	 * Client asked for way too much space.  This is checked later
	 * anyway.  It is easier to debug here for the drivers than in
	 * the tce tables.
	 */
	if (order >= IOMAP_MAX_ORDER) {
		printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
		return NULL;
	}

	if (!tbl)
		return NULL;

	/* Alloc enough pages (and possibly more) */
	page = alloc_pages_node(node, flag, order);
	if (!page)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);

	/* Set up tces to cover the allocated range */
	nio_pages = size >> IOMMU_PAGE_SHIFT;
	io_order = get_iommu_order(size);
	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
	if (mapping == DMA_ERROR_CODE) {
		free_pages((unsigned long)ret, order);
		return NULL;
	}
	*dma_handle = mapping;
	return ret;
}
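
/*
 * Illustrative numbers (assuming 4K kernel pages and 4K IOMMU pages): a
 * 16K request stays 16K after PAGE_ALIGN(), get_order() gives order 2, so
 * 16K of real memory is allocated; nio_pages is 4 and io_order is 2, so
 * four consecutive TCEs are set up, starting at an entry index that is a
 * multiple of four.
 */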

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
			 void *vaddr, dma_addr_t dma_handle)
{
	if (tbl) {
		unsigned int nio_pages;

		size = PAGE_ALIGN(size);
		nio_pages = size >> IOMMU_PAGE_SHIFT;
		iommu_free(tbl, dma_handle, nio_pages);
		size = PAGE_ALIGN(size);
		free_pages((unsigned long)vaddr, get_order(size));
	}
}