xref: /linux/arch/powerpc/kernel/iommu.c (revision 9052e9c95d908d6c3d7570aadc8898e1d871c8bb)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4  *
5  * Rewrite, cleanup, new allocation schemes, virtual merging:
6  * Copyright (C) 2004 Olof Johansson, IBM Corporation
7  *               and  Ben. Herrenschmidt, IBM Corporation
8  *
9  * Dynamic DMA mapping support, bus-independent parts.
10  */
11 
12 
13 #include <linux/init.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/mm.h>
17 #include <linux/spinlock.h>
18 #include <linux/string.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/bitmap.h>
21 #include <linux/iommu-helper.h>
22 #include <linux/crash_dump.h>
23 #include <linux/hash.h>
24 #include <linux/fault-inject.h>
25 #include <linux/pci.h>
26 #include <linux/iommu.h>
27 #include <linux/sched.h>
28 #include <linux/debugfs.h>
29 #include <asm/io.h>
30 #include <asm/prom.h>
31 #include <asm/iommu.h>
32 #include <asm/pci-bridge.h>
33 #include <asm/machdep.h>
34 #include <asm/kdump.h>
35 #include <asm/fadump.h>
36 #include <asm/vio.h>
37 #include <asm/tce.h>
38 #include <asm/mmu_context.h>
39 
40 #define DBG(...)
41 
42 #ifdef CONFIG_IOMMU_DEBUGFS
43 static int iommu_debugfs_weight_get(void *data, u64 *val)
44 {
45 	struct iommu_table *tbl = data;
46 	*val = bitmap_weight(tbl->it_map, tbl->it_size);
47 	return 0;
48 }
49 DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");
50 
51 static void iommu_debugfs_add(struct iommu_table *tbl)
52 {
53 	char name[10];
54 	struct dentry *liobn_entry;
55 
56 	sprintf(name, "%08lx", tbl->it_index);
57 	liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);
58 
59 	debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
60 	debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
61 	debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
62 	debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
63 	debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
64 	debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
65 	debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
66 }
67 
68 static void iommu_debugfs_del(struct iommu_table *tbl)
69 {
70 	char name[10];
71 	struct dentry *liobn_entry;
72 
73 	sprintf(name, "%08lx", tbl->it_index);
74 	liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
75 	debugfs_remove(liobn_entry);
76 }
77 #else
78 static void iommu_debugfs_add(struct iommu_table *tbl){}
79 static void iommu_debugfs_del(struct iommu_table *tbl){}
80 #endif
81 
82 static int novmerge;
83 
84 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
85 
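/*
 * Parse the "iommu=" kernel command line option: booting with
 * "iommu=novmerge" disables virtual merging of scatterlist entries,
 * "iommu=vmerge" (the default) enables it.
 */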
86 static int __init setup_iommu(char *str)
87 {
88 	if (!strcmp(str, "novmerge"))
89 		novmerge = 1;
90 	else if (!strcmp(str, "vmerge"))
91 		novmerge = 0;
92 	return 1;
93 }
94 
95 __setup("iommu=", setup_iommu);
96 
97 static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
98 
99 /*
100  * We precalculate the hash to avoid doing it on every allocation.
101  *
102  * The hash is important to spread CPUs across all the pools. For example,
103  * on a POWER7 with 4-way SMT we want interrupts on the primary threads;
104  * with 4 pools and no hash, all primary threads would map to the same pool.
105  */
106 static int __init setup_iommu_pool_hash(void)
107 {
108 	unsigned int i;
109 
110 	for_each_possible_cpu(i)
111 		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
112 
113 	return 0;
114 }
115 subsys_initcall(setup_iommu_pool_hash);
116 
117 #ifdef CONFIG_FAIL_IOMMU
118 
119 static DECLARE_FAULT_ATTR(fail_iommu);
120 
121 static int __init setup_fail_iommu(char *str)
122 {
123 	return setup_fault_attr(&fail_iommu, str);
124 }
125 __setup("fail_iommu=", setup_fail_iommu);
126 
127 static bool should_fail_iommu(struct device *dev)
128 {
129 	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
130 }
131 
132 static int __init fail_iommu_debugfs(void)
133 {
134 	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
135 						       NULL, &fail_iommu);
136 
137 	return PTR_ERR_OR_ZERO(dir);
138 }
139 late_initcall(fail_iommu_debugfs);
140 
141 static ssize_t fail_iommu_show(struct device *dev,
142 			       struct device_attribute *attr, char *buf)
143 {
144 	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
145 }
146 
147 static ssize_t fail_iommu_store(struct device *dev,
148 				struct device_attribute *attr, const char *buf,
149 				size_t count)
150 {
151 	int i;
152 
153 	if (count > 0 && sscanf(buf, "%d", &i) > 0)
154 		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
155 
156 	return count;
157 }
158 
159 static DEVICE_ATTR_RW(fail_iommu);
160 
161 static int fail_iommu_bus_notify(struct notifier_block *nb,
162 				 unsigned long action, void *data)
163 {
164 	struct device *dev = data;
165 
166 	if (action == BUS_NOTIFY_ADD_DEVICE) {
167 		if (device_create_file(dev, &dev_attr_fail_iommu))
168 			pr_warn("Unable to create IOMMU fault injection sysfs entries\n");
170 	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
171 		device_remove_file(dev, &dev_attr_fail_iommu);
172 	}
173 
174 	return 0;
175 }
176 
177 static struct notifier_block fail_iommu_bus_notifier = {
178 	.notifier_call = fail_iommu_bus_notify
179 };
180 
181 static int __init fail_iommu_setup(void)
182 {
183 #ifdef CONFIG_PCI
184 	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
185 #endif
186 #ifdef CONFIG_IBMVIO
187 	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
188 #endif
189 
190 	return 0;
191 }
192 /*
193  * Must execute after the PCI and VIO subsystems have initialised but
194  * before devices are probed.
195  */
196 arch_initcall(fail_iommu_setup);
197 #else
198 static inline bool should_fail_iommu(struct device *dev)
199 {
200 	return false;
201 }
202 #endif
203 
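/*
 * Allocate a run of @npages contiguous TCE entries from one of the table's
 * pools.  Small allocations come from a pool picked via the per-CPU hash;
 * allocations of more than 15 pages come from the large pool.  Returns the
 * index of the first entry (before it_offset is applied) or
 * DMA_MAPPING_ERROR on failure.
 */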
204 static unsigned long iommu_range_alloc(struct device *dev,
205 				       struct iommu_table *tbl,
206                                        unsigned long npages,
207                                        unsigned long *handle,
208                                        unsigned long mask,
209                                        unsigned int align_order)
210 {
211 	unsigned long n, end, start;
212 	unsigned long limit;
213 	int largealloc = npages > 15;
214 	int pass = 0;
215 	unsigned long align_mask;
216 	unsigned long flags;
217 	unsigned int pool_nr;
218 	struct iommu_pool *pool;
219 
220 	align_mask = (1ull << align_order) - 1;
221 
222 	/* This allocator was derived from x86_64's bit string search */
223 
224 	/* Sanity check */
225 	if (unlikely(npages == 0)) {
226 		if (printk_ratelimit())
227 			WARN_ON(1);
228 		return DMA_MAPPING_ERROR;
229 	}
230 
231 	if (should_fail_iommu(dev))
232 		return DMA_MAPPING_ERROR;
233 
234 	/*
235 	 * We don't need to disable preemption here because any CPU can
236 	 * safely use any IOMMU pool.
237 	 */
238 	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
239 
240 	if (largealloc)
241 		pool = &(tbl->large_pool);
242 	else
243 		pool = &(tbl->pools[pool_nr]);
244 
245 	spin_lock_irqsave(&(pool->lock), flags);
246 
247 again:
248 	if ((pass == 0) && handle && *handle &&
249 	    (*handle >= pool->start) && (*handle < pool->end))
250 		start = *handle;
251 	else
252 		start = pool->hint;
253 
254 	limit = pool->end;
255 
256 	/* The case below can happen if we have a small segment appended
257 	 * to a large one, or when the previous alloc was at the very end of
258 	 * the available space. If so, go back to the initial start.
259 	 */
260 	if (start >= limit)
261 		start = pool->start;
262 
263 	if (limit + tbl->it_offset > mask) {
264 		limit = mask - tbl->it_offset + 1;
265 		/* If we're constrained on address range, first try
266 		 * at the masked hint to avoid O(n) search complexity,
267 		 * but on second pass, start at 0 in pool 0.
268 		 */
269 		if ((start & mask) >= limit || pass > 0) {
270 			spin_unlock(&(pool->lock));
271 			pool = &(tbl->pools[0]);
272 			spin_lock(&(pool->lock));
273 			start = pool->start;
274 		} else {
275 			start &= mask;
276 		}
277 	}
278 
279 	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
280 			dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
281 			align_mask);
282 	if (n == -1) {
283 		if (likely(pass == 0)) {
284 			/* First try the pool from the start */
285 			pool->hint = pool->start;
286 			pass++;
287 			goto again;
288 
289 		} else if (pass <= tbl->nr_pools) {
290 			/* Now try scanning all the other pools */
291 			spin_unlock(&(pool->lock));
292 			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
293 			pool = &tbl->pools[pool_nr];
294 			spin_lock(&(pool->lock));
295 			pool->hint = pool->start;
296 			pass++;
297 			goto again;
298 
299 		} else if (pass == tbl->nr_pools + 1) {
300 			/* Last resort: try largepool */
301 			spin_unlock(&pool->lock);
302 			pool = &tbl->large_pool;
303 			spin_lock(&pool->lock);
304 			pool->hint = pool->start;
305 			pass++;
306 			goto again;
307 
308 		} else {
309 			/* Give up */
310 			spin_unlock_irqrestore(&(pool->lock), flags);
311 			return DMA_MAPPING_ERROR;
312 		}
313 	}
314 
315 	end = n + npages;
316 
317 	/* Bump the hint to a new block for small allocs. */
318 	if (largealloc) {
319 		/* Don't bump to new block to avoid fragmentation */
320 		pool->hint = end;
321 	} else {
322 		/* Overflow will be taken care of at the next allocation */
323 		pool->hint = (end + tbl->it_blocksize - 1) &
324 		                ~(tbl->it_blocksize - 1);
325 	}
326 
327 	/* Update handle for SG allocations */
328 	if (handle)
329 		*handle = end;
330 
331 	spin_unlock_irqrestore(&(pool->lock), flags);
332 
333 	return n;
334 }
335 
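/*
 * Allocate IOMMU entries for @page and program them into the hardware
 * table via it_ops->set().  Returns the resulting DMA address, or
 * DMA_MAPPING_ERROR if the range allocation or the TCE update fails.
 */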
336 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
337 			      void *page, unsigned int npages,
338 			      enum dma_data_direction direction,
339 			      unsigned long mask, unsigned int align_order,
340 			      unsigned long attrs)
341 {
342 	unsigned long entry;
343 	dma_addr_t ret = DMA_MAPPING_ERROR;
344 	int build_fail;
345 
346 	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
347 
348 	if (unlikely(entry == DMA_MAPPING_ERROR))
349 		return DMA_MAPPING_ERROR;
350 
351 	entry += tbl->it_offset;	/* Offset into real TCE table */
352 	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
353 
354 	/* Put the TCEs in the HW table */
355 	build_fail = tbl->it_ops->set(tbl, entry, npages,
356 				      (unsigned long)page &
357 				      IOMMU_PAGE_MASK(tbl), direction, attrs);
358 
359 	/* tbl->it_ops->set() only returns non-zero for transient errors.
360 	 * Clean up the table bitmap in this case and return
361 	 * DMA_MAPPING_ERROR. For all other errors the functionality is
362 	 * not altered.
363 	 */
364 	if (unlikely(build_fail)) {
365 		__iommu_free(tbl, ret, npages);
366 		return DMA_MAPPING_ERROR;
367 	}
368 
369 	/* Flush/invalidate TLB caches if necessary */
370 	if (tbl->it_ops->flush)
371 		tbl->it_ops->flush(tbl);
372 
373 	/* Make sure updates are seen by hardware */
374 	mb();
375 
376 	return ret;
377 }
378 
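/*
 * Sanity-check a free request: return false (and warn) if the DMA address
 * or page count falls outside the table.
 */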
379 static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
380 			     unsigned int npages)
381 {
382 	unsigned long entry, free_entry;
383 
384 	entry = dma_addr >> tbl->it_page_shift;
385 	free_entry = entry - tbl->it_offset;
386 
387 	if (((free_entry + npages) > tbl->it_size) ||
388 	    (entry < tbl->it_offset)) {
389 		if (printk_ratelimit()) {
390 			printk(KERN_INFO "iommu_free: invalid entry\n");
391 			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
392 			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
393 			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
394 			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
395 			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
396 			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
397 			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
398 			WARN_ON(1);
399 		}
400 
401 		return false;
402 	}
403 
404 	return true;
405 }
406 
407 static struct iommu_pool *get_pool(struct iommu_table *tbl,
408 				   unsigned long entry)
409 {
410 	struct iommu_pool *p;
411 	unsigned long largepool_start = tbl->large_pool.start;
412 
413 	/* The large pool is the last pool at the top of the table */
414 	if (entry >= largepool_start) {
415 		p = &tbl->large_pool;
416 	} else {
417 		unsigned int pool_nr = entry / tbl->poolsize;
418 
419 		BUG_ON(pool_nr > tbl->nr_pools);
420 		p = &tbl->pools[pool_nr];
421 	}
422 
423 	return p;
424 }
425 
426 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
427 			 unsigned int npages)
428 {
429 	unsigned long entry, free_entry;
430 	unsigned long flags;
431 	struct iommu_pool *pool;
432 
433 	entry = dma_addr >> tbl->it_page_shift;
434 	free_entry = entry - tbl->it_offset;
435 
436 	pool = get_pool(tbl, free_entry);
437 
438 	if (!iommu_free_check(tbl, dma_addr, npages))
439 		return;
440 
441 	tbl->it_ops->clear(tbl, entry, npages);
442 
443 	spin_lock_irqsave(&(pool->lock), flags);
444 	bitmap_clear(tbl->it_map, free_entry, npages);
445 	spin_unlock_irqrestore(&(pool->lock), flags);
446 }
447 
448 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
449 		unsigned int npages)
450 {
451 	__iommu_free(tbl, dma_addr, npages);
452 
453 	/* Make sure the TLB cache is flushed if the HW needs it. We do
454 	 * not do an mb() here on purpose; it is not needed on any of
455 	 * the current platforms.
456 	 */
457 	if (tbl->it_ops->flush)
458 		tbl->it_ops->flush(tbl);
459 }
460 
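/*
 * Map a scatterlist for DMA.  IOMMU entries are allocated one segment at
 * a time; consecutive segments whose DMA addresses turn out contiguous are
 * merged into a single DMA segment unless "iommu=novmerge" was given.
 * Returns the number of DMA segments produced, or a negative errno.
 */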
461 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
462 		     struct scatterlist *sglist, int nelems,
463 		     unsigned long mask, enum dma_data_direction direction,
464 		     unsigned long attrs)
465 {
466 	dma_addr_t dma_next = 0, dma_addr;
467 	struct scatterlist *s, *outs, *segstart;
468 	int outcount, incount, i, build_fail = 0;
469 	unsigned int align;
470 	unsigned long handle;
471 	unsigned int max_seg_size;
472 
473 	BUG_ON(direction == DMA_NONE);
474 
475 	if ((nelems == 0) || !tbl)
476 		return -EINVAL;
477 
478 	outs = s = segstart = &sglist[0];
479 	outcount = 1;
480 	incount = nelems;
481 	handle = 0;
482 
483 	/* Init first segment length for backout at failure */
484 	outs->dma_length = 0;
485 
486 	DBG("sg mapping %d elements:\n", nelems);
487 
488 	max_seg_size = dma_get_max_seg_size(dev);
489 	for_each_sg(sglist, s, nelems, i) {
490 		unsigned long vaddr, npages, entry, slen;
491 
492 		slen = s->length;
493 		/* Sanity check */
494 		if (slen == 0) {
495 			dma_next = 0;
496 			continue;
497 		}
498 		/* Allocate iommu entries for that segment */
499 		vaddr = (unsigned long) sg_virt(s);
500 		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
501 		align = 0;
502 		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
503 		    (vaddr & ~PAGE_MASK) == 0)
504 			align = PAGE_SHIFT - tbl->it_page_shift;
505 		entry = iommu_range_alloc(dev, tbl, npages, &handle,
506 					  mask >> tbl->it_page_shift, align);
507 
508 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
509 
510 		/* Handle failure */
511 		if (unlikely(entry == DMA_MAPPING_ERROR)) {
512 			if (!(attrs & DMA_ATTR_NO_WARN) &&
513 			    printk_ratelimit())
514 				dev_info(dev, "iommu_alloc failed, tbl %p "
515 					 "vaddr %lx npages %lu\n", tbl, vaddr,
516 					 npages);
517 			goto failure;
518 		}
519 
520 		/* Convert entry to a dma_addr_t */
521 		entry += tbl->it_offset;
522 		dma_addr = entry << tbl->it_page_shift;
523 		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
524 
525 		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
526 			    npages, entry, dma_addr);
527 
528 		/* Insert into HW table */
529 		build_fail = tbl->it_ops->set(tbl, entry, npages,
530 					      vaddr & IOMMU_PAGE_MASK(tbl),
531 					      direction, attrs);
532 		if (unlikely(build_fail))
533 			goto failure;
534 
535 		/* If we are in an open segment, try merging */
536 		if (segstart != s) {
537 			DBG("  - trying merge...\n");
538 			/* We cannot merge if vmerge is disabled, the new dma_addr is
539 			 * not contiguous with the previous allocation, or the merged
540 			 * segment would exceed max_seg_size. */
541 			if (novmerge || (dma_addr != dma_next) ||
542 			    (outs->dma_length + s->length > max_seg_size)) {
543 				/* Can't merge: create a new segment */
544 				segstart = s;
545 				outcount++;
546 				outs = sg_next(outs);
547 				DBG("    can't merge, new segment.\n");
548 			} else {
549 				outs->dma_length += s->length;
550 				DBG("    merged, new len: %x\n", outs->dma_length);
551 			}
552 		}
553 
554 		if (segstart == s) {
555 			/* This is a new segment, fill entries */
556 			DBG("  - filling new segment.\n");
557 			outs->dma_address = dma_addr;
558 			outs->dma_length = slen;
559 		}
560 
561 		/* Calculate next page pointer for contiguous check */
562 		dma_next = dma_addr + slen;
563 
564 		DBG("  - dma next is: %lx\n", dma_next);
565 	}
566 
567 	/* Flush/invalidate TLB caches if necessary */
568 	if (tbl->it_ops->flush)
569 		tbl->it_ops->flush(tbl);
570 
571 	DBG("mapped %d elements:\n", outcount);
572 
573 	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
574 	 * next entry of the sglist if we didn't fill the list completely
575 	 */
576 	if (outcount < incount) {
577 		outs = sg_next(outs);
578 		outs->dma_length = 0;
579 	}
580 
581 	/* Make sure updates are seen by hardware */
582 	mb();
583 
584 	return outcount;
585 
586  failure:
587 	for_each_sg(sglist, s, nelems, i) {
588 		if (s->dma_length != 0) {
589 			unsigned long vaddr, npages;
590 
591 			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
592 			npages = iommu_num_pages(s->dma_address, s->dma_length,
593 						 IOMMU_PAGE_SIZE(tbl));
594 			__iommu_free(tbl, vaddr, npages);
595 			s->dma_length = 0;
596 		}
597 		if (s == outs)
598 			break;
599 	}
600 	return -EIO;
601 }
602 
603 
604 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
605 			int nelems, enum dma_data_direction direction,
606 			unsigned long attrs)
607 {
608 	struct scatterlist *sg;
609 
610 	BUG_ON(direction == DMA_NONE);
611 
612 	if (!tbl)
613 		return;
614 
615 	sg = sglist;
616 	while (nelems--) {
617 		unsigned int npages;
618 		dma_addr_t dma_handle = sg->dma_address;
619 
620 		if (sg->dma_length == 0)
621 			break;
622 		npages = iommu_num_pages(dma_handle, sg->dma_length,
623 					 IOMMU_PAGE_SIZE(tbl));
624 		__iommu_free(tbl, dma_handle, npages);
625 		sg = sg_next(sg);
626 	}
627 
628 	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
629 	 * do not do an mb() here; the affected platforms do not need it
630 	 * when freeing.
631 	 */
632 	if (tbl->it_ops->flush)
633 		tbl->it_ops->flush(tbl);
634 }
635 
636 static void iommu_table_clear(struct iommu_table *tbl)
637 {
638 	/*
639 	 * With firmware-assisted dump, the system goes through a clean
640 	 * reboot at the time of the crash. Hence it's safe to clear the
641 	 * TCE entries if firmware-assisted dump is active.
642 	 */
643 	if (!is_kdump_kernel() || is_fadump_active()) {
644 		/* Clear the table in case firmware left allocations in it */
645 		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
646 		return;
647 	}
648 
649 #ifdef CONFIG_CRASH_DUMP
650 	if (tbl->it_ops->get) {
651 		unsigned long index, tceval, tcecount = 0;
652 
653 		/* Reserve the existing mappings left by the first kernel. */
654 		for (index = 0; index < tbl->it_size; index++) {
655 			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
656 			/*
657 			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
658 			 */
659 			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
660 				__set_bit(index, tbl->it_map);
661 				tcecount++;
662 			}
663 		}
664 
665 		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
666 			printk(KERN_WARNING "TCE table is full; freeing %d entries for the kdump boot\n",
667 				KDUMP_MIN_TCE_ENTRIES);
669 			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
670 				index < tbl->it_size; index++)
671 				__clear_bit(index, tbl->it_map);
672 		}
673 	}
674 #endif
675 }
676 
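/*
 * Mark the res_start..res_end window (clamped to the table) as reserved in
 * the allocation bitmap, and reserve entry 0 when the table starts at DMA
 * offset 0 so buggy drivers never see a zero DMA address.
 */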
677 static void iommu_table_reserve_pages(struct iommu_table *tbl,
678 		unsigned long res_start, unsigned long res_end)
679 {
680 	int i;
681 
682 	WARN_ON_ONCE(res_end < res_start);
683 	/*
684 	 * Reserve page 0 so it will not be used for any mappings.
685 	 * This prevents buggy drivers that consider page 0 to be invalid
686 	 * from crashing the machine or even losing data.
687 	 */
688 	if (tbl->it_offset == 0)
689 		set_bit(0, tbl->it_map);
690 
691 	if (res_start < tbl->it_offset)
692 		res_start = tbl->it_offset;
693 
694 	if (res_end > (tbl->it_offset + tbl->it_size))
695 		res_end = tbl->it_offset + tbl->it_size;
696 
697 	/* Check if res_start..res_end is a valid range in the table */
698 	if (res_start >= res_end) {
699 		tbl->it_reserved_start = tbl->it_offset;
700 		tbl->it_reserved_end = tbl->it_offset;
701 		return;
702 	}
703 
704 	tbl->it_reserved_start = res_start;
705 	tbl->it_reserved_end = res_end;
706 
707 	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
708 		set_bit(i - tbl->it_offset, tbl->it_map);
709 }
710 
711 /*
712  * Build an iommu_table structure.  This contains a bitmap which
713  * is used to manage allocation of the TCE space.
714  */
715 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
716 		unsigned long res_start, unsigned long res_end)
717 {
718 	unsigned long sz;
719 	static int welcomed = 0;
720 	unsigned int i;
721 	struct iommu_pool *p;
722 
723 	BUG_ON(!tbl->it_ops);
724 
725 	/* number of bytes needed for the bitmap */
726 	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
727 
728 	tbl->it_map = vzalloc_node(sz, nid);
729 	if (!tbl->it_map) {
730 		pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
731 		return NULL;
732 	}
733 
734 	iommu_table_reserve_pages(tbl, res_start, res_end);
735 
736 	/* We only split the IOMMU table if we have 1GB or more of space */
737 	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
738 		tbl->nr_pools = IOMMU_NR_POOLS;
739 	else
740 		tbl->nr_pools = 1;
741 
742 	/* We reserve the top 1/4 of the table for large allocations */
743 	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
744 
745 	for (i = 0; i < tbl->nr_pools; i++) {
746 		p = &tbl->pools[i];
747 		spin_lock_init(&(p->lock));
748 		p->start = tbl->poolsize * i;
749 		p->hint = p->start;
750 		p->end = p->start + tbl->poolsize;
751 	}
752 
753 	p = &tbl->large_pool;
754 	spin_lock_init(&(p->lock));
755 	p->start = tbl->poolsize * i;
756 	p->hint = p->start;
757 	p->end = tbl->it_size;
758 
759 	iommu_table_clear(tbl);
760 
761 	if (!welcomed) {
762 		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
763 		       novmerge ? "disabled" : "enabled");
764 		welcomed = 1;
765 	}
766 
767 	iommu_debugfs_add(tbl);
768 
769 	return tbl;
770 }
771 
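/*
 * Return true if any entry outside the reserved region (and the
 * always-reserved entry 0) is currently allocated in the bitmap.
 */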
772 bool iommu_table_in_use(struct iommu_table *tbl)
773 {
774 	unsigned long start = 0, end;
775 
776 	/* ignore reserved bit0 */
777 	if (tbl->it_offset == 0)
778 		start = 1;
779 	end = tbl->it_reserved_start - tbl->it_offset;
780 	if (find_next_bit(tbl->it_map, end, start) != end)
781 		return true;
782 
783 	start = tbl->it_reserved_end - tbl->it_offset;
784 	end = tbl->it_size;
785 	return find_next_bit(tbl->it_map, end, start) != end;
786 }
787 
788 static void iommu_table_free(struct kref *kref)
789 {
790 	struct iommu_table *tbl;
791 
792 	tbl = container_of(kref, struct iommu_table, it_kref);
793 
794 	if (tbl->it_ops->free)
795 		tbl->it_ops->free(tbl);
796 
797 	if (!tbl->it_map) {
798 		kfree(tbl);
799 		return;
800 	}
801 
802 	iommu_debugfs_del(tbl);
803 
804 	/* verify that table contains no entries */
805 	if (iommu_table_in_use(tbl))
806 		pr_warn("%s: Unexpected TCEs\n", __func__);
807 
808 	/* free bitmap */
809 	vfree(tbl->it_map);
810 
811 	/* free table */
812 	kfree(tbl);
813 }
814 
815 struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
816 {
817 	if (kref_get_unless_zero(&tbl->it_kref))
818 		return tbl;
819 
820 	return NULL;
821 }
822 EXPORT_SYMBOL_GPL(iommu_tce_table_get);
823 
824 int iommu_tce_table_put(struct iommu_table *tbl)
825 {
826 	if (WARN_ON(!tbl))
827 		return 0;
828 
829 	return kref_put(&tbl->it_kref, iommu_table_free);
830 }
831 EXPORT_SYMBOL_GPL(iommu_tce_table_put);
832 
833 /* Creates TCEs for a user provided buffer.  The user buffer must be
834  * contiguous real kernel storage (not vmalloc).  The address passed here
835  * comprises a page address and offset into that page. The dma_addr_t
836  * returned will point to the same byte within the page as was passed in.
837  */
838 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
839 			  struct page *page, unsigned long offset, size_t size,
840 			  unsigned long mask, enum dma_data_direction direction,
841 			  unsigned long attrs)
842 {
843 	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
844 	void *vaddr;
845 	unsigned long uaddr;
846 	unsigned int npages, align;
847 
848 	BUG_ON(direction == DMA_NONE);
849 
850 	vaddr = page_address(page) + offset;
851 	uaddr = (unsigned long)vaddr;
852 
853 	if (tbl) {
854 		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
855 		align = 0;
856 		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
857 		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
858 			align = PAGE_SHIFT - tbl->it_page_shift;
859 
860 		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
861 					 mask >> tbl->it_page_shift, align,
862 					 attrs);
863 		if (dma_handle == DMA_MAPPING_ERROR) {
864 			if (!(attrs & DMA_ATTR_NO_WARN) &&
865 			    printk_ratelimit())  {
866 				dev_info(dev, "iommu_alloc failed, tbl %p "
867 					 "vaddr %p npages %d\n", tbl, vaddr,
868 					 npages);
869 			}
870 		} else
871 			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
872 	}
873 
874 	return dma_handle;
875 }
876 
877 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
878 		      size_t size, enum dma_data_direction direction,
879 		      unsigned long attrs)
880 {
881 	unsigned int npages;
882 
883 	BUG_ON(direction == DMA_NONE);
884 
885 	if (tbl) {
886 		npages = iommu_num_pages(dma_handle, size,
887 					 IOMMU_PAGE_SIZE(tbl));
888 		iommu_free(tbl, dma_handle, npages);
889 	}
890 }
891 
892 /* Allocates a contiguous real buffer and creates mappings over it.
893  * Returns the virtual address of the buffer and sets dma_handle
894  * to the dma address (mapping) of the first page.
895  */
896 void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
897 			   size_t size,	dma_addr_t *dma_handle,
898 			   unsigned long mask, gfp_t flag, int node)
899 {
900 	void *ret = NULL;
901 	dma_addr_t mapping;
902 	unsigned int order;
903 	unsigned int nio_pages, io_order;
904 	struct page *page;
905 
906 	size = PAGE_ALIGN(size);
907 	order = get_order(size);
908 
909 	/*
910 	 * Client asked for way too much space.  This is checked later
911 	 * anyway.  It is easier to debug here for the drivers than in
912 	 * the tce tables.
913 	 */
914 	if (order >= IOMAP_MAX_ORDER) {
915 		dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
916 			 size);
917 		return NULL;
918 	}
919 
920 	if (!tbl)
921 		return NULL;
922 
923 	/* Alloc enough pages (and possibly more) */
924 	page = alloc_pages_node(node, flag, order);
925 	if (!page)
926 		return NULL;
927 	ret = page_address(page);
928 	memset(ret, 0, size);
929 
930 	/* Set up tces to cover the allocated range */
931 	nio_pages = size >> tbl->it_page_shift;
932 	io_order = get_iommu_order(size, tbl);
933 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
934 			      mask >> tbl->it_page_shift, io_order, 0);
935 	if (mapping == DMA_MAPPING_ERROR) {
936 		free_pages((unsigned long)ret, order);
937 		return NULL;
938 	}
939 	*dma_handle = mapping;
940 	return ret;
941 }
942 
943 void iommu_free_coherent(struct iommu_table *tbl, size_t size,
944 			 void *vaddr, dma_addr_t dma_handle)
945 {
946 	if (tbl) {
947 		unsigned int nio_pages;
948 
949 		size = PAGE_ALIGN(size);
950 		nio_pages = size >> tbl->it_page_shift;
951 		iommu_free(tbl, dma_handle, nio_pages);
953 		free_pages((unsigned long)vaddr, get_order(size));
954 	}
955 }
956 
957 unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
958 {
959 	switch (dir) {
960 	case DMA_BIDIRECTIONAL:
961 		return TCE_PCI_READ | TCE_PCI_WRITE;
962 	case DMA_FROM_DEVICE:
963 		return TCE_PCI_WRITE;
964 	case DMA_TO_DEVICE:
965 		return TCE_PCI_READ;
966 	default:
967 		return 0;
968 	}
969 }
970 EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
971 
972 #ifdef CONFIG_IOMMU_API
973 /*
974  * SPAPR TCE API
975  */
976 static void group_release(void *iommu_data)
977 {
978 	struct iommu_table_group *table_group = iommu_data;
979 
980 	table_group->group = NULL;
981 }
982 
983 void iommu_register_group(struct iommu_table_group *table_group,
984 		int pci_domain_number, unsigned long pe_num)
985 {
986 	struct iommu_group *grp;
987 	char *name;
988 
989 	grp = iommu_group_alloc();
990 	if (IS_ERR(grp)) {
991 		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
992 				PTR_ERR(grp));
993 		return;
994 	}
995 	table_group->group = grp;
996 	iommu_group_set_iommudata(grp, table_group, group_release);
997 	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
998 			pci_domain_number, pe_num);
999 	if (!name)
1000 		return;
1001 	iommu_group_set_name(grp, name);
1002 	kfree(name);
1003 }
1004 
1005 enum dma_data_direction iommu_tce_direction(unsigned long tce)
1006 {
1007 	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
1008 		return DMA_BIDIRECTIONAL;
1009 	else if (tce & TCE_PCI_READ)
1010 		return DMA_TO_DEVICE;
1011 	else if (tce & TCE_PCI_WRITE)
1012 		return DMA_FROM_DEVICE;
1013 	else
1014 		return DMA_NONE;
1015 }
1016 EXPORT_SYMBOL_GPL(iommu_tce_direction);
1017 
1018 void iommu_flush_tce(struct iommu_table *tbl)
1019 {
1020 	/* Flush/invalidate TLB caches if necessary */
1021 	if (tbl->it_ops->flush)
1022 		tbl->it_ops->flush(tbl);
1023 
1024 	/* Make sure updates are seen by hardware */
1025 	mb();
1026 }
1027 EXPORT_SYMBOL_GPL(iommu_flush_tce);
1028 
1029 int iommu_tce_check_ioba(unsigned long page_shift,
1030 		unsigned long offset, unsigned long size,
1031 		unsigned long ioba, unsigned long npages)
1032 {
1033 	unsigned long mask = (1UL << page_shift) - 1;
1034 
1035 	if (ioba & mask)
1036 		return -EINVAL;
1037 
1038 	ioba >>= page_shift;
1039 	if (ioba < offset)
1040 		return -EINVAL;
1041 
1042 	if ((ioba + 1) > (offset + size))
1043 		return -EINVAL;
1044 
1045 	return 0;
1046 }
1047 EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
1048 
1049 int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
1050 {
1051 	unsigned long mask = (1UL << page_shift) - 1;
1052 
1053 	if (gpa & mask)
1054 		return -EINVAL;
1055 
1056 	return 0;
1057 }
1058 EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
1059 
1060 long iommu_tce_xchg_no_kill(struct mm_struct *mm,
1061 		struct iommu_table *tbl,
1062 		unsigned long entry, unsigned long *hpa,
1063 		enum dma_data_direction *direction)
1064 {
1065 	long ret;
1066 	unsigned long size = 0;
1067 
1068 	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
1069 	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
1070 			(*direction == DMA_BIDIRECTIONAL)) &&
1071 			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
1072 					&size))
1073 		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
1074 
1075 	return ret;
1076 }
1077 EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
1078 
1079 void iommu_tce_kill(struct iommu_table *tbl,
1080 		unsigned long entry, unsigned long pages)
1081 {
1082 	if (tbl->it_ops->tce_kill)
1083 		tbl->it_ops->tce_kill(tbl, entry, pages, false);
1084 }
1085 EXPORT_SYMBOL_GPL(iommu_tce_kill);
1086 
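/*
 * Called when an external user (e.g. VFIO via the SPAPR TCE driver) takes
 * control of the table: fail with -EBUSY if the kernel still has live
 * mappings, otherwise mark every entry used so the kernel DMA API stays
 * out of the table.
 */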
1087 int iommu_take_ownership(struct iommu_table *tbl)
1088 {
1089 	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1090 	int ret = 0;
1091 
1092 	/*
1093 	 * VFIO does not control TCE entry allocation and the guest
1094 	 * can write new TCEs on top of existing ones, so iommu_tce_build()
1095 	 * must be able to release the old pages. This requires the
1096 	 * xchg_no_kill() callback to be defined; if it is not
1097 	 * implemented, we disallow taking ownership of the table.
1098 	 */
1099 	if (!tbl->it_ops->xchg_no_kill)
1100 		return -EINVAL;
1101 
1102 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1103 	for (i = 0; i < tbl->nr_pools; i++)
1104 		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1105 
1106 	if (iommu_table_in_use(tbl)) {
1107 		pr_err("iommu_tce: it_map is not empty\n");
1108 		ret = -EBUSY;
1109 	} else {
1110 		memset(tbl->it_map, 0xff, sz);
1111 	}
1112 
1113 	for (i = 0; i < tbl->nr_pools; i++)
1114 		spin_unlock(&tbl->pools[i].lock);
1115 	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1116 
1117 	return ret;
1118 }
1119 EXPORT_SYMBOL_GPL(iommu_take_ownership);
1120 
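/*
 * Undo iommu_take_ownership(): clear the allocation bitmap and restore the
 * reserved region so the kernel DMA API can use the table again.
 */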
1121 void iommu_release_ownership(struct iommu_table *tbl)
1122 {
1123 	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1124 
1125 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1126 	for (i = 0; i < tbl->nr_pools; i++)
1127 		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1128 
1129 	memset(tbl->it_map, 0, sz);
1130 
1131 	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1132 			tbl->it_reserved_end);
1133 
1134 	for (i = 0; i < tbl->nr_pools; i++)
1135 		spin_unlock(&tbl->pools[i].lock);
1136 	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1137 }
1138 EXPORT_SYMBOL_GPL(iommu_release_ownership);
1139 
1140 int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
1141 {
1142 	/*
1143 	 * The sysfs entries should be populated before binding the
1144 	 * IOMMU group. If the sysfs entries aren't ready, we simply bail.
1146 	 */
1147 	if (!device_is_registered(dev))
1148 		return -ENOENT;
1149 
1150 	if (device_iommu_mapped(dev)) {
1151 		pr_debug("%s: Skipping device %s with iommu group %d\n",
1152 			 __func__, dev_name(dev),
1153 			 iommu_group_id(dev->iommu_group));
1154 		return -EBUSY;
1155 	}
1156 
1157 	pr_debug("%s: Adding %s to iommu group %d\n",
1158 		 __func__, dev_name(dev),  iommu_group_id(table_group->group));
1159 
1160 	return iommu_group_add_device(table_group->group, dev);
1161 }
1162 EXPORT_SYMBOL_GPL(iommu_add_device);
1163 
1164 void iommu_del_device(struct device *dev)
1165 {
1166 	/*
1167 	 * Some devices might not have an IOMMU table and group,
1168 	 * and we needn't detach them from the associated
1169 	 * IOMMU groups.
1170 	 */
1171 	if (!device_iommu_mapped(dev)) {
1172 		pr_debug("iommu_tce: skipping device %s with no tbl\n",
1173 			 dev_name(dev));
1174 		return;
1175 	}
1176 
1177 	iommu_group_remove_device(dev);
1178 }
1179 EXPORT_SYMBOL_GPL(iommu_del_device);
1180 #endif /* CONFIG_IOMMU_API */
1181