xref: /linux/arch/powerpc/kernel/iommu.c (revision c42813b71a06a2ff4a155aa87ac609feeab76cf3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
4  *
5  * Rewrite, cleanup, new allocation schemes, virtual merging:
6  * Copyright (C) 2004 Olof Johansson, IBM Corporation
7  *               and  Ben. Herrenschmidt, IBM Corporation
8  *
9  * Dynamic DMA mapping support, bus-independent parts.
10  */
11 
12 
13 #include <linux/init.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/mm.h>
17 #include <linux/spinlock.h>
18 #include <linux/string.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/bitmap.h>
21 #include <linux/iommu-helper.h>
22 #include <linux/crash_dump.h>
23 #include <linux/hash.h>
24 #include <linux/fault-inject.h>
25 #include <linux/pci.h>
26 #include <linux/iommu.h>
27 #include <linux/sched.h>
28 #include <linux/debugfs.h>
29 #include <asm/io.h>
30 #include <asm/prom.h>
31 #include <asm/iommu.h>
32 #include <asm/pci-bridge.h>
33 #include <asm/machdep.h>
34 #include <asm/kdump.h>
35 #include <asm/fadump.h>
36 #include <asm/vio.h>
37 #include <asm/tce.h>
38 #include <asm/mmu_context.h>
39 
40 #define DBG(...)
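/*
 * Note: DBG() expands to nothing, so every DBG("...") call below is compiled
 * out. To get the trace output while debugging, one could locally redefine
 * it, e.g. "#define DBG(fmt...) pr_debug(fmt)" (a local tweak, not part of
 * this file).
 */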
41 
42 #ifdef CONFIG_IOMMU_DEBUGFS
43 static int iommu_debugfs_weight_get(void *data, u64 *val)
44 {
45 	struct iommu_table *tbl = data;
46 	*val = bitmap_weight(tbl->it_map, tbl->it_size);
47 	return 0;
48 }
49 DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");
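/*
 * The "weight" file reports how many TCE entries are currently allocated in
 * the table (the number of set bits in it_map). With debugfs mounted in the
 * usual place it would typically be read as something like
 * "cat /sys/kernel/debug/iommu/<it_index>/weight"; the exact parent directory
 * depends on where iommu_debugfs_dir is created.
 */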
50 
51 static void iommu_debugfs_add(struct iommu_table *tbl)
52 {
53 	char name[10];
54 	struct dentry *liobn_entry;
55 
56 	sprintf(name, "%08lx", tbl->it_index);
57 	liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);
58 
59 	debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
60 	debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
61 	debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
62 	debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
63 	debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
64 	debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
65 	debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
66 }
67 
68 static void iommu_debugfs_del(struct iommu_table *tbl)
69 {
70 	char name[10];
71 	struct dentry *liobn_entry;
72 
73 	sprintf(name, "%08lx", tbl->it_index);
74 	liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
75 	debugfs_remove(liobn_entry);
76 }
77 #else
78 static void iommu_debugfs_add(struct iommu_table *tbl){}
79 static void iommu_debugfs_del(struct iommu_table *tbl){}
80 #endif
81 
82 static int novmerge;
83 
84 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
85 
86 static int __init setup_iommu(char *str)
87 {
88 	if (!strcmp(str, "novmerge"))
89 		novmerge = 1;
90 	else if (!strcmp(str, "vmerge"))
91 		novmerge = 0;
92 	return 1;
93 }
94 
95 __setup("iommu=", setup_iommu);
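/*
 * "iommu=novmerge" on the kernel command line disables virtual merging of
 * scatterlist segments in ppc_iommu_map_sg(); "iommu=vmerge" (the default)
 * keeps it enabled.
 */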
96 
97 static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
98 
99 /*
100  * We precalculate the hash to avoid doing it on every allocation.
101  *
102  * The hash is important to spread CPUs across all the pools. For example,
103  * on a POWER7 with 4 way SMT we want interrupts on the primary threads,
104  * but a plain modulo with 4 pools would map them all to the same pool.
105  */
106 static int __init setup_iommu_pool_hash(void)
107 {
108 	unsigned int i;
109 
110 	for_each_possible_cpu(i)
111 		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
112 
113 	return 0;
114 }
115 subsys_initcall(setup_iommu_pool_hash);
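/*
 * Worked example of the comment above: with 4-way SMT the primary threads
 * are CPUs 0, 4, 8, ... A plain "cpu % 4" pool selection would put all of
 * them in pool 0, while hash_32(cpu, IOMMU_POOL_HASHBITS) spreads them
 * across the pools.
 */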
116 
117 #ifdef CONFIG_FAIL_IOMMU
118 
119 static DECLARE_FAULT_ATTR(fail_iommu);
120 
121 static int __init setup_fail_iommu(char *str)
122 {
123 	return setup_fault_attr(&fail_iommu, str);
124 }
125 __setup("fail_iommu=", setup_fail_iommu);
126 
127 static bool should_fail_iommu(struct device *dev)
128 {
129 	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
130 }
131 
132 static int __init fail_iommu_debugfs(void)
133 {
134 	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
135 						       NULL, &fail_iommu);
136 
137 	return PTR_ERR_OR_ZERO(dir);
138 }
139 late_initcall(fail_iommu_debugfs);
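/*
 * Fault injection uses the generic fault-attr machinery: as with other fault
 * attributes it can be configured at boot with
 * "fail_iommu=<interval>,<probability>,<space>,<times>" or at runtime under
 * the "fail_iommu" debugfs directory created above. A device additionally
 * needs its "fail_iommu" sysfs attribute (created by the bus notifier below)
 * set to 1 before should_fail_iommu() will trigger for it.
 */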
140 
141 static ssize_t fail_iommu_show(struct device *dev,
142 			       struct device_attribute *attr, char *buf)
143 {
144 	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
145 }
146 
147 static ssize_t fail_iommu_store(struct device *dev,
148 				struct device_attribute *attr, const char *buf,
149 				size_t count)
150 {
151 	int i;
152 
153 	if (count > 0 && sscanf(buf, "%d", &i) > 0)
154 		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
155 
156 	return count;
157 }
158 
159 static DEVICE_ATTR_RW(fail_iommu);
160 
161 static int fail_iommu_bus_notify(struct notifier_block *nb,
162 				 unsigned long action, void *data)
163 {
164 	struct device *dev = data;
165 
166 	if (action == BUS_NOTIFY_ADD_DEVICE) {
167 		if (device_create_file(dev, &dev_attr_fail_iommu))
168 			pr_warn("Unable to create IOMMU fault injection sysfs "
169 				"entries\n");
170 	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
171 		device_remove_file(dev, &dev_attr_fail_iommu);
172 	}
173 
174 	return 0;
175 }
176 
177 static struct notifier_block fail_iommu_bus_notifier = {
178 	.notifier_call = fail_iommu_bus_notify
179 };
180 
181 static int __init fail_iommu_setup(void)
182 {
183 #ifdef CONFIG_PCI
184 	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
185 #endif
186 #ifdef CONFIG_IBMVIO
187 	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
188 #endif
189 
190 	return 0;
191 }
192 /*
193  * Must execute after the PCI and VIO subsystems have initialised but before
194  * devices are probed.
195  */
196 arch_initcall(fail_iommu_setup);
197 #else
198 static inline bool should_fail_iommu(struct device *dev)
199 {
200 	return false;
201 }
202 #endif
203 
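/*
 * Allocate a run of @npages IOMMU pages from the table's bitmap.
 *
 * @handle, when non-NULL, carries an allocation hint across successive
 * scatterlist allocations. @mask is the largest allowed DMA address in
 * IOMMU-page units (callers pass the DMA mask shifted by it_page_shift),
 * and @align_order requests alignment of the returned entry.
 *
 * A per-CPU hash selects one of the pools; allocations of more than 15 pages
 * go to the large pool. Returns an index into the table's bitmap (the caller
 * adds it_offset to form the real TCE entry) or DMA_MAPPING_ERROR on failure.
 */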
204 static unsigned long iommu_range_alloc(struct device *dev,
205 				       struct iommu_table *tbl,
206                                        unsigned long npages,
207                                        unsigned long *handle,
208                                        unsigned long mask,
209                                        unsigned int align_order)
210 {
211 	unsigned long n, end, start;
212 	unsigned long limit;
213 	int largealloc = npages > 15;
214 	int pass = 0;
215 	unsigned long align_mask;
216 	unsigned long flags;
217 	unsigned int pool_nr;
218 	struct iommu_pool *pool;
219 
220 	align_mask = (1ull << align_order) - 1;
221 
222 	/* This allocator was derived from x86_64's bit string search */
223 
224 	/* Sanity check */
225 	if (unlikely(npages == 0)) {
226 		if (printk_ratelimit())
227 			WARN_ON(1);
228 		return DMA_MAPPING_ERROR;
229 	}
230 
231 	if (should_fail_iommu(dev))
232 		return DMA_MAPPING_ERROR;
233 
234 	/*
235 	 * We don't need to disable preemption here because any CPU can
236 	 * safely use any IOMMU pool.
237 	 */
238 	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
239 
240 	if (largealloc)
241 		pool = &(tbl->large_pool);
242 	else
243 		pool = &(tbl->pools[pool_nr]);
244 
245 	spin_lock_irqsave(&(pool->lock), flags);
246 
247 again:
248 	if ((pass == 0) && handle && *handle &&
249 	    (*handle >= pool->start) && (*handle < pool->end))
250 		start = *handle;
251 	else
252 		start = pool->hint;
253 
254 	limit = pool->end;
255 
256 	/* The case below can happen if we have a small segment appended
257 	 * to a large one, or when the previous alloc was at the very end of
258 	 * the available space. If so, go back to the initial start.
259 	 */
260 	if (start >= limit)
261 		start = pool->start;
262 
263 	if (limit + tbl->it_offset > mask) {
264 		limit = mask - tbl->it_offset + 1;
265 		/* If we're constrained on address range, first try
266 		 * at the masked hint to avoid O(n) search complexity,
267 		 * but on second pass, start at 0 in pool 0.
268 		 */
269 		if ((start & mask) >= limit || pass > 0) {
270 			spin_unlock(&(pool->lock));
271 			pool = &(tbl->pools[0]);
272 			spin_lock(&(pool->lock));
273 			start = pool->start;
274 		} else {
275 			start &= mask;
276 		}
277 	}
278 
279 	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
280 			dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
281 			align_mask);
282 	if (n == -1) {
283 		if (likely(pass == 0)) {
284 			/* First try the pool from the start */
285 			pool->hint = pool->start;
286 			pass++;
287 			goto again;
288 
289 		} else if (pass <= tbl->nr_pools) {
290 			/* Now try scanning all the other pools */
291 			spin_unlock(&(pool->lock));
292 			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
293 			pool = &tbl->pools[pool_nr];
294 			spin_lock(&(pool->lock));
295 			pool->hint = pool->start;
296 			pass++;
297 			goto again;
298 
299 		} else if (pass == tbl->nr_pools + 1) {
300 			/* Last resort: try largepool */
301 			spin_unlock(&pool->lock);
302 			pool = &tbl->large_pool;
303 			spin_lock(&pool->lock);
304 			pool->hint = pool->start;
305 			pass++;
306 			goto again;
307 
308 		} else {
309 			/* Give up */
310 			spin_unlock_irqrestore(&(pool->lock), flags);
311 			return DMA_MAPPING_ERROR;
312 		}
313 	}
314 
315 	end = n + npages;
316 
317 	/* Bump the hint to a new block for small allocs. */
318 	if (largealloc) {
319 		/* Don't bump to new block to avoid fragmentation */
320 		pool->hint = end;
321 	} else {
322 		/* Overflow will be taken care of at the next allocation */
323 		pool->hint = (end + tbl->it_blocksize - 1) &
324 		                ~(tbl->it_blocksize - 1);
325 	}
326 
327 	/* Update handle for SG allocations */
328 	if (handle)
329 		*handle = end;
330 
331 	spin_unlock_irqrestore(&(pool->lock), flags);
332 
333 	return n;
334 }
335 
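/*
 * Allocate @npages IOMMU pages and program the corresponding TCEs to point
 * at @page via it_ops->set(). Returns the DMA address of the first page, or
 * DMA_MAPPING_ERROR if either the bitmap allocation or the TCE update fails.
 */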
336 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
337 			      void *page, unsigned int npages,
338 			      enum dma_data_direction direction,
339 			      unsigned long mask, unsigned int align_order,
340 			      unsigned long attrs)
341 {
342 	unsigned long entry;
343 	dma_addr_t ret = DMA_MAPPING_ERROR;
344 	int build_fail;
345 
346 	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
347 
348 	if (unlikely(entry == DMA_MAPPING_ERROR))
349 		return DMA_MAPPING_ERROR;
350 
351 	entry += tbl->it_offset;	/* Offset into real TCE table */
352 	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
353 
354 	/* Put the TCEs in the HW table */
355 	build_fail = tbl->it_ops->set(tbl, entry, npages,
356 				      (unsigned long)page &
357 				      IOMMU_PAGE_MASK(tbl), direction, attrs);
358 
359 	/* tbl->it_ops->set() only returns non-zero for transient errors.
360 	 * Clean up the table bitmap in this case and return
361 	 * DMA_MAPPING_ERROR. For all other errors the functionality is
362 	 * not altered.
363 	 */
364 	if (unlikely(build_fail)) {
365 		__iommu_free(tbl, ret, npages);
366 		return DMA_MAPPING_ERROR;
367 	}
368 
369 	/* Flush/invalidate TLB caches if necessary */
370 	if (tbl->it_ops->flush)
371 		tbl->it_ops->flush(tbl);
372 
373 	/* Make sure updates are seen by hardware */
374 	mb();
375 
376 	return ret;
377 }
378 
379 static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
380 			     unsigned int npages)
381 {
382 	unsigned long entry, free_entry;
383 
384 	entry = dma_addr >> tbl->it_page_shift;
385 	free_entry = entry - tbl->it_offset;
386 
387 	if (((free_entry + npages) > tbl->it_size) ||
388 	    (entry < tbl->it_offset)) {
389 		if (printk_ratelimit()) {
390 			printk(KERN_INFO "iommu_free: invalid entry\n");
391 			printk(KERN_INFO "\tentry     = 0x%lx\n", entry);
392 			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
393 			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
394 			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
395 			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
396 			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
397 			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
398 			WARN_ON(1);
399 		}
400 
401 		return false;
402 	}
403 
404 	return true;
405 }
406 
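/*
 * Pool layout set up by iommu_init_table(): pools[i] covers entries
 * [i * poolsize, (i + 1) * poolsize) and the large pool covers everything
 * from nr_pools * poolsize up to it_size (roughly the top quarter of the
 * table).
 */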
407 static struct iommu_pool *get_pool(struct iommu_table *tbl,
408 				   unsigned long entry)
409 {
410 	struct iommu_pool *p;
411 	unsigned long largepool_start = tbl->large_pool.start;
412 
413 	/* The large pool is the last pool at the top of the table */
414 	if (entry >= largepool_start) {
415 		p = &tbl->large_pool;
416 	} else {
417 		unsigned int pool_nr = entry / tbl->poolsize;
418 
419 		BUG_ON(pool_nr > tbl->nr_pools);
420 		p = &tbl->pools[pool_nr];
421 	}
422 
423 	return p;
424 }
425 
426 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
427 			 unsigned int npages)
428 {
429 	unsigned long entry, free_entry;
430 	unsigned long flags;
431 	struct iommu_pool *pool;
432 
433 	entry = dma_addr >> tbl->it_page_shift;
434 	free_entry = entry - tbl->it_offset;
435 
436 	pool = get_pool(tbl, free_entry);
437 
438 	if (!iommu_free_check(tbl, dma_addr, npages))
439 		return;
440 
441 	tbl->it_ops->clear(tbl, entry, npages);
442 
443 	spin_lock_irqsave(&(pool->lock), flags);
444 	bitmap_clear(tbl->it_map, free_entry, npages);
445 	spin_unlock_irqrestore(&(pool->lock), flags);
446 }
447 
448 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
449 		unsigned int npages)
450 {
451 	__iommu_free(tbl, dma_addr, npages);
452 
453 	/* Make sure TLB cache is flushed if the HW needs it. We do
454 	 * not do an mb() here on purpose, it is not needed on any of
455 	 * the current platforms.
456 	 */
457 	if (tbl->it_ops->flush)
458 		tbl->it_ops->flush(tbl);
459 }
460 
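/*
 * Map a scatterlist for DMA. Adjacent segments whose IOMMU allocations turn
 * out to be contiguous are merged into a single DMA segment (unless
 * "iommu=novmerge" was given or dma_get_max_seg_size() would be exceeded).
 * Returns the number of resulting DMA segments, or a negative errno; on
 * failure all partially built entries are torn down.
 */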
461 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
462 		     struct scatterlist *sglist, int nelems,
463 		     unsigned long mask, enum dma_data_direction direction,
464 		     unsigned long attrs)
465 {
466 	dma_addr_t dma_next = 0, dma_addr;
467 	struct scatterlist *s, *outs, *segstart;
468 	int outcount, incount, i, build_fail = 0;
469 	unsigned int align;
470 	unsigned long handle;
471 	unsigned int max_seg_size;
472 
473 	BUG_ON(direction == DMA_NONE);
474 
475 	if ((nelems == 0) || !tbl)
476 		return -EINVAL;
477 
478 	outs = s = segstart = &sglist[0];
479 	outcount = 1;
480 	incount = nelems;
481 	handle = 0;
482 
483 	/* Init first segment length for backout at failure */
484 	outs->dma_length = 0;
485 
486 	DBG("sg mapping %d elements:\n", nelems);
487 
488 	max_seg_size = dma_get_max_seg_size(dev);
489 	for_each_sg(sglist, s, nelems, i) {
490 		unsigned long vaddr, npages, entry, slen;
491 
492 		slen = s->length;
493 		/* Sanity check */
494 		if (slen == 0) {
495 			dma_next = 0;
496 			continue;
497 		}
498 		/* Allocate iommu entries for that segment */
499 		vaddr = (unsigned long) sg_virt(s);
500 		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
501 		align = 0;
502 		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
503 		    (vaddr & ~PAGE_MASK) == 0)
504 			align = PAGE_SHIFT - tbl->it_page_shift;
505 		entry = iommu_range_alloc(dev, tbl, npages, &handle,
506 					  mask >> tbl->it_page_shift, align);
507 
508 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
509 
510 		/* Handle failure */
511 		if (unlikely(entry == DMA_MAPPING_ERROR)) {
512 			if (!(attrs & DMA_ATTR_NO_WARN) &&
513 			    printk_ratelimit())
514 				dev_info(dev, "iommu_alloc failed, tbl %p "
515 					 "vaddr %lx npages %lu\n", tbl, vaddr,
516 					 npages);
517 			goto failure;
518 		}
519 
520 		/* Convert entry to a dma_addr_t */
521 		entry += tbl->it_offset;
522 		dma_addr = entry << tbl->it_page_shift;
523 		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
524 
525 		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
526 			    npages, entry, dma_addr);
527 
528 		/* Insert into HW table */
529 		build_fail = tbl->it_ops->set(tbl, entry, npages,
530 					      vaddr & IOMMU_PAGE_MASK(tbl),
531 					      direction, attrs);
532 		if (unlikely(build_fail))
533 			goto failure;
534 
535 		/* If we are in an open segment, try merging */
536 		if (segstart != s) {
537 			DBG("  - trying merge...\n");
538 			/* We cannot merge if:
539 			 * - allocated dma_addr isn't contiguous to previous allocation
540 			 */
541 			if (novmerge || (dma_addr != dma_next) ||
542 			    (outs->dma_length + s->length > max_seg_size)) {
543 				/* Can't merge: create a new segment */
544 				segstart = s;
545 				outcount++;
546 				outs = sg_next(outs);
547 				DBG("    can't merge, new segment.\n");
548 			} else {
549 				outs->dma_length += s->length;
550 				DBG("    merged, new len: %u\n", outs->dma_length);
551 			}
552 		}
553 
554 		if (segstart == s) {
555 			/* This is a new segment, fill entries */
556 			DBG("  - filling new segment.\n");
557 			outs->dma_address = dma_addr;
558 			outs->dma_length = slen;
559 		}
560 
561 		/* Calculate next page pointer for contiguous check */
562 		dma_next = dma_addr + slen;
563 
564 		DBG("  - dma next is: %lx\n", dma_next);
565 	}
566 
567 	/* Flush/invalidate TLB caches if necessary */
568 	if (tbl->it_ops->flush)
569 		tbl->it_ops->flush(tbl);
570 
571 	DBG("mapped %d elements:\n", outcount);
572 
573 	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
574 	 * next entry of the sglist if we didn't fill the list completely
575 	 */
576 	if (outcount < incount) {
577 		outs = sg_next(outs);
578 		outs->dma_length = 0;
579 	}
580 
581 	/* Make sure updates are seen by hardware */
582 	mb();
583 
584 	return outcount;
585 
586  failure:
587 	for_each_sg(sglist, s, nelems, i) {
588 		if (s->dma_length != 0) {
589 			unsigned long vaddr, npages;
590 
591 			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
592 			npages = iommu_num_pages(s->dma_address, s->dma_length,
593 						 IOMMU_PAGE_SIZE(tbl));
594 			__iommu_free(tbl, vaddr, npages);
595 			s->dma_length = 0;
596 		}
597 		if (s == outs)
598 			break;
599 	}
600 	return -EIO;
601 }
602 
603 
604 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
605 			int nelems, enum dma_data_direction direction,
606 			unsigned long attrs)
607 {
608 	struct scatterlist *sg;
609 
610 	BUG_ON(direction == DMA_NONE);
611 
612 	if (!tbl)
613 		return;
614 
615 	sg = sglist;
616 	while (nelems--) {
617 		unsigned int npages;
618 		dma_addr_t dma_handle = sg->dma_address;
619 
620 		if (sg->dma_length == 0)
621 			break;
622 		npages = iommu_num_pages(dma_handle, sg->dma_length,
623 					 IOMMU_PAGE_SIZE(tbl));
624 		__iommu_free(tbl, dma_handle, npages);
625 		sg = sg_next(sg);
626 	}
627 
628 	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
629 	 * do not do an mb() here, the affected platforms do not need it
630 	 * when freeing.
631 	 */
632 	if (tbl->it_ops->flush)
633 		tbl->it_ops->flush(tbl);
634 }
635 
636 static void iommu_table_clear(struct iommu_table *tbl)
637 {
638 	/*
639 	 * In case of firmware-assisted dump, the system goes through a clean
640 	 * reboot at the time of the crash, hence it's safe to clear the TCE
641 	 * entries if firmware-assisted dump is active.
642 	 */
643 	if (!is_kdump_kernel() || is_fadump_active()) {
644 		/* Clear the table in case firmware left allocations in it */
645 		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
646 		return;
647 	}
648 
649 #ifdef CONFIG_CRASH_DUMP
650 	if (tbl->it_ops->get) {
651 		unsigned long index, tceval, tcecount = 0;
652 
653 		/* Reserve the existing mappings left by the first kernel. */
654 		for (index = 0; index < tbl->it_size; index++) {
655 			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
656 			/*
657 			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
658 			 */
659 			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
660 				__set_bit(index, tbl->it_map);
661 				tcecount++;
662 			}
663 		}
664 
665 		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
666 			printk(KERN_WARNING "TCE table is full; freeing %d entries for the kdump boot\n",
667 				KDUMP_MIN_TCE_ENTRIES);
669 			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
670 				index < tbl->it_size; index++)
671 				__clear_bit(index, tbl->it_map);
672 		}
673 	}
674 #endif
675 }
676 
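/*
 * Mark the range res_start..res_end (absolute TCE entry numbers, i.e.
 * including it_offset) as reserved in the bitmap, along with page 0 when the
 * table starts at offset 0, so the allocator never hands these entries out.
 */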
677 static void iommu_table_reserve_pages(struct iommu_table *tbl,
678 		unsigned long res_start, unsigned long res_end)
679 {
680 	int i;
681 
682 	WARN_ON_ONCE(res_end < res_start);
683 	/*
684 	 * Reserve page 0 so it will not be used for any mappings.
685 	 * This prevents buggy drivers that consider page 0 to be invalid
686 	 * from crashing the machine or even losing data.
687 	 */
688 	if (tbl->it_offset == 0)
689 		set_bit(0, tbl->it_map);
690 
691 	tbl->it_reserved_start = res_start;
692 	tbl->it_reserved_end = res_end;
693 
694 	/* Bail if the non-empty range res_start..res_end is entirely outside the table */
695 	if (res_start && res_end &&
696 			(tbl->it_offset + tbl->it_size < res_start ||
697 			 res_end < tbl->it_offset))
698 		return;
699 
700 	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
701 		set_bit(i - tbl->it_offset, tbl->it_map);
702 }
703 
704 static void iommu_table_release_pages(struct iommu_table *tbl)
705 {
706 	int i;
707 
708 	/*
709 	 * In case we have reserved the first bit, we should not emit
710 	 * the warning below.
711 	 */
712 	if (tbl->it_offset == 0)
713 		clear_bit(0, tbl->it_map);
714 
715 	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
716 		clear_bit(i - tbl->it_offset, tbl->it_map);
717 }
718 
719 /*
720  * Build an iommu_table structure.  This contains a bitmap which
721  * is used to manage allocation of the TCE space.
722  */
723 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
724 		unsigned long res_start, unsigned long res_end)
725 {
726 	unsigned long sz;
727 	static int welcomed = 0;
728 	unsigned int i;
729 	struct iommu_pool *p;
730 
731 	BUG_ON(!tbl->it_ops);
732 
733 	/* number of bytes needed for the bitmap */
734 	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
735 
736 	tbl->it_map = vzalloc_node(sz, nid);
737 	if (!tbl->it_map) {
738 		pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
739 		return NULL;
740 	}
741 
742 	iommu_table_reserve_pages(tbl, res_start, res_end);
743 
744 	/* We only split the IOMMU table if we have 1GB or more of space */
745 	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
746 		tbl->nr_pools = IOMMU_NR_POOLS;
747 	else
748 		tbl->nr_pools = 1;
749 
750 	/* We reserve the top 1/4 of the table for large allocations */
751 	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
752 
753 	for (i = 0; i < tbl->nr_pools; i++) {
754 		p = &tbl->pools[i];
755 		spin_lock_init(&(p->lock));
756 		p->start = tbl->poolsize * i;
757 		p->hint = p->start;
758 		p->end = p->start + tbl->poolsize;
759 	}
760 
761 	p = &tbl->large_pool;
762 	spin_lock_init(&(p->lock));
763 	p->start = tbl->poolsize * i;
764 	p->hint = p->start;
765 	p->end = tbl->it_size;
766 
767 	iommu_table_clear(tbl);
768 
769 	if (!welcomed) {
770 		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
771 		       novmerge ? "disabled" : "enabled");
772 		welcomed = 1;
773 	}
774 
775 	iommu_debugfs_add(tbl);
776 
777 	return tbl;
778 }
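/*
 * A rough, hypothetical sketch of how platform code typically uses this:
 * the caller fills in the geometry and callbacks of a zeroed iommu_table and
 * then hands it to iommu_init_table(). The field names are from struct
 * iommu_table; the local variables and ops struct below are made up for
 * illustration and the exact setup varies per platform:
 *
 *	tbl->it_busno      = busno;
 *	tbl->it_base       = (unsigned long)tce_table_virt;
 *	tbl->it_index      = liobn;
 *	tbl->it_offset     = dma_window_start >> shift;
 *	tbl->it_size       = dma_window_size >> shift;
 *	tbl->it_page_shift = shift;
 *	tbl->it_ops        = &my_platform_tce_ops;	// hypothetical ops
 *	if (!iommu_init_table(tbl, nid, 0, 0))
 *		goto fail;				// bitmap alloc failed
 */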
779 
780 static void iommu_table_free(struct kref *kref)
781 {
782 	struct iommu_table *tbl;
783 
784 	tbl = container_of(kref, struct iommu_table, it_kref);
785 
786 	if (tbl->it_ops->free)
787 		tbl->it_ops->free(tbl);
788 
789 	if (!tbl->it_map) {
790 		kfree(tbl);
791 		return;
792 	}
793 
794 	iommu_debugfs_del(tbl);
795 
796 	iommu_table_release_pages(tbl);
797 
798 	/* verify that table contains no entries */
799 	if (!bitmap_empty(tbl->it_map, tbl->it_size))
800 		pr_warn("%s: Unexpected TCEs\n", __func__);
801 
802 	/* free bitmap */
803 	vfree(tbl->it_map);
804 
805 	/* free table */
806 	kfree(tbl);
807 }
808 
809 struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
810 {
811 	if (kref_get_unless_zero(&tbl->it_kref))
812 		return tbl;
813 
814 	return NULL;
815 }
816 EXPORT_SYMBOL_GPL(iommu_tce_table_get);
817 
818 int iommu_tce_table_put(struct iommu_table *tbl)
819 {
820 	if (WARN_ON(!tbl))
821 		return 0;
822 
823 	return kref_put(&tbl->it_kref, iommu_table_free);
824 }
825 EXPORT_SYMBOL_GPL(iommu_tce_table_put);
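/*
 * Tables are reference counted through it_kref: iommu_tce_table_get() only
 * succeeds while the count is non-zero, and the final iommu_tce_table_put()
 * releases the table via iommu_table_free() above.
 */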
826 
827 /* Creates TCEs for a user provided buffer.  The user buffer must be
828  * contiguous real kernel storage (not vmalloc).  The address passed here
829  * comprises a page address and offset into that page. The dma_addr_t
830  * returned will point to the same byte within the page as was passed in.
831  */
832 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
833 			  struct page *page, unsigned long offset, size_t size,
834 			  unsigned long mask, enum dma_data_direction direction,
835 			  unsigned long attrs)
836 {
837 	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
838 	void *vaddr;
839 	unsigned long uaddr;
840 	unsigned int npages, align;
841 
842 	BUG_ON(direction == DMA_NONE);
843 
844 	vaddr = page_address(page) + offset;
845 	uaddr = (unsigned long)vaddr;
846 
847 	if (tbl) {
848 		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
849 		align = 0;
850 		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
851 		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
852 			align = PAGE_SHIFT - tbl->it_page_shift;
853 
854 		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
855 					 mask >> tbl->it_page_shift, align,
856 					 attrs);
857 		if (dma_handle == DMA_MAPPING_ERROR) {
858 			if (!(attrs & DMA_ATTR_NO_WARN) &&
859 			    printk_ratelimit())  {
860 				dev_info(dev, "iommu_alloc failed, tbl %p "
861 					 "vaddr %p npages %d\n", tbl, vaddr,
862 					 npages);
863 			}
864 		} else
865 			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
866 	}
867 
868 	return dma_handle;
869 }
870 
871 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
872 		      size_t size, enum dma_data_direction direction,
873 		      unsigned long attrs)
874 {
875 	unsigned int npages;
876 
877 	BUG_ON(direction == DMA_NONE);
878 
879 	if (tbl) {
880 		npages = iommu_num_pages(dma_handle, size,
881 					 IOMMU_PAGE_SIZE(tbl));
882 		iommu_free(tbl, dma_handle, npages);
883 	}
884 }
885 
886 /* Allocates a contiguous real buffer and creates mappings over it.
887  * Returns the virtual address of the buffer and sets dma_handle
888  * to the dma address (mapping) of the first page.
889  */
890 void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
891 			   size_t size,	dma_addr_t *dma_handle,
892 			   unsigned long mask, gfp_t flag, int node)
893 {
894 	void *ret = NULL;
895 	dma_addr_t mapping;
896 	unsigned int order;
897 	unsigned int nio_pages, io_order;
898 	struct page *page;
899 
900 	size = PAGE_ALIGN(size);
901 	order = get_order(size);
902 
903  	/*
904 	 * Client asked for way too much space.  This is checked later
905 	 * anyway.  It is easier to debug here for the drivers than in
906 	 * the tce tables.
907 	 */
908 	if (order >= IOMAP_MAX_ORDER) {
909 		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
910 			 size);
911 		return NULL;
912 	}
913 
914 	if (!tbl)
915 		return NULL;
916 
917 	/* Alloc enough pages (and possibly more) */
918 	page = alloc_pages_node(node, flag, order);
919 	if (!page)
920 		return NULL;
921 	ret = page_address(page);
922 	memset(ret, 0, size);
923 
924 	/* Set up tces to cover the allocated range */
925 	nio_pages = size >> tbl->it_page_shift;
926 	io_order = get_iommu_order(size, tbl);
927 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
928 			      mask >> tbl->it_page_shift, io_order, 0);
929 	if (mapping == DMA_MAPPING_ERROR) {
930 		free_pages((unsigned long)ret, order);
931 		return NULL;
932 	}
933 	*dma_handle = mapping;
934 	return ret;
935 }
936 
937 void iommu_free_coherent(struct iommu_table *tbl, size_t size,
938 			 void *vaddr, dma_addr_t dma_handle)
939 {
940 	if (tbl) {
941 		unsigned int nio_pages;
942 
943 		size = PAGE_ALIGN(size);
944 		nio_pages = size >> tbl->it_page_shift;
945 		iommu_free(tbl, dma_handle, nio_pages);
947 		free_pages((unsigned long)vaddr, get_order(size));
948 	}
949 }
950 
951 unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
952 {
953 	switch (dir) {
954 	case DMA_BIDIRECTIONAL:
955 		return TCE_PCI_READ | TCE_PCI_WRITE;
956 	case DMA_FROM_DEVICE:
957 		return TCE_PCI_WRITE;
958 	case DMA_TO_DEVICE:
959 		return TCE_PCI_READ;
960 	default:
961 		return 0;
962 	}
963 }
964 EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
965 
966 #ifdef CONFIG_IOMMU_API
967 /*
968  * SPAPR TCE API
969  */
970 static void group_release(void *iommu_data)
971 {
972 	struct iommu_table_group *table_group = iommu_data;
973 
974 	table_group->group = NULL;
975 }
976 
977 void iommu_register_group(struct iommu_table_group *table_group,
978 		int pci_domain_number, unsigned long pe_num)
979 {
980 	struct iommu_group *grp;
981 	char *name;
982 
983 	grp = iommu_group_alloc();
984 	if (IS_ERR(grp)) {
985 		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
986 				PTR_ERR(grp));
987 		return;
988 	}
989 	table_group->group = grp;
990 	iommu_group_set_iommudata(grp, table_group, group_release);
991 	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
992 			pci_domain_number, pe_num);
993 	if (!name)
994 		return;
995 	iommu_group_set_name(grp, name);
996 	kfree(name);
997 }
998 
999 enum dma_data_direction iommu_tce_direction(unsigned long tce)
1000 {
1001 	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
1002 		return DMA_BIDIRECTIONAL;
1003 	else if (tce & TCE_PCI_READ)
1004 		return DMA_TO_DEVICE;
1005 	else if (tce & TCE_PCI_WRITE)
1006 		return DMA_FROM_DEVICE;
1007 	else
1008 		return DMA_NONE;
1009 }
1010 EXPORT_SYMBOL_GPL(iommu_tce_direction);
1011 
1012 void iommu_flush_tce(struct iommu_table *tbl)
1013 {
1014 	/* Flush/invalidate TLB caches if necessary */
1015 	if (tbl->it_ops->flush)
1016 		tbl->it_ops->flush(tbl);
1017 
1018 	/* Make sure updates are seen by hardware */
1019 	mb();
1020 }
1021 EXPORT_SYMBOL_GPL(iommu_flush_tce);
1022 
1023 int iommu_tce_check_ioba(unsigned long page_shift,
1024 		unsigned long offset, unsigned long size,
1025 		unsigned long ioba, unsigned long npages)
1026 {
1027 	unsigned long mask = (1UL << page_shift) - 1;
1028 
1029 	if (ioba & mask)
1030 		return -EINVAL;
1031 
1032 	ioba >>= page_shift;
1033 	if (ioba < offset)
1034 		return -EINVAL;
1035 
1036 	if ((ioba + 1) > (offset + size))
1037 		return -EINVAL;
1038 
1039 	return 0;
1040 }
1041 EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
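/*
 * Example of the check above: for a window with page_shift = 16 (64K pages),
 * offset = 0 and size = 0x8000 entries, an ioba must be 64K aligned and
 * (ioba >> 16) must fall inside [0, 0x8000), so 0x10000 passes while 0x10200
 * (misaligned) or 0x80000000 (past the window) are rejected with -EINVAL.
 * Note that @npages is not used by the current check.
 */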
1042 
1043 int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
1044 {
1045 	unsigned long mask = (1UL << page_shift) - 1;
1046 
1047 	if (gpa & mask)
1048 		return -EINVAL;
1049 
1050 	return 0;
1051 }
1052 EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
1053 
1054 long iommu_tce_xchg_no_kill(struct mm_struct *mm,
1055 		struct iommu_table *tbl,
1056 		unsigned long entry, unsigned long *hpa,
1057 		enum dma_data_direction *direction)
1058 {
1059 	long ret;
1060 	unsigned long size = 0;
1061 
1062 	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
1063 	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
1064 			(*direction == DMA_BIDIRECTIONAL)) &&
1065 			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
1066 					&size))
1067 		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
1068 
1069 	return ret;
1070 }
1071 EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
1072 
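/*
 * iommu_tce_xchg_no_kill() above updates a TCE without invalidating the TCE
 * cache (the final argument to xchg_no_kill() is false); callers are
 * expected to batch their updates and then invalidate the affected range
 * once with iommu_tce_kill() below.
 */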
1073 void iommu_tce_kill(struct iommu_table *tbl,
1074 		unsigned long entry, unsigned long pages)
1075 {
1076 	if (tbl->it_ops->tce_kill)
1077 		tbl->it_ops->tce_kill(tbl, entry, pages, false);
1078 }
1079 EXPORT_SYMBOL_GPL(iommu_tce_kill);
1080 
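/*
 * Taking ownership (e.g. for VFIO) marks the whole bitmap as used so the
 * kernel's own DMA API allocator cannot hand out entries while user space
 * controls the table; it fails with -EBUSY if any mappings already exist.
 * iommu_release_ownership() clears the bitmap again and re-reserves page 0
 * and the reserved range.
 */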
1081 int iommu_take_ownership(struct iommu_table *tbl)
1082 {
1083 	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1084 	int ret = 0;
1085 
1086 	/*
1087 	 * VFIO does not control TCE entry allocation and the guest
1088 	 * can write new TCEs on top of existing ones, so iommu_tce_build()
1089 	 * must be able to release the old pages. This requires the
1090 	 * xchg_no_kill() callback to be defined, so if it is not
1091 	 * implemented we disallow taking ownership of the table.
1092 	 */
1093 	if (!tbl->it_ops->xchg_no_kill)
1094 		return -EINVAL;
1095 
1096 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1097 	for (i = 0; i < tbl->nr_pools; i++)
1098 		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1099 
1100 	iommu_table_release_pages(tbl);
1101 
1102 	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
1103 		pr_err("iommu_tce: it_map is not empty\n");
1104 		ret = -EBUSY;
1105 		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
1106 		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1107 				tbl->it_reserved_end);
1108 	} else {
1109 		memset(tbl->it_map, 0xff, sz);
1110 	}
1111 
1112 	for (i = 0; i < tbl->nr_pools; i++)
1113 		spin_unlock(&tbl->pools[i].lock);
1114 	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1115 
1116 	return ret;
1117 }
1118 EXPORT_SYMBOL_GPL(iommu_take_ownership);
1119 
1120 void iommu_release_ownership(struct iommu_table *tbl)
1121 {
1122 	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1123 
1124 	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1125 	for (i = 0; i < tbl->nr_pools; i++)
1126 		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1127 
1128 	memset(tbl->it_map, 0, sz);
1129 
1130 	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1131 			tbl->it_reserved_end);
1132 
1133 	for (i = 0; i < tbl->nr_pools; i++)
1134 		spin_unlock(&tbl->pools[i].lock);
1135 	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1136 }
1137 EXPORT_SYMBOL_GPL(iommu_release_ownership);
1138 
1139 int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
1140 {
1141 	/*
1142 	 * The sysfs entries should be populated before
1143 	 * binding the IOMMU group. If the sysfs entries aren't
1144 	 * ready, we simply bail.
1145 	 */
1146 	if (!device_is_registered(dev))
1147 		return -ENOENT;
1148 
1149 	if (device_iommu_mapped(dev)) {
1150 		pr_debug("%s: Skipping device %s with iommu group %d\n",
1151 			 __func__, dev_name(dev),
1152 			 iommu_group_id(dev->iommu_group));
1153 		return -EBUSY;
1154 	}
1155 
1156 	pr_debug("%s: Adding %s to iommu group %d\n",
1157 		 __func__, dev_name(dev),  iommu_group_id(table_group->group));
1158 
1159 	return iommu_group_add_device(table_group->group, dev);
1160 }
1161 EXPORT_SYMBOL_GPL(iommu_add_device);
1162 
1163 void iommu_del_device(struct device *dev)
1164 {
1165 	/*
1166 	 * Some devices might not have an IOMMU table and group,
1167 	 * so we need not detach them from the associated
1168 	 * IOMMU groups.
1169 	 */
1170 	if (!device_iommu_mapped(dev)) {
1171 		pr_debug("iommu_tce: skipping device %s with no tbl\n",
1172 			 dev_name(dev));
1173 		return;
1174 	}
1175 
1176 	iommu_group_remove_device(dev);
1177 }
1178 EXPORT_SYMBOL_GPL(iommu_del_device);
1179 #endif /* CONFIG_IOMMU_API */
1180