xref: /linux/drivers/iommu/dma-iommu.c (revision 726e2d0cf2bbc14e3bf38491cddda1a56fe18663)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * A fairly generic DMA-API to IOMMU-API glue layer.
4  *
5  * Copyright (C) 2014-2015 ARM Ltd.
6  *
7  * based in part on arch/arm/mm/dma-mapping.c:
8  * Copyright (C) 2000-2004 Russell King
9  */
10 
11 #include <linux/acpi_iort.h>
12 #include <linux/atomic.h>
13 #include <linux/crash_dump.h>
14 #include <linux/device.h>
15 #include <linux/dma-direct.h>
16 #include <linux/dma-map-ops.h>
17 #include <linux/gfp.h>
18 #include <linux/huge_mm.h>
19 #include <linux/iommu.h>
20 #include <linux/iommu-dma.h>
21 #include <linux/iova.h>
22 #include <linux/irq.h>
23 #include <linux/list_sort.h>
24 #include <linux/memremap.h>
25 #include <linux/mm.h>
26 #include <linux/mutex.h>
27 #include <linux/of_iommu.h>
28 #include <linux/pci.h>
29 #include <linux/scatterlist.h>
30 #include <linux/spinlock.h>
31 #include <linux/swiotlb.h>
32 #include <linux/vmalloc.h>
33 #include <trace/events/swiotlb.h>
34 
35 #include "dma-iommu.h"
36 #include "iommu-pages.h"
37 
38 struct iommu_dma_msi_page {
39 	struct list_head	list;
40 	dma_addr_t		iova;
41 	phys_addr_t		phys;
42 };
43 
44 enum iommu_dma_cookie_type {
45 	IOMMU_DMA_IOVA_COOKIE,
46 	IOMMU_DMA_MSI_COOKIE,
47 };
48 
49 enum iommu_dma_queue_type {
50 	IOMMU_DMA_OPTS_PER_CPU_QUEUE,
51 	IOMMU_DMA_OPTS_SINGLE_QUEUE,
52 };
53 
54 struct iommu_dma_options {
55 	enum iommu_dma_queue_type qt;
56 	size_t		fq_size;
57 	unsigned int	fq_timeout;
58 };
59 
60 struct iommu_dma_cookie {
61 	enum iommu_dma_cookie_type	type;
62 	union {
63 		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
64 		struct {
65 			struct iova_domain	iovad;
66 			/* Flush queue */
67 			union {
68 				struct iova_fq	*single_fq;
69 				struct iova_fq	__percpu *percpu_fq;
70 			};
71 			/* Number of TLB flushes that have been started */
72 			atomic64_t		fq_flush_start_cnt;
73 			/* Number of TLB flushes that have been finished */
74 			atomic64_t		fq_flush_finish_cnt;
75 			/* Timer to regularily empty the flush queues */
76 			struct timer_list	fq_timer;
77 			/* 1 when timer is active, 0 when not */
78 			atomic_t		fq_timer_on;
79 		};
80 		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
81 		dma_addr_t		msi_iova;
82 	};
83 	struct list_head		msi_page_list;
84 
85 	/* Domain for flush queue callback; NULL if flush queue not in use */
86 	struct iommu_domain		*fq_domain;
87 	/* Options for dma-iommu use */
88 	struct iommu_dma_options	options;
89 	struct mutex			mutex;
90 };
91 
92 static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
93 bool iommu_dma_forcedac __read_mostly;
94 
95 static int __init iommu_dma_forcedac_setup(char *str)
96 {
97 	int ret = kstrtobool(str, &iommu_dma_forcedac);
98 
99 	if (!ret && iommu_dma_forcedac)
100 		pr_info("Forcing DAC for PCI devices\n");
101 	return ret;
102 }
103 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
104 
105 /* Number of entries per flush queue */
106 #define IOVA_DEFAULT_FQ_SIZE	256
107 #define IOVA_SINGLE_FQ_SIZE	32768
108 
109 /* Timeout (in ms) after which entries are flushed from the queue */
110 #define IOVA_DEFAULT_FQ_TIMEOUT	10
111 #define IOVA_SINGLE_FQ_TIMEOUT	1000
112 
113 /* Flush queue entry for deferred flushing */
114 struct iova_fq_entry {
115 	unsigned long iova_pfn;
116 	unsigned long pages;
117 	struct list_head freelist;
118 	u64 counter; /* Flush counter when this entry was added */
119 };
120 
121 /* Per-CPU flush queue structure */
122 struct iova_fq {
123 	spinlock_t lock;
124 	unsigned int head, tail;
125 	unsigned int mod_mask;
126 	struct iova_fq_entry entries[];
127 };
128 
129 #define fq_ring_for_each(i, fq) \
130 	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
131 
132 static inline bool fq_full(struct iova_fq *fq)
133 {
134 	assert_spin_locked(&fq->lock);
135 	return (((fq->tail + 1) & fq->mod_mask) == fq->head);
136 }
137 
138 static inline unsigned int fq_ring_add(struct iova_fq *fq)
139 {
140 	unsigned int idx = fq->tail;
141 
142 	assert_spin_locked(&fq->lock);
143 
144 	fq->tail = (idx + 1) & fq->mod_mask;
145 
146 	return idx;
147 }
148 
149 static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
150 {
151 	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
152 	unsigned int idx;
153 
154 	assert_spin_locked(&fq->lock);
155 
156 	fq_ring_for_each(idx, fq) {
157 
158 		if (fq->entries[idx].counter >= counter)
159 			break;
160 
161 		iommu_put_pages_list(&fq->entries[idx].freelist);
162 		free_iova_fast(&cookie->iovad,
163 			       fq->entries[idx].iova_pfn,
164 			       fq->entries[idx].pages);
165 
166 		fq->head = (fq->head + 1) & fq->mod_mask;
167 	}
168 }
169 
170 static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
171 {
172 	unsigned long flags;
173 
174 	spin_lock_irqsave(&fq->lock, flags);
175 	fq_ring_free_locked(cookie, fq);
176 	spin_unlock_irqrestore(&fq->lock, flags);
177 }
178 
179 static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
180 {
181 	atomic64_inc(&cookie->fq_flush_start_cnt);
182 	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
183 	atomic64_inc(&cookie->fq_flush_finish_cnt);
184 }
185 
186 static void fq_flush_timeout(struct timer_list *t)
187 {
188 	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
189 	int cpu;
190 
191 	atomic_set(&cookie->fq_timer_on, 0);
192 	fq_flush_iotlb(cookie);
193 
194 	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) {
195 		fq_ring_free(cookie, cookie->single_fq);
196 	} else {
197 		for_each_possible_cpu(cpu)
198 			fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu));
199 	}
200 }
201 
202 static void queue_iova(struct iommu_dma_cookie *cookie,
203 		unsigned long pfn, unsigned long pages,
204 		struct list_head *freelist)
205 {
206 	struct iova_fq *fq;
207 	unsigned long flags;
208 	unsigned int idx;
209 
210 	/*
211 	 * Order against the IOMMU driver's pagetable update from unmapping
212 	 * @pte, to guarantee that fq_flush_iotlb() observes that if called
213 	 * from a different CPU before we release the lock below. Full barrier
214 	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
215 	 * written fq state here.
216 	 */
217 	smp_mb();
218 
219 	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
220 		fq = cookie->single_fq;
221 	else
222 		fq = raw_cpu_ptr(cookie->percpu_fq);
223 
224 	spin_lock_irqsave(&fq->lock, flags);
225 
226 	/*
227 	 * First remove all entries from the flush queue that have already been
228 	 * flushed out on another CPU. This makes the fq_full() check below less
229 	 * likely to be true.
230 	 */
231 	fq_ring_free_locked(cookie, fq);
232 
233 	if (fq_full(fq)) {
234 		fq_flush_iotlb(cookie);
235 		fq_ring_free_locked(cookie, fq);
236 	}
237 
238 	idx = fq_ring_add(fq);
239 
240 	fq->entries[idx].iova_pfn = pfn;
241 	fq->entries[idx].pages    = pages;
242 	fq->entries[idx].counter  = atomic64_read(&cookie->fq_flush_start_cnt);
243 	list_splice(freelist, &fq->entries[idx].freelist);
244 
245 	spin_unlock_irqrestore(&fq->lock, flags);
246 
247 	/* Avoid false sharing as much as possible. */
248 	if (!atomic_read(&cookie->fq_timer_on) &&
249 	    !atomic_xchg(&cookie->fq_timer_on, 1))
250 		mod_timer(&cookie->fq_timer,
251 			  jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
252 }
253 
254 static void iommu_dma_free_fq_single(struct iova_fq *fq)
255 {
256 	int idx;
257 
258 	fq_ring_for_each(idx, fq)
259 		iommu_put_pages_list(&fq->entries[idx].freelist);
260 	vfree(fq);
261 }
262 
263 static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
264 {
265 	int cpu, idx;
266 
267 	/* The IOVAs will be torn down separately, so just free our queued pages */
268 	for_each_possible_cpu(cpu) {
269 		struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);
270 
271 		fq_ring_for_each(idx, fq)
272 			iommu_put_pages_list(&fq->entries[idx].freelist);
273 	}
274 
275 	free_percpu(percpu_fq);
276 }
277 
278 static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
279 {
280 	if (!cookie->fq_domain)
281 		return;
282 
283 	del_timer_sync(&cookie->fq_timer);
284 	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
285 		iommu_dma_free_fq_single(cookie->single_fq);
286 	else
287 		iommu_dma_free_fq_percpu(cookie->percpu_fq);
288 }
289 
290 static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
291 {
292 	int i;
293 
294 	fq->head = 0;
295 	fq->tail = 0;
296 	fq->mod_mask = fq_size - 1;
297 
298 	spin_lock_init(&fq->lock);
299 
300 	for (i = 0; i < fq_size; i++)
301 		INIT_LIST_HEAD(&fq->entries[i].freelist);
302 }
303 
304 static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
305 {
306 	size_t fq_size = cookie->options.fq_size;
307 	struct iova_fq *queue;
308 
309 	queue = vmalloc(struct_size(queue, entries, fq_size));
310 	if (!queue)
311 		return -ENOMEM;
312 	iommu_dma_init_one_fq(queue, fq_size);
313 	cookie->single_fq = queue;
314 
315 	return 0;
316 }
317 
318 static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
319 {
320 	size_t fq_size = cookie->options.fq_size;
321 	struct iova_fq __percpu *queue;
322 	int cpu;
323 
324 	queue = __alloc_percpu(struct_size(queue, entries, fq_size),
325 			       __alignof__(*queue));
326 	if (!queue)
327 		return -ENOMEM;
328 
329 	for_each_possible_cpu(cpu)
330 		iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu), fq_size);
331 	cookie->percpu_fq = queue;
332 	return 0;
333 }
334 
335 /* sysfs updates are serialised by the mutex of the group owning @domain */
336 int iommu_dma_init_fq(struct iommu_domain *domain)
337 {
338 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
339 	int rc;
340 
341 	if (cookie->fq_domain)
342 		return 0;
343 
344 	atomic64_set(&cookie->fq_flush_start_cnt,  0);
345 	atomic64_set(&cookie->fq_flush_finish_cnt, 0);
346 
347 	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
348 		rc = iommu_dma_init_fq_single(cookie);
349 	else
350 		rc = iommu_dma_init_fq_percpu(cookie);
351 
352 	if (rc) {
353 		pr_warn("iova flush queue initialization failed\n");
354 		return -ENOMEM;
355 	}
356 
357 	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
358 	atomic_set(&cookie->fq_timer_on, 0);
359 	/*
360 	 * Prevent incomplete fq state being observable. Pairs with path from
361 	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
362 	 */
363 	smp_wmb();
364 	WRITE_ONCE(cookie->fq_domain, domain);
365 	return 0;
366 }
367 
368 static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
369 {
370 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
371 		return cookie->iovad.granule;
372 	return PAGE_SIZE;
373 }
374 
375 static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
376 {
377 	struct iommu_dma_cookie *cookie;
378 
379 	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
380 	if (cookie) {
381 		INIT_LIST_HEAD(&cookie->msi_page_list);
382 		cookie->type = type;
383 	}
384 	return cookie;
385 }
386 
387 /**
388  * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
389  * @domain: IOMMU domain to prepare for DMA-API usage
390  */
391 int iommu_get_dma_cookie(struct iommu_domain *domain)
392 {
393 	if (domain->iova_cookie)
394 		return -EEXIST;
395 
396 	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
397 	if (!domain->iova_cookie)
398 		return -ENOMEM;
399 
400 	mutex_init(&domain->iova_cookie->mutex);
401 	return 0;
402 }
403 
404 /**
405  * iommu_get_msi_cookie - Acquire just MSI remapping resources
406  * @domain: IOMMU domain to prepare
407  * @base: Start address of IOVA region for MSI mappings
408  *
409  * Users who manage their own IOVA allocation and do not want DMA API support,
410  * but would still like to take advantage of automatic MSI remapping, can use
411  * this to initialise their own domain appropriately. Users should reserve a
412  * contiguous IOVA region, starting at @base, large enough to accommodate the
413  * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
414  * used by the devices attached to @domain.
415  */
416 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
417 {
418 	struct iommu_dma_cookie *cookie;
419 
420 	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
421 		return -EINVAL;
422 
423 	if (domain->iova_cookie)
424 		return -EEXIST;
425 
426 	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
427 	if (!cookie)
428 		return -ENOMEM;
429 
430 	cookie->msi_iova = base;
431 	domain->iova_cookie = cookie;
432 	return 0;
433 }
434 EXPORT_SYMBOL(iommu_get_msi_cookie);
435 
436 /**
437  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
438  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
439  *          iommu_get_msi_cookie()
440  */
441 void iommu_put_dma_cookie(struct iommu_domain *domain)
442 {
443 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
444 	struct iommu_dma_msi_page *msi, *tmp;
445 
446 	if (!cookie)
447 		return;
448 
449 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
450 		iommu_dma_free_fq(cookie);
451 		put_iova_domain(&cookie->iovad);
452 	}
453 
454 	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
455 		list_del(&msi->list);
456 		kfree(msi);
457 	}
458 	kfree(cookie);
459 	domain->iova_cookie = NULL;
460 }
461 
462 /**
463  * iommu_dma_get_resv_regions - Reserved region driver helper
464  * @dev: Device from iommu_get_resv_regions()
465  * @list: Reserved region list from iommu_get_resv_regions()
466  *
467  * IOMMU drivers can use this to implement their .get_resv_regions callback
468  * for general non-IOMMU-specific reservations. Currently, this covers GICv3
469  * ITS region reservation on ACPI based ARM platforms that may require HW MSI
470  * reservation.
471  */
472 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
473 {
474 
475 	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
476 		iort_iommu_get_resv_regions(dev, list);
477 
478 	if (dev->of_node)
479 		of_iommu_get_resv_regions(dev, list);
480 }
481 EXPORT_SYMBOL(iommu_dma_get_resv_regions);
482 
483 static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
484 		phys_addr_t start, phys_addr_t end)
485 {
486 	struct iova_domain *iovad = &cookie->iovad;
487 	struct iommu_dma_msi_page *msi_page;
488 	int i, num_pages;
489 
490 	start -= iova_offset(iovad, start);
491 	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
492 
493 	for (i = 0; i < num_pages; i++) {
494 		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
495 		if (!msi_page)
496 			return -ENOMEM;
497 
498 		msi_page->phys = start;
499 		msi_page->iova = start;
500 		INIT_LIST_HEAD(&msi_page->list);
501 		list_add(&msi_page->list, &cookie->msi_page_list);
502 		start += iovad->granule;
503 	}
504 
505 	return 0;
506 }
507 
508 static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
509 		const struct list_head *b)
510 {
511 	struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
512 	struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);
513 
514 	return res_a->res->start > res_b->res->start;
515 }
516 
517 static int iova_reserve_pci_windows(struct pci_dev *dev,
518 		struct iova_domain *iovad)
519 {
520 	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
521 	struct resource_entry *window;
522 	unsigned long lo, hi;
523 	phys_addr_t start = 0, end;
524 
525 	resource_list_for_each_entry(window, &bridge->windows) {
526 		if (resource_type(window->res) != IORESOURCE_MEM)
527 			continue;
528 
529 		lo = iova_pfn(iovad, window->res->start - window->offset);
530 		hi = iova_pfn(iovad, window->res->end - window->offset);
531 		reserve_iova(iovad, lo, hi);
532 	}
533 
534 	/* Get reserved DMA windows from host bridge */
535 	list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
536 	resource_list_for_each_entry(window, &bridge->dma_ranges) {
537 		end = window->res->start - window->offset;
538 resv_iova:
539 		if (end > start) {
540 			lo = iova_pfn(iovad, start);
541 			hi = iova_pfn(iovad, end);
542 			reserve_iova(iovad, lo, hi);
543 		} else if (end < start) {
544 			/* DMA ranges should be non-overlapping */
545 			dev_err(&dev->dev,
546 				"Failed to reserve IOVA [%pa-%pa]\n",
547 				&start, &end);
548 			return -EINVAL;
549 		}
550 
551 		start = window->res->end - window->offset + 1;
552 		/* If window is last entry */
553 		if (window->node.next == &bridge->dma_ranges &&
554 		    end != ~(phys_addr_t)0) {
555 			end = ~(phys_addr_t)0;
556 			goto resv_iova;
557 		}
558 	}
559 
560 	return 0;
561 }
562 
563 static int iova_reserve_iommu_regions(struct device *dev,
564 		struct iommu_domain *domain)
565 {
566 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
567 	struct iova_domain *iovad = &cookie->iovad;
568 	struct iommu_resv_region *region;
569 	LIST_HEAD(resv_regions);
570 	int ret = 0;
571 
572 	if (dev_is_pci(dev)) {
573 		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
574 		if (ret)
575 			return ret;
576 	}
577 
578 	iommu_get_resv_regions(dev, &resv_regions);
579 	list_for_each_entry(region, &resv_regions, list) {
580 		unsigned long lo, hi;
581 
582 		/* We ARE the software that manages these! */
583 		if (region->type == IOMMU_RESV_SW_MSI)
584 			continue;
585 
586 		lo = iova_pfn(iovad, region->start);
587 		hi = iova_pfn(iovad, region->start + region->length - 1);
588 		reserve_iova(iovad, lo, hi);
589 
590 		if (region->type == IOMMU_RESV_MSI)
591 			ret = cookie_init_hw_msi_region(cookie, region->start,
592 					region->start + region->length);
593 		if (ret)
594 			break;
595 	}
596 	iommu_put_resv_regions(dev, &resv_regions);
597 
598 	return ret;
599 }
600 
601 static bool dev_is_untrusted(struct device *dev)
602 {
603 	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
604 }
605 
606 static bool dev_use_swiotlb(struct device *dev, size_t size,
607 			    enum dma_data_direction dir)
608 {
609 	return IS_ENABLED(CONFIG_SWIOTLB) &&
610 		(dev_is_untrusted(dev) ||
611 		 dma_kmalloc_needs_bounce(dev, size, dir));
612 }
613 
614 static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
615 			       int nents, enum dma_data_direction dir)
616 {
617 	struct scatterlist *s;
618 	int i;
619 
620 	if (!IS_ENABLED(CONFIG_SWIOTLB))
621 		return false;
622 
623 	if (dev_is_untrusted(dev))
624 		return true;
625 
626 	/*
627 	 * If kmalloc() buffers are not DMA-safe for this device and
628 	 * direction, check the individual lengths in the sg list. If any
629 	 * element is deemed unsafe, use the swiotlb for bouncing.
630 	 */
631 	if (!dma_kmalloc_safe(dev, dir)) {
632 		for_each_sg(sg, s, nents, i)
633 			if (!dma_kmalloc_size_aligned(s->length))
634 				return true;
635 	}
636 
637 	return false;
638 }
639 
640 /**
641  * iommu_dma_init_options - Initialize dma-iommu options
642  * @options: The options to be initialized
643  * @dev: Device the options are set for
644  *
645  * This allows tuning dma-iommu specific to device properties
646  */
647 static void iommu_dma_init_options(struct iommu_dma_options *options,
648 				   struct device *dev)
649 {
650 	/* Shadowing IOTLB flushes do better with a single large queue */
651 	if (dev->iommu->shadow_on_flush) {
652 		options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
653 		options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
654 		options->fq_size = IOVA_SINGLE_FQ_SIZE;
655 	} else {
656 		options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
657 		options->fq_size = IOVA_DEFAULT_FQ_SIZE;
658 		options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
659 	}
660 }
661 
662 /**
663  * iommu_dma_init_domain - Initialise a DMA mapping domain
664  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
665  * @dev: Device the domain is being initialised for
666  *
667  * If the geometry and dma_range_map include address 0, we reserve that page
668  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
669  * any change which could make prior IOVAs invalid will fail.
670  */
671 static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev)
672 {
673 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
674 	const struct bus_dma_region *map = dev->dma_range_map;
675 	unsigned long order, base_pfn;
676 	struct iova_domain *iovad;
677 	int ret;
678 
679 	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
680 		return -EINVAL;
681 
682 	iovad = &cookie->iovad;
683 
684 	/* Use the smallest supported page size for IOVA granularity */
685 	order = __ffs(domain->pgsize_bitmap);
686 	base_pfn = 1;
687 
688 	/* Check the domain allows at least some access to the device... */
689 	if (map) {
690 		if (dma_range_map_min(map) > domain->geometry.aperture_end ||
691 		    dma_range_map_max(map) < domain->geometry.aperture_start) {
692 			pr_warn("specified DMA range outside IOMMU capability\n");
693 			return -EFAULT;
694 		}
695 	}
696 	/* ...then finally give it a kicking to make sure it fits */
697 	base_pfn = max_t(unsigned long, base_pfn,
698 			 domain->geometry.aperture_start >> order);
699 
700 	/* start_pfn is always nonzero for an already-initialised domain */
701 	mutex_lock(&cookie->mutex);
702 	if (iovad->start_pfn) {
703 		if (1UL << order != iovad->granule ||
704 		    base_pfn != iovad->start_pfn) {
705 			pr_warn("Incompatible range for DMA domain\n");
706 			ret = -EFAULT;
707 			goto done_unlock;
708 		}
709 
710 		ret = 0;
711 		goto done_unlock;
712 	}
713 
714 	init_iova_domain(iovad, 1UL << order, base_pfn);
715 	ret = iova_domain_init_rcaches(iovad);
716 	if (ret)
717 		goto done_unlock;
718 
719 	iommu_dma_init_options(&cookie->options, dev);
720 
721 	/* If the FQ fails we can simply fall back to strict mode */
722 	if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
723 	    (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
724 		domain->type = IOMMU_DOMAIN_DMA;
725 
726 	ret = iova_reserve_iommu_regions(dev, domain);
727 
728 done_unlock:
729 	mutex_unlock(&cookie->mutex);
730 	return ret;
731 }
732 
733 /**
734  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
735  *                    page flags.
736  * @dir: Direction of DMA transfer
737  * @coherent: Is the DMA master cache-coherent?
738  * @attrs: DMA attributes for the mapping
739  *
740  * Return: corresponding IOMMU API page protection flags
741  */
742 static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
743 		     unsigned long attrs)
744 {
745 	int prot = coherent ? IOMMU_CACHE : 0;
746 
747 	if (attrs & DMA_ATTR_PRIVILEGED)
748 		prot |= IOMMU_PRIV;
749 
750 	switch (dir) {
751 	case DMA_BIDIRECTIONAL:
752 		return prot | IOMMU_READ | IOMMU_WRITE;
753 	case DMA_TO_DEVICE:
754 		return prot | IOMMU_READ;
755 	case DMA_FROM_DEVICE:
756 		return prot | IOMMU_WRITE;
757 	default:
758 		return 0;
759 	}
760 }
761 
762 static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
763 		size_t size, u64 dma_limit, struct device *dev)
764 {
765 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
766 	struct iova_domain *iovad = &cookie->iovad;
767 	unsigned long shift, iova_len, iova;
768 
769 	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
770 		cookie->msi_iova += size;
771 		return cookie->msi_iova - size;
772 	}
773 
774 	shift = iova_shift(iovad);
775 	iova_len = size >> shift;
776 
777 	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
778 
779 	if (domain->geometry.force_aperture)
780 		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
781 
782 	/*
783 	 * Try to use all the 32-bit PCI addresses first. The original SAC vs.
784 	 * DAC reasoning loses relevance with PCIe, but enough hardware and
785 	 * firmware bugs are still lurking out there that it's safest not to
786 	 * venture into the 64-bit space until necessary.
787 	 *
788 	 * If your device goes wrong after seeing the notice then likely either
789 	 * its driver is not setting DMA masks accurately, the hardware has
790 	 * some inherent bug in handling >32-bit addresses, or not all the
791 	 * expected address bits are wired up between the device and the IOMMU.
792 	 */
793 	if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
794 		iova = alloc_iova_fast(iovad, iova_len,
795 				       DMA_BIT_MASK(32) >> shift, false);
796 		if (iova)
797 			goto done;
798 
799 		dev->iommu->pci_32bit_workaround = false;
800 		dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit));
801 	}
802 
803 	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
804 done:
805 	return (dma_addr_t)iova << shift;
806 }
807 
808 static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
809 		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
810 {
811 	struct iova_domain *iovad = &cookie->iovad;
812 
813 	/* The MSI case is only ever cleaning up its most recent allocation */
814 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
815 		cookie->msi_iova -= size;
816 	else if (gather && gather->queued)
817 		queue_iova(cookie, iova_pfn(iovad, iova),
818 				size >> iova_shift(iovad),
819 				&gather->freelist);
820 	else
821 		free_iova_fast(iovad, iova_pfn(iovad, iova),
822 				size >> iova_shift(iovad));
823 }
824 
825 static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
826 		size_t size)
827 {
828 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
829 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
830 	struct iova_domain *iovad = &cookie->iovad;
831 	size_t iova_off = iova_offset(iovad, dma_addr);
832 	struct iommu_iotlb_gather iotlb_gather;
833 	size_t unmapped;
834 
835 	dma_addr -= iova_off;
836 	size = iova_align(iovad, size + iova_off);
837 	iommu_iotlb_gather_init(&iotlb_gather);
838 	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
839 
840 	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
841 	WARN_ON(unmapped != size);
842 
843 	if (!iotlb_gather.queued)
844 		iommu_iotlb_sync(domain, &iotlb_gather);
845 	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
846 }
847 
848 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
849 		size_t size, int prot, u64 dma_mask)
850 {
851 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
852 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
853 	struct iova_domain *iovad = &cookie->iovad;
854 	size_t iova_off = iova_offset(iovad, phys);
855 	dma_addr_t iova;
856 
857 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
858 	    iommu_deferred_attach(dev, domain))
859 		return DMA_MAPPING_ERROR;
860 
861 	/* If anyone ever wants this we'd need support in the IOVA allocator */
862 	if (dev_WARN_ONCE(dev, dma_get_min_align_mask(dev) > iova_mask(iovad),
863 	    "Unsupported alignment constraint\n"))
864 		return DMA_MAPPING_ERROR;
865 
866 	size = iova_align(iovad, size + iova_off);
867 
868 	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
869 	if (!iova)
870 		return DMA_MAPPING_ERROR;
871 
872 	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
873 		iommu_dma_free_iova(cookie, iova, size, NULL);
874 		return DMA_MAPPING_ERROR;
875 	}
876 	return iova + iova_off;
877 }
878 
879 static void __iommu_dma_free_pages(struct page **pages, int count)
880 {
881 	while (count--)
882 		__free_page(pages[count]);
883 	kvfree(pages);
884 }
885 
886 static struct page **__iommu_dma_alloc_pages(struct device *dev,
887 		unsigned int count, unsigned long order_mask, gfp_t gfp)
888 {
889 	struct page **pages;
890 	unsigned int i = 0, nid = dev_to_node(dev);
891 
892 	order_mask &= GENMASK(MAX_PAGE_ORDER, 0);
893 	if (!order_mask)
894 		return NULL;
895 
896 	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
897 	if (!pages)
898 		return NULL;
899 
900 	/* IOMMU can map any pages, so himem can also be used here */
901 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
902 
903 	while (count) {
904 		struct page *page = NULL;
905 		unsigned int order_size;
906 
907 		/*
908 		 * Higher-order allocations are a convenience rather
909 		 * than a necessity, hence using __GFP_NORETRY until
910 		 * falling back to minimum-order allocations.
911 		 */
912 		for (order_mask &= GENMASK(__fls(count), 0);
913 		     order_mask; order_mask &= ~order_size) {
914 			unsigned int order = __fls(order_mask);
915 			gfp_t alloc_flags = gfp;
916 
917 			order_size = 1U << order;
918 			if (order_mask > order_size)
919 				alloc_flags |= __GFP_NORETRY;
920 			page = alloc_pages_node(nid, alloc_flags, order);
921 			if (!page)
922 				continue;
923 			if (order)
924 				split_page(page, order);
925 			break;
926 		}
927 		if (!page) {
928 			__iommu_dma_free_pages(pages, i);
929 			return NULL;
930 		}
931 		count -= order_size;
932 		while (order_size--)
933 			pages[i++] = page++;
934 	}
935 	return pages;
936 }
937 
938 /*
939  * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
940  * but an IOMMU which supports smaller pages might not map the whole thing.
941  */
942 static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
943 		size_t size, struct sg_table *sgt, gfp_t gfp, unsigned long attrs)
944 {
945 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
946 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
947 	struct iova_domain *iovad = &cookie->iovad;
948 	bool coherent = dev_is_dma_coherent(dev);
949 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
950 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
951 	struct page **pages;
952 	dma_addr_t iova;
953 	ssize_t ret;
954 
955 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
956 	    iommu_deferred_attach(dev, domain))
957 		return NULL;
958 
959 	min_size = alloc_sizes & -alloc_sizes;
960 	if (min_size < PAGE_SIZE) {
961 		min_size = PAGE_SIZE;
962 		alloc_sizes |= PAGE_SIZE;
963 	} else {
964 		size = ALIGN(size, min_size);
965 	}
966 	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
967 		alloc_sizes = min_size;
968 
969 	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
970 	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
971 					gfp);
972 	if (!pages)
973 		return NULL;
974 
975 	size = iova_align(iovad, size);
976 	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
977 	if (!iova)
978 		goto out_free_pages;
979 
980 	/*
981 	 * Remove the zone/policy flags from the GFP - these are applied to the
982 	 * __iommu_dma_alloc_pages() but are not used for the supporting
983 	 * internal allocations that follow.
984 	 */
985 	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);
986 
987 	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp))
988 		goto out_free_iova;
989 
990 	if (!(ioprot & IOMMU_CACHE)) {
991 		struct scatterlist *sg;
992 		int i;
993 
994 		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
995 			arch_dma_prep_coherent(sg_page(sg), sg->length);
996 	}
997 
998 	ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
999 			   gfp);
1000 	if (ret < 0 || ret < size)
1001 		goto out_free_sg;
1002 
1003 	sgt->sgl->dma_address = iova;
1004 	sgt->sgl->dma_length = size;
1005 	return pages;
1006 
1007 out_free_sg:
1008 	sg_free_table(sgt);
1009 out_free_iova:
1010 	iommu_dma_free_iova(cookie, iova, size, NULL);
1011 out_free_pages:
1012 	__iommu_dma_free_pages(pages, count);
1013 	return NULL;
1014 }
1015 
1016 static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
1017 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
1018 {
1019 	struct page **pages;
1020 	struct sg_table sgt;
1021 	void *vaddr;
1022 	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
1023 
1024 	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, attrs);
1025 	if (!pages)
1026 		return NULL;
1027 	*dma_handle = sgt.sgl->dma_address;
1028 	sg_free_table(&sgt);
1029 	vaddr = dma_common_pages_remap(pages, size, prot,
1030 			__builtin_return_address(0));
1031 	if (!vaddr)
1032 		goto out_unmap;
1033 	return vaddr;
1034 
1035 out_unmap:
1036 	__iommu_dma_unmap(dev, *dma_handle, size);
1037 	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
1038 	return NULL;
1039 }
1040 
1041 struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
1042 	       enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
1043 {
1044 	struct dma_sgt_handle *sh;
1045 
1046 	sh = kmalloc(sizeof(*sh), gfp);
1047 	if (!sh)
1048 		return NULL;
1049 
1050 	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp, attrs);
1051 	if (!sh->pages) {
1052 		kfree(sh);
1053 		return NULL;
1054 	}
1055 	return &sh->sgt;
1056 }
1057 
1058 void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
1059 		struct sg_table *sgt, enum dma_data_direction dir)
1060 {
1061 	struct dma_sgt_handle *sh = sgt_handle(sgt);
1062 
1063 	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
1064 	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
1065 	sg_free_table(&sh->sgt);
1066 	kfree(sh);
1067 }
1068 
1069 void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1070 		size_t size, enum dma_data_direction dir)
1071 {
1072 	phys_addr_t phys;
1073 
1074 	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
1075 		return;
1076 
1077 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
1078 	if (!dev_is_dma_coherent(dev))
1079 		arch_sync_dma_for_cpu(phys, size, dir);
1080 
1081 	swiotlb_sync_single_for_cpu(dev, phys, size, dir);
1082 }
1083 
1084 void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
1085 		size_t size, enum dma_data_direction dir)
1086 {
1087 	phys_addr_t phys;
1088 
1089 	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
1090 		return;
1091 
1092 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
1093 	swiotlb_sync_single_for_device(dev, phys, size, dir);
1094 
1095 	if (!dev_is_dma_coherent(dev))
1096 		arch_sync_dma_for_device(phys, size, dir);
1097 }
1098 
1099 void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
1100 		int nelems, enum dma_data_direction dir)
1101 {
1102 	struct scatterlist *sg;
1103 	int i;
1104 
1105 	if (sg_dma_is_swiotlb(sgl))
1106 		for_each_sg(sgl, sg, nelems, i)
1107 			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
1108 						      sg->length, dir);
1109 	else if (!dev_is_dma_coherent(dev))
1110 		for_each_sg(sgl, sg, nelems, i)
1111 			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
1112 }
1113 
1114 void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
1115 		int nelems, enum dma_data_direction dir)
1116 {
1117 	struct scatterlist *sg;
1118 	int i;
1119 
1120 	if (sg_dma_is_swiotlb(sgl))
1121 		for_each_sg(sgl, sg, nelems, i)
1122 			iommu_dma_sync_single_for_device(dev,
1123 							 sg_dma_address(sg),
1124 							 sg->length, dir);
1125 	else if (!dev_is_dma_coherent(dev))
1126 		for_each_sg(sgl, sg, nelems, i)
1127 			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
1128 }
1129 
1130 dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
1131 	      unsigned long offset, size_t size, enum dma_data_direction dir,
1132 	      unsigned long attrs)
1133 {
1134 	phys_addr_t phys = page_to_phys(page) + offset;
1135 	bool coherent = dev_is_dma_coherent(dev);
1136 	int prot = dma_info_to_prot(dir, coherent, attrs);
1137 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1138 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
1139 	struct iova_domain *iovad = &cookie->iovad;
1140 	dma_addr_t iova, dma_mask = dma_get_mask(dev);
1141 
1142 	/*
1143 	 * If both the physical buffer start address and size are
1144 	 * page aligned, we don't need to use a bounce page.
1145 	 */
1146 	if (dev_use_swiotlb(dev, size, dir) &&
1147 	    iova_offset(iovad, phys | size)) {
1148 		if (!is_swiotlb_active(dev)) {
1149 			dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
1150 			return DMA_MAPPING_ERROR;
1151 		}
1152 
1153 		trace_swiotlb_bounced(dev, phys, size);
1154 
1155 		phys = swiotlb_tbl_map_single(dev, phys, size,
1156 					      iova_mask(iovad), dir, attrs);
1157 
1158 		if (phys == DMA_MAPPING_ERROR)
1159 			return DMA_MAPPING_ERROR;
1160 
1161 		/*
1162 		 * Untrusted devices should not see padding areas with random
1163 		 * leftover kernel data, so zero the pre- and post-padding.
1164 		 * swiotlb_tbl_map_single() has initialized the bounce buffer
1165 		 * proper to the contents of the original memory buffer.
1166 		 */
1167 		if (dev_is_untrusted(dev)) {
1168 			size_t start, virt = (size_t)phys_to_virt(phys);
1169 
1170 			/* Pre-padding */
1171 			start = iova_align_down(iovad, virt);
1172 			memset((void *)start, 0, virt - start);
1173 
1174 			/* Post-padding */
1175 			start = virt + size;
1176 			memset((void *)start, 0,
1177 			       iova_align(iovad, start) - start);
1178 		}
1179 	}
1180 
1181 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1182 		arch_sync_dma_for_device(phys, size, dir);
1183 
1184 	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
1185 	if (iova == DMA_MAPPING_ERROR)
1186 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
1187 	return iova;
1188 }
1189 
1190 void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
1191 		size_t size, enum dma_data_direction dir, unsigned long attrs)
1192 {
1193 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1194 	phys_addr_t phys;
1195 
1196 	phys = iommu_iova_to_phys(domain, dma_handle);
1197 	if (WARN_ON(!phys))
1198 		return;
1199 
1200 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
1201 		arch_sync_dma_for_cpu(phys, size, dir);
1202 
1203 	__iommu_dma_unmap(dev, dma_handle, size);
1204 
1205 	swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
1206 }
1207 
1208 /*
1209  * Prepare a successfully-mapped scatterlist to give back to the caller.
1210  *
1211  * At this point the segments are already laid out by iommu_dma_map_sg() to
1212  * avoid individually crossing any boundaries, so we merely need to check a
1213  * segment's start address to avoid concatenating across one.
1214  */
1215 static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
1216 		dma_addr_t dma_addr)
1217 {
1218 	struct scatterlist *s, *cur = sg;
1219 	unsigned long seg_mask = dma_get_seg_boundary(dev);
1220 	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
1221 	int i, count = 0;
1222 
1223 	for_each_sg(sg, s, nents, i) {
1224 		/* Restore this segment's original unaligned fields first */
1225 		dma_addr_t s_dma_addr = sg_dma_address(s);
1226 		unsigned int s_iova_off = sg_dma_address(s);
1227 		unsigned int s_length = sg_dma_len(s);
1228 		unsigned int s_iova_len = s->length;
1229 
1230 		sg_dma_address(s) = DMA_MAPPING_ERROR;
1231 		sg_dma_len(s) = 0;
1232 
1233 		if (sg_dma_is_bus_address(s)) {
1234 			if (i > 0)
1235 				cur = sg_next(cur);
1236 
1237 			sg_dma_unmark_bus_address(s);
1238 			sg_dma_address(cur) = s_dma_addr;
1239 			sg_dma_len(cur) = s_length;
1240 			sg_dma_mark_bus_address(cur);
1241 			count++;
1242 			cur_len = 0;
1243 			continue;
1244 		}
1245 
1246 		s->offset += s_iova_off;
1247 		s->length = s_length;
1248 
1249 		/*
1250 		 * Now fill in the real DMA data. If...
1251 		 * - there is a valid output segment to append to
1252 		 * - and this segment starts on an IOVA page boundary
1253 		 * - but doesn't fall at a segment boundary
1254 		 * - and wouldn't make the resulting output segment too long
1255 		 */
1256 		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
1257 		    (max_len - cur_len >= s_length)) {
1258 			/* ...then concatenate it with the previous one */
1259 			cur_len += s_length;
1260 		} else {
1261 			/* Otherwise start the next output segment */
1262 			if (i > 0)
1263 				cur = sg_next(cur);
1264 			cur_len = s_length;
1265 			count++;
1266 
1267 			sg_dma_address(cur) = dma_addr + s_iova_off;
1268 		}
1269 
1270 		sg_dma_len(cur) = cur_len;
1271 		dma_addr += s_iova_len;
1272 
1273 		if (s_length + s_iova_off < s_iova_len)
1274 			cur_len = 0;
1275 	}
1276 	return count;
1277 }
1278 
1279 /*
1280  * If mapping failed, then just restore the original list,
1281  * but making sure the DMA fields are invalidated.
1282  */
1283 static void __invalidate_sg(struct scatterlist *sg, int nents)
1284 {
1285 	struct scatterlist *s;
1286 	int i;
1287 
1288 	for_each_sg(sg, s, nents, i) {
1289 		if (sg_dma_is_bus_address(s)) {
1290 			sg_dma_unmark_bus_address(s);
1291 		} else {
1292 			if (sg_dma_address(s) != DMA_MAPPING_ERROR)
1293 				s->offset += sg_dma_address(s);
1294 			if (sg_dma_len(s))
1295 				s->length = sg_dma_len(s);
1296 		}
1297 		sg_dma_address(s) = DMA_MAPPING_ERROR;
1298 		sg_dma_len(s) = 0;
1299 	}
1300 }
1301 
1302 static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
1303 		int nents, enum dma_data_direction dir, unsigned long attrs)
1304 {
1305 	struct scatterlist *s;
1306 	int i;
1307 
1308 	for_each_sg(sg, s, nents, i)
1309 		iommu_dma_unmap_page(dev, sg_dma_address(s),
1310 				sg_dma_len(s), dir, attrs);
1311 }
1312 
1313 static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
1314 		int nents, enum dma_data_direction dir, unsigned long attrs)
1315 {
1316 	struct scatterlist *s;
1317 	int i;
1318 
1319 	sg_dma_mark_swiotlb(sg);
1320 
1321 	for_each_sg(sg, s, nents, i) {
1322 		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
1323 				s->offset, s->length, dir, attrs);
1324 		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
1325 			goto out_unmap;
1326 		sg_dma_len(s) = s->length;
1327 	}
1328 
1329 	return nents;
1330 
1331 out_unmap:
1332 	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
1333 	return -EIO;
1334 }
1335 
1336 /*
1337  * The DMA API client is passing in a scatterlist which could describe
1338  * any old buffer layout, but the IOMMU API requires everything to be
1339  * aligned to IOMMU pages. Hence the need for this complicated bit of
1340  * impedance-matching, to be able to hand off a suitably-aligned list,
1341  * but still preserve the original offsets and sizes for the caller.
1342  */
1343 int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1344 		enum dma_data_direction dir, unsigned long attrs)
1345 {
1346 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1347 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
1348 	struct iova_domain *iovad = &cookie->iovad;
1349 	struct scatterlist *s, *prev = NULL;
1350 	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
1351 	struct pci_p2pdma_map_state p2pdma_state = {};
1352 	enum pci_p2pdma_map_type map;
1353 	dma_addr_t iova;
1354 	size_t iova_len = 0;
1355 	unsigned long mask = dma_get_seg_boundary(dev);
1356 	ssize_t ret;
1357 	int i;
1358 
1359 	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
1360 		ret = iommu_deferred_attach(dev, domain);
1361 		if (ret)
1362 			goto out;
1363 	}
1364 
1365 	if (dev_use_sg_swiotlb(dev, sg, nents, dir))
1366 		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
1367 
1368 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1369 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
1370 
1371 	/*
1372 	 * Work out how much IOVA space we need, and align the segments to
1373 	 * IOVA granules for the IOMMU driver to handle. With some clever
1374 	 * trickery we can modify the list in-place, but reversibly, by
1375 	 * stashing the unaligned parts in the as-yet-unused DMA fields.
1376 	 */
1377 	for_each_sg(sg, s, nents, i) {
1378 		size_t s_iova_off = iova_offset(iovad, s->offset);
1379 		size_t s_length = s->length;
1380 		size_t pad_len = (mask - iova_len + 1) & mask;
1381 
1382 		if (is_pci_p2pdma_page(sg_page(s))) {
1383 			map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
1384 			switch (map) {
1385 			case PCI_P2PDMA_MAP_BUS_ADDR:
1386 				/*
1387 				 * iommu_map_sg() will skip this segment as
1388 				 * it is marked as a bus address,
1389 				 * __finalise_sg() will copy the dma address
1390 				 * into the output segment.
1391 				 */
1392 				continue;
1393 			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
1394 				/*
1395 				 * Mapping through host bridge should be
1396 				 * mapped with regular IOVAs, thus we
1397 				 * do nothing here and continue below.
1398 				 */
1399 				break;
1400 			default:
1401 				ret = -EREMOTEIO;
1402 				goto out_restore_sg;
1403 			}
1404 		}
1405 
1406 		sg_dma_address(s) = s_iova_off;
1407 		sg_dma_len(s) = s_length;
1408 		s->offset -= s_iova_off;
1409 		s_length = iova_align(iovad, s_length + s_iova_off);
1410 		s->length = s_length;
1411 
1412 		/*
1413 		 * Due to the alignment of our single IOVA allocation, we can
1414 		 * depend on these assumptions about the segment boundary mask:
1415 		 * - If mask size >= IOVA size, then the IOVA range cannot
1416 		 *   possibly fall across a boundary, so we don't care.
1417 		 * - If mask size < IOVA size, then the IOVA range must start
1418 		 *   exactly on a boundary, therefore we can lay things out
1419 		 *   based purely on segment lengths without needing to know
1420 		 *   the actual addresses beforehand.
1421 		 * - The mask must be a power of 2, so pad_len == 0 if
1422 		 *   iova_len == 0, thus we cannot dereference prev the first
1423 		 *   time through here (i.e. before it has a meaningful value).
1424 		 */
1425 		if (pad_len && pad_len < s_length - 1) {
1426 			prev->length += pad_len;
1427 			iova_len += pad_len;
1428 		}
1429 
1430 		iova_len += s_length;
1431 		prev = s;
1432 	}
1433 
1434 	if (!iova_len)
1435 		return __finalise_sg(dev, sg, nents, 0);
1436 
1437 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
1438 	if (!iova) {
1439 		ret = -ENOMEM;
1440 		goto out_restore_sg;
1441 	}
1442 
1443 	/*
1444 	 * We'll leave any physical concatenation to the IOMMU driver's
1445 	 * implementation - it knows better than we do.
1446 	 */
1447 	ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
1448 	if (ret < 0 || ret < iova_len)
1449 		goto out_free_iova;
1450 
1451 	return __finalise_sg(dev, sg, nents, iova);
1452 
1453 out_free_iova:
1454 	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
1455 out_restore_sg:
1456 	__invalidate_sg(sg, nents);
1457 out:
1458 	if (ret != -ENOMEM && ret != -EREMOTEIO)
1459 		return -EINVAL;
1460 	return ret;
1461 }
1462 
1463 void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1464 		enum dma_data_direction dir, unsigned long attrs)
1465 {
1466 	dma_addr_t end = 0, start;
1467 	struct scatterlist *tmp;
1468 	int i;
1469 
1470 	if (sg_dma_is_swiotlb(sg)) {
1471 		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
1472 		return;
1473 	}
1474 
1475 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1476 		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
1477 
1478 	/*
1479 	 * The scatterlist segments are mapped into a single
1480 	 * contiguous IOVA allocation, the start and end points
1481 	 * just have to be determined.
1482 	 */
1483 	for_each_sg(sg, tmp, nents, i) {
1484 		if (sg_dma_is_bus_address(tmp)) {
1485 			sg_dma_unmark_bus_address(tmp);
1486 			continue;
1487 		}
1488 
1489 		if (sg_dma_len(tmp) == 0)
1490 			break;
1491 
1492 		start = sg_dma_address(tmp);
1493 		break;
1494 	}
1495 
1496 	nents -= i;
1497 	for_each_sg(tmp, tmp, nents, i) {
1498 		if (sg_dma_is_bus_address(tmp)) {
1499 			sg_dma_unmark_bus_address(tmp);
1500 			continue;
1501 		}
1502 
1503 		if (sg_dma_len(tmp) == 0)
1504 			break;
1505 
1506 		end = sg_dma_address(tmp) + sg_dma_len(tmp);
1507 	}
1508 
1509 	if (end)
1510 		__iommu_dma_unmap(dev, start, end - start);
1511 }
1512 
1513 dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
1514 		size_t size, enum dma_data_direction dir, unsigned long attrs)
1515 {
1516 	return __iommu_dma_map(dev, phys, size,
1517 			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
1518 			dma_get_mask(dev));
1519 }
1520 
1521 void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
1522 		size_t size, enum dma_data_direction dir, unsigned long attrs)
1523 {
1524 	__iommu_dma_unmap(dev, handle, size);
1525 }
1526 
1527 static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
1528 {
1529 	size_t alloc_size = PAGE_ALIGN(size);
1530 	int count = alloc_size >> PAGE_SHIFT;
1531 	struct page *page = NULL, **pages = NULL;
1532 
1533 	/* Non-coherent atomic allocation? Easy */
1534 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1535 	    dma_free_from_pool(dev, cpu_addr, alloc_size))
1536 		return;
1537 
1538 	if (is_vmalloc_addr(cpu_addr)) {
1539 		/*
1540 		 * If it the address is remapped, then it's either non-coherent
1541 		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
1542 		 */
1543 		pages = dma_common_find_pages(cpu_addr);
1544 		if (!pages)
1545 			page = vmalloc_to_page(cpu_addr);
1546 		dma_common_free_remap(cpu_addr, alloc_size);
1547 	} else {
1548 		/* Lowmem means a coherent atomic or CMA allocation */
1549 		page = virt_to_page(cpu_addr);
1550 	}
1551 
1552 	if (pages)
1553 		__iommu_dma_free_pages(pages, count);
1554 	if (page)
1555 		dma_free_contiguous(dev, page, alloc_size);
1556 }
1557 
1558 void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
1559 		dma_addr_t handle, unsigned long attrs)
1560 {
1561 	__iommu_dma_unmap(dev, handle, size);
1562 	__iommu_dma_free(dev, size, cpu_addr);
1563 }
1564 
1565 static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
1566 		struct page **pagep, gfp_t gfp, unsigned long attrs)
1567 {
1568 	bool coherent = dev_is_dma_coherent(dev);
1569 	size_t alloc_size = PAGE_ALIGN(size);
1570 	int node = dev_to_node(dev);
1571 	struct page *page = NULL;
1572 	void *cpu_addr;
1573 
1574 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
1575 	if (!page)
1576 		page = alloc_pages_node(node, gfp, get_order(alloc_size));
1577 	if (!page)
1578 		return NULL;
1579 
1580 	if (!coherent || PageHighMem(page)) {
1581 		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
1582 
1583 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
1584 				prot, __builtin_return_address(0));
1585 		if (!cpu_addr)
1586 			goto out_free_pages;
1587 
1588 		if (!coherent)
1589 			arch_dma_prep_coherent(page, size);
1590 	} else {
1591 		cpu_addr = page_address(page);
1592 	}
1593 
1594 	*pagep = page;
1595 	memset(cpu_addr, 0, alloc_size);
1596 	return cpu_addr;
1597 out_free_pages:
1598 	dma_free_contiguous(dev, page, alloc_size);
1599 	return NULL;
1600 }
1601 
1602 void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
1603 		gfp_t gfp, unsigned long attrs)
1604 {
1605 	bool coherent = dev_is_dma_coherent(dev);
1606 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
1607 	struct page *page = NULL;
1608 	void *cpu_addr;
1609 
1610 	gfp |= __GFP_ZERO;
1611 
1612 	if (gfpflags_allow_blocking(gfp) &&
1613 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
1614 		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
1615 	}
1616 
1617 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1618 	    !gfpflags_allow_blocking(gfp) && !coherent)
1619 		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
1620 					       gfp, NULL);
1621 	else
1622 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
1623 	if (!cpu_addr)
1624 		return NULL;
1625 
1626 	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
1627 			dev->coherent_dma_mask);
1628 	if (*handle == DMA_MAPPING_ERROR) {
1629 		__iommu_dma_free(dev, size, cpu_addr);
1630 		return NULL;
1631 	}
1632 
1633 	return cpu_addr;
1634 }
1635 
1636 int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
1637 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
1638 		unsigned long attrs)
1639 {
1640 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1641 	unsigned long pfn, off = vma->vm_pgoff;
1642 	int ret;
1643 
1644 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
1645 
1646 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
1647 		return ret;
1648 
1649 	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
1650 		return -ENXIO;
1651 
1652 	if (is_vmalloc_addr(cpu_addr)) {
1653 		struct page **pages = dma_common_find_pages(cpu_addr);
1654 
1655 		if (pages)
1656 			return vm_map_pages(vma, pages, nr_pages);
1657 		pfn = vmalloc_to_pfn(cpu_addr);
1658 	} else {
1659 		pfn = page_to_pfn(virt_to_page(cpu_addr));
1660 	}
1661 
1662 	return remap_pfn_range(vma, vma->vm_start, pfn + off,
1663 			       vma->vm_end - vma->vm_start,
1664 			       vma->vm_page_prot);
1665 }
1666 
1667 int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
1668 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
1669 		unsigned long attrs)
1670 {
1671 	struct page *page;
1672 	int ret;
1673 
1674 	if (is_vmalloc_addr(cpu_addr)) {
1675 		struct page **pages = dma_common_find_pages(cpu_addr);
1676 
1677 		if (pages) {
1678 			return sg_alloc_table_from_pages(sgt, pages,
1679 					PAGE_ALIGN(size) >> PAGE_SHIFT,
1680 					0, size, GFP_KERNEL);
1681 		}
1682 
1683 		page = vmalloc_to_page(cpu_addr);
1684 	} else {
1685 		page = virt_to_page(cpu_addr);
1686 	}
1687 
1688 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
1689 	if (!ret)
1690 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
1691 	return ret;
1692 }
1693 
1694 unsigned long iommu_dma_get_merge_boundary(struct device *dev)
1695 {
1696 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1697 
1698 	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
1699 }
1700 
1701 size_t iommu_dma_opt_mapping_size(void)
1702 {
1703 	return iova_rcache_range();
1704 }
1705 
1706 size_t iommu_dma_max_mapping_size(struct device *dev)
1707 {
1708 	if (dev_is_untrusted(dev))
1709 		return swiotlb_max_mapping_size(dev);
1710 
1711 	return SIZE_MAX;
1712 }
1713 
1714 void iommu_setup_dma_ops(struct device *dev)
1715 {
1716 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1717 
1718 	if (dev_is_pci(dev))
1719 		dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
1720 
1721 	dev->dma_iommu = iommu_is_dma_domain(domain);
1722 	if (dev->dma_iommu && iommu_dma_init_domain(domain, dev))
1723 		goto out_err;
1724 
1725 	return;
1726 out_err:
1727 	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
1728 		dev_name(dev));
1729 	dev->dma_iommu = false;
1730 }
1731 
1732 static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
1733 		phys_addr_t msi_addr, struct iommu_domain *domain)
1734 {
1735 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
1736 	struct iommu_dma_msi_page *msi_page;
1737 	dma_addr_t iova;
1738 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1739 	size_t size = cookie_msi_granule(cookie);
1740 
1741 	msi_addr &= ~(phys_addr_t)(size - 1);
1742 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
1743 		if (msi_page->phys == msi_addr)
1744 			return msi_page;
1745 
1746 	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
1747 	if (!msi_page)
1748 		return NULL;
1749 
1750 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
1751 	if (!iova)
1752 		goto out_free_page;
1753 
1754 	if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
1755 		goto out_free_iova;
1756 
1757 	INIT_LIST_HEAD(&msi_page->list);
1758 	msi_page->phys = msi_addr;
1759 	msi_page->iova = iova;
1760 	list_add(&msi_page->list, &cookie->msi_page_list);
1761 	return msi_page;
1762 
1763 out_free_iova:
1764 	iommu_dma_free_iova(cookie, iova, size, NULL);
1765 out_free_page:
1766 	kfree(msi_page);
1767 	return NULL;
1768 }
1769 
1770 /**
1771  * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
1772  * @desc: MSI descriptor, will store the MSI page
1773  * @msi_addr: MSI target address to be mapped
1774  *
1775  * Return: 0 on success or negative error code if the mapping failed.
1776  */
1777 int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
1778 {
1779 	struct device *dev = msi_desc_to_dev(desc);
1780 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1781 	struct iommu_dma_msi_page *msi_page;
1782 	static DEFINE_MUTEX(msi_prepare_lock); /* see below */
1783 
1784 	if (!domain || !domain->iova_cookie) {
1785 		desc->iommu_cookie = NULL;
1786 		return 0;
1787 	}
1788 
1789 	/*
1790 	 * In fact the whole prepare operation should already be serialised by
1791 	 * irq_domain_mutex further up the callchain, but that's pretty subtle
1792 	 * on its own, so consider this locking as failsafe documentation...
1793 	 */
1794 	mutex_lock(&msi_prepare_lock);
1795 	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
1796 	mutex_unlock(&msi_prepare_lock);
1797 
1798 	msi_desc_set_iommu_cookie(desc, msi_page);
1799 
1800 	if (!msi_page)
1801 		return -ENOMEM;
1802 	return 0;
1803 }
1804 
1805 /**
1806  * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
1807  * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
1808  * @msg: MSI message containing target physical address
1809  */
1810 void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
1811 {
1812 	struct device *dev = msi_desc_to_dev(desc);
1813 	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1814 	const struct iommu_dma_msi_page *msi_page;
1815 
1816 	msi_page = msi_desc_get_iommu_cookie(desc);
1817 
1818 	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
1819 		return;
1820 
1821 	msg->address_hi = upper_32_bits(msi_page->iova);
1822 	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
1823 	msg->address_lo += lower_32_bits(msi_page->iova);
1824 }
1825 
1826 static int iommu_dma_init(void)
1827 {
1828 	if (is_kdump_kernel())
1829 		static_branch_enable(&iommu_deferred_attach_enabled);
1830 
1831 	return iova_cache_get();
1832 }
1833 arch_initcall(iommu_dma_init);
1834