xref: /linux/drivers/iommu/dma-iommu.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
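
/*
 * Example usage (illustrative sketch only, not part of this file): a
 * caller such as VFIO, which owns an unmanaged domain and does its own
 * IOVA management, might wire up MSI remapping like this. The base
 * address and the my_*() naming are hypothetical.
 */
#if 0
#define MY_MSI_IOVA_BASE	0x08000000	/* hypothetical reserved region */

static int my_enable_msi_remap(struct iommu_domain *domain)
{
	/* Fails with -EINVAL unless the domain is IOMMU_DOMAIN_UNMANAGED */
	return iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
}
#endif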

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
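
/*
 * Example (sketch under an assumed driver structure, not from this file):
 * how an IOMMU driver would typically pair the cookie helpers in its
 * domain_alloc/domain_free callbacks. struct my_domain and the my_*()
 * names are hypothetical.
 */
#if 0
struct my_domain {
	struct iommu_domain	domain;
	/* driver-private page table state would live here */
};

static struct iommu_domain *my_domain_alloc(unsigned type)
{
	struct my_domain *md;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		return NULL;

	/* DMA domains need their IOVA cookie before first use */
	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&md->domain)) {
		kfree(md);
		return NULL;
	}
	return &md->domain;
}

static void my_domain_free(struct iommu_domain *domain)
{
	/* Safe to call unconditionally: a missing cookie is a no-op */
	iommu_put_dma_cookie(domain);
	kfree(container_of(domain, struct my_domain, domain));
}
#endif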

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers host
 * bridge windows for PCI devices.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	struct pci_host_bridge *bridge;
	struct resource_entry *window;

	if (!dev_is_pci(dev))
		return;

	bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
	resource_list_for_each_entry(window, &bridge->windows) {
		struct iommu_resv_region *region;
		phys_addr_t start;
		size_t length;

		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		start = window->res->start - window->offset;
		length = window->res->end - window->res->start + 1;
		region = iommu_alloc_resv_region(start, length, 0,
				IOMMU_RESV_RESERVED);
		if (!region)
			return;

		list_add_tail(&region->list, list);
	}
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
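
/*
 * Example (sketch): an IOMMU driver can delegate the generic reservations
 * to the helper above from its own .get_resv_regions callback, then add
 * driver-specific regions on top. The software-MSI base/size values are
 * hypothetical; the prot flags match what the MSI mapping code later in
 * this file uses.
 */
#if 0
#define MY_SW_MSI_BASE	0x08000000	/* hypothetical */
#define MY_SW_MSI_SIZE	SZ_1M		/* hypothetical */

static void my_get_resv_regions(struct device *dev, struct list_head *head)
{
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(MY_SW_MSI_BASE, MY_SW_MSI_SIZE,
					 prot, IOMMU_RESV_SW_MSI);
	if (region)
		list_add_tail(&region->list, head);

	/* PCI host bridge windows, etc. are common to every driver */
	iommu_dma_get_resv_regions(dev, head);
}
#endif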

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}
	/*
	 * PCI devices may have larger DMA masks, but still prefer allocating
	 * within a 32-bit mask to avoid DAC addressing. Such limitations don't
	 * apply to the typical platform device, so for those we may as well
	 * leave the cache limit at the top of their range to save an rb_last()
	 * traversal on every allocation.
	 */
	if (dev && dev_is_pci(dev))
		end_pfn &= DMA_BIT_MASK(32) >> order;

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		/*
		 * If we have devices with different DMA masks, move the free
		 * area cache limit down for the benefit of the smaller one.
		 */
		iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
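
/*
 * Example (sketch): arch glue initialising the default DMA domain for a
 * device, in the spirit of arm64's arch_setup_dma_ops() path, with the
 * IOVA space sized from the device's DMA mask. Names are illustrative.
 */
#if 0
static int my_setup_dma_domain(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return -EINVAL;

	/* Reinitialising with a compatible range is explicitly allowed */
	return iommu_dma_init_domain(domain, dma_base, size, dev);
}
#endif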

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
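
/*
 * For illustration: a coherent master doing DMA_TO_DEVICE gets
 * IOMMU_CACHE | IOMMU_READ, while a non-coherent DMA_FROM_DEVICE mapping
 * with DMA_ATTR_PRIVILEGED gets IOMMU_PRIV | IOMMU_WRITE.
 */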

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
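	/*
	 * For example, a 5-granule request is rounded up to 8 granules
	 * here, while an exact power-of-two length (say 4 granules)
	 * passes through unchanged.
	 */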
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_ERROR_CODE;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
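
/*
 * Example (sketch loosely following arm64's __iommu_alloc_attrs(); the
 * names and the remapping choice are assumptions, not part of this file):
 * how an arch's dma_map_ops .alloc might drive iommu_dma_alloc().
 */
#if 0
static void my_flush_page(struct device *dev, const void *virt,
			  phys_addr_t phys)
{
	/* arch-specific cache maintenance for non-coherent masters */
}

static void *my_dma_alloc(struct device *dev, size_t size,
			  dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, false, attrs);
	struct page **pages;

	pages = iommu_dma_alloc(dev, size, gfp, attrs, prot, handle,
				my_flush_page);
	if (!pages)
		return NULL;

	/* Typically the pages are then remapped into a contiguous VA */
	return vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, VM_MAP,
		    PAGE_KERNEL);
}
#endif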

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
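
/*
 * Example (sketch): a dma_map_ops .mmap implementation handing off to
 * iommu_dma_mmap() once it has recovered the page array; my_get_pages()
 * is a hypothetical lookup of the array stashed at allocation time.
 */
#if 0
static int my_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		       void *cpu_addr, dma_addr_t handle, size_t size,
		       unsigned long attrs)
{
	struct page **pages = my_get_pages(cpu_addr);	/* hypothetical */

	if (!pages)
		return -ENXIO;

	/* Size/prot of @vma are assumed validated per the kerneldoc above */
	return iommu_dma_mmap(pages, size, vma);
}
#endif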

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_ERROR_CODE;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_ERROR_CODE;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}
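
/*
 * Example (sketch): streaming map/unmap wiring in an arch's dma_map_ops,
 * translating the DMA direction into IOMMU prot bits on the way in. The
 * coherency assumption and any cache maintenance are elided.
 */
#if 0
static dma_addr_t my_map_page(struct device *dev, struct page *page,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir, unsigned long attrs)
{
	bool coherent = true;	/* assumption: cache-coherent master */

	return iommu_dma_map_page(dev, page, offset, size,
				  dma_info_to_prot(dir, coherent, attrs));
}

static void my_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
			  enum dma_data_direction dir, unsigned long attrs)
{
	iommu_dma_unmap_page(dev, handle, size, dir, attrs);
}
#endif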

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
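
/*
 * Example (sketch): a .map_sg callback built on iommu_dma_map_sg(). Note
 * the return convention: 0 on failure, otherwise the number of DMA
 * segments written back into the list (which may be fewer than @nents
 * after concatenation in __finalise_sg()).
 */
#if 0
static int my_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		     enum dma_data_direction dir, unsigned long attrs)
{
	bool coherent = true;	/* assumption: cache-coherent master */

	return iommu_dma_map_sg(dev, sgl, nents,
				dma_info_to_prot(dir, coherent, attrs));
}
#endif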

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iommu_dma_mapping_error(dev, iova))
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}
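
/*
 * Example (sketch): an MSI irqchip letting this hook rewrite the doorbell
 * address after composing the real message; my_compose_msg() stands in
 * for the irqchip's own composition and is hypothetical.
 */
#if 0
static void my_irq_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	my_compose_msg(data, msg);		/* hypothetical */
	iommu_dma_map_msi_msg(data->irq, msg);
}
#endif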
914