xref: /linux/drivers/iommu/dma-iommu.c (revision 4491b85480c8ca2d85b2a06262828ec1af5c00ba)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
20db2e5d1SRobin Murphy /*
30db2e5d1SRobin Murphy  * A fairly generic DMA-API to IOMMU-API glue layer.
40db2e5d1SRobin Murphy  *
50db2e5d1SRobin Murphy  * Copyright (C) 2014-2015 ARM Ltd.
60db2e5d1SRobin Murphy  *
70db2e5d1SRobin Murphy  * based in part on arch/arm/mm/dma-mapping.c:
80db2e5d1SRobin Murphy  * Copyright (C) 2000-2004 Russell King
90db2e5d1SRobin Murphy  */
100db2e5d1SRobin Murphy 
11f51dc892SShameer Kolothum #include <linux/acpi_iort.h>
12a17e3026SRobin Murphy #include <linux/atomic.h>
13a17e3026SRobin Murphy #include <linux/crash_dump.h>
140db2e5d1SRobin Murphy #include <linux/device.h>
15a17e3026SRobin Murphy #include <linux/dma-direct.h>
16a17e3026SRobin Murphy #include <linux/dma-map-ops.h>
175b11e9cdSRobin Murphy #include <linux/gfp.h>
180db2e5d1SRobin Murphy #include <linux/huge_mm.h>
190db2e5d1SRobin Murphy #include <linux/iommu.h>
20b5c58b2fSLeon Romanovsky #include <linux/iommu-dma.h>
210db2e5d1SRobin Murphy #include <linux/iova.h>
2244bb7e24SRobin Murphy #include <linux/irq.h>
23b8397a8fSRobin Murphy #include <linux/list_sort.h>
2430280eeeSLogan Gunthorpe #include <linux/memremap.h>
250db2e5d1SRobin Murphy #include <linux/mm.h>
26c1864790SRobin Murphy #include <linux/mutex.h>
275cef282eSThierry Reding #include <linux/of_iommu.h>
28fade1ec0SRobin Murphy #include <linux/pci.h>
295b11e9cdSRobin Murphy #include <linux/scatterlist.h>
30a17e3026SRobin Murphy #include <linux/spinlock.h>
31a17e3026SRobin Murphy #include <linux/swiotlb.h>
325b11e9cdSRobin Murphy #include <linux/vmalloc.h>
33a63c357bSIsaac J. Manjarres #include <trace/events/swiotlb.h>
340db2e5d1SRobin Murphy 
35f2042ed2SRobin Murphy #include "dma-iommu.h"
3695b18ef9SPasha Tatashin #include "iommu-pages.h"
37f2042ed2SRobin Murphy 
3844bb7e24SRobin Murphy struct iommu_dma_msi_page {
3944bb7e24SRobin Murphy 	struct list_head	list;
4044bb7e24SRobin Murphy 	dma_addr_t		iova;
4144bb7e24SRobin Murphy 	phys_addr_t		phys;
4244bb7e24SRobin Murphy };
4344bb7e24SRobin Murphy 
44fdbe574eSRobin Murphy enum iommu_dma_cookie_type {
45fdbe574eSRobin Murphy 	IOMMU_DMA_IOVA_COOKIE,
46fdbe574eSRobin Murphy 	IOMMU_DMA_MSI_COOKIE,
47fdbe574eSRobin Murphy };
48fdbe574eSRobin Murphy 
4932d5bc8bSNiklas Schnelle enum iommu_dma_queue_type {
5032d5bc8bSNiklas Schnelle 	IOMMU_DMA_OPTS_PER_CPU_QUEUE,
5132d5bc8bSNiklas Schnelle 	IOMMU_DMA_OPTS_SINGLE_QUEUE,
5232d5bc8bSNiklas Schnelle };
5332d5bc8bSNiklas Schnelle 
5432d5bc8bSNiklas Schnelle struct iommu_dma_options {
5532d5bc8bSNiklas Schnelle 	enum iommu_dma_queue_type qt;
569f5b681eSNiklas Schnelle 	size_t		fq_size;
579f5b681eSNiklas Schnelle 	unsigned int	fq_timeout;
5832d5bc8bSNiklas Schnelle };
5932d5bc8bSNiklas Schnelle 
6044bb7e24SRobin Murphy struct iommu_dma_cookie {
61fdbe574eSRobin Murphy 	enum iommu_dma_cookie_type	type;
62fdbe574eSRobin Murphy 	union {
63fdbe574eSRobin Murphy 		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
64a17e3026SRobin Murphy 		struct {
6544bb7e24SRobin Murphy 			struct iova_domain	iovad;
6632d5bc8bSNiklas Schnelle 			/* Flush queue */
6732d5bc8bSNiklas Schnelle 			union {
6832d5bc8bSNiklas Schnelle 				struct iova_fq	*single_fq;
6932d5bc8bSNiklas Schnelle 				struct iova_fq	__percpu *percpu_fq;
7032d5bc8bSNiklas Schnelle 			};
71a17e3026SRobin Murphy 			/* Number of TLB flushes that have been started */
72a17e3026SRobin Murphy 			atomic64_t		fq_flush_start_cnt;
73a17e3026SRobin Murphy 			/* Number of TLB flushes that have been finished */
74a17e3026SRobin Murphy 			atomic64_t		fq_flush_finish_cnt;
75a17e3026SRobin Murphy 			/* Timer to regularly empty the flush queues */
76a17e3026SRobin Murphy 			struct timer_list	fq_timer;
77a17e3026SRobin Murphy 			/* 1 when timer is active, 0 when not */
78a17e3026SRobin Murphy 			atomic_t		fq_timer_on;
79a17e3026SRobin Murphy 		};
80fdbe574eSRobin Murphy 		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
81fdbe574eSRobin Murphy 		dma_addr_t		msi_iova;
82fdbe574eSRobin Murphy 	};
8344bb7e24SRobin Murphy 	struct list_head		msi_page_list;
842da274cdSZhen Lei 
852da274cdSZhen Lei 	/* Domain for flush queue callback; NULL if flush queue not in use */
862da274cdSZhen Lei 	struct iommu_domain		*fq_domain;
8732d5bc8bSNiklas Schnelle 	/* Options for dma-iommu use */
8832d5bc8bSNiklas Schnelle 	struct iommu_dma_options	options;
89ac9a5d52SYunfei Wang 	struct mutex			mutex;
9044bb7e24SRobin Murphy };
9144bb7e24SRobin Murphy 
92a8e8af35SLianbo Jiang static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
93af3e9579SLinus Torvalds bool iommu_dma_forcedac __read_mostly;
943542dcb1SRobin Murphy 
953542dcb1SRobin Murphy static int __init iommu_dma_forcedac_setup(char *str)
963542dcb1SRobin Murphy {
973542dcb1SRobin Murphy 	int ret = kstrtobool(str, &iommu_dma_forcedac);
983542dcb1SRobin Murphy 
993542dcb1SRobin Murphy 	if (!ret && iommu_dma_forcedac)
1003542dcb1SRobin Murphy 		pr_info("Forcing DAC for PCI devices\n");
1013542dcb1SRobin Murphy 	return ret;
1023542dcb1SRobin Murphy }
1033542dcb1SRobin Murphy early_param("iommu.forcedac", iommu_dma_forcedac_setup);
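
/*
 * Illustrative note (behaviour summarised from the parameter's documented
 * semantics, not part of this file): booting with "iommu.forcedac=1" sets
 * iommu_dma_forcedac, which disables the 32-bit-first IOVA allocation
 * workaround used for PCI devices in iommu_dma_alloc_iova() below, so
 * full-width (DAC) DMA addresses are handed out straight away.
 */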
104a8e8af35SLianbo Jiang 
105a17e3026SRobin Murphy /* Number of entries per flush queue */
1069f5b681eSNiklas Schnelle #define IOVA_DEFAULT_FQ_SIZE	256
1079f5b681eSNiklas Schnelle #define IOVA_SINGLE_FQ_SIZE	32768
108a17e3026SRobin Murphy 
109a17e3026SRobin Murphy /* Timeout (in ms) after which entries are flushed from the queue */
1109f5b681eSNiklas Schnelle #define IOVA_DEFAULT_FQ_TIMEOUT	10
1119f5b681eSNiklas Schnelle #define IOVA_SINGLE_FQ_TIMEOUT	1000
112a17e3026SRobin Murphy 
113a17e3026SRobin Murphy /* Flush queue entry for deferred flushing */
114a17e3026SRobin Murphy struct iova_fq_entry {
115a17e3026SRobin Murphy 	unsigned long iova_pfn;
116a17e3026SRobin Murphy 	unsigned long pages;
117a17e3026SRobin Murphy 	struct list_head freelist;
118a17e3026SRobin Murphy 	u64 counter; /* Flush counter when this entry was added */
119a17e3026SRobin Murphy };
120a17e3026SRobin Murphy 
121a17e3026SRobin Murphy /* Per-CPU flush queue structure */
122a17e3026SRobin Murphy struct iova_fq {
123a17e3026SRobin Murphy 	spinlock_t lock;
1249f5b681eSNiklas Schnelle 	unsigned int head, tail;
1259f5b681eSNiklas Schnelle 	unsigned int mod_mask;
1269f5b681eSNiklas Schnelle 	struct iova_fq_entry entries[];
127a17e3026SRobin Murphy };
128a17e3026SRobin Murphy 
129f7f07484SRobin Murphy #define fq_ring_for_each(i, fq) \
1309f5b681eSNiklas Schnelle 	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
131f7f07484SRobin Murphy 
132f7f07484SRobin Murphy static inline bool fq_full(struct iova_fq *fq)
133f7f07484SRobin Murphy {
134f7f07484SRobin Murphy 	assert_spin_locked(&fq->lock);
1359f5b681eSNiklas Schnelle 	return (((fq->tail + 1) & fq->mod_mask) == fq->head);
136f7f07484SRobin Murphy }
137f7f07484SRobin Murphy 
138a17e3026SRobin Murphy static inline unsigned int fq_ring_add(struct iova_fq *fq)
139f7f07484SRobin Murphy {
140a17e3026SRobin Murphy 	unsigned int idx = fq->tail;
141f7f07484SRobin Murphy 
142f7f07484SRobin Murphy 	assert_spin_locked(&fq->lock);
143f7f07484SRobin Murphy 
1449f5b681eSNiklas Schnelle 	fq->tail = (idx + 1) & fq->mod_mask;
145f7f07484SRobin Murphy 
146f7f07484SRobin Murphy 	return idx;
147f7f07484SRobin Murphy }
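
/*
 * Illustrative sketch (not part of the driver): the queue size is always a
 * power of two, so masking with fq->mod_mask is equivalent to "% fq_size".
 * For example, with fq_size = 256 (mod_mask = 255) and fq->tail = 255:
 *
 *	idx = fq_ring_add(fq);		// idx == 255, fq->tail wraps to 0
 *	fq->entries[idx].iova_pfn = pfn;
 */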
148f7f07484SRobin Murphy 
14932d5bc8bSNiklas Schnelle static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
150f7f07484SRobin Murphy {
151a17e3026SRobin Murphy 	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
152a17e3026SRobin Murphy 	unsigned int idx;
153f7f07484SRobin Murphy 
154f7f07484SRobin Murphy 	assert_spin_locked(&fq->lock);
155f7f07484SRobin Murphy 
156f7f07484SRobin Murphy 	fq_ring_for_each(idx, fq) {
157f7f07484SRobin Murphy 
158f7f07484SRobin Murphy 		if (fq->entries[idx].counter >= counter)
159f7f07484SRobin Murphy 			break;
160f7f07484SRobin Murphy 
16195b18ef9SPasha Tatashin 		iommu_put_pages_list(&fq->entries[idx].freelist);
162a17e3026SRobin Murphy 		free_iova_fast(&cookie->iovad,
163f7f07484SRobin Murphy 			       fq->entries[idx].iova_pfn,
164f7f07484SRobin Murphy 			       fq->entries[idx].pages);
165f7f07484SRobin Murphy 
1669f5b681eSNiklas Schnelle 		fq->head = (fq->head + 1) & fq->mod_mask;
167f7f07484SRobin Murphy 	}
168f7f07484SRobin Murphy }
169f7f07484SRobin Murphy 
17032d5bc8bSNiklas Schnelle static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
17132d5bc8bSNiklas Schnelle {
17232d5bc8bSNiklas Schnelle 	unsigned long flags;
17332d5bc8bSNiklas Schnelle 
17432d5bc8bSNiklas Schnelle 	spin_lock_irqsave(&fq->lock, flags);
17532d5bc8bSNiklas Schnelle 	fq_ring_free_locked(cookie, fq);
17632d5bc8bSNiklas Schnelle 	spin_unlock_irqrestore(&fq->lock, flags);
17732d5bc8bSNiklas Schnelle }
17832d5bc8bSNiklas Schnelle 
179a17e3026SRobin Murphy static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
180f7f07484SRobin Murphy {
181a17e3026SRobin Murphy 	atomic64_inc(&cookie->fq_flush_start_cnt);
182a17e3026SRobin Murphy 	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
183a17e3026SRobin Murphy 	atomic64_inc(&cookie->fq_flush_finish_cnt);
184f7f07484SRobin Murphy }
185f7f07484SRobin Murphy 
186f7f07484SRobin Murphy static void fq_flush_timeout(struct timer_list *t)
187f7f07484SRobin Murphy {
188a17e3026SRobin Murphy 	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
189f7f07484SRobin Murphy 	int cpu;
190f7f07484SRobin Murphy 
191a17e3026SRobin Murphy 	atomic_set(&cookie->fq_timer_on, 0);
192a17e3026SRobin Murphy 	fq_flush_iotlb(cookie);
193f7f07484SRobin Murphy 
19432d5bc8bSNiklas Schnelle 	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) {
19532d5bc8bSNiklas Schnelle 		fq_ring_free(cookie, cookie->single_fq);
19632d5bc8bSNiklas Schnelle 	} else {
19732d5bc8bSNiklas Schnelle 		for_each_possible_cpu(cpu)
19832d5bc8bSNiklas Schnelle 			fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu));
199f7f07484SRobin Murphy 	}
200f7f07484SRobin Murphy }
201f7f07484SRobin Murphy 
202a17e3026SRobin Murphy static void queue_iova(struct iommu_dma_cookie *cookie,
203f7f07484SRobin Murphy 		unsigned long pfn, unsigned long pages,
204f7f07484SRobin Murphy 		struct list_head *freelist)
205f7f07484SRobin Murphy {
206f7f07484SRobin Murphy 	struct iova_fq *fq;
207f7f07484SRobin Murphy 	unsigned long flags;
208a17e3026SRobin Murphy 	unsigned int idx;
209f7f07484SRobin Murphy 
210f7f07484SRobin Murphy 	/*
211f7f07484SRobin Murphy 	 * Order against the IOMMU driver's pagetable update from unmapping
212a17e3026SRobin Murphy 	 * @pte, to guarantee that fq_flush_iotlb() observes that if called
213f7f07484SRobin Murphy 	 * from a different CPU before we release the lock below. Full barrier
214f7f07484SRobin Murphy 	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
215f7f07484SRobin Murphy 	 * written fq state here.
216f7f07484SRobin Murphy 	 */
217f7f07484SRobin Murphy 	smp_mb();
218f7f07484SRobin Murphy 
21932d5bc8bSNiklas Schnelle 	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
22032d5bc8bSNiklas Schnelle 		fq = cookie->single_fq;
22132d5bc8bSNiklas Schnelle 	else
22232d5bc8bSNiklas Schnelle 		fq = raw_cpu_ptr(cookie->percpu_fq);
22332d5bc8bSNiklas Schnelle 
224f7f07484SRobin Murphy 	spin_lock_irqsave(&fq->lock, flags);
225f7f07484SRobin Murphy 
226f7f07484SRobin Murphy 	/*
227f7f07484SRobin Murphy 	 * First remove all entries from the flush queue that have already been
228f7f07484SRobin Murphy 	 * flushed out on another CPU. This makes the fq_full() check below less
229f7f07484SRobin Murphy 	 * likely to be true.
230f7f07484SRobin Murphy 	 */
23132d5bc8bSNiklas Schnelle 	fq_ring_free_locked(cookie, fq);
232f7f07484SRobin Murphy 
233f7f07484SRobin Murphy 	if (fq_full(fq)) {
234a17e3026SRobin Murphy 		fq_flush_iotlb(cookie);
23532d5bc8bSNiklas Schnelle 		fq_ring_free_locked(cookie, fq);
236f7f07484SRobin Murphy 	}
237f7f07484SRobin Murphy 
238f7f07484SRobin Murphy 	idx = fq_ring_add(fq);
239f7f07484SRobin Murphy 
240f7f07484SRobin Murphy 	fq->entries[idx].iova_pfn = pfn;
241f7f07484SRobin Murphy 	fq->entries[idx].pages    = pages;
242a17e3026SRobin Murphy 	fq->entries[idx].counter  = atomic64_read(&cookie->fq_flush_start_cnt);
243f7f07484SRobin Murphy 	list_splice(freelist, &fq->entries[idx].freelist);
244f7f07484SRobin Murphy 
245f7f07484SRobin Murphy 	spin_unlock_irqrestore(&fq->lock, flags);
246f7f07484SRobin Murphy 
247f7f07484SRobin Murphy 	/* Avoid false sharing as much as possible. */
248a17e3026SRobin Murphy 	if (!atomic_read(&cookie->fq_timer_on) &&
249a17e3026SRobin Murphy 	    !atomic_xchg(&cookie->fq_timer_on, 1))
250a17e3026SRobin Murphy 		mod_timer(&cookie->fq_timer,
2519f5b681eSNiklas Schnelle 			  jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
252f7f07484SRobin Murphy }
253f7f07484SRobin Murphy 
25432d5bc8bSNiklas Schnelle static void iommu_dma_free_fq_single(struct iova_fq *fq)
25532d5bc8bSNiklas Schnelle {
25632d5bc8bSNiklas Schnelle 	int idx;
25732d5bc8bSNiklas Schnelle 
25832d5bc8bSNiklas Schnelle 	fq_ring_for_each(idx, fq)
25995b18ef9SPasha Tatashin 		iommu_put_pages_list(&fq->entries[idx].freelist);
26032d5bc8bSNiklas Schnelle 	vfree(fq);
26132d5bc8bSNiklas Schnelle }
26232d5bc8bSNiklas Schnelle 
26332d5bc8bSNiklas Schnelle static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
264f7f07484SRobin Murphy {
265f7f07484SRobin Murphy 	int cpu, idx;
266f7f07484SRobin Murphy 
267a17e3026SRobin Murphy 	/* The IOVAs will be torn down separately, so just free our queued pages */
268f7f07484SRobin Murphy 	for_each_possible_cpu(cpu) {
26932d5bc8bSNiklas Schnelle 		struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);
270f7f07484SRobin Murphy 
271f7f07484SRobin Murphy 		fq_ring_for_each(idx, fq)
27295b18ef9SPasha Tatashin 			iommu_put_pages_list(&fq->entries[idx].freelist);
273f7f07484SRobin Murphy 	}
274f7f07484SRobin Murphy 
27532d5bc8bSNiklas Schnelle 	free_percpu(percpu_fq);
276f7f07484SRobin Murphy }
277f7f07484SRobin Murphy 
27832d5bc8bSNiklas Schnelle static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
279f7f07484SRobin Murphy {
28032d5bc8bSNiklas Schnelle 	if (!cookie->fq_domain)
28132d5bc8bSNiklas Schnelle 		return;
282f7f07484SRobin Murphy 
28332d5bc8bSNiklas Schnelle 	del_timer_sync(&cookie->fq_timer);
28432d5bc8bSNiklas Schnelle 	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
28532d5bc8bSNiklas Schnelle 		iommu_dma_free_fq_single(cookie->single_fq);
28632d5bc8bSNiklas Schnelle 	else
28732d5bc8bSNiklas Schnelle 		iommu_dma_free_fq_percpu(cookie->percpu_fq);
288a17e3026SRobin Murphy }
289f7f07484SRobin Murphy 
2909f5b681eSNiklas Schnelle static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
29132d5bc8bSNiklas Schnelle {
29232d5bc8bSNiklas Schnelle 	int i;
293f7f07484SRobin Murphy 
294f7f07484SRobin Murphy 	fq->head = 0;
295f7f07484SRobin Murphy 	fq->tail = 0;
2969f5b681eSNiklas Schnelle 	fq->mod_mask = fq_size - 1;
297f7f07484SRobin Murphy 
298f7f07484SRobin Murphy 	spin_lock_init(&fq->lock);
299f7f07484SRobin Murphy 
3009f5b681eSNiklas Schnelle 	for (i = 0; i < fq_size; i++)
301f7f07484SRobin Murphy 		INIT_LIST_HEAD(&fq->entries[i].freelist);
302f7f07484SRobin Murphy }
303f7f07484SRobin Murphy 
30432d5bc8bSNiklas Schnelle static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
30532d5bc8bSNiklas Schnelle {
3069f5b681eSNiklas Schnelle 	size_t fq_size = cookie->options.fq_size;
30732d5bc8bSNiklas Schnelle 	struct iova_fq *queue;
30832d5bc8bSNiklas Schnelle 
3099f5b681eSNiklas Schnelle 	queue = vmalloc(struct_size(queue, entries, fq_size));
31032d5bc8bSNiklas Schnelle 	if (!queue)
31132d5bc8bSNiklas Schnelle 		return -ENOMEM;
3129f5b681eSNiklas Schnelle 	iommu_dma_init_one_fq(queue, fq_size);
31332d5bc8bSNiklas Schnelle 	cookie->single_fq = queue;
31432d5bc8bSNiklas Schnelle 
31532d5bc8bSNiklas Schnelle 	return 0;
31632d5bc8bSNiklas Schnelle }
31732d5bc8bSNiklas Schnelle 
31832d5bc8bSNiklas Schnelle static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
31932d5bc8bSNiklas Schnelle {
3209f5b681eSNiklas Schnelle 	size_t fq_size = cookie->options.fq_size;
32132d5bc8bSNiklas Schnelle 	struct iova_fq __percpu *queue;
32232d5bc8bSNiklas Schnelle 	int cpu;
32332d5bc8bSNiklas Schnelle 
3249f5b681eSNiklas Schnelle 	queue = __alloc_percpu(struct_size(queue, entries, fq_size),
3259f5b681eSNiklas Schnelle 			       __alignof__(*queue));
32632d5bc8bSNiklas Schnelle 	if (!queue)
32732d5bc8bSNiklas Schnelle 		return -ENOMEM;
32832d5bc8bSNiklas Schnelle 
32932d5bc8bSNiklas Schnelle 	for_each_possible_cpu(cpu)
3309f5b681eSNiklas Schnelle 		iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu), fq_size);
33132d5bc8bSNiklas Schnelle 	cookie->percpu_fq = queue;
33232d5bc8bSNiklas Schnelle 	return 0;
33332d5bc8bSNiklas Schnelle }
33432d5bc8bSNiklas Schnelle 
33532d5bc8bSNiklas Schnelle /* sysfs updates are serialised by the mutex of the group owning @domain */
33632d5bc8bSNiklas Schnelle int iommu_dma_init_fq(struct iommu_domain *domain)
33732d5bc8bSNiklas Schnelle {
33832d5bc8bSNiklas Schnelle 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
33932d5bc8bSNiklas Schnelle 	int rc;
34032d5bc8bSNiklas Schnelle 
34132d5bc8bSNiklas Schnelle 	if (cookie->fq_domain)
34232d5bc8bSNiklas Schnelle 		return 0;
34332d5bc8bSNiklas Schnelle 
34432d5bc8bSNiklas Schnelle 	atomic64_set(&cookie->fq_flush_start_cnt,  0);
34532d5bc8bSNiklas Schnelle 	atomic64_set(&cookie->fq_flush_finish_cnt, 0);
34632d5bc8bSNiklas Schnelle 
34732d5bc8bSNiklas Schnelle 	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
34832d5bc8bSNiklas Schnelle 		rc = iommu_dma_init_fq_single(cookie);
34932d5bc8bSNiklas Schnelle 	else
35032d5bc8bSNiklas Schnelle 		rc = iommu_dma_init_fq_percpu(cookie);
35132d5bc8bSNiklas Schnelle 
35232d5bc8bSNiklas Schnelle 	if (rc) {
35332d5bc8bSNiklas Schnelle 		pr_warn("iova flush queue initialization failed\n");
35432d5bc8bSNiklas Schnelle 		return -ENOMEM;
35532d5bc8bSNiklas Schnelle 	}
356f7f07484SRobin Murphy 
357a17e3026SRobin Murphy 	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
358a17e3026SRobin Murphy 	atomic_set(&cookie->fq_timer_on, 0);
359a17e3026SRobin Murphy 	/*
360a17e3026SRobin Murphy 	 * Prevent incomplete fq state being observable. Pairs with path from
361a17e3026SRobin Murphy 	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
362a17e3026SRobin Murphy 	 */
363a17e3026SRobin Murphy 	smp_wmb();
364a17e3026SRobin Murphy 	WRITE_ONCE(cookie->fq_domain, domain);
365f7f07484SRobin Murphy 	return 0;
366f7f07484SRobin Murphy }
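
/*
 * Illustrative note (assumed call paths, not part of this file): this runs
 * when a group's default domain becomes IOMMU_DOMAIN_DMA_FQ, either at boot
 * (e.g. "iommu.strict=0") or when userspace rewrites the group's type
 * attribute, roughly:
 *
 *	echo DMA-FQ > /sys/kernel/iommu_groups/<N>/type
 */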
367f7f07484SRobin Murphy 
368fdbe574eSRobin Murphy static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
369fdbe574eSRobin Murphy {
370fdbe574eSRobin Murphy 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
371fdbe574eSRobin Murphy 		return cookie->iovad.granule;
372fdbe574eSRobin Murphy 	return PAGE_SIZE;
373fdbe574eSRobin Murphy }
374fdbe574eSRobin Murphy 
375fdbe574eSRobin Murphy static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
376fdbe574eSRobin Murphy {
377fdbe574eSRobin Murphy 	struct iommu_dma_cookie *cookie;
378fdbe574eSRobin Murphy 
379fdbe574eSRobin Murphy 	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
380fdbe574eSRobin Murphy 	if (cookie) {
381fdbe574eSRobin Murphy 		INIT_LIST_HEAD(&cookie->msi_page_list);
382fdbe574eSRobin Murphy 		cookie->type = type;
383fdbe574eSRobin Murphy 	}
384fdbe574eSRobin Murphy 	return cookie;
38544bb7e24SRobin Murphy }
38644bb7e24SRobin Murphy 
3870db2e5d1SRobin Murphy /**
3880db2e5d1SRobin Murphy  * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
3890db2e5d1SRobin Murphy  * @domain: IOMMU domain to prepare for DMA-API usage
3900db2e5d1SRobin Murphy  */
3910db2e5d1SRobin Murphy int iommu_get_dma_cookie(struct iommu_domain *domain)
3920db2e5d1SRobin Murphy {
3930db2e5d1SRobin Murphy 	if (domain->iova_cookie)
3940db2e5d1SRobin Murphy 		return -EEXIST;
3950db2e5d1SRobin Murphy 
396fdbe574eSRobin Murphy 	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
397fdbe574eSRobin Murphy 	if (!domain->iova_cookie)
39844bb7e24SRobin Murphy 		return -ENOMEM;
3990db2e5d1SRobin Murphy 
400ac9a5d52SYunfei Wang 	mutex_init(&domain->iova_cookie->mutex);
40144bb7e24SRobin Murphy 	return 0;
4020db2e5d1SRobin Murphy }
4030db2e5d1SRobin Murphy 
4040db2e5d1SRobin Murphy /**
405fdbe574eSRobin Murphy  * iommu_get_msi_cookie - Acquire just MSI remapping resources
406fdbe574eSRobin Murphy  * @domain: IOMMU domain to prepare
407fdbe574eSRobin Murphy  * @base: Start address of IOVA region for MSI mappings
408fdbe574eSRobin Murphy  *
409fdbe574eSRobin Murphy  * Users who manage their own IOVA allocation and do not want DMA API support,
410fdbe574eSRobin Murphy  * but would still like to take advantage of automatic MSI remapping, can use
411fdbe574eSRobin Murphy  * this to initialise their own domain appropriately. Users should reserve a
412fdbe574eSRobin Murphy  * contiguous IOVA region, starting at @base, large enough to accommodate the
413fdbe574eSRobin Murphy  * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
414fdbe574eSRobin Murphy  * used by the devices attached to @domain.
415fdbe574eSRobin Murphy  */
416fdbe574eSRobin Murphy int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
417fdbe574eSRobin Murphy {
418fdbe574eSRobin Murphy 	struct iommu_dma_cookie *cookie;
419fdbe574eSRobin Murphy 
420fdbe574eSRobin Murphy 	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
421fdbe574eSRobin Murphy 		return -EINVAL;
422fdbe574eSRobin Murphy 
423fdbe574eSRobin Murphy 	if (domain->iova_cookie)
424fdbe574eSRobin Murphy 		return -EEXIST;
425fdbe574eSRobin Murphy 
426fdbe574eSRobin Murphy 	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
427fdbe574eSRobin Murphy 	if (!cookie)
428fdbe574eSRobin Murphy 		return -ENOMEM;
429fdbe574eSRobin Murphy 
430fdbe574eSRobin Murphy 	cookie->msi_iova = base;
431fdbe574eSRobin Murphy 	domain->iova_cookie = cookie;
432fdbe574eSRobin Murphy 	return 0;
433fdbe574eSRobin Murphy }
434fdbe574eSRobin Murphy EXPORT_SYMBOL(iommu_get_msi_cookie);
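
/*
 * Illustrative usage sketch (hypothetical caller and addresses, not part of
 * this file): the owner of an UNMANAGED domain that runs its own IOVA
 * allocator could set aside a doorbell window before attaching devices:
 *
 *	#define MY_MSI_IOVA_BASE	0x8000000	// hypothetical
 *	#define MY_MSI_IOVA_LEN		0x100000	// hypothetical
 *
 *	ret = iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
 *	// ...then keep [MY_MSI_IOVA_BASE, MY_MSI_IOVA_BASE + MY_MSI_IOVA_LEN)
 *	// out of the caller's own allocator so the linear MSI cookie can use it.
 */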
435fdbe574eSRobin Murphy 
436fdbe574eSRobin Murphy /**
4370db2e5d1SRobin Murphy  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
438fdbe574eSRobin Murphy  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
439fdbe574eSRobin Murphy  *          iommu_get_msi_cookie()
4400db2e5d1SRobin Murphy  */
4410db2e5d1SRobin Murphy void iommu_put_dma_cookie(struct iommu_domain *domain)
4420db2e5d1SRobin Murphy {
44344bb7e24SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
44444bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi, *tmp;
4450db2e5d1SRobin Murphy 
44644bb7e24SRobin Murphy 	if (!cookie)
4470db2e5d1SRobin Murphy 		return;
4480db2e5d1SRobin Murphy 
449f7f07484SRobin Murphy 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
450a17e3026SRobin Murphy 		iommu_dma_free_fq(cookie);
45144bb7e24SRobin Murphy 		put_iova_domain(&cookie->iovad);
452f7f07484SRobin Murphy 	}
45344bb7e24SRobin Murphy 
45444bb7e24SRobin Murphy 	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
45544bb7e24SRobin Murphy 		list_del(&msi->list);
45644bb7e24SRobin Murphy 		kfree(msi);
45744bb7e24SRobin Murphy 	}
45844bb7e24SRobin Murphy 	kfree(cookie);
4590db2e5d1SRobin Murphy 	domain->iova_cookie = NULL;
4600db2e5d1SRobin Murphy }
4610db2e5d1SRobin Murphy 
462273df963SRobin Murphy /**
463273df963SRobin Murphy  * iommu_dma_get_resv_regions - Reserved region driver helper
464273df963SRobin Murphy  * @dev: Device from iommu_get_resv_regions()
465273df963SRobin Murphy  * @list: Reserved region list from iommu_get_resv_regions()
466273df963SRobin Murphy  *
467273df963SRobin Murphy  * IOMMU drivers can use this to implement their .get_resv_regions callback
468cd2c9fcfSShameer Kolothum  * for general non-IOMMU-specific reservations. Currently, this covers GICv3
469cd2c9fcfSShameer Kolothum  * ITS region reservation on ACPI based ARM platforms that may require HW MSI
470cd2c9fcfSShameer Kolothum  * reservation.
471273df963SRobin Murphy  */
472273df963SRobin Murphy void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
473fade1ec0SRobin Murphy {
474fade1ec0SRobin Murphy 
47598cc4f71SJoerg Roedel 	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
47655be25b8SShameer Kolothum 		iort_iommu_get_resv_regions(dev, list);
477f51dc892SShameer Kolothum 
4785cef282eSThierry Reding 	if (dev->of_node)
4795cef282eSThierry Reding 		of_iommu_get_resv_regions(dev, list);
480fade1ec0SRobin Murphy }
481273df963SRobin Murphy EXPORT_SYMBOL(iommu_dma_get_resv_regions);
482fade1ec0SRobin Murphy 
4837c1b058cSRobin Murphy static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
4847c1b058cSRobin Murphy 		phys_addr_t start, phys_addr_t end)
4857c1b058cSRobin Murphy {
4867c1b058cSRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
4877c1b058cSRobin Murphy 	struct iommu_dma_msi_page *msi_page;
4887c1b058cSRobin Murphy 	int i, num_pages;
4897c1b058cSRobin Murphy 
4907c1b058cSRobin Murphy 	start -= iova_offset(iovad, start);
4917c1b058cSRobin Murphy 	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
4927c1b058cSRobin Murphy 
49365ac74f1SMarc Zyngier 	for (i = 0; i < num_pages; i++) {
49465ac74f1SMarc Zyngier 		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
4957c1b058cSRobin Murphy 		if (!msi_page)
4967c1b058cSRobin Murphy 			return -ENOMEM;
4977c1b058cSRobin Murphy 
49865ac74f1SMarc Zyngier 		msi_page->phys = start;
49965ac74f1SMarc Zyngier 		msi_page->iova = start;
50065ac74f1SMarc Zyngier 		INIT_LIST_HEAD(&msi_page->list);
50165ac74f1SMarc Zyngier 		list_add(&msi_page->list, &cookie->msi_page_list);
5027c1b058cSRobin Murphy 		start += iovad->granule;
5037c1b058cSRobin Murphy 	}
5047c1b058cSRobin Murphy 
5057c1b058cSRobin Murphy 	return 0;
5067c1b058cSRobin Murphy }
5077c1b058cSRobin Murphy 
508b8397a8fSRobin Murphy static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
509b8397a8fSRobin Murphy 		const struct list_head *b)
510b8397a8fSRobin Murphy {
511b8397a8fSRobin Murphy 	struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
512b8397a8fSRobin Murphy 	struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);
513b8397a8fSRobin Murphy 
514b8397a8fSRobin Murphy 	return res_a->res->start > res_b->res->start;
515b8397a8fSRobin Murphy }
516b8397a8fSRobin Murphy 
517aadad097SSrinath Mannam static int iova_reserve_pci_windows(struct pci_dev *dev,
518cd2c9fcfSShameer Kolothum 		struct iova_domain *iovad)
519cd2c9fcfSShameer Kolothum {
520cd2c9fcfSShameer Kolothum 	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
521cd2c9fcfSShameer Kolothum 	struct resource_entry *window;
522cd2c9fcfSShameer Kolothum 	unsigned long lo, hi;
523aadad097SSrinath Mannam 	phys_addr_t start = 0, end;
524cd2c9fcfSShameer Kolothum 
525cd2c9fcfSShameer Kolothum 	resource_list_for_each_entry(window, &bridge->windows) {
526cd2c9fcfSShameer Kolothum 		if (resource_type(window->res) != IORESOURCE_MEM)
527cd2c9fcfSShameer Kolothum 			continue;
528cd2c9fcfSShameer Kolothum 
529cd2c9fcfSShameer Kolothum 		lo = iova_pfn(iovad, window->res->start - window->offset);
530cd2c9fcfSShameer Kolothum 		hi = iova_pfn(iovad, window->res->end - window->offset);
531cd2c9fcfSShameer Kolothum 		reserve_iova(iovad, lo, hi);
532cd2c9fcfSShameer Kolothum 	}
533aadad097SSrinath Mannam 
534aadad097SSrinath Mannam 	/* Get reserved DMA windows from host bridge */
535b8397a8fSRobin Murphy 	list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
536aadad097SSrinath Mannam 	resource_list_for_each_entry(window, &bridge->dma_ranges) {
537aadad097SSrinath Mannam 		end = window->res->start - window->offset;
538aadad097SSrinath Mannam resv_iova:
539aadad097SSrinath Mannam 		if (end > start) {
540aadad097SSrinath Mannam 			lo = iova_pfn(iovad, start);
541aadad097SSrinath Mannam 			hi = iova_pfn(iovad, end);
542aadad097SSrinath Mannam 			reserve_iova(iovad, lo, hi);
543571f3160SSrinath Mannam 		} else if (end < start) {
544b8397a8fSRobin Murphy 			/* DMA ranges should be non-overlapping */
545571f3160SSrinath Mannam 			dev_err(&dev->dev,
5467154cbd3SJoerg Roedel 				"Failed to reserve IOVA [%pa-%pa]\n",
5477154cbd3SJoerg Roedel 				&start, &end);
548aadad097SSrinath Mannam 			return -EINVAL;
549aadad097SSrinath Mannam 		}
550aadad097SSrinath Mannam 
551aadad097SSrinath Mannam 		start = window->res->end - window->offset + 1;
552aadad097SSrinath Mannam 		/* If window is last entry */
553aadad097SSrinath Mannam 		if (window->node.next == &bridge->dma_ranges &&
55429fcea8cSArnd Bergmann 		    end != ~(phys_addr_t)0) {
55529fcea8cSArnd Bergmann 			end = ~(phys_addr_t)0;
556aadad097SSrinath Mannam 			goto resv_iova;
557aadad097SSrinath Mannam 		}
558aadad097SSrinath Mannam 	}
559aadad097SSrinath Mannam 
560aadad097SSrinath Mannam 	return 0;
561cd2c9fcfSShameer Kolothum }
562cd2c9fcfSShameer Kolothum 
5637c1b058cSRobin Murphy static int iova_reserve_iommu_regions(struct device *dev,
5647c1b058cSRobin Murphy 		struct iommu_domain *domain)
5657c1b058cSRobin Murphy {
5667c1b058cSRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
5677c1b058cSRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
5687c1b058cSRobin Murphy 	struct iommu_resv_region *region;
5697c1b058cSRobin Murphy 	LIST_HEAD(resv_regions);
5707c1b058cSRobin Murphy 	int ret = 0;
5717c1b058cSRobin Murphy 
572aadad097SSrinath Mannam 	if (dev_is_pci(dev)) {
573aadad097SSrinath Mannam 		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
574aadad097SSrinath Mannam 		if (ret)
575aadad097SSrinath Mannam 			return ret;
576aadad097SSrinath Mannam 	}
577cd2c9fcfSShameer Kolothum 
5787c1b058cSRobin Murphy 	iommu_get_resv_regions(dev, &resv_regions);
5797c1b058cSRobin Murphy 	list_for_each_entry(region, &resv_regions, list) {
5807c1b058cSRobin Murphy 		unsigned long lo, hi;
5817c1b058cSRobin Murphy 
5827c1b058cSRobin Murphy 		/* We ARE the software that manages these! */
5837c1b058cSRobin Murphy 		if (region->type == IOMMU_RESV_SW_MSI)
5847c1b058cSRobin Murphy 			continue;
5857c1b058cSRobin Murphy 
5867c1b058cSRobin Murphy 		lo = iova_pfn(iovad, region->start);
5877c1b058cSRobin Murphy 		hi = iova_pfn(iovad, region->start + region->length - 1);
5887c1b058cSRobin Murphy 		reserve_iova(iovad, lo, hi);
5897c1b058cSRobin Murphy 
5907c1b058cSRobin Murphy 		if (region->type == IOMMU_RESV_MSI)
5917c1b058cSRobin Murphy 			ret = cookie_init_hw_msi_region(cookie, region->start,
5927c1b058cSRobin Murphy 					region->start + region->length);
5937c1b058cSRobin Murphy 		if (ret)
5947c1b058cSRobin Murphy 			break;
5957c1b058cSRobin Murphy 	}
5967c1b058cSRobin Murphy 	iommu_put_resv_regions(dev, &resv_regions);
5977c1b058cSRobin Murphy 
5987c1b058cSRobin Murphy 	return ret;
5997c1b058cSRobin Murphy }
6007c1b058cSRobin Murphy 
60182c3cefbSLu Baolu static bool dev_is_untrusted(struct device *dev)
60282c3cefbSLu Baolu {
60382c3cefbSLu Baolu 	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
60482c3cefbSLu Baolu }
60582c3cefbSLu Baolu 
606861370f4SCatalin Marinas static bool dev_use_swiotlb(struct device *dev, size_t size,
607861370f4SCatalin Marinas 			    enum dma_data_direction dir)
6082e727bffSDavid Stevens {
609861370f4SCatalin Marinas 	return IS_ENABLED(CONFIG_SWIOTLB) &&
610861370f4SCatalin Marinas 		(dev_is_untrusted(dev) ||
611861370f4SCatalin Marinas 		 dma_kmalloc_needs_bounce(dev, size, dir));
612861370f4SCatalin Marinas }
613861370f4SCatalin Marinas 
614861370f4SCatalin Marinas static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
615861370f4SCatalin Marinas 			       int nents, enum dma_data_direction dir)
616861370f4SCatalin Marinas {
617861370f4SCatalin Marinas 	struct scatterlist *s;
618861370f4SCatalin Marinas 	int i;
619861370f4SCatalin Marinas 
620861370f4SCatalin Marinas 	if (!IS_ENABLED(CONFIG_SWIOTLB))
621861370f4SCatalin Marinas 		return false;
622861370f4SCatalin Marinas 
623861370f4SCatalin Marinas 	if (dev_is_untrusted(dev))
624861370f4SCatalin Marinas 		return true;
625861370f4SCatalin Marinas 
626861370f4SCatalin Marinas 	/*
627861370f4SCatalin Marinas 	 * If kmalloc() buffers are not DMA-safe for this device and
628861370f4SCatalin Marinas 	 * direction, check the individual lengths in the sg list. If any
629861370f4SCatalin Marinas 	 * element is deemed unsafe, use the swiotlb for bouncing.
630861370f4SCatalin Marinas 	 */
631861370f4SCatalin Marinas 	if (!dma_kmalloc_safe(dev, dir)) {
632861370f4SCatalin Marinas 		for_each_sg(sg, s, nents, i)
633861370f4SCatalin Marinas 			if (!dma_kmalloc_size_aligned(s->length))
634861370f4SCatalin Marinas 				return true;
635861370f4SCatalin Marinas 	}
636861370f4SCatalin Marinas 
637861370f4SCatalin Marinas 	return false;
6382e727bffSDavid Stevens }
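
/*
 * Illustrative example (not part of the driver): on a non-coherent device
 * with 64-byte cache lines, a 24-byte kmalloc() buffer may share a cache
 * line with unrelated data, so it fails the dma_kmalloc_size_aligned()
 * check above and a DMA_FROM_DEVICE mapping of it is bounced through
 * SWIOTLB instead of being mapped in place.
 */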
6392e727bffSDavid Stevens 
6400db2e5d1SRobin Murphy /**
64132d5bc8bSNiklas Schnelle  * iommu_dma_init_options - Initialize dma-iommu options
64232d5bc8bSNiklas Schnelle  * @options: The options to be initialized
64332d5bc8bSNiklas Schnelle  * @dev: Device the options are set for
64432d5bc8bSNiklas Schnelle  *
64532d5bc8bSNiklas Schnelle  * This allows tuning dma-iommu behaviour specific to device properties
64632d5bc8bSNiklas Schnelle  */
64732d5bc8bSNiklas Schnelle static void iommu_dma_init_options(struct iommu_dma_options *options,
64832d5bc8bSNiklas Schnelle 				   struct device *dev)
64932d5bc8bSNiklas Schnelle {
6509f5b681eSNiklas Schnelle 	/* Shadowing IOTLB flushes do better with a single large queue */
6519f5b681eSNiklas Schnelle 	if (dev->iommu->shadow_on_flush) {
65232d5bc8bSNiklas Schnelle 		options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
6539f5b681eSNiklas Schnelle 		options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
6549f5b681eSNiklas Schnelle 		options->fq_size = IOVA_SINGLE_FQ_SIZE;
6559f5b681eSNiklas Schnelle 	} else {
65632d5bc8bSNiklas Schnelle 		options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
6579f5b681eSNiklas Schnelle 		options->fq_size = IOVA_DEFAULT_FQ_SIZE;
6589f5b681eSNiklas Schnelle 		options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
6599f5b681eSNiklas Schnelle 	}
66032d5bc8bSNiklas Schnelle }
66132d5bc8bSNiklas Schnelle 
66232d5bc8bSNiklas Schnelle /**
6630db2e5d1SRobin Murphy  * iommu_dma_init_domain - Initialise a DMA mapping domain
6640db2e5d1SRobin Murphy  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
665fade1ec0SRobin Murphy  * @dev: Device the domain is being initialised for
6660db2e5d1SRobin Murphy  *
667ad4750b0SRobin Murphy  * If the geometry and dma_range_map include address 0, we reserve that page
6680db2e5d1SRobin Murphy  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
6690db2e5d1SRobin Murphy  * any change which could make prior IOVAs invalid will fail.
6700db2e5d1SRobin Murphy  */
671ad4750b0SRobin Murphy static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev)
6720db2e5d1SRobin Murphy {
673fdbe574eSRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
674ad4750b0SRobin Murphy 	const struct bus_dma_region *map = dev->dma_range_map;
675c61a4633SShaokun Zhang 	unsigned long order, base_pfn;
6766b0c54e7SYunsheng Lin 	struct iova_domain *iovad;
67732e92d9fSJohn Garry 	int ret;
6780db2e5d1SRobin Murphy 
679fdbe574eSRobin Murphy 	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
680fdbe574eSRobin Murphy 		return -EINVAL;
6810db2e5d1SRobin Murphy 
6826b0c54e7SYunsheng Lin 	iovad = &cookie->iovad;
6836b0c54e7SYunsheng Lin 
6840db2e5d1SRobin Murphy 	/* Use the smallest supported page size for IOVA granularity */
685d16e0faaSRobin Murphy 	order = __ffs(domain->pgsize_bitmap);
686ad4750b0SRobin Murphy 	base_pfn = 1;
6870db2e5d1SRobin Murphy 
6880db2e5d1SRobin Murphy 	/* Check the domain allows at least some access to the device... */
689ad4750b0SRobin Murphy 	if (map) {
690cc8d89d0SRobin Murphy 		if (dma_range_map_min(map) > domain->geometry.aperture_end ||
691ad4750b0SRobin Murphy 		    dma_range_map_max(map) < domain->geometry.aperture_start) {
6920db2e5d1SRobin Murphy 			pr_warn("specified DMA range outside IOMMU capability\n");
6930db2e5d1SRobin Murphy 			return -EFAULT;
6940db2e5d1SRobin Murphy 		}
6950db2e5d1SRobin Murphy 	}
696cc8d89d0SRobin Murphy 	/* ...then finally give it a kicking to make sure it fits */
697cc8d89d0SRobin Murphy 	base_pfn = max_t(unsigned long, base_pfn,
698cc8d89d0SRobin Murphy 			 domain->geometry.aperture_start >> order);
6990db2e5d1SRobin Murphy 
700f51d7bb7SRobin Murphy 	/* start_pfn is always nonzero for an already-initialised domain */
701ac9a5d52SYunfei Wang 	mutex_lock(&cookie->mutex);
7020db2e5d1SRobin Murphy 	if (iovad->start_pfn) {
7030db2e5d1SRobin Murphy 		if (1UL << order != iovad->granule ||
704f51d7bb7SRobin Murphy 		    base_pfn != iovad->start_pfn) {
7050db2e5d1SRobin Murphy 			pr_warn("Incompatible range for DMA domain\n");
706ac9a5d52SYunfei Wang 			ret = -EFAULT;
707ac9a5d52SYunfei Wang 			goto done_unlock;
7080db2e5d1SRobin Murphy 		}
7097c1b058cSRobin Murphy 
710ac9a5d52SYunfei Wang 		ret = 0;
711ac9a5d52SYunfei Wang 		goto done_unlock;
7120db2e5d1SRobin Murphy 	}
7137c1b058cSRobin Murphy 
714aa3ac946SZhen Lei 	init_iova_domain(iovad, 1UL << order, base_pfn);
71532e92d9fSJohn Garry 	ret = iova_domain_init_rcaches(iovad);
71632e92d9fSJohn Garry 	if (ret)
717ac9a5d52SYunfei Wang 		goto done_unlock;
7182da274cdSZhen Lei 
71932d5bc8bSNiklas Schnelle 	iommu_dma_init_options(&cookie->options, dev);
72032d5bc8bSNiklas Schnelle 
721c208916fSRobin Murphy 	/* If the FQ fails we can simply fall back to strict mode */
722a4fdd976SRobin Murphy 	if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
723a4fdd976SRobin Murphy 	    (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
724c208916fSRobin Murphy 		domain->type = IOMMU_DOMAIN_DMA;
7257c1b058cSRobin Murphy 
726ac9a5d52SYunfei Wang 	ret = iova_reserve_iommu_regions(dev, domain);
727ac9a5d52SYunfei Wang 
728ac9a5d52SYunfei Wang done_unlock:
729ac9a5d52SYunfei Wang 	mutex_unlock(&cookie->mutex);
730ac9a5d52SYunfei Wang 	return ret;
7317c1b058cSRobin Murphy }
7320db2e5d1SRobin Murphy 
7330db2e5d1SRobin Murphy /**
734737c85caSMitchel Humpherys  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
735737c85caSMitchel Humpherys  *                    page flags.
7360db2e5d1SRobin Murphy  * @dir: Direction of DMA transfer
7370db2e5d1SRobin Murphy  * @coherent: Is the DMA master cache-coherent?
738737c85caSMitchel Humpherys  * @attrs: DMA attributes for the mapping
7390db2e5d1SRobin Murphy  *
7400db2e5d1SRobin Murphy  * Return: corresponding IOMMU API page protection flags
7410db2e5d1SRobin Murphy  */
74206d60728SChristoph Hellwig static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
743737c85caSMitchel Humpherys 		     unsigned long attrs)
7440db2e5d1SRobin Murphy {
7450db2e5d1SRobin Murphy 	int prot = coherent ? IOMMU_CACHE : 0;
7460db2e5d1SRobin Murphy 
747737c85caSMitchel Humpherys 	if (attrs & DMA_ATTR_PRIVILEGED)
748737c85caSMitchel Humpherys 		prot |= IOMMU_PRIV;
749737c85caSMitchel Humpherys 
7500db2e5d1SRobin Murphy 	switch (dir) {
7510db2e5d1SRobin Murphy 	case DMA_BIDIRECTIONAL:
7520db2e5d1SRobin Murphy 		return prot | IOMMU_READ | IOMMU_WRITE;
7530db2e5d1SRobin Murphy 	case DMA_TO_DEVICE:
7540db2e5d1SRobin Murphy 		return prot | IOMMU_READ;
7550db2e5d1SRobin Murphy 	case DMA_FROM_DEVICE:
7560db2e5d1SRobin Murphy 		return prot | IOMMU_WRITE;
7570db2e5d1SRobin Murphy 	default:
7580db2e5d1SRobin Murphy 		return 0;
7590db2e5d1SRobin Murphy 	}
7600db2e5d1SRobin Murphy }
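
/*
 * Illustrative example (not part of the driver): a cache-coherent device
 * mapping a buffer DMA_TO_DEVICE gets IOMMU_CACHE | IOMMU_READ, i.e. a
 * mapping the device can only read; adding DMA_ATTR_PRIVILEGED would also
 * set IOMMU_PRIV.
 */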
7610db2e5d1SRobin Murphy 
762842fe519SRobin Murphy static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
763bd036d2fSRobin Murphy 		size_t size, u64 dma_limit, struct device *dev)
7640db2e5d1SRobin Murphy {
765a44e6657SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
766a44e6657SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
767791c2b17SRobin Murphy 	unsigned long shift, iova_len, iova;
7680db2e5d1SRobin Murphy 
769a44e6657SRobin Murphy 	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
770a44e6657SRobin Murphy 		cookie->msi_iova += size;
771a44e6657SRobin Murphy 		return cookie->msi_iova - size;
772a44e6657SRobin Murphy 	}
773a44e6657SRobin Murphy 
774a44e6657SRobin Murphy 	shift = iova_shift(iovad);
775a44e6657SRobin Murphy 	iova_len = size >> shift;
776a44e6657SRobin Murphy 
777a7ba70f1SNicolas Saenz Julienne 	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
77803bfdc31SRobin Murphy 
779c987ff0dSRobin Murphy 	if (domain->geometry.force_aperture)
780bd036d2fSRobin Murphy 		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
781122fac03SRobin Murphy 
782791c2b17SRobin Murphy 	/*
783791c2b17SRobin Murphy 	 * Try to use all the 32-bit PCI addresses first. The original SAC vs.
784791c2b17SRobin Murphy 	 * DAC reasoning loses relevance with PCIe, but enough hardware and
785791c2b17SRobin Murphy 	 * firmware bugs are still lurking out there that it's safest not to
786791c2b17SRobin Murphy 	 * venture into the 64-bit space until necessary.
787791c2b17SRobin Murphy 	 *
788791c2b17SRobin Murphy 	 * If your device goes wrong after seeing the notice then likely either
789791c2b17SRobin Murphy 	 * its driver is not setting DMA masks accurately, the hardware has
790791c2b17SRobin Murphy 	 * some inherent bug in handling >32-bit addresses, or not all the
791791c2b17SRobin Murphy 	 * expected address bits are wired up between the device and the IOMMU.
792791c2b17SRobin Murphy 	 */
793791c2b17SRobin Murphy 	if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
794538d5b33STomasz Nowicki 		iova = alloc_iova_fast(iovad, iova_len,
795538d5b33STomasz Nowicki 				       DMA_BIT_MASK(32) >> shift, false);
796791c2b17SRobin Murphy 		if (iova)
797791c2b17SRobin Murphy 			goto done;
798122fac03SRobin Murphy 
799791c2b17SRobin Murphy 		dev->iommu->pci_32bit_workaround = false;
800791c2b17SRobin Murphy 		dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit));
801791c2b17SRobin Murphy 	}
802bb65a64cSRobin Murphy 
803791c2b17SRobin Murphy 	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
804791c2b17SRobin Murphy done:
805bb65a64cSRobin Murphy 	return (dma_addr_t)iova << shift;
8060db2e5d1SRobin Murphy }
8070db2e5d1SRobin Murphy 
808842fe519SRobin Murphy static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
809452e69b5SRobin Murphy 		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
8100db2e5d1SRobin Murphy {
811842fe519SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
8120db2e5d1SRobin Murphy 
813a44e6657SRobin Murphy 	/* The MSI case is only ever cleaning up its most recent allocation */
814bb65a64cSRobin Murphy 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
815a44e6657SRobin Murphy 		cookie->msi_iova -= size;
816452e69b5SRobin Murphy 	else if (gather && gather->queued)
817a17e3026SRobin Murphy 		queue_iova(cookie, iova_pfn(iovad, iova),
8182a2b8eaaSTom Murphy 				size >> iova_shift(iovad),
81987f60cc6SMatthew Wilcox (Oracle) 				&gather->freelist);
820bb65a64cSRobin Murphy 	else
8211cc896edSRobin Murphy 		free_iova_fast(iovad, iova_pfn(iovad, iova),
8221cc896edSRobin Murphy 				size >> iova_shift(iovad));
823842fe519SRobin Murphy }
824842fe519SRobin Murphy 
825b61d271eSRobin Murphy static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
826842fe519SRobin Murphy 		size_t size)
827842fe519SRobin Murphy {
828b61d271eSRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
829a44e6657SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
830a44e6657SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
831842fe519SRobin Murphy 	size_t iova_off = iova_offset(iovad, dma_addr);
832a7d20dc1SWill Deacon 	struct iommu_iotlb_gather iotlb_gather;
833a7d20dc1SWill Deacon 	size_t unmapped;
834842fe519SRobin Murphy 
835842fe519SRobin Murphy 	dma_addr -= iova_off;
836842fe519SRobin Murphy 	size = iova_align(iovad, size + iova_off);
837a7d20dc1SWill Deacon 	iommu_iotlb_gather_init(&iotlb_gather);
838452e69b5SRobin Murphy 	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
839842fe519SRobin Murphy 
840a7d20dc1SWill Deacon 	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
841a7d20dc1SWill Deacon 	WARN_ON(unmapped != size);
842a7d20dc1SWill Deacon 
843452e69b5SRobin Murphy 	if (!iotlb_gather.queued)
844aae4c8e2STom Murphy 		iommu_iotlb_sync(domain, &iotlb_gather);
845452e69b5SRobin Murphy 	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
8460db2e5d1SRobin Murphy }
8470db2e5d1SRobin Murphy 
84892aec09cSChristoph Hellwig static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
849bd036d2fSRobin Murphy 		size_t size, int prot, u64 dma_mask)
85092aec09cSChristoph Hellwig {
851b61d271eSRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
85292aec09cSChristoph Hellwig 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
8538af23fadSRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
8548af23fadSRobin Murphy 	size_t iova_off = iova_offset(iovad, phys);
85592aec09cSChristoph Hellwig 	dma_addr_t iova;
85692aec09cSChristoph Hellwig 
857a8e8af35SLianbo Jiang 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
8583ab65729SLianbo Jiang 	    iommu_deferred_attach(dev, domain))
859795bbbb9STom Murphy 		return DMA_MAPPING_ERROR;
860795bbbb9STom Murphy 
861e2addba4SRobin Murphy 	/* If anyone ever wants this we'd need support in the IOVA allocator */
862e2addba4SRobin Murphy 	if (dev_WARN_ONCE(dev, dma_get_min_align_mask(dev) > iova_mask(iovad),
863e2addba4SRobin Murphy 	    "Unsupported alignment constraint\n"))
864e2addba4SRobin Murphy 		return DMA_MAPPING_ERROR;
865e2addba4SRobin Murphy 
8668af23fadSRobin Murphy 	size = iova_align(iovad, size + iova_off);
86792aec09cSChristoph Hellwig 
8686e235020STom Murphy 	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
86992aec09cSChristoph Hellwig 	if (!iova)
87092aec09cSChristoph Hellwig 		return DMA_MAPPING_ERROR;
87192aec09cSChristoph Hellwig 
8724dc6376aSJason Gunthorpe 	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
8732a2b8eaaSTom Murphy 		iommu_dma_free_iova(cookie, iova, size, NULL);
87492aec09cSChristoph Hellwig 		return DMA_MAPPING_ERROR;
87592aec09cSChristoph Hellwig 	}
87692aec09cSChristoph Hellwig 	return iova + iova_off;
87792aec09cSChristoph Hellwig }
87892aec09cSChristoph Hellwig 
8790db2e5d1SRobin Murphy static void __iommu_dma_free_pages(struct page **pages, int count)
8800db2e5d1SRobin Murphy {
8810db2e5d1SRobin Murphy 	while (count--)
8820db2e5d1SRobin Murphy 		__free_page(pages[count]);
8830db2e5d1SRobin Murphy 	kvfree(pages);
8840db2e5d1SRobin Murphy }
8850db2e5d1SRobin Murphy 
886c4b17afbSGanapatrao Kulkarni static struct page **__iommu_dma_alloc_pages(struct device *dev,
887c4b17afbSGanapatrao Kulkarni 		unsigned int count, unsigned long order_mask, gfp_t gfp)
8880db2e5d1SRobin Murphy {
8890db2e5d1SRobin Murphy 	struct page **pages;
890c4b17afbSGanapatrao Kulkarni 	unsigned int i = 0, nid = dev_to_node(dev);
8913b6b7e19SRobin Murphy 
8925e0a760bSKirill A. Shutemov 	order_mask &= GENMASK(MAX_PAGE_ORDER, 0);
8933b6b7e19SRobin Murphy 	if (!order_mask)
8943b6b7e19SRobin Murphy 		return NULL;
8950db2e5d1SRobin Murphy 
896ab6f4b00SGustavo A. R. Silva 	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
8970db2e5d1SRobin Murphy 	if (!pages)
8980db2e5d1SRobin Murphy 		return NULL;
8990db2e5d1SRobin Murphy 
9000db2e5d1SRobin Murphy 	/* IOMMU can map any pages, so highmem can also be used here */
9010db2e5d1SRobin Murphy 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
9020db2e5d1SRobin Murphy 
9030db2e5d1SRobin Murphy 	while (count) {
9040db2e5d1SRobin Murphy 		struct page *page = NULL;
9053b6b7e19SRobin Murphy 		unsigned int order_size;
9060db2e5d1SRobin Murphy 
9070db2e5d1SRobin Murphy 		/*
9080db2e5d1SRobin Murphy 		 * Higher-order allocations are a convenience rather
9090db2e5d1SRobin Murphy 		 * than a necessity, hence using __GFP_NORETRY until
9103b6b7e19SRobin Murphy 		 * falling back to minimum-order allocations.
9110db2e5d1SRobin Murphy 		 */
91261883d3cSKirill A. Shutemov 		for (order_mask &= GENMASK(__fls(count), 0);
9133b6b7e19SRobin Murphy 		     order_mask; order_mask &= ~order_size) {
9143b6b7e19SRobin Murphy 			unsigned int order = __fls(order_mask);
915c4b17afbSGanapatrao Kulkarni 			gfp_t alloc_flags = gfp;
9163b6b7e19SRobin Murphy 
9173b6b7e19SRobin Murphy 			order_size = 1U << order;
918c4b17afbSGanapatrao Kulkarni 			if (order_mask > order_size)
919c4b17afbSGanapatrao Kulkarni 				alloc_flags |= __GFP_NORETRY;
920c4b17afbSGanapatrao Kulkarni 			page = alloc_pages_node(nid, alloc_flags, order);
9210db2e5d1SRobin Murphy 			if (!page)
9220db2e5d1SRobin Murphy 				continue;
9234604393cSRobin Murphy 			if (order)
9240db2e5d1SRobin Murphy 				split_page(page, order);
9250db2e5d1SRobin Murphy 			break;
9260db2e5d1SRobin Murphy 		}
9270db2e5d1SRobin Murphy 		if (!page) {
9280db2e5d1SRobin Murphy 			__iommu_dma_free_pages(pages, i);
9290db2e5d1SRobin Murphy 			return NULL;
9300db2e5d1SRobin Murphy 		}
9313b6b7e19SRobin Murphy 		count -= order_size;
9323b6b7e19SRobin Murphy 		while (order_size--)
9330db2e5d1SRobin Murphy 			pages[i++] = page++;
9340db2e5d1SRobin Murphy 	}
9350db2e5d1SRobin Murphy 	return pages;
9360db2e5d1SRobin Murphy }
9370db2e5d1SRobin Murphy 
9388230ce9aSChristoph Hellwig /*
9398230ce9aSChristoph Hellwig  * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
9400db2e5d1SRobin Murphy  * but an IOMMU which supports smaller pages might not map the whole thing.
9410db2e5d1SRobin Murphy  */
9428230ce9aSChristoph Hellwig static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
9438d485a69SRobin Murphy 		size_t size, struct sg_table *sgt, gfp_t gfp, unsigned long attrs)
9440db2e5d1SRobin Murphy {
94543c5bf11SRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
946842fe519SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
947842fe519SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
94821b95aafSChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
94921b95aafSChristoph Hellwig 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
95021b95aafSChristoph Hellwig 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
9510db2e5d1SRobin Murphy 	struct page **pages;
952842fe519SRobin Murphy 	dma_addr_t iova;
953a3884774SYunfei Wang 	ssize_t ret;
9540db2e5d1SRobin Murphy 
955a8e8af35SLianbo Jiang 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
9563ab65729SLianbo Jiang 	    iommu_deferred_attach(dev, domain))
957795bbbb9STom Murphy 		return NULL;
958795bbbb9STom Murphy 
9593b6b7e19SRobin Murphy 	min_size = alloc_sizes & -alloc_sizes;
9603b6b7e19SRobin Murphy 	if (min_size < PAGE_SIZE) {
9613b6b7e19SRobin Murphy 		min_size = PAGE_SIZE;
9623b6b7e19SRobin Murphy 		alloc_sizes |= PAGE_SIZE;
9633b6b7e19SRobin Murphy 	} else {
9643b6b7e19SRobin Murphy 		size = ALIGN(size, min_size);
9653b6b7e19SRobin Murphy 	}
96600085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
9673b6b7e19SRobin Murphy 		alloc_sizes = min_size;
9683b6b7e19SRobin Murphy 
9693b6b7e19SRobin Murphy 	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
970c4b17afbSGanapatrao Kulkarni 	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
971c4b17afbSGanapatrao Kulkarni 					gfp);
9720db2e5d1SRobin Murphy 	if (!pages)
9730db2e5d1SRobin Murphy 		return NULL;
9740db2e5d1SRobin Murphy 
975842fe519SRobin Murphy 	size = iova_align(iovad, size);
976842fe519SRobin Murphy 	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
9770db2e5d1SRobin Murphy 	if (!iova)
9780db2e5d1SRobin Murphy 		goto out_free_pages;
9790db2e5d1SRobin Murphy 
98096d57808SJason Gunthorpe 	/*
98196d57808SJason Gunthorpe 	 * Remove the zone/policy flags from the GFP - these are applied to the
98296d57808SJason Gunthorpe 	 * __iommu_dma_alloc_pages() but are not used for the supporting
98396d57808SJason Gunthorpe 	 * internal allocations that follow.
98496d57808SJason Gunthorpe 	 */
98596d57808SJason Gunthorpe 	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);
98696d57808SJason Gunthorpe 
98796d57808SJason Gunthorpe 	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp))
9880db2e5d1SRobin Murphy 		goto out_free_iova;
9890db2e5d1SRobin Murphy 
99021b95aafSChristoph Hellwig 	if (!(ioprot & IOMMU_CACHE)) {
99123f88e0aSChristoph Hellwig 		struct scatterlist *sg;
99223f88e0aSChristoph Hellwig 		int i;
99323f88e0aSChristoph Hellwig 
9948230ce9aSChristoph Hellwig 		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
99523f88e0aSChristoph Hellwig 			arch_dma_prep_coherent(sg_page(sg), sg->length);
9960db2e5d1SRobin Murphy 	}
9970db2e5d1SRobin Murphy 
998f2b2c051SJason Gunthorpe 	ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
99996d57808SJason Gunthorpe 			   gfp);
1000a3884774SYunfei Wang 	if (ret < 0 || ret < size)
10010db2e5d1SRobin Murphy 		goto out_free_sg;
10020db2e5d1SRobin Murphy 
10038230ce9aSChristoph Hellwig 	sgt->sgl->dma_address = iova;
1004e817ee5fSChristoph Hellwig 	sgt->sgl->dma_length = size;
10058230ce9aSChristoph Hellwig 	return pages;
10060db2e5d1SRobin Murphy 
10070db2e5d1SRobin Murphy out_free_sg:
10088230ce9aSChristoph Hellwig 	sg_free_table(sgt);
10090db2e5d1SRobin Murphy out_free_iova:
10102a2b8eaaSTom Murphy 	iommu_dma_free_iova(cookie, iova, size, NULL);
10110db2e5d1SRobin Murphy out_free_pages:
10120db2e5d1SRobin Murphy 	__iommu_dma_free_pages(pages, count);
10130db2e5d1SRobin Murphy 	return NULL;
10140db2e5d1SRobin Murphy }
10150db2e5d1SRobin Murphy 
10168230ce9aSChristoph Hellwig static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
10178d485a69SRobin Murphy 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
10188230ce9aSChristoph Hellwig {
10198230ce9aSChristoph Hellwig 	struct page **pages;
10208230ce9aSChristoph Hellwig 	struct sg_table sgt;
10218230ce9aSChristoph Hellwig 	void *vaddr;
10228d485a69SRobin Murphy 	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
10238230ce9aSChristoph Hellwig 
10248d485a69SRobin Murphy 	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, attrs);
10258230ce9aSChristoph Hellwig 	if (!pages)
10268230ce9aSChristoph Hellwig 		return NULL;
10278230ce9aSChristoph Hellwig 	*dma_handle = sgt.sgl->dma_address;
10288230ce9aSChristoph Hellwig 	sg_free_table(&sgt);
10298230ce9aSChristoph Hellwig 	vaddr = dma_common_pages_remap(pages, size, prot,
10308230ce9aSChristoph Hellwig 			__builtin_return_address(0));
10318230ce9aSChristoph Hellwig 	if (!vaddr)
10328230ce9aSChristoph Hellwig 		goto out_unmap;
10338230ce9aSChristoph Hellwig 	return vaddr;
10348230ce9aSChristoph Hellwig 
10358230ce9aSChristoph Hellwig out_unmap:
10368230ce9aSChristoph Hellwig 	__iommu_dma_unmap(dev, *dma_handle, size);
10378230ce9aSChristoph Hellwig 	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
10388230ce9aSChristoph Hellwig 	return NULL;
10398230ce9aSChristoph Hellwig }
10408230ce9aSChristoph Hellwig 
1041*bb0e3919SChristoph Hellwig /*
1042*bb0e3919SChristoph Hellwig  * This is the actual return value from iommu_dma_alloc_noncontiguous().
1043*bb0e3919SChristoph Hellwig  *
1044*bb0e3919SChristoph Hellwig  * The users of the DMA API should only care about the sg_table, but to make
1045*bb0e3919SChristoph Hellwig  * the DMA-API internal vmapping and freeing easier, we stash away the page
1046*bb0e3919SChristoph Hellwig  * array as well (except for the fallback case).  This can go away any time,
1047*bb0e3919SChristoph Hellwig  * e.g. when a vmap-variant that takes a scatterlist comes along.
1048*bb0e3919SChristoph Hellwig  */
1049*bb0e3919SChristoph Hellwig struct dma_sgt_handle {
1050*bb0e3919SChristoph Hellwig 	struct sg_table sgt;
1051*bb0e3919SChristoph Hellwig 	struct page **pages;
1052*bb0e3919SChristoph Hellwig };
1053*bb0e3919SChristoph Hellwig #define sgt_handle(sgt) \
1054*bb0e3919SChristoph Hellwig 	container_of((sgt), struct dma_sgt_handle, sgt)
1055*bb0e3919SChristoph Hellwig 
1056b5c58b2fSLeon Romanovsky struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
1057b5c58b2fSLeon Romanovsky 	       enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
1058e817ee5fSChristoph Hellwig {
1059e817ee5fSChristoph Hellwig 	struct dma_sgt_handle *sh;
1060e817ee5fSChristoph Hellwig 
1061e817ee5fSChristoph Hellwig 	sh = kmalloc(sizeof(*sh), gfp);
1062e817ee5fSChristoph Hellwig 	if (!sh)
1063e817ee5fSChristoph Hellwig 		return NULL;
1064e817ee5fSChristoph Hellwig 
10658d485a69SRobin Murphy 	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp, attrs);
1066e817ee5fSChristoph Hellwig 	if (!sh->pages) {
1067e817ee5fSChristoph Hellwig 		kfree(sh);
1068e817ee5fSChristoph Hellwig 		return NULL;
1069e817ee5fSChristoph Hellwig 	}
1070e817ee5fSChristoph Hellwig 	return &sh->sgt;
1071e817ee5fSChristoph Hellwig }
1072e817ee5fSChristoph Hellwig 
1073b5c58b2fSLeon Romanovsky void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
1074e817ee5fSChristoph Hellwig 		struct sg_table *sgt, enum dma_data_direction dir)
1075e817ee5fSChristoph Hellwig {
1076e817ee5fSChristoph Hellwig 	struct dma_sgt_handle *sh = sgt_handle(sgt);
1077e817ee5fSChristoph Hellwig 
1078e817ee5fSChristoph Hellwig 	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
1079e817ee5fSChristoph Hellwig 	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
1080e817ee5fSChristoph Hellwig 	sg_free_table(&sh->sgt);
10810fbea680SEzequiel Garcia 	kfree(sh);
1082e817ee5fSChristoph Hellwig }
1083e817ee5fSChristoph Hellwig 
1084*bb0e3919SChristoph Hellwig void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
1085*bb0e3919SChristoph Hellwig 		struct sg_table *sgt)
1086*bb0e3919SChristoph Hellwig {
1087*bb0e3919SChristoph Hellwig 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1088*bb0e3919SChristoph Hellwig 
1089*bb0e3919SChristoph Hellwig 	return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
1090*bb0e3919SChristoph Hellwig }
1091*bb0e3919SChristoph Hellwig 
1092*bb0e3919SChristoph Hellwig int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
1093*bb0e3919SChristoph Hellwig 		size_t size, struct sg_table *sgt)
1094*bb0e3919SChristoph Hellwig {
1095*bb0e3919SChristoph Hellwig 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1096*bb0e3919SChristoph Hellwig 
1097*bb0e3919SChristoph Hellwig 	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
1098*bb0e3919SChristoph Hellwig 		return -ENXIO;
1099*bb0e3919SChristoph Hellwig 	return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
1100*bb0e3919SChristoph Hellwig }
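
/*
 * Illustrative sketch (not part of this file): how a driver might reach the
 * iommu_dma_*_noncontiguous() helpers above through the generic DMA API
 * wrappers in <linux/dma-mapping.h>. Size and error handling are abbreviated,
 * and the wrapper names are the generic API rather than local symbols.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, SZ_64K, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, SZ_64K, sgt);
 *	if (!vaddr) {
 *		dma_free_noncontiguous(dev, SZ_64K, sgt, DMA_BIDIRECTIONAL);
 *		return -ENOMEM;
 *	}
 *	... device uses sgt->sgl->dma_address, the CPU uses vaddr ...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, SZ_64K, sgt, DMA_BIDIRECTIONAL);
 */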
1101*bb0e3919SChristoph Hellwig 
1102b5c58b2fSLeon Romanovsky void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1103b5c58b2fSLeon Romanovsky 		size_t size, enum dma_data_direction dir)
11040db2e5d1SRobin Murphy {
110506d60728SChristoph Hellwig 	phys_addr_t phys;
11060db2e5d1SRobin Murphy 
1107861370f4SCatalin Marinas 	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
110806d60728SChristoph Hellwig 		return;
110906d60728SChristoph Hellwig 
111006d60728SChristoph Hellwig 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
111182612d66STom Murphy 	if (!dev_is_dma_coherent(dev))
111256e35f9cSChristoph Hellwig 		arch_sync_dma_for_cpu(phys, size, dir);
111382612d66STom Murphy 
111480808d27SChristoph Hellwig 	swiotlb_sync_single_for_cpu(dev, phys, size, dir);
11151cc896edSRobin Murphy }
11161cc896edSRobin Murphy 
1117b5c58b2fSLeon Romanovsky void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
1118b5c58b2fSLeon Romanovsky 		size_t size, enum dma_data_direction dir)
111951f8cc9eSRobin Murphy {
112006d60728SChristoph Hellwig 	phys_addr_t phys;
112106d60728SChristoph Hellwig 
1122861370f4SCatalin Marinas 	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
112306d60728SChristoph Hellwig 		return;
112406d60728SChristoph Hellwig 
112506d60728SChristoph Hellwig 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
112680808d27SChristoph Hellwig 	swiotlb_sync_single_for_device(dev, phys, size, dir);
112782612d66STom Murphy 
112882612d66STom Murphy 	if (!dev_is_dma_coherent(dev))
112956e35f9cSChristoph Hellwig 		arch_sync_dma_for_device(phys, size, dir);
113051f8cc9eSRobin Murphy }
113151f8cc9eSRobin Murphy 
1132b5c58b2fSLeon Romanovsky void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
1133b5c58b2fSLeon Romanovsky 		int nelems, enum dma_data_direction dir)
11340db2e5d1SRobin Murphy {
113506d60728SChristoph Hellwig 	struct scatterlist *sg;
113606d60728SChristoph Hellwig 	int i;
113706d60728SChristoph Hellwig 
1138861370f4SCatalin Marinas 	if (sg_dma_is_swiotlb(sgl))
113908ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
114008ae5d4aSDavid Stevens 			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
114180808d27SChristoph Hellwig 						      sg->length, dir);
114208ae5d4aSDavid Stevens 	else if (!dev_is_dma_coherent(dev))
114308ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
114408ae5d4aSDavid Stevens 			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
114506d60728SChristoph Hellwig }
114606d60728SChristoph Hellwig 
1147b5c58b2fSLeon Romanovsky void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
1148b5c58b2fSLeon Romanovsky 		int nelems, enum dma_data_direction dir)
114906d60728SChristoph Hellwig {
115006d60728SChristoph Hellwig 	struct scatterlist *sg;
115106d60728SChristoph Hellwig 	int i;
115206d60728SChristoph Hellwig 
1153861370f4SCatalin Marinas 	if (sg_dma_is_swiotlb(sgl))
115408ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
115508ae5d4aSDavid Stevens 			iommu_dma_sync_single_for_device(dev,
115608ae5d4aSDavid Stevens 							 sg_dma_address(sg),
115780808d27SChristoph Hellwig 							 sg->length, dir);
115808ae5d4aSDavid Stevens 	else if (!dev_is_dma_coherent(dev))
115908ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
116056e35f9cSChristoph Hellwig 			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
116106d60728SChristoph Hellwig }
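
/*
 * Illustrative usage (not part of this file): a driver that lets the CPU
 * inspect a streaming DMA_FROM_DEVICE buffer between device accesses goes
 * through the generic sync calls, which resolve to the helpers above on
 * IOMMU-translated devices:
 *
 *	dma_sync_single_for_cpu(dev, dma_handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the buffer ...
 *	dma_sync_single_for_device(dev, dma_handle, len, DMA_FROM_DEVICE);
 */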
116206d60728SChristoph Hellwig 
1163b5c58b2fSLeon Romanovsky dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
116406d60728SChristoph Hellwig 	      unsigned long offset, size_t size, enum dma_data_direction dir,
116506d60728SChristoph Hellwig 	      unsigned long attrs)
116606d60728SChristoph Hellwig {
116706d60728SChristoph Hellwig 	phys_addr_t phys = page_to_phys(page) + offset;
116806d60728SChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
11699b49bbc2SDavid Stevens 	int prot = dma_info_to_prot(dir, coherent, attrs);
11709b49bbc2SDavid Stevens 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
11719b49bbc2SDavid Stevens 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
11729b49bbc2SDavid Stevens 	struct iova_domain *iovad = &cookie->iovad;
11739b49bbc2SDavid Stevens 	dma_addr_t iova, dma_mask = dma_get_mask(dev);
117406d60728SChristoph Hellwig 
11759b49bbc2SDavid Stevens 	/*
11769b49bbc2SDavid Stevens 	 * If both the physical buffer start address and size are
11779b49bbc2SDavid Stevens 	 * aligned to the IOVA granule, we don't need to use a bounce page.
11789b49bbc2SDavid Stevens 	 */
1179861370f4SCatalin Marinas 	if (dev_use_swiotlb(dev, size, dir) &&
1180861370f4SCatalin Marinas 	    iova_offset(iovad, phys | size)) {
1181f316ba0aSMario Limonciello 		if (!is_swiotlb_active(dev)) {
1182f316ba0aSMario Limonciello 			dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
1183f316ba0aSMario Limonciello 			return DMA_MAPPING_ERROR;
1184f316ba0aSMario Limonciello 		}
1185f316ba0aSMario Limonciello 
1186a63c357bSIsaac J. Manjarres 		trace_swiotlb_bounced(dev, phys, size);
1187a63c357bSIsaac J. Manjarres 
1188327e2c97SMichael Kelley 		phys = swiotlb_tbl_map_single(dev, phys, size,
1189e81e99baSDavid Stevens 					      iova_mask(iovad), dir, attrs);
11909b49bbc2SDavid Stevens 
11919b49bbc2SDavid Stevens 		if (phys == DMA_MAPPING_ERROR)
11929b49bbc2SDavid Stevens 			return DMA_MAPPING_ERROR;
11939b49bbc2SDavid Stevens 
11942650073fSMichael Kelley 		/*
11952650073fSMichael Kelley 		 * Untrusted devices should not see padding areas with random
11962650073fSMichael Kelley 		 * leftover kernel data, so zero the pre- and post-padding.
11972650073fSMichael Kelley 		 * swiotlb_tbl_map_single() has initialized the bounce buffer
11982650073fSMichael Kelley 		 * proper to the contents of the original memory buffer.
11992650073fSMichael Kelley 		 */
12002650073fSMichael Kelley 		if (dev_is_untrusted(dev)) {
12012650073fSMichael Kelley 			size_t start, virt = (size_t)phys_to_virt(phys);
12029b49bbc2SDavid Stevens 
12032650073fSMichael Kelley 			/* Pre-padding */
12042650073fSMichael Kelley 			start = iova_align_down(iovad, virt);
12052650073fSMichael Kelley 			memset((void *)start, 0, virt - start);
12062650073fSMichael Kelley 
12072650073fSMichael Kelley 			/* Post-padding */
12082650073fSMichael Kelley 			start = virt + size;
12092650073fSMichael Kelley 			memset((void *)start, 0,
12102650073fSMichael Kelley 			       iova_align(iovad, start) - start);
12119b49bbc2SDavid Stevens 		}
12129b49bbc2SDavid Stevens 	}
12139b49bbc2SDavid Stevens 
12149b49bbc2SDavid Stevens 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
121556e35f9cSChristoph Hellwig 		arch_sync_dma_for_device(phys, size, dir);
12169b49bbc2SDavid Stevens 
12172cbc61a1SDavid Stevens 	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
12187296f230SMichael Kelley 	if (iova == DMA_MAPPING_ERROR)
12199b49bbc2SDavid Stevens 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
12209b49bbc2SDavid Stevens 	return iova;
122106d60728SChristoph Hellwig }
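
/*
 * Worked example for the bounce path above (illustrative numbers, assuming a
 * 4K IOVA granule and an untrusted device): mapping 0x300 bytes at physical
 * address 0x10000a00 gives iova_offset(iovad, phys | size) != 0, so the data
 * is copied into a swiotlb slot. Within that slot, the bytes from the
 * granule-aligned start up to the copied data, and from the end of the
 * copied data up to the next granule boundary, are zeroed so the device
 * never sees stale kernel memory in the padding it is able to access.
 */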
122206d60728SChristoph Hellwig 
1223b5c58b2fSLeon Romanovsky void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
122406d60728SChristoph Hellwig 		size_t size, enum dma_data_direction dir, unsigned long attrs)
122506d60728SChristoph Hellwig {
12269b49bbc2SDavid Stevens 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
12279b49bbc2SDavid Stevens 	phys_addr_t phys;
12289b49bbc2SDavid Stevens 
12299b49bbc2SDavid Stevens 	phys = iommu_iova_to_phys(domain, dma_handle);
12309b49bbc2SDavid Stevens 	if (WARN_ON(!phys))
12319b49bbc2SDavid Stevens 		return;
12329b49bbc2SDavid Stevens 
12339b49bbc2SDavid Stevens 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
12349b49bbc2SDavid Stevens 		arch_sync_dma_for_cpu(phys, size, dir);
12359b49bbc2SDavid Stevens 
12369b49bbc2SDavid Stevens 	__iommu_dma_unmap(dev, dma_handle, size);
12379b49bbc2SDavid Stevens 
12389b49bbc2SDavid Stevens 	swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
12390db2e5d1SRobin Murphy }
12400db2e5d1SRobin Murphy 
12410db2e5d1SRobin Murphy /*
12420db2e5d1SRobin Murphy  * Prepare a successfully-mapped scatterlist to give back to the caller.
1243809eac54SRobin Murphy  *
1244809eac54SRobin Murphy  * At this point the segments are already laid out by iommu_dma_map_sg() to
1245809eac54SRobin Murphy  * avoid individually crossing any boundaries, so we merely need to check a
1246809eac54SRobin Murphy  * segment's start address to avoid concatenating across one.
12470db2e5d1SRobin Murphy  */
12480db2e5d1SRobin Murphy static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
12490db2e5d1SRobin Murphy 		dma_addr_t dma_addr)
12500db2e5d1SRobin Murphy {
1251809eac54SRobin Murphy 	struct scatterlist *s, *cur = sg;
1252809eac54SRobin Murphy 	unsigned long seg_mask = dma_get_seg_boundary(dev);
1253809eac54SRobin Murphy 	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
1254809eac54SRobin Murphy 	int i, count = 0;
12550db2e5d1SRobin Murphy 
12560db2e5d1SRobin Murphy 	for_each_sg(sg, s, nents, i) {
1257809eac54SRobin Murphy 		/* Restore this segment's original unaligned fields first */
125830280eeeSLogan Gunthorpe 		dma_addr_t s_dma_addr = sg_dma_address(s);
1259809eac54SRobin Murphy 		unsigned int s_iova_off = sg_dma_address(s);
12600db2e5d1SRobin Murphy 		unsigned int s_length = sg_dma_len(s);
1261809eac54SRobin Murphy 		unsigned int s_iova_len = s->length;
12620db2e5d1SRobin Murphy 
1263cad34be7SChristoph Hellwig 		sg_dma_address(s) = DMA_MAPPING_ERROR;
1264809eac54SRobin Murphy 		sg_dma_len(s) = 0;
1265809eac54SRobin Murphy 
1266cb147bbeSRobin Murphy 		if (sg_dma_is_bus_address(s)) {
126730280eeeSLogan Gunthorpe 			if (i > 0)
126830280eeeSLogan Gunthorpe 				cur = sg_next(cur);
126930280eeeSLogan Gunthorpe 
127030280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(s);
127130280eeeSLogan Gunthorpe 			sg_dma_address(cur) = s_dma_addr;
127230280eeeSLogan Gunthorpe 			sg_dma_len(cur) = s_length;
127330280eeeSLogan Gunthorpe 			sg_dma_mark_bus_address(cur);
127430280eeeSLogan Gunthorpe 			count++;
127530280eeeSLogan Gunthorpe 			cur_len = 0;
127630280eeeSLogan Gunthorpe 			continue;
127730280eeeSLogan Gunthorpe 		}
127830280eeeSLogan Gunthorpe 
127930280eeeSLogan Gunthorpe 		s->offset += s_iova_off;
128030280eeeSLogan Gunthorpe 		s->length = s_length;
128130280eeeSLogan Gunthorpe 
1282809eac54SRobin Murphy 		/*
1283809eac54SRobin Murphy 		 * Now fill in the real DMA data. If...
1284809eac54SRobin Murphy 		 * - there is a valid output segment to append to
1285809eac54SRobin Murphy 		 * - and this segment starts on an IOVA page boundary
1286809eac54SRobin Murphy 		 * - but doesn't fall at a segment boundary
1287809eac54SRobin Murphy 		 * - and wouldn't make the resulting output segment too long
1288809eac54SRobin Murphy 		 */
1289809eac54SRobin Murphy 		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
1290ab2cbeb0SRobin Murphy 		    (max_len - cur_len >= s_length)) {
1291809eac54SRobin Murphy 			/* ...then concatenate it with the previous one */
1292809eac54SRobin Murphy 			cur_len += s_length;
1293809eac54SRobin Murphy 		} else {
1294809eac54SRobin Murphy 			/* Otherwise start the next output segment */
1295809eac54SRobin Murphy 			if (i > 0)
1296809eac54SRobin Murphy 				cur = sg_next(cur);
1297809eac54SRobin Murphy 			cur_len = s_length;
1298809eac54SRobin Murphy 			count++;
1299809eac54SRobin Murphy 
1300809eac54SRobin Murphy 			sg_dma_address(cur) = dma_addr + s_iova_off;
13010db2e5d1SRobin Murphy 		}
1302809eac54SRobin Murphy 
1303809eac54SRobin Murphy 		sg_dma_len(cur) = cur_len;
1304809eac54SRobin Murphy 		dma_addr += s_iova_len;
1305809eac54SRobin Murphy 
1306809eac54SRobin Murphy 		if (s_length + s_iova_off < s_iova_len)
1307809eac54SRobin Murphy 			cur_len = 0;
1308809eac54SRobin Murphy 	}
1309809eac54SRobin Murphy 	return count;
13100db2e5d1SRobin Murphy }
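
/*
 * Example of the stash/restore round trip (illustrative, assuming a 4K
 * granule): a segment with offset 0x200 and length 0x600 is rewritten by
 * iommu_dma_map_sg() below to offset 0 and length 0x1000, with the original
 * values parked in sg_dma_address()/sg_dma_len(). Once the aligned list has
 * been mapped, __finalise_sg() above restores the offset and length and
 * reports the real DMA address as dma_addr + 0x200 in the output segment.
 */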
13110db2e5d1SRobin Murphy 
13120db2e5d1SRobin Murphy /*
13130db2e5d1SRobin Murphy  * If mapping failed, then just restore the original list,
13140db2e5d1SRobin Murphy  * but make sure the DMA fields are invalidated.
13150db2e5d1SRobin Murphy  */
13160db2e5d1SRobin Murphy static void __invalidate_sg(struct scatterlist *sg, int nents)
13170db2e5d1SRobin Murphy {
13180db2e5d1SRobin Murphy 	struct scatterlist *s;
13190db2e5d1SRobin Murphy 	int i;
13200db2e5d1SRobin Murphy 
13210db2e5d1SRobin Murphy 	for_each_sg(sg, s, nents, i) {
1322cb147bbeSRobin Murphy 		if (sg_dma_is_bus_address(s)) {
132330280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(s);
132430280eeeSLogan Gunthorpe 		} else {
1325cad34be7SChristoph Hellwig 			if (sg_dma_address(s) != DMA_MAPPING_ERROR)
132607b48ac4SRobin Murphy 				s->offset += sg_dma_address(s);
13270db2e5d1SRobin Murphy 			if (sg_dma_len(s))
13280db2e5d1SRobin Murphy 				s->length = sg_dma_len(s);
132930280eeeSLogan Gunthorpe 		}
1330cad34be7SChristoph Hellwig 		sg_dma_address(s) = DMA_MAPPING_ERROR;
13310db2e5d1SRobin Murphy 		sg_dma_len(s) = 0;
13320db2e5d1SRobin Murphy 	}
13330db2e5d1SRobin Murphy }
13340db2e5d1SRobin Murphy 
133582612d66STom Murphy static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
133682612d66STom Murphy 		int nents, enum dma_data_direction dir, unsigned long attrs)
133782612d66STom Murphy {
133882612d66STom Murphy 	struct scatterlist *s;
133982612d66STom Murphy 	int i;
134082612d66STom Murphy 
134182612d66STom Murphy 	for_each_sg(sg, s, nents, i)
13429b49bbc2SDavid Stevens 		iommu_dma_unmap_page(dev, sg_dma_address(s),
134382612d66STom Murphy 				sg_dma_len(s), dir, attrs);
134482612d66STom Murphy }
134582612d66STom Murphy 
134682612d66STom Murphy static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
134782612d66STom Murphy 		int nents, enum dma_data_direction dir, unsigned long attrs)
134882612d66STom Murphy {
134982612d66STom Murphy 	struct scatterlist *s;
135082612d66STom Murphy 	int i;
135182612d66STom Murphy 
1352861370f4SCatalin Marinas 	sg_dma_mark_swiotlb(sg);
1353861370f4SCatalin Marinas 
135482612d66STom Murphy 	for_each_sg(sg, s, nents, i) {
13559b49bbc2SDavid Stevens 		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
13569b49bbc2SDavid Stevens 				s->offset, s->length, dir, attrs);
135782612d66STom Murphy 		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
135882612d66STom Murphy 			goto out_unmap;
135982612d66STom Murphy 		sg_dma_len(s) = s->length;
136082612d66STom Murphy 	}
136182612d66STom Murphy 
136282612d66STom Murphy 	return nents;
136382612d66STom Murphy 
136482612d66STom Murphy out_unmap:
136582612d66STom Murphy 	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
1366dabb16f6SLogan Gunthorpe 	return -EIO;
136782612d66STom Murphy }
136882612d66STom Murphy 
13690db2e5d1SRobin Murphy /*
13700db2e5d1SRobin Murphy  * The DMA API client is passing in a scatterlist which could describe
13710db2e5d1SRobin Murphy  * any old buffer layout, but the IOMMU API requires everything to be
13720db2e5d1SRobin Murphy  * aligned to IOMMU pages. Hence the need for this complicated bit of
13730db2e5d1SRobin Murphy  * impedance-matching, to be able to hand off a suitably-aligned list,
13740db2e5d1SRobin Murphy  * but still preserve the original offsets and sizes for the caller.
13750db2e5d1SRobin Murphy  */
1376b5c58b2fSLeon Romanovsky int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1377b5c58b2fSLeon Romanovsky 		enum dma_data_direction dir, unsigned long attrs)
13780db2e5d1SRobin Murphy {
137943c5bf11SRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1380842fe519SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
1381842fe519SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
13820db2e5d1SRobin Murphy 	struct scatterlist *s, *prev = NULL;
138306d60728SChristoph Hellwig 	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
138430280eeeSLogan Gunthorpe 	struct pci_p2pdma_map_state p2pdma_state = {};
138530280eeeSLogan Gunthorpe 	enum pci_p2pdma_map_type map;
1386842fe519SRobin Murphy 	dma_addr_t iova;
13870db2e5d1SRobin Murphy 	size_t iova_len = 0;
1388809eac54SRobin Murphy 	unsigned long mask = dma_get_seg_boundary(dev);
1389dabb16f6SLogan Gunthorpe 	ssize_t ret;
13900db2e5d1SRobin Murphy 	int i;
13910db2e5d1SRobin Murphy 
1392dabb16f6SLogan Gunthorpe 	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
1393dabb16f6SLogan Gunthorpe 		ret = iommu_deferred_attach(dev, domain);
1394ac315f96SLogan Gunthorpe 		if (ret)
1395dabb16f6SLogan Gunthorpe 			goto out;
1396dabb16f6SLogan Gunthorpe 	}
1397795bbbb9STom Murphy 
1398861370f4SCatalin Marinas 	if (dev_use_sg_swiotlb(dev, sg, nents, dir))
139982612d66STom Murphy 		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
140082612d66STom Murphy 
14010db2e5d1SRobin Murphy 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
14020db2e5d1SRobin Murphy 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
14030db2e5d1SRobin Murphy 
14040db2e5d1SRobin Murphy 	/*
14050db2e5d1SRobin Murphy 	 * Work out how much IOVA space we need, and align the segments to
14060db2e5d1SRobin Murphy 	 * IOVA granules for the IOMMU driver to handle. With some clever
14070db2e5d1SRobin Murphy 	 * trickery we can modify the list in-place, but reversibly, by
1408809eac54SRobin Murphy 	 * stashing the unaligned parts in the as-yet-unused DMA fields.
14090db2e5d1SRobin Murphy 	 */
14100db2e5d1SRobin Murphy 	for_each_sg(sg, s, nents, i) {
1411809eac54SRobin Murphy 		size_t s_iova_off = iova_offset(iovad, s->offset);
14120db2e5d1SRobin Murphy 		size_t s_length = s->length;
1413809eac54SRobin Murphy 		size_t pad_len = (mask - iova_len + 1) & mask;
14140db2e5d1SRobin Murphy 
141530280eeeSLogan Gunthorpe 		if (is_pci_p2pdma_page(sg_page(s))) {
141630280eeeSLogan Gunthorpe 			map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
141730280eeeSLogan Gunthorpe 			switch (map) {
141830280eeeSLogan Gunthorpe 			case PCI_P2PDMA_MAP_BUS_ADDR:
141930280eeeSLogan Gunthorpe 				/*
142030280eeeSLogan Gunthorpe 				 * iommu_map_sg() will skip this segment as
142030280eeeSLogan Gunthorpe 				 * it is marked as a bus address;
142130280eeeSLogan Gunthorpe 				 * __finalise_sg() will copy the DMA address
142330280eeeSLogan Gunthorpe 				 * into the output segment.
142430280eeeSLogan Gunthorpe 				 */
142530280eeeSLogan Gunthorpe 				continue;
142630280eeeSLogan Gunthorpe 			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
142730280eeeSLogan Gunthorpe 				/*
142830280eeeSLogan Gunthorpe 				 * Mappings through the host bridge use
142930280eeeSLogan Gunthorpe 				 * regular IOVAs, so we do nothing here
143030280eeeSLogan Gunthorpe 				 * and continue below.
143130280eeeSLogan Gunthorpe 				 */
143230280eeeSLogan Gunthorpe 				break;
143330280eeeSLogan Gunthorpe 			default:
143430280eeeSLogan Gunthorpe 				ret = -EREMOTEIO;
143530280eeeSLogan Gunthorpe 				goto out_restore_sg;
143630280eeeSLogan Gunthorpe 			}
143730280eeeSLogan Gunthorpe 		}
143830280eeeSLogan Gunthorpe 
1439809eac54SRobin Murphy 		sg_dma_address(s) = s_iova_off;
14400db2e5d1SRobin Murphy 		sg_dma_len(s) = s_length;
1441809eac54SRobin Murphy 		s->offset -= s_iova_off;
1442809eac54SRobin Murphy 		s_length = iova_align(iovad, s_length + s_iova_off);
14430db2e5d1SRobin Murphy 		s->length = s_length;
14440db2e5d1SRobin Murphy 
14450db2e5d1SRobin Murphy 		/*
1446809eac54SRobin Murphy 		 * Due to the alignment of our single IOVA allocation, we can
1447809eac54SRobin Murphy 		 * depend on these assumptions about the segment boundary mask:
1448809eac54SRobin Murphy 		 * - If mask size >= IOVA size, then the IOVA range cannot
1449809eac54SRobin Murphy 		 *   possibly fall across a boundary, so we don't care.
1450809eac54SRobin Murphy 		 * - If mask size < IOVA size, then the IOVA range must start
1451809eac54SRobin Murphy 		 *   exactly on a boundary, therefore we can lay things out
1452809eac54SRobin Murphy 		 *   based purely on segment lengths without needing to know
1453809eac54SRobin Murphy 		 *   the actual addresses beforehand.
1454809eac54SRobin Murphy 		 * - The mask must be a power of 2, so pad_len == 0 if
1455809eac54SRobin Murphy 		 *   iova_len == 0, thus we cannot dereference prev the first
1456809eac54SRobin Murphy 		 *   time through here (i.e. before it has a meaningful value).
14570db2e5d1SRobin Murphy 		 */
1458809eac54SRobin Murphy 		if (pad_len && pad_len < s_length - 1) {
14590db2e5d1SRobin Murphy 			prev->length += pad_len;
14600db2e5d1SRobin Murphy 			iova_len += pad_len;
14610db2e5d1SRobin Murphy 		}
14620db2e5d1SRobin Murphy 
14630db2e5d1SRobin Murphy 		iova_len += s_length;
14640db2e5d1SRobin Murphy 		prev = s;
14650db2e5d1SRobin Murphy 	}
14660db2e5d1SRobin Murphy 
146730280eeeSLogan Gunthorpe 	if (!iova_len)
146830280eeeSLogan Gunthorpe 		return __finalise_sg(dev, sg, nents, 0);
146930280eeeSLogan Gunthorpe 
1470842fe519SRobin Murphy 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
1471dabb16f6SLogan Gunthorpe 	if (!iova) {
1472dabb16f6SLogan Gunthorpe 		ret = -ENOMEM;
14730db2e5d1SRobin Murphy 		goto out_restore_sg;
1474dabb16f6SLogan Gunthorpe 	}
14750db2e5d1SRobin Murphy 
14760db2e5d1SRobin Murphy 	/*
14770db2e5d1SRobin Murphy 	 * We'll leave any physical concatenation to the IOMMU driver's
14780db2e5d1SRobin Murphy 	 * implementation - it knows better than we do.
14790db2e5d1SRobin Murphy 	 */
1480f2b2c051SJason Gunthorpe 	ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
1481a3884774SYunfei Wang 	if (ret < 0 || ret < iova_len)
14820db2e5d1SRobin Murphy 		goto out_free_iova;
14830db2e5d1SRobin Murphy 
1484842fe519SRobin Murphy 	return __finalise_sg(dev, sg, nents, iova);
14850db2e5d1SRobin Murphy 
14860db2e5d1SRobin Murphy out_free_iova:
14872a2b8eaaSTom Murphy 	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
14880db2e5d1SRobin Murphy out_restore_sg:
14890db2e5d1SRobin Murphy 	__invalidate_sg(sg, nents);
1490dabb16f6SLogan Gunthorpe out:
149130280eeeSLogan Gunthorpe 	if (ret != -ENOMEM && ret != -EREMOTEIO)
1492dabb16f6SLogan Gunthorpe 		return -EINVAL;
1493dabb16f6SLogan Gunthorpe 	return ret;
14940db2e5d1SRobin Murphy }
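
/*
 * Worked example of the boundary padding above (illustrative, with a 64K
 * segment boundary mask of 0xffff): if iova_len is 0x9000 when a 0x8000-byte
 * segment is reached, pad_len = (0x10000 - 0x9000) & 0xffff = 0x7000, which
 * is non-zero and smaller than the segment, so the previous segment and
 * iova_len both grow by 0x7000. The new segment then starts exactly on a
 * 64K boundary of the IOVA allocation and can never straddle one.
 */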
14950db2e5d1SRobin Murphy 
1496b5c58b2fSLeon Romanovsky void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1497b5c58b2fSLeon Romanovsky 		enum dma_data_direction dir, unsigned long attrs)
14980db2e5d1SRobin Murphy {
149930280eeeSLogan Gunthorpe 	dma_addr_t end = 0, start;
1500842fe519SRobin Murphy 	struct scatterlist *tmp;
1501842fe519SRobin Murphy 	int i;
150206d60728SChristoph Hellwig 
1503861370f4SCatalin Marinas 	if (sg_dma_is_swiotlb(sg)) {
150482612d66STom Murphy 		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
150582612d66STom Murphy 		return;
150682612d66STom Murphy 	}
150782612d66STom Murphy 
1508ee9d4097SDavid Stevens 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1509ee9d4097SDavid Stevens 		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
1510ee9d4097SDavid Stevens 
15110db2e5d1SRobin Murphy 	/*
15120db2e5d1SRobin Murphy 	 * The scatterlist segments are mapped into a single
151330280eeeSLogan Gunthorpe 	 * contiguous IOVA allocation; the start and end points
151430280eeeSLogan Gunthorpe 	 * just have to be determined.
15150db2e5d1SRobin Murphy 	 */
151630280eeeSLogan Gunthorpe 	for_each_sg(sg, tmp, nents, i) {
1517cb147bbeSRobin Murphy 		if (sg_dma_is_bus_address(tmp)) {
151830280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(tmp);
151930280eeeSLogan Gunthorpe 			continue;
152030280eeeSLogan Gunthorpe 		}
152130280eeeSLogan Gunthorpe 
1522842fe519SRobin Murphy 		if (sg_dma_len(tmp) == 0)
1523842fe519SRobin Murphy 			break;
152430280eeeSLogan Gunthorpe 
152530280eeeSLogan Gunthorpe 		start = sg_dma_address(tmp);
152630280eeeSLogan Gunthorpe 		break;
1527842fe519SRobin Murphy 	}
152830280eeeSLogan Gunthorpe 
152930280eeeSLogan Gunthorpe 	nents -= i;
153030280eeeSLogan Gunthorpe 	for_each_sg(tmp, tmp, nents, i) {
1531cb147bbeSRobin Murphy 		if (sg_dma_is_bus_address(tmp)) {
153230280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(tmp);
153330280eeeSLogan Gunthorpe 			continue;
153430280eeeSLogan Gunthorpe 		}
153530280eeeSLogan Gunthorpe 
153630280eeeSLogan Gunthorpe 		if (sg_dma_len(tmp) == 0)
153730280eeeSLogan Gunthorpe 			break;
153830280eeeSLogan Gunthorpe 
153930280eeeSLogan Gunthorpe 		end = sg_dma_address(tmp) + sg_dma_len(tmp);
154030280eeeSLogan Gunthorpe 	}
154130280eeeSLogan Gunthorpe 
154230280eeeSLogan Gunthorpe 	if (end)
1543b61d271eSRobin Murphy 		__iommu_dma_unmap(dev, start, end - start);
15440db2e5d1SRobin Murphy }
15450db2e5d1SRobin Murphy 
1546b5c58b2fSLeon Romanovsky dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
154751f8cc9eSRobin Murphy 		size_t size, enum dma_data_direction dir, unsigned long attrs)
154851f8cc9eSRobin Murphy {
154951f8cc9eSRobin Murphy 	return __iommu_dma_map(dev, phys, size,
15506e235020STom Murphy 			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
15516e235020STom Murphy 			dma_get_mask(dev));
155251f8cc9eSRobin Murphy }
155351f8cc9eSRobin Murphy 
1554b5c58b2fSLeon Romanovsky void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
155551f8cc9eSRobin Murphy 		size_t size, enum dma_data_direction dir, unsigned long attrs)
155651f8cc9eSRobin Murphy {
1557b61d271eSRobin Murphy 	__iommu_dma_unmap(dev, handle, size);
155851f8cc9eSRobin Murphy }
155951f8cc9eSRobin Murphy 
15608553f6e6SRobin Murphy static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
1561bcf4b9c4SRobin Murphy {
1562bcf4b9c4SRobin Murphy 	size_t alloc_size = PAGE_ALIGN(size);
1563bcf4b9c4SRobin Murphy 	int count = alloc_size >> PAGE_SHIFT;
1564bcf4b9c4SRobin Murphy 	struct page *page = NULL, **pages = NULL;
1565bcf4b9c4SRobin Murphy 
1566bcf4b9c4SRobin Murphy 	/* Non-coherent atomic allocation? Easy */
1567e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1568c84dc6e6SDavid Rientjes 	    dma_free_from_pool(dev, cpu_addr, alloc_size))
1569bcf4b9c4SRobin Murphy 		return;
1570bcf4b9c4SRobin Murphy 
1571f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
1572bcf4b9c4SRobin Murphy 		/*
1573bcf4b9c4SRobin Murphy 		 * If the address is remapped, then it's either non-coherent
1574bcf4b9c4SRobin Murphy 		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
1575bcf4b9c4SRobin Murphy 		 */
15765cf45379SChristoph Hellwig 		pages = dma_common_find_pages(cpu_addr);
1577bcf4b9c4SRobin Murphy 		if (!pages)
1578bcf4b9c4SRobin Murphy 			page = vmalloc_to_page(cpu_addr);
157951231740SChristoph Hellwig 		dma_common_free_remap(cpu_addr, alloc_size);
1580bcf4b9c4SRobin Murphy 	} else {
1581bcf4b9c4SRobin Murphy 		/* Lowmem means a coherent atomic or CMA allocation */
1582bcf4b9c4SRobin Murphy 		page = virt_to_page(cpu_addr);
1583bcf4b9c4SRobin Murphy 	}
1584bcf4b9c4SRobin Murphy 
1585bcf4b9c4SRobin Murphy 	if (pages)
1586bcf4b9c4SRobin Murphy 		__iommu_dma_free_pages(pages, count);
1587591fcf3bSNicolin Chen 	if (page)
1588591fcf3bSNicolin Chen 		dma_free_contiguous(dev, page, alloc_size);
1589bcf4b9c4SRobin Murphy }
1590bcf4b9c4SRobin Murphy 
1591b5c58b2fSLeon Romanovsky void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
15928553f6e6SRobin Murphy 		dma_addr_t handle, unsigned long attrs)
15938553f6e6SRobin Murphy {
15948553f6e6SRobin Murphy 	__iommu_dma_unmap(dev, handle, size);
15958553f6e6SRobin Murphy 	__iommu_dma_free(dev, size, cpu_addr);
15968553f6e6SRobin Murphy }
15978553f6e6SRobin Murphy 
1598ee1ef05dSChristoph Hellwig static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
1599ee1ef05dSChristoph Hellwig 		struct page **pagep, gfp_t gfp, unsigned long attrs)
160006d60728SChristoph Hellwig {
160106d60728SChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
16029ad5d6edSRobin Murphy 	size_t alloc_size = PAGE_ALIGN(size);
160390ae409fSChristoph Hellwig 	int node = dev_to_node(dev);
16049a4ab94aSChristoph Hellwig 	struct page *page = NULL;
16059ad5d6edSRobin Murphy 	void *cpu_addr;
160606d60728SChristoph Hellwig 
1607591fcf3bSNicolin Chen 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
160806d60728SChristoph Hellwig 	if (!page)
160990ae409fSChristoph Hellwig 		page = alloc_pages_node(node, gfp, get_order(alloc_size));
161090ae409fSChristoph Hellwig 	if (!page)
161106d60728SChristoph Hellwig 		return NULL;
161206d60728SChristoph Hellwig 
1613f5ff79fdSChristoph Hellwig 	if (!coherent || PageHighMem(page)) {
161433dcb37cSChristoph Hellwig 		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
16158680aa5aSRobin Murphy 
16169ad5d6edSRobin Murphy 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
161751231740SChristoph Hellwig 				prot, __builtin_return_address(0));
16189ad5d6edSRobin Murphy 		if (!cpu_addr)
1619ee1ef05dSChristoph Hellwig 			goto out_free_pages;
1620072bebc0SRobin Murphy 
162106d60728SChristoph Hellwig 		if (!coherent)
16229ad5d6edSRobin Murphy 			arch_dma_prep_coherent(page, size);
16238680aa5aSRobin Murphy 	} else {
16249ad5d6edSRobin Murphy 		cpu_addr = page_address(page);
16258680aa5aSRobin Murphy 	}
1626ee1ef05dSChristoph Hellwig 
1627ee1ef05dSChristoph Hellwig 	*pagep = page;
16289ad5d6edSRobin Murphy 	memset(cpu_addr, 0, alloc_size);
16299ad5d6edSRobin Murphy 	return cpu_addr;
1630072bebc0SRobin Murphy out_free_pages:
1631591fcf3bSNicolin Chen 	dma_free_contiguous(dev, page, alloc_size);
1632072bebc0SRobin Murphy 	return NULL;
163306d60728SChristoph Hellwig }
163406d60728SChristoph Hellwig 
1635b5c58b2fSLeon Romanovsky void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
1636b5c58b2fSLeon Romanovsky 		gfp_t gfp, unsigned long attrs)
1637ee1ef05dSChristoph Hellwig {
1638ee1ef05dSChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
1639ee1ef05dSChristoph Hellwig 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
1640ee1ef05dSChristoph Hellwig 	struct page *page = NULL;
1641ee1ef05dSChristoph Hellwig 	void *cpu_addr;
1642ee1ef05dSChristoph Hellwig 
1643ee1ef05dSChristoph Hellwig 	gfp |= __GFP_ZERO;
1644ee1ef05dSChristoph Hellwig 
1645f5ff79fdSChristoph Hellwig 	if (gfpflags_allow_blocking(gfp) &&
1646e8d39a90SChristoph Hellwig 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
16478d485a69SRobin Murphy 		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
1648e8d39a90SChristoph Hellwig 	}
1649ee1ef05dSChristoph Hellwig 
1650e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1651e6475eb0SChristoph Hellwig 	    !gfpflags_allow_blocking(gfp) && !coherent)
16529420139fSChristoph Hellwig 		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
16539420139fSChristoph Hellwig 					       gfp, NULL);
1654ee1ef05dSChristoph Hellwig 	else
1655ee1ef05dSChristoph Hellwig 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
1656ee1ef05dSChristoph Hellwig 	if (!cpu_addr)
1657ee1ef05dSChristoph Hellwig 		return NULL;
1658ee1ef05dSChristoph Hellwig 
16596e235020STom Murphy 	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
16606e235020STom Murphy 			dev->coherent_dma_mask);
1661ee1ef05dSChristoph Hellwig 	if (*handle == DMA_MAPPING_ERROR) {
1662ee1ef05dSChristoph Hellwig 		__iommu_dma_free(dev, size, cpu_addr);
1663ee1ef05dSChristoph Hellwig 		return NULL;
1664ee1ef05dSChristoph Hellwig 	}
1665ee1ef05dSChristoph Hellwig 
1666ee1ef05dSChristoph Hellwig 	return cpu_addr;
1667ee1ef05dSChristoph Hellwig }
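
/*
 * Illustrative sketch (not part of this file): dma_alloc_coherent() on an
 * IOMMU-translated device lands in iommu_dma_alloc() above. Depending on
 * context the backing memory comes from the remapped page-array path
 * (blocking, non-contiguous pages allowed), the atomic pool (non-blocking
 * and non-coherent), or a contiguous allocation, and is then mapped with
 * __iommu_dma_map() within the device's coherent_dma_mask:
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, SZ_16K, &dma, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_16K, cpu, dma);
 */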
1668ee1ef05dSChristoph Hellwig 
1669b5c58b2fSLeon Romanovsky int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
167006d60728SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
167106d60728SChristoph Hellwig 		unsigned long attrs)
167206d60728SChristoph Hellwig {
167306d60728SChristoph Hellwig 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1674efd9f10bSChristoph Hellwig 	unsigned long pfn, off = vma->vm_pgoff;
167506d60728SChristoph Hellwig 	int ret;
167606d60728SChristoph Hellwig 
167733dcb37cSChristoph Hellwig 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
167806d60728SChristoph Hellwig 
167906d60728SChristoph Hellwig 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
168006d60728SChristoph Hellwig 		return ret;
168106d60728SChristoph Hellwig 
168206d60728SChristoph Hellwig 	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
168306d60728SChristoph Hellwig 		return -ENXIO;
168406d60728SChristoph Hellwig 
1685f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
16865cf45379SChristoph Hellwig 		struct page **pages = dma_common_find_pages(cpu_addr);
168706d60728SChristoph Hellwig 
1688efd9f10bSChristoph Hellwig 		if (pages)
168971fe89ceSChristoph Hellwig 			return vm_map_pages(vma, pages, nr_pages);
1690efd9f10bSChristoph Hellwig 		pfn = vmalloc_to_pfn(cpu_addr);
1691efd9f10bSChristoph Hellwig 	} else {
1692efd9f10bSChristoph Hellwig 		pfn = page_to_pfn(virt_to_page(cpu_addr));
1693efd9f10bSChristoph Hellwig 	}
1694efd9f10bSChristoph Hellwig 
1695efd9f10bSChristoph Hellwig 	return remap_pfn_range(vma, vma->vm_start, pfn + off,
1696efd9f10bSChristoph Hellwig 			       vma->vm_end - vma->vm_start,
1697efd9f10bSChristoph Hellwig 			       vma->vm_page_prot);
169806d60728SChristoph Hellwig }
169906d60728SChristoph Hellwig 
1700b5c58b2fSLeon Romanovsky int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
170106d60728SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
170206d60728SChristoph Hellwig 		unsigned long attrs)
170306d60728SChristoph Hellwig {
17043fb3378bSChristoph Hellwig 	struct page *page;
17053fb3378bSChristoph Hellwig 	int ret;
170606d60728SChristoph Hellwig 
1707f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
17085cf45379SChristoph Hellwig 		struct page **pages = dma_common_find_pages(cpu_addr);
17093fb3378bSChristoph Hellwig 
17103fb3378bSChristoph Hellwig 		if (pages) {
17113fb3378bSChristoph Hellwig 			return sg_alloc_table_from_pages(sgt, pages,
17123fb3378bSChristoph Hellwig 					PAGE_ALIGN(size) >> PAGE_SHIFT,
17133fb3378bSChristoph Hellwig 					0, size, GFP_KERNEL);
171406d60728SChristoph Hellwig 		}
171506d60728SChristoph Hellwig 
17163fb3378bSChristoph Hellwig 		page = vmalloc_to_page(cpu_addr);
17173fb3378bSChristoph Hellwig 	} else {
17183fb3378bSChristoph Hellwig 		page = virt_to_page(cpu_addr);
171906d60728SChristoph Hellwig 	}
172006d60728SChristoph Hellwig 
17213fb3378bSChristoph Hellwig 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
17223fb3378bSChristoph Hellwig 	if (!ret)
17233fb3378bSChristoph Hellwig 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
17243fb3378bSChristoph Hellwig 	return ret;
172506d60728SChristoph Hellwig }
172606d60728SChristoph Hellwig 
1727b5c58b2fSLeon Romanovsky unsigned long iommu_dma_get_merge_boundary(struct device *dev)
1728158a6d3cSYoshihiro Shimoda {
1729158a6d3cSYoshihiro Shimoda 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1730158a6d3cSYoshihiro Shimoda 
1731158a6d3cSYoshihiro Shimoda 	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
1732158a6d3cSYoshihiro Shimoda }
1733158a6d3cSYoshihiro Shimoda 
1734b5c58b2fSLeon Romanovsky size_t iommu_dma_opt_mapping_size(void)
17356d9870b7SJohn Garry {
17366d9870b7SJohn Garry 	return iova_rcache_range();
17376d9870b7SJohn Garry }
17386d9870b7SJohn Garry 
1739b5c58b2fSLeon Romanovsky size_t iommu_dma_max_mapping_size(struct device *dev)
1740afc5aa46SNicolin Chen {
1741afc5aa46SNicolin Chen 	if (dev_is_untrusted(dev))
1742afc5aa46SNicolin Chen 		return swiotlb_max_mapping_size(dev);
1743afc5aa46SNicolin Chen 
1744afc5aa46SNicolin Chen 	return SIZE_MAX;
1745afc5aa46SNicolin Chen }
1746afc5aa46SNicolin Chen 
1747b67483b3SRobin Murphy void iommu_setup_dma_ops(struct device *dev)
174806d60728SChristoph Hellwig {
174906d60728SChristoph Hellwig 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
175006d60728SChristoph Hellwig 
1751b67483b3SRobin Murphy 	if (dev_is_pci(dev))
1752b67483b3SRobin Murphy 		dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
175306d60728SChristoph Hellwig 
1754b5c58b2fSLeon Romanovsky 	dev->dma_iommu = iommu_is_dma_domain(domain);
1755b5c58b2fSLeon Romanovsky 	if (dev->dma_iommu && iommu_dma_init_domain(domain, dev))
175606d60728SChristoph Hellwig 		goto out_err;
175706d60728SChristoph Hellwig 
175806d60728SChristoph Hellwig 	return;
175906d60728SChristoph Hellwig out_err:
176006d60728SChristoph Hellwig 	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
176106d60728SChristoph Hellwig 		dev_name(dev));
1762b5c58b2fSLeon Romanovsky 	dev->dma_iommu = false;
176344bb7e24SRobin Murphy }
176444bb7e24SRobin Murphy 
176544bb7e24SRobin Murphy static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
176644bb7e24SRobin Murphy 		phys_addr_t msi_addr, struct iommu_domain *domain)
176744bb7e24SRobin Murphy {
176844bb7e24SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
176944bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi_page;
1770842fe519SRobin Murphy 	dma_addr_t iova;
177144bb7e24SRobin Murphy 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1772fdbe574eSRobin Murphy 	size_t size = cookie_msi_granule(cookie);
177344bb7e24SRobin Murphy 
1774fdbe574eSRobin Murphy 	msi_addr &= ~(phys_addr_t)(size - 1);
177544bb7e24SRobin Murphy 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
177644bb7e24SRobin Murphy 		if (msi_page->phys == msi_addr)
177744bb7e24SRobin Murphy 			return msi_page;
177844bb7e24SRobin Murphy 
1779c1864790SRobin Murphy 	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
178044bb7e24SRobin Murphy 	if (!msi_page)
178144bb7e24SRobin Murphy 		return NULL;
178244bb7e24SRobin Murphy 
17838af23fadSRobin Murphy 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
17848af23fadSRobin Murphy 	if (!iova)
178544bb7e24SRobin Murphy 		goto out_free_page;
178644bb7e24SRobin Murphy 
17871369459bSJason Gunthorpe 	if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
17888af23fadSRobin Murphy 		goto out_free_iova;
17898af23fadSRobin Murphy 
179044bb7e24SRobin Murphy 	INIT_LIST_HEAD(&msi_page->list);
1791a44e6657SRobin Murphy 	msi_page->phys = msi_addr;
1792a44e6657SRobin Murphy 	msi_page->iova = iova;
179344bb7e24SRobin Murphy 	list_add(&msi_page->list, &cookie->msi_page_list);
179444bb7e24SRobin Murphy 	return msi_page;
179544bb7e24SRobin Murphy 
17968af23fadSRobin Murphy out_free_iova:
17972a2b8eaaSTom Murphy 	iommu_dma_free_iova(cookie, iova, size, NULL);
179844bb7e24SRobin Murphy out_free_page:
179944bb7e24SRobin Murphy 	kfree(msi_page);
180044bb7e24SRobin Murphy 	return NULL;
180144bb7e24SRobin Murphy }
180244bb7e24SRobin Murphy 
1803fa49364cSRobin Murphy /**
1804fa49364cSRobin Murphy  * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
1805fa49364cSRobin Murphy  * @desc: MSI descriptor, will store the MSI page
1806fa49364cSRobin Murphy  * @msi_addr: MSI target address to be mapped
1807fa49364cSRobin Murphy  *
1808fa49364cSRobin Murphy  * Return: 0 on success or negative error code if the mapping failed.
1809fa49364cSRobin Murphy  */
1810ece6e6f0SJulien Grall int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
181144bb7e24SRobin Murphy {
1812ece6e6f0SJulien Grall 	struct device *dev = msi_desc_to_dev(desc);
181344bb7e24SRobin Murphy 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
181444bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi_page;
1815c1864790SRobin Murphy 	static DEFINE_MUTEX(msi_prepare_lock); /* see below */
181644bb7e24SRobin Murphy 
1817ece6e6f0SJulien Grall 	if (!domain || !domain->iova_cookie) {
1818ece6e6f0SJulien Grall 		desc->iommu_cookie = NULL;
1819ece6e6f0SJulien Grall 		return 0;
1820ece6e6f0SJulien Grall 	}
182144bb7e24SRobin Murphy 
182244bb7e24SRobin Murphy 	/*
1823c1864790SRobin Murphy 	 * In fact the whole prepare operation should already be serialised by
1824c1864790SRobin Murphy 	 * irq_domain_mutex further up the callchain, but that's pretty subtle
1825c1864790SRobin Murphy 	 * on its own, so consider this locking as failsafe documentation...
182644bb7e24SRobin Murphy 	 */
1827c1864790SRobin Murphy 	mutex_lock(&msi_prepare_lock);
182844bb7e24SRobin Murphy 	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
1829c1864790SRobin Murphy 	mutex_unlock(&msi_prepare_lock);
183044bb7e24SRobin Murphy 
1831ece6e6f0SJulien Grall 	msi_desc_set_iommu_cookie(desc, msi_page);
1832ece6e6f0SJulien Grall 
1833ece6e6f0SJulien Grall 	if (!msi_page)
1834ece6e6f0SJulien Grall 		return -ENOMEM;
1835ece6e6f0SJulien Grall 	return 0;
183644bb7e24SRobin Murphy }
1837ece6e6f0SJulien Grall 
1838fa49364cSRobin Murphy /**
1839fa49364cSRobin Murphy  * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
1840fa49364cSRobin Murphy  * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
1841fa49364cSRobin Murphy  * @msg: MSI message containing target physical address
1842fa49364cSRobin Murphy  */
1843fa49364cSRobin Murphy void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
1844ece6e6f0SJulien Grall {
1845ece6e6f0SJulien Grall 	struct device *dev = msi_desc_to_dev(desc);
1846ece6e6f0SJulien Grall 	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1847ece6e6f0SJulien Grall 	const struct iommu_dma_msi_page *msi_page;
1848ece6e6f0SJulien Grall 
1849ece6e6f0SJulien Grall 	msi_page = msi_desc_get_iommu_cookie(desc);
1850ece6e6f0SJulien Grall 
1851ece6e6f0SJulien Grall 	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
1852ece6e6f0SJulien Grall 		return;
1853ece6e6f0SJulien Grall 
1854ece6e6f0SJulien Grall 	msg->address_hi = upper_32_bits(msi_page->iova);
1855ece6e6f0SJulien Grall 	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
1856ece6e6f0SJulien Grall 	msg->address_lo += lower_32_bits(msi_page->iova);
185744bb7e24SRobin Murphy }
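
/*
 * Illustrative flow (not part of this file): an MSI irqchip driver such as
 * an ITS typically calls iommu_dma_prepare_msi() while allocating the
 * interrupt, passing the physical address of its doorbell register, and
 * later calls iommu_dma_compose_msi_msg() from its irq_compose_msi_msg()
 * hook. The address programmed into the endpoint is then the doorbell's
 * IOVA rather than its physical address whenever the device sits behind an
 * IOMMU with a DMA cookie.
 */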
185806d60728SChristoph Hellwig 
185906d60728SChristoph Hellwig static int iommu_dma_init(void)
186006d60728SChristoph Hellwig {
1861a8e8af35SLianbo Jiang 	if (is_kdump_kernel())
1862a8e8af35SLianbo Jiang 		static_branch_enable(&iommu_deferred_attach_enabled);
1863a8e8af35SLianbo Jiang 
186406d60728SChristoph Hellwig 	return iova_cache_get();
18650db2e5d1SRobin Murphy }
186606d60728SChristoph Hellwig arch_initcall(iommu_dma_init);
1867