1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * A fairly generic DMA-API to IOMMU-API glue layer.
4 *
5 * Copyright (C) 2014-2015 ARM Ltd.
6 *
7 * based in part on arch/arm/mm/dma-mapping.c:
8 * Copyright (C) 2000-2004 Russell King
9 */
10
11 #include <linux/acpi_iort.h>
12 #include <linux/atomic.h>
13 #include <linux/crash_dump.h>
14 #include <linux/device.h>
15 #include <linux/dma-direct.h>
16 #include <linux/dma-map-ops.h>
17 #include <linux/generic_pt/iommu.h>
18 #include <linux/gfp.h>
19 #include <linux/huge_mm.h>
20 #include <linux/iommu.h>
21 #include <linux/iommu-dma.h>
22 #include <linux/iova.h>
23 #include <linux/irq.h>
24 #include <linux/list_sort.h>
25 #include <linux/memremap.h>
26 #include <linux/mm.h>
27 #include <linux/mutex.h>
28 #include <linux/msi.h>
29 #include <linux/of_iommu.h>
30 #include <linux/pci.h>
31 #include <linux/pci-p2pdma.h>
32 #include <linux/scatterlist.h>
33 #include <linux/spinlock.h>
34 #include <linux/swiotlb.h>
35 #include <linux/vmalloc.h>
36 #include <trace/events/swiotlb.h>
37
38 #include "dma-iommu.h"
39 #include "iommu-pages.h"
40
41 struct iommu_dma_msi_page {
42 struct list_head list;
43 dma_addr_t iova;
44 phys_addr_t phys;
45 };
46
47 enum iommu_dma_queue_type {
48 IOMMU_DMA_OPTS_PER_CPU_QUEUE,
49 IOMMU_DMA_OPTS_SINGLE_QUEUE,
50 };
51
52 struct iommu_dma_options {
53 enum iommu_dma_queue_type qt;
54 size_t fq_size;
55 unsigned int fq_timeout;
56 };
57
58 struct iommu_dma_cookie {
59 struct iova_domain iovad;
60 struct list_head msi_page_list;
61 /* Flush queue */
62 union {
63 struct iova_fq *single_fq;
64 struct iova_fq __percpu *percpu_fq;
65 };
66 /* Number of TLB flushes that have been started */
67 atomic64_t fq_flush_start_cnt;
68 /* Number of TLB flushes that have been finished */
69 atomic64_t fq_flush_finish_cnt;
70 /* Timer to regularly empty the flush queues */
71 struct timer_list fq_timer;
72 /* 1 when timer is active, 0 when not */
73 atomic_t fq_timer_on;
74 /* Domain for flush queue callback; NULL if flush queue not in use */
75 struct iommu_domain *fq_domain;
76 /* Options for dma-iommu use */
77 struct iommu_dma_options options;
78 };
79
80 struct iommu_dma_msi_cookie {
81 dma_addr_t msi_iova;
82 struct list_head msi_page_list;
83 };
84
85 static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
86 bool iommu_dma_forcedac __read_mostly;
87
88 static int __init iommu_dma_forcedac_setup(char *str)
89 {
90 int ret = kstrtobool(str, &iommu_dma_forcedac);
91
92 if (!ret && iommu_dma_forcedac)
93 pr_info("Forcing DAC for PCI devices\n");
94 return ret;
95 }
96 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
97
98 /* Number of entries per flush queue */
99 #define IOVA_DEFAULT_FQ_SIZE 256
100 #define IOVA_SINGLE_FQ_SIZE 32768
101
102 /* Timeout (in ms) after which entries are flushed from the queue */
103 #define IOVA_DEFAULT_FQ_TIMEOUT 10
104 #define IOVA_SINGLE_FQ_TIMEOUT 1000
105
106 /* Flush queue entry for deferred flushing */
107 struct iova_fq_entry {
108 unsigned long iova_pfn;
109 unsigned long pages;
110 struct iommu_pages_list freelist;
111 u64 counter; /* Flush counter when this entry was added */
112 };
113
114 /* Per-CPU flush queue structure */
115 struct iova_fq {
116 spinlock_t lock;
117 unsigned int head, tail;
118 unsigned int mod_mask;
119 struct iova_fq_entry entries[];
120 };
121
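/* Iterate over the occupied ring slots from head up to tail, wrapping via mod_mask */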
122 #define fq_ring_for_each(i, fq) \
123 for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
124
125 static inline bool fq_full(struct iova_fq *fq)
126 {
127 assert_spin_locked(&fq->lock);
128 return (((fq->tail + 1) & fq->mod_mask) == fq->head);
129 }
130
131 static inline unsigned int fq_ring_add(struct iova_fq *fq)
132 {
133 unsigned int idx = fq->tail;
134
135 assert_spin_locked(&fq->lock);
136
137 fq->tail = (idx + 1) & fq->mod_mask;
138
139 return idx;
140 }
141
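/* Free all ring entries whose IOTLB flush has already completed; caller must hold fq->lock */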
142 static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
143 {
144 u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
145 unsigned int idx;
146
147 assert_spin_locked(&fq->lock);
148
149 fq_ring_for_each(idx, fq) {
150
151 if (fq->entries[idx].counter >= counter)
152 break;
153
154 iommu_put_pages_list(&fq->entries[idx].freelist);
155 free_iova_fast(&cookie->iovad,
156 fq->entries[idx].iova_pfn,
157 fq->entries[idx].pages);
158
159 fq->entries[idx].freelist =
160 IOMMU_PAGES_LIST_INIT(fq->entries[idx].freelist);
161 fq->head = (fq->head + 1) & fq->mod_mask;
162 }
163 }
164
165 static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
166 {
167 unsigned long flags;
168
169 spin_lock_irqsave(&fq->lock, flags);
170 fq_ring_free_locked(cookie, fq);
171 spin_unlock_irqrestore(&fq->lock, flags);
172 }
173
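/* Flush the entire IOTLB for the flush-queue domain, bumping the start/finish counters around it */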
174 static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
175 {
176 atomic64_inc(&cookie->fq_flush_start_cnt);
177 cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
178 atomic64_inc(&cookie->fq_flush_finish_cnt);
179 }
180
181 static void fq_flush_timeout(struct timer_list *t)
182 {
183 struct iommu_dma_cookie *cookie = timer_container_of(cookie, t,
184 fq_timer);
185 int cpu;
186
187 atomic_set(&cookie->fq_timer_on, 0);
188 fq_flush_iotlb(cookie);
189
190 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) {
191 fq_ring_free(cookie, cookie->single_fq);
192 } else {
193 for_each_possible_cpu(cpu)
194 fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu));
195 }
196 }
197
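/* Queue an IOVA range (and any freed page-table pages) for deferred release after the next IOTLB flush */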
198 static void queue_iova(struct iommu_dma_cookie *cookie,
199 unsigned long pfn, unsigned long pages,
200 struct iommu_pages_list *freelist)
201 {
202 struct iova_fq *fq;
203 unsigned long flags;
204 unsigned int idx;
205
206 /*
207 * Order against the IOMMU driver's pagetable update from unmapping
208 * @pte, to guarantee that fq_flush_iotlb() observes that if called
209 * from a different CPU before we release the lock below. Full barrier
210 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
211 * written fq state here.
212 */
213 smp_mb();
214
215 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
216 fq = cookie->single_fq;
217 else
218 fq = raw_cpu_ptr(cookie->percpu_fq);
219
220 spin_lock_irqsave(&fq->lock, flags);
221
222 /*
223 * First remove all entries from the flush queue that have already been
224 * flushed out on another CPU. This makes the fq_full() check below less
225 * likely to be true.
226 */
227 fq_ring_free_locked(cookie, fq);
228
229 if (fq_full(fq)) {
230 fq_flush_iotlb(cookie);
231 fq_ring_free_locked(cookie, fq);
232 }
233
234 idx = fq_ring_add(fq);
235
236 fq->entries[idx].iova_pfn = pfn;
237 fq->entries[idx].pages = pages;
238 fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
239 iommu_pages_list_splice(freelist, &fq->entries[idx].freelist);
240
241 spin_unlock_irqrestore(&fq->lock, flags);
242
243 /* Avoid false sharing as much as possible. */
244 if (!atomic_read(&cookie->fq_timer_on) &&
245 !atomic_xchg(&cookie->fq_timer_on, 1))
246 mod_timer(&cookie->fq_timer,
247 jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
248 }
249
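/* Release any pages still queued in the single flush queue before freeing the queue itself */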
250 static void iommu_dma_free_fq_single(struct iova_fq *fq)
251 {
252 int idx;
253
254 fq_ring_for_each(idx, fq)
255 iommu_put_pages_list(&fq->entries[idx].freelist);
256 vfree(fq);
257 }
258
259 static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
260 {
261 int cpu, idx;
262
263 /* The IOVAs will be torn down separately, so just free our queued pages */
264 for_each_possible_cpu(cpu) {
265 struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);
266
267 fq_ring_for_each(idx, fq)
268 iommu_put_pages_list(&fq->entries[idx].freelist);
269 }
270
271 free_percpu(percpu_fq);
272 }
273
274 static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
275 {
276 if (!cookie->fq_domain)
277 return;
278
279 timer_delete_sync(&cookie->fq_timer);
280 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
281 iommu_dma_free_fq_single(cookie->single_fq);
282 else
283 iommu_dma_free_fq_percpu(cookie->percpu_fq);
284 }
285
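/* Initialise an empty flush-queue ring of fq_size entries (fq_size must be a power of two) */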
286 static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
287 {
288 int i;
289
290 fq->head = 0;
291 fq->tail = 0;
292 fq->mod_mask = fq_size - 1;
293
294 spin_lock_init(&fq->lock);
295
296 for (i = 0; i < fq_size; i++)
297 fq->entries[i].freelist =
298 IOMMU_PAGES_LIST_INIT(fq->entries[i].freelist);
299 }
300
301 static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
302 {
303 size_t fq_size = cookie->options.fq_size;
304 struct iova_fq *queue;
305
306 queue = vmalloc(struct_size(queue, entries, fq_size));
307 if (!queue)
308 return -ENOMEM;
309 iommu_dma_init_one_fq(queue, fq_size);
310 cookie->single_fq = queue;
311
312 return 0;
313 }
314
315 static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
316 {
317 size_t fq_size = cookie->options.fq_size;
318 struct iova_fq __percpu *queue;
319 int cpu;
320
321 queue = __alloc_percpu(struct_size(queue, entries, fq_size),
322 __alignof__(*queue));
323 if (!queue)
324 return -ENOMEM;
325
326 for_each_possible_cpu(cpu)
327 iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu), fq_size);
328 cookie->percpu_fq = queue;
329 return 0;
330 }
331
332 /* sysfs updates are serialised by the mutex of the group owning @domain */
333 int iommu_dma_init_fq(struct iommu_domain *domain)
334 {
335 struct iommu_dma_cookie *cookie = domain->iova_cookie;
336 int rc;
337
338 if (cookie->fq_domain)
339 return 0;
340
341 atomic64_set(&cookie->fq_flush_start_cnt, 0);
342 atomic64_set(&cookie->fq_flush_finish_cnt, 0);
343
344 if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
345 rc = iommu_dma_init_fq_single(cookie);
346 else
347 rc = iommu_dma_init_fq_percpu(cookie);
348
349 if (rc) {
350 pr_warn("iova flush queue initialization failed\n");
351 return -ENOMEM;
352 }
353
354 timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
355 atomic_set(&cookie->fq_timer_on, 0);
356 /*
357 * Prevent incomplete fq state being observable. Pairs with path from
358 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
359 */
360 smp_wmb();
361 WRITE_ONCE(cookie->fq_domain, domain);
362 return 0;
363 }
364
365 /**
366 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
367 * @domain: IOMMU domain to prepare for DMA-API usage
368 */
369 int iommu_get_dma_cookie(struct iommu_domain *domain)
370 {
371 struct iommu_dma_cookie *cookie;
372
373 if (domain->cookie_type != IOMMU_COOKIE_NONE)
374 return -EEXIST;
375
376 cookie = kzalloc_obj(*cookie);
377 if (!cookie)
378 return -ENOMEM;
379
380 INIT_LIST_HEAD(&cookie->msi_page_list);
381 domain->cookie_type = IOMMU_COOKIE_DMA_IOVA;
382 domain->iova_cookie = cookie;
383 return 0;
384 }
385
386 /**
387 * iommu_get_msi_cookie - Acquire just MSI remapping resources
388 * @domain: IOMMU domain to prepare
389 * @base: Start address of IOVA region for MSI mappings
390 *
391 * Users who manage their own IOVA allocation and do not want DMA API support,
392 * but would still like to take advantage of automatic MSI remapping, can use
393 * this to initialise their own domain appropriately. Users should reserve a
394 * contiguous IOVA region, starting at @base, large enough to accommodate the
395 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
396 * used by the devices attached to @domain.
397 */
398 int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
399 {
400 struct iommu_dma_msi_cookie *cookie;
401
402 if (domain->type != IOMMU_DOMAIN_UNMANAGED)
403 return -EINVAL;
404
405 if (domain->cookie_type != IOMMU_COOKIE_NONE)
406 return -EEXIST;
407
408 cookie = kzalloc_obj(*cookie);
409 if (!cookie)
410 return -ENOMEM;
411
412 cookie->msi_iova = base;
413 INIT_LIST_HEAD(&cookie->msi_page_list);
414 domain->cookie_type = IOMMU_COOKIE_DMA_MSI;
415 domain->msi_cookie = cookie;
416 return 0;
417 }
418 EXPORT_SYMBOL(iommu_get_msi_cookie);
419
420 /**
421 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
422 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
423 */
424 void iommu_put_dma_cookie(struct iommu_domain *domain)
425 {
426 struct iommu_dma_cookie *cookie = domain->iova_cookie;
427 struct iommu_dma_msi_page *msi, *tmp;
428
429 if (cookie->iovad.granule) {
430 iommu_dma_free_fq(cookie);
431 put_iova_domain(&cookie->iovad);
432 }
433 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list)
434 kfree(msi);
435 kfree(cookie);
436 }
437
438 /**
439 * iommu_put_msi_cookie - Release a domain's MSI mapping resources
440 * @domain: IOMMU domain previously prepared by iommu_get_msi_cookie()
441 */
442 void iommu_put_msi_cookie(struct iommu_domain *domain)
443 {
444 struct iommu_dma_msi_cookie *cookie = domain->msi_cookie;
445 struct iommu_dma_msi_page *msi, *tmp;
446
447 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list)
448 kfree(msi);
449 kfree(cookie);
450 }
451
452 /**
453 * iommu_dma_get_resv_regions - Reserved region driver helper
454 * @dev: Device from iommu_get_resv_regions()
455 * @list: Reserved region list from iommu_get_resv_regions()
456 *
457 * IOMMU drivers can use this to implement their .get_resv_regions callback
458 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
459 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
460 * reservation.
461 */
462 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
463 {
464
465 if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
466 iort_iommu_get_resv_regions(dev, list);
467
468 if (dev->of_node)
469 of_iommu_get_resv_regions(dev, list);
470 }
471 EXPORT_SYMBOL(iommu_dma_get_resv_regions);
472
473 static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
474 phys_addr_t start, phys_addr_t end)
475 {
476 struct iova_domain *iovad = &cookie->iovad;
477 struct iommu_dma_msi_page *msi_page;
478 int i, num_pages;
479
480 start -= iova_offset(iovad, start);
481 num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
482
483 for (i = 0; i < num_pages; i++) {
484 msi_page = kmalloc_obj(*msi_page);
485 if (!msi_page)
486 return -ENOMEM;
487
488 msi_page->phys = start;
489 msi_page->iova = start;
490 INIT_LIST_HEAD(&msi_page->list);
491 list_add(&msi_page->list, &cookie->msi_page_list);
492 start += iovad->granule;
493 }
494
495 return 0;
496 }
497
498 static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
499 const struct list_head *b)
500 {
501 struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
502 struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);
503
504 return res_a->res->start > res_b->res->start;
505 }
506
507 static int iova_reserve_pci_windows(struct pci_dev *dev,
508 struct iova_domain *iovad)
509 {
510 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
511 struct resource_entry *window;
512 unsigned long lo, hi;
513 phys_addr_t start = 0, end;
514
515 resource_list_for_each_entry(window, &bridge->windows) {
516 if (resource_type(window->res) != IORESOURCE_MEM)
517 continue;
518
519 lo = iova_pfn(iovad, window->res->start - window->offset);
520 hi = iova_pfn(iovad, window->res->end - window->offset);
521 reserve_iova(iovad, lo, hi);
522 }
523
524 /* Get reserved DMA windows from host bridge */
525 list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
526 resource_list_for_each_entry(window, &bridge->dma_ranges) {
527 end = window->res->start - window->offset;
528 resv_iova:
529 if (end > start) {
530 lo = iova_pfn(iovad, start);
531 hi = iova_pfn(iovad, end);
532 reserve_iova(iovad, lo, hi);
533 } else if (end < start) {
534 /* DMA ranges should be non-overlapping */
535 dev_err(&dev->dev,
536 "Failed to reserve IOVA [%pa-%pa]\n",
537 &start, &end);
538 return -EINVAL;
539 }
540
541 start = window->res->end - window->offset + 1;
542 /* If window is last entry */
543 if (window->node.next == &bridge->dma_ranges &&
544 end != ~(phys_addr_t)0) {
545 end = ~(phys_addr_t)0;
546 goto resv_iova;
547 }
548 }
549
550 return 0;
551 }
552
553 static int iova_reserve_iommu_regions(struct device *dev,
554 struct iommu_domain *domain)
555 {
556 struct iommu_dma_cookie *cookie = domain->iova_cookie;
557 struct iova_domain *iovad = &cookie->iovad;
558 struct iommu_resv_region *region;
559 LIST_HEAD(resv_regions);
560 int ret = 0;
561
562 if (dev_is_pci(dev)) {
563 ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
564 if (ret)
565 return ret;
566 }
567
568 iommu_get_resv_regions(dev, &resv_regions);
569 list_for_each_entry(region, &resv_regions, list) {
570 unsigned long lo, hi;
571
572 /* We ARE the software that manages these! */
573 if (region->type == IOMMU_RESV_SW_MSI)
574 continue;
575
576 lo = iova_pfn(iovad, region->start);
577 hi = iova_pfn(iovad, region->start + region->length - 1);
578 reserve_iova(iovad, lo, hi);
579
580 if (region->type == IOMMU_RESV_MSI)
581 ret = cookie_init_hw_msi_region(cookie, region->start,
582 region->start + region->length);
583 if (ret)
584 break;
585 }
586 iommu_put_resv_regions(dev, &resv_regions);
587
588 return ret;
589 }
590
591 static bool dev_is_untrusted(struct device *dev)
592 {
593 return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
594 }
595
596 static bool dev_use_swiotlb(struct device *dev, size_t size,
597 enum dma_data_direction dir)
598 {
599 return IS_ENABLED(CONFIG_SWIOTLB) &&
600 (dev_is_untrusted(dev) ||
601 dma_kmalloc_needs_bounce(dev, size, dir));
602 }
603
604 static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
605 int nents, enum dma_data_direction dir)
606 {
607 struct scatterlist *s;
608 int i;
609
610 if (!IS_ENABLED(CONFIG_SWIOTLB))
611 return false;
612
613 if (dev_is_untrusted(dev))
614 return true;
615
616 /*
617 * If kmalloc() buffers are not DMA-safe for this device and
618 * direction, check the individual lengths in the sg list. If any
619 * element is deemed unsafe, use the swiotlb for bouncing.
620 */
621 if (!dma_kmalloc_safe(dev, dir)) {
622 for_each_sg(sg, s, nents, i)
623 if (!dma_kmalloc_size_aligned(s->length))
624 return true;
625 }
626
627 return false;
628 }
629
630 /**
631 * iommu_dma_init_options - Initialize dma-iommu options
632 * @options: The options to be initialized
633 * @dev: Device the options are set for
634 *
635 * This allows tuning dma-iommu behaviour specific to the device's properties
636 */
637 static void iommu_dma_init_options(struct iommu_dma_options *options,
638 struct device *dev)
639 {
640 /* Shadowing IOTLB flushes do better with a single large queue */
641 if (dev->iommu->shadow_on_flush) {
642 options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
643 options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
644 options->fq_size = IOVA_SINGLE_FQ_SIZE;
645 } else {
646 options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
647 options->fq_size = IOVA_DEFAULT_FQ_SIZE;
648 options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
649 }
650 }
651
652 static bool iommu_domain_supports_fq(struct device *dev,
653 struct iommu_domain *domain)
654 {
655 /* iommupt always supports DMA-FQ */
656 if (iommupt_from_domain(domain))
657 return true;
658 return device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH);
659 }
660
661 /**
662 * iommu_dma_init_domain - Initialise a DMA mapping domain
663 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
664 * @dev: Device the domain is being initialised for
665 *
666 * If the geometry and dma_range_map include address 0, we reserve that page
667 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
668 * any change which could make prior IOVAs invalid will fail.
669 */
670 static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev)
671 {
672 struct iommu_dma_cookie *cookie = domain->iova_cookie;
673 const struct bus_dma_region *map = dev->dma_range_map;
674 unsigned long order, base_pfn;
675 struct iova_domain *iovad;
676 int ret;
677
678 if (!cookie || domain->cookie_type != IOMMU_COOKIE_DMA_IOVA)
679 return -EINVAL;
680
681 iovad = &cookie->iovad;
682
683 /* Use the smallest supported page size for IOVA granularity */
684 order = __ffs(domain->pgsize_bitmap);
685 base_pfn = 1;
686
687 /* Check the domain allows at least some access to the device... */
688 if (map) {
689 if (dma_range_map_min(map) > domain->geometry.aperture_end ||
690 dma_range_map_max(map) < domain->geometry.aperture_start) {
691 pr_warn("specified DMA range outside IOMMU capability\n");
692 return -EFAULT;
693 }
694 }
695 /* ...then finally give it a kicking to make sure it fits */
696 base_pfn = max_t(unsigned long, base_pfn,
697 domain->geometry.aperture_start >> order);
698
699 /* start_pfn is always nonzero for an already-initialised domain */
700 if (iovad->start_pfn) {
701 if (1UL << order != iovad->granule ||
702 base_pfn != iovad->start_pfn) {
703 pr_warn("Incompatible range for DMA domain\n");
704 return -EFAULT;
705 }
706
707 return 0;
708 }
709
710 init_iova_domain(iovad, 1UL << order, base_pfn);
711 ret = iova_domain_init_rcaches(iovad);
712 if (ret)
713 return ret;
714
715 iommu_dma_init_options(&cookie->options, dev);
716
717 /* If the FQ fails we can simply fall back to strict mode */
718 if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
719 (!iommu_domain_supports_fq(dev, domain) ||
720 iommu_dma_init_fq(domain)))
721 domain->type = IOMMU_DOMAIN_DMA;
722
723 return iova_reserve_iommu_regions(dev, domain);
724 }
725
726 /**
727 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
728 * page flags.
729 * @dir: Direction of DMA transfer
730 * @coherent: Is the DMA master cache-coherent?
731 * @attrs: DMA attributes for the mapping
732 *
733 * Return: corresponding IOMMU API page protection flags
734 */
735 static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
736 unsigned long attrs)
737 {
738 int prot;
739
740 if (attrs & DMA_ATTR_MMIO)
741 prot = IOMMU_MMIO;
742 else
743 prot = coherent ? IOMMU_CACHE : 0;
744
745 if (attrs & DMA_ATTR_PRIVILEGED)
746 prot |= IOMMU_PRIV;
747
748 switch (dir) {
749 case DMA_BIDIRECTIONAL:
750 return prot | IOMMU_READ | IOMMU_WRITE;
751 case DMA_TO_DEVICE:
752 return prot | IOMMU_READ;
753 case DMA_FROM_DEVICE:
754 return prot | IOMMU_WRITE;
755 default:
756 return 0;
757 }
758 }
759
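/* Allocate IOVA space for @size bytes: the MSI cookie case is a simple linear allocator, otherwise allocate from the IOVA domain below @dma_limit, trying 32-bit space first where the PCI workaround applies */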
760 static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
761 size_t size, u64 dma_limit, struct device *dev)
762 {
763 struct iommu_dma_cookie *cookie = domain->iova_cookie;
764 struct iova_domain *iovad = &cookie->iovad;
765 unsigned long shift, iova_len, iova;
766
767 if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI) {
768 domain->msi_cookie->msi_iova += size;
769 return domain->msi_cookie->msi_iova - size;
770 }
771
772 shift = iova_shift(iovad);
773 iova_len = size >> shift;
774
775 dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
776
777 if (domain->geometry.force_aperture)
778 dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
779
780 /*
781 * Try to use all the 32-bit PCI addresses first. The original SAC vs.
782 * DAC reasoning loses relevance with PCIe, but enough hardware and
783 * firmware bugs are still lurking out there that it's safest not to
784 * venture into the 64-bit space until necessary.
785 *
786 * If your device goes wrong after seeing the notice then likely either
787 * its driver is not setting DMA masks accurately, the hardware has
788 * some inherent bug in handling >32-bit addresses, or not all the
789 * expected address bits are wired up between the device and the IOMMU.
790 */
791 if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
792 iova = alloc_iova_fast(iovad, iova_len,
793 DMA_BIT_MASK(32) >> shift, false);
794 if (iova)
795 goto done;
796
797 dev->iommu->pci_32bit_workaround = false;
798 dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit));
799 }
800
801 iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
802 done:
803 return (dma_addr_t)iova << shift;
804 }
805
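/* Release IOVA space: rewind the MSI cookie, queue the range for deferred freeing, or return it to the allocator immediately */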
806 static void iommu_dma_free_iova(struct iommu_domain *domain, dma_addr_t iova,
807 size_t size, struct iommu_iotlb_gather *gather)
808 {
809 struct iova_domain *iovad = &domain->iova_cookie->iovad;
810
811 /* The MSI case is only ever cleaning up its most recent allocation */
812 if (domain->cookie_type == IOMMU_COOKIE_DMA_MSI)
813 domain->msi_cookie->msi_iova -= size;
814 else if (gather && gather->queued)
815 queue_iova(domain->iova_cookie, iova_pfn(iovad, iova),
816 size >> iova_shift(iovad),
817 &gather->freelist);
818 else
819 free_iova_fast(iovad, iova_pfn(iovad, iova),
820 size >> iova_shift(iovad));
821 }
822
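/* Unmap a granule-aligned range and release its IOVA, syncing the IOTLB unless a flush queue will do it later */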
823 static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
824 size_t size)
825 {
826 struct iommu_domain *domain = iommu_get_dma_domain(dev);
827 struct iommu_dma_cookie *cookie = domain->iova_cookie;
828 struct iova_domain *iovad = &cookie->iovad;
829 size_t iova_off = iova_offset(iovad, dma_addr);
830 struct iommu_iotlb_gather iotlb_gather;
831 size_t unmapped;
832
833 dma_addr -= iova_off;
834 size = iova_align(iovad, size + iova_off);
835 iommu_iotlb_gather_init(&iotlb_gather);
836 iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
837
838 unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
839 WARN_ON(unmapped != size);
840
841 if (!iotlb_gather.queued)
842 iommu_iotlb_sync(domain, &iotlb_gather);
843 iommu_dma_free_iova(domain, dma_addr, size, &iotlb_gather);
844 }
845
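/* Allocate an IOVA for @phys and map it with @prot; returns the DMA address or DMA_MAPPING_ERROR */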
846 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
847 size_t size, int prot, u64 dma_mask)
848 {
849 struct iommu_domain *domain = iommu_get_dma_domain(dev);
850 struct iommu_dma_cookie *cookie = domain->iova_cookie;
851 struct iova_domain *iovad = &cookie->iovad;
852 size_t iova_off = iova_offset(iovad, phys);
853 dma_addr_t iova;
854
855 if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
856 iommu_deferred_attach(dev, domain))
857 return DMA_MAPPING_ERROR;
858
859 /* If anyone ever wants this we'd need support in the IOVA allocator */
860 if (dev_WARN_ONCE(dev, dma_get_min_align_mask(dev) > iova_mask(iovad),
861 "Unsupported alignment constraint\n"))
862 return DMA_MAPPING_ERROR;
863
864 size = iova_align(iovad, size + iova_off);
865
866 iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
867 if (!iova)
868 return DMA_MAPPING_ERROR;
869
870 if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
871 iommu_dma_free_iova(domain, iova, size, NULL);
872 return DMA_MAPPING_ERROR;
873 }
874 return iova + iova_off;
875 }
876
877 static void __iommu_dma_free_pages(struct page **pages, int count)
878 {
879 while (count--)
880 __free_page(pages[count]);
881 kvfree(pages);
882 }
883
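/* Allocate @count pages for a non-contiguous buffer, preferring the highest order allowed by @order_mask and splitting higher-order pages into order-0 entries */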
884 static struct page **__iommu_dma_alloc_pages(struct device *dev,
885 unsigned int count, unsigned long order_mask, gfp_t gfp)
886 {
887 struct page **pages;
888 unsigned int i = 0, nid = dev_to_node(dev);
889
890 order_mask &= GENMASK(MAX_PAGE_ORDER, 0);
891 if (!order_mask)
892 return NULL;
893
894 pages = kvzalloc_objs(*pages, count);
895 if (!pages)
896 return NULL;
897
898 /* IOMMU can map any pages, so highmem can also be used here */
899 gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
900
901 while (count) {
902 struct page *page = NULL;
903 unsigned int order_size;
904
905 /*
906 * Higher-order allocations are a convenience rather
907 * than a necessity, hence using __GFP_NORETRY until
908 * falling back to minimum-order allocations.
909 */
910 for (order_mask &= GENMASK(__fls(count), 0);
911 order_mask; order_mask &= ~order_size) {
912 unsigned int order = __fls(order_mask);
913 gfp_t alloc_flags = gfp;
914
915 order_size = 1U << order;
916 if (order_mask > order_size)
917 alloc_flags |= __GFP_NORETRY;
918 page = alloc_pages_node(nid, alloc_flags, order);
919 if (!page)
920 continue;
921 if (order)
922 split_page(page, order);
923 break;
924 }
925 if (!page) {
926 __iommu_dma_free_pages(pages, i);
927 return NULL;
928 }
929 count -= order_size;
930 while (order_size--)
931 pages[i++] = page++;
932 }
933 return pages;
934 }
935
936 /*
937 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
938 * but an IOMMU which supports smaller pages might not map the whole thing.
939 */
940 static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
941 size_t size, struct sg_table *sgt, gfp_t gfp, unsigned long attrs)
942 {
943 struct iommu_domain *domain = iommu_get_dma_domain(dev);
944 struct iommu_dma_cookie *cookie = domain->iova_cookie;
945 struct iova_domain *iovad = &cookie->iovad;
946 bool coherent = dev_is_dma_coherent(dev);
947 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
948 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
949 struct page **pages;
950 dma_addr_t iova;
951 ssize_t ret;
952
953 if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
954 iommu_deferred_attach(dev, domain))
955 return NULL;
956
957 min_size = alloc_sizes & -alloc_sizes;
958 if (min_size < PAGE_SIZE) {
959 min_size = PAGE_SIZE;
960 alloc_sizes |= PAGE_SIZE;
961 } else {
962 size = ALIGN(size, min_size);
963 }
964 if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
965 alloc_sizes = min_size;
966
967 count = PAGE_ALIGN(size) >> PAGE_SHIFT;
968 pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
969 gfp);
970 if (!pages)
971 return NULL;
972
973 size = iova_align(iovad, size);
974 iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
975 if (!iova)
976 goto out_free_pages;
977
978 /*
979 * Remove the zone/policy flags from the GFP - these are applied to the
980 * __iommu_dma_alloc_pages() but are not used for the supporting
981 * internal allocations that follow.
982 */
983 gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);
984
985 if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp))
986 goto out_free_iova;
987
988 if (!(ioprot & IOMMU_CACHE)) {
989 struct scatterlist *sg;
990 int i;
991
992 for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
993 arch_dma_prep_coherent(sg_page(sg), sg->length);
994 }
995
996 ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
997 gfp);
998 if (ret < 0 || ret < size)
999 goto out_free_sg;
1000
1001 sgt->sgl->dma_address = iova;
1002 sgt->sgl->dma_length = size;
1003 return pages;
1004
1005 out_free_sg:
1006 sg_free_table(sgt);
1007 out_free_iova:
1008 iommu_dma_free_iova(domain, iova, size, NULL);
1009 out_free_pages:
1010 __iommu_dma_free_pages(pages, count);
1011 return NULL;
1012 }
1013
1014 static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
1015 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
1016 {
1017 struct page **pages;
1018 struct sg_table sgt;
1019 void *vaddr;
1020 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
1021
1022 pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, attrs);
1023 if (!pages)
1024 return NULL;
1025 *dma_handle = sgt.sgl->dma_address;
1026 sg_free_table(&sgt);
1027 vaddr = dma_common_pages_remap(pages, size, prot,
1028 __builtin_return_address(0));
1029 if (!vaddr)
1030 goto out_unmap;
1031 return vaddr;
1032
1033 out_unmap:
1034 __iommu_dma_unmap(dev, *dma_handle, size);
1035 __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
1036 return NULL;
1037 }
1038
1039 /*
1040 * This is the actual return value from iommu_dma_alloc_noncontiguous().
1041 *
1042 * The users of the DMA API should only care about the sg_table, but to make
1043 * the DMA-API internal vmapping and freeing easier we stash away the page
1044 * array as well (except for the fallback case). This can go away any time,
1045 * e.g. when a vmap-variant that takes a scatterlist comes along.
1046 */
1047 struct dma_sgt_handle {
1048 struct sg_table sgt;
1049 struct page **pages;
1050 };
1051 #define sgt_handle(sgt) \
1052 container_of((sgt), struct dma_sgt_handle, sgt)
1053
1054 struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
1055 enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
1056 {
1057 struct dma_sgt_handle *sh;
1058
1059 sh = kmalloc_obj(*sh, gfp);
1060 if (!sh)
1061 return NULL;
1062
1063 sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp, attrs);
1064 if (!sh->pages) {
1065 kfree(sh);
1066 return NULL;
1067 }
1068 return &sh->sgt;
1069 }
1070
1071 void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
1072 struct sg_table *sgt, enum dma_data_direction dir)
1073 {
1074 struct dma_sgt_handle *sh = sgt_handle(sgt);
1075
1076 __iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
1077 __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
1078 sg_free_table(&sh->sgt);
1079 kfree(sh);
1080 }
1081
1082 void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
1083 struct sg_table *sgt)
1084 {
1085 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1086
1087 return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
1088 }
1089
1090 int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
1091 size_t size, struct sg_table *sgt)
1092 {
1093 unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1094
1095 if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
1096 return -ENXIO;
1097 return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
1098 }
1099
1100 void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1101 size_t size, enum dma_data_direction dir)
1102 {
1103 phys_addr_t phys;
1104
1105 if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
1106 return;
1107
1108 phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
1109 if (!dev_is_dma_coherent(dev)) {
1110 arch_sync_dma_for_cpu(phys, size, dir);
1111 arch_sync_dma_flush();
1112 }
1113
1114 swiotlb_sync_single_for_cpu(dev, phys, size, dir);
1115 }
1116
1117 void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
1118 size_t size, enum dma_data_direction dir)
1119 {
1120 phys_addr_t phys;
1121
1122 if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
1123 return;
1124
1125 phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
1126 swiotlb_sync_single_for_device(dev, phys, size, dir);
1127
1128 if (!dev_is_dma_coherent(dev)) {
1129 arch_sync_dma_for_device(phys, size, dir);
1130 arch_sync_dma_flush();
1131 }
1132 }
1133
1134 void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
1135 int nelems, enum dma_data_direction dir)
1136 {
1137 struct scatterlist *sg;
1138 int i;
1139
1140 if (sg_dma_is_swiotlb(sgl)) {
1141 for_each_sg(sgl, sg, nelems, i)
1142 iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
1143 sg->length, dir);
1144 } else if (!dev_is_dma_coherent(dev)) {
1145 for_each_sg(sgl, sg, nelems, i)
1146 arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
1147 arch_sync_dma_flush();
1148 }
1149 }
1150
1151 void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
1152 int nelems, enum dma_data_direction dir)
1153 {
1154 struct scatterlist *sg;
1155 int i;
1156
1157 if (sg_dma_is_swiotlb(sgl)) {
1158 for_each_sg(sgl, sg, nelems, i)
1159 iommu_dma_sync_single_for_device(dev,
1160 sg_dma_address(sg),
1161 sg->length, dir);
1162 } else if (!dev_is_dma_coherent(dev)) {
1163 for_each_sg(sgl, sg, nelems, i)
1164 arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
1165 arch_sync_dma_flush();
1166 }
1167 }
1168
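/* Bounce a buffer through SWIOTLB so it becomes IOVA-granule aligned; for untrusted devices the padding around the data is zeroed */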
1169 static phys_addr_t iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
1170 size_t size, enum dma_data_direction dir, unsigned long attrs)
1171 {
1172 struct iommu_domain *domain = iommu_get_dma_domain(dev);
1173 struct iova_domain *iovad = &domain->iova_cookie->iovad;
1174
1175 if (!is_swiotlb_active(dev)) {
1176 dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
1177 return (phys_addr_t)DMA_MAPPING_ERROR;
1178 }
1179
1180 trace_swiotlb_bounced(dev, phys, size);
1181
1182 phys = swiotlb_tbl_map_single(dev, phys, size, iova_mask(iovad), dir,
1183 attrs);
1184
1185 /*
1186 * Untrusted devices should not see padding areas with random leftover
1187 * kernel data, so zero the pre- and post-padding.
1188 * swiotlb_tbl_map_single() has initialized the bounce buffer proper to
1189 * the contents of the original memory buffer.
1190 */
1191 if (phys != (phys_addr_t)DMA_MAPPING_ERROR && dev_is_untrusted(dev)) {
1192 size_t start, virt = (size_t)phys_to_virt(phys);
1193
1194 /* Pre-padding */
1195 start = iova_align_down(iovad, virt);
1196 memset((void *)start, 0, virt - start);
1197
1198 /* Post-padding */
1199 start = virt + size;
1200 memset((void *)start, 0, iova_align(iovad, start) - start);
1201 }
1202
1203 return phys;
1204 }
1205
1206 /*
1207 * Checks if a physical buffer has unaligned boundaries with respect to
1208 * the IOMMU granule. Returns non-zero if either the start or end
1209 * address is not aligned to the granule boundary.
1210 */
1211 static inline size_t iova_unaligned(struct iova_domain *iovad, phys_addr_t phys,
1212 size_t size)
1213 {
1214 return iova_offset(iovad, phys | size);
1215 }
1216
1217 dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
1218 enum dma_data_direction dir, unsigned long attrs)
1219 {
1220 bool coherent = dev_is_dma_coherent(dev);
1221 int prot = dma_info_to_prot(dir, coherent, attrs);
1222 struct iommu_domain *domain = iommu_get_dma_domain(dev);
1223 struct iommu_dma_cookie *cookie = domain->iova_cookie;
1224 struct iova_domain *iovad = &cookie->iovad;
1225 dma_addr_t iova, dma_mask = dma_get_mask(dev);
1226
1227 /*
1228 * If both the physical buffer start address and size are page aligned,
1229 * we don't need to use a bounce page.
1230 */
1231 if (dev_use_swiotlb(dev, size, dir) &&
1232 iova_unaligned(iovad, phys, size)) {
1233 if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
1234 return DMA_MAPPING_ERROR;
1235
1236 phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
1237 if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
1238 return DMA_MAPPING_ERROR;
1239 }
1240
1241 if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))) {
1242 arch_sync_dma_for_device(phys, size, dir);
1243 arch_sync_dma_flush();
1244 }
1245
1246 iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
1247 if (iova == DMA_MAPPING_ERROR &&
1248 !(attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT)))
1249 swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
1250 return iova;
1251 }
1252
1253 void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
1254 size_t size, enum dma_data_direction dir, unsigned long attrs)
1255 {
1256 phys_addr_t phys;
1257
1258 if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT)) {
1259 __iommu_dma_unmap(dev, dma_handle, size);
1260 return;
1261 }
1262
1263 phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
1264 if (WARN_ON(!phys))
1265 return;
1266
1267 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev)) {
1268 arch_sync_dma_for_cpu(phys, size, dir);
1269 arch_sync_dma_flush();
1270 }
1271
1272 __iommu_dma_unmap(dev, dma_handle, size);
1273
1274 swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
1275 }
1276
1277 /*
1278 * Prepare a successfully-mapped scatterlist to give back to the caller.
1279 *
1280 * At this point the segments are already laid out by iommu_dma_map_sg() to
1281 * avoid individually crossing any boundaries, so we merely need to check a
1282 * segment's start address to avoid concatenating across one.
1283 */
1284 static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
1285 dma_addr_t dma_addr)
1286 {
1287 struct scatterlist *s, *cur = sg;
1288 unsigned long seg_mask = dma_get_seg_boundary(dev);
1289 unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
1290 int i, count = 0;
1291
1292 for_each_sg(sg, s, nents, i) {
1293 /* Restore this segment's original unaligned fields first */
1294 dma_addr_t s_dma_addr = sg_dma_address(s);
1295 unsigned int s_iova_off = sg_dma_address(s);
1296 unsigned int s_length = sg_dma_len(s);
1297 unsigned int s_iova_len = s->length;
1298
1299 sg_dma_address(s) = DMA_MAPPING_ERROR;
1300 sg_dma_len(s) = 0;
1301
1302 if (sg_dma_is_bus_address(s)) {
1303 if (i > 0)
1304 cur = sg_next(cur);
1305
1306 sg_dma_unmark_bus_address(s);
1307 sg_dma_address(cur) = s_dma_addr;
1308 sg_dma_len(cur) = s_length;
1309 sg_dma_mark_bus_address(cur);
1310 count++;
1311 cur_len = 0;
1312 continue;
1313 }
1314
1315 s->offset += s_iova_off;
1316 s->length = s_length;
1317
1318 /*
1319 * Now fill in the real DMA data. If...
1320 * - there is a valid output segment to append to
1321 * - and this segment starts on an IOVA page boundary
1322 * - but doesn't fall at a segment boundary
1323 * - and wouldn't make the resulting output segment too long
1324 */
1325 if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
1326 (max_len - cur_len >= s_length)) {
1327 /* ...then concatenate it with the previous one */
1328 cur_len += s_length;
1329 } else {
1330 /* Otherwise start the next output segment */
1331 if (i > 0)
1332 cur = sg_next(cur);
1333 cur_len = s_length;
1334 count++;
1335
1336 sg_dma_address(cur) = dma_addr + s_iova_off;
1337 }
1338
1339 sg_dma_len(cur) = cur_len;
1340 dma_addr += s_iova_len;
1341
1342 if (s_length + s_iova_off < s_iova_len)
1343 cur_len = 0;
1344 }
1345 return count;
1346 }
1347
1348 /*
1349 * If mapping failed, then just restore the original list,
1350 * but making sure the DMA fields are invalidated.
1351 */
1352 static void __invalidate_sg(struct scatterlist *sg, int nents)
1353 {
1354 struct scatterlist *s;
1355 int i;
1356
1357 for_each_sg(sg, s, nents, i) {
1358 if (sg_dma_is_bus_address(s)) {
1359 sg_dma_unmark_bus_address(s);
1360 } else {
1361 if (sg_dma_address(s) != DMA_MAPPING_ERROR)
1362 s->offset += sg_dma_address(s);
1363 if (sg_dma_len(s))
1364 s->length = sg_dma_len(s);
1365 }
1366 sg_dma_address(s) = DMA_MAPPING_ERROR;
1367 sg_dma_len(s) = 0;
1368 }
1369 }
1370
1371 static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
1372 int nents, enum dma_data_direction dir, unsigned long attrs)
1373 {
1374 struct scatterlist *s;
1375 int i;
1376
1377 for_each_sg(sg, s, nents, i)
1378 iommu_dma_unmap_phys(dev, sg_dma_address(s),
1379 sg_dma_len(s), dir, attrs);
1380 }
1381
1382 static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
1383 int nents, enum dma_data_direction dir, unsigned long attrs)
1384 {
1385 struct scatterlist *s;
1386 int i;
1387
1388 sg_dma_mark_swiotlb(sg);
1389
1390 for_each_sg(sg, s, nents, i) {
1391 sg_dma_address(s) = iommu_dma_map_phys(dev, sg_phys(s),
1392 s->length, dir, attrs);
1393 if (sg_dma_address(s) == DMA_MAPPING_ERROR)
1394 goto out_unmap;
1395 sg_dma_len(s) = s->length;
1396 }
1397
1398 return nents;
1399
1400 out_unmap:
1401 iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
1402 return -EIO;
1403 }
1404
1405 /*
1406 * The DMA API client is passing in a scatterlist which could describe
1407 * any old buffer layout, but the IOMMU API requires everything to be
1408 * aligned to IOMMU pages. Hence the need for this complicated bit of
1409 * impedance-matching, to be able to hand off a suitably-aligned list,
1410 * but still preserve the original offsets and sizes for the caller.
1411 */
1412 int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1413 enum dma_data_direction dir, unsigned long attrs)
1414 {
1415 struct iommu_domain *domain = iommu_get_dma_domain(dev);
1416 struct iommu_dma_cookie *cookie = domain->iova_cookie;
1417 struct iova_domain *iovad = &cookie->iovad;
1418 struct scatterlist *s, *prev = NULL;
1419 int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
1420 struct pci_p2pdma_map_state p2pdma_state = {};
1421 dma_addr_t iova;
1422 size_t iova_len = 0;
1423 unsigned long mask = dma_get_seg_boundary(dev);
1424 ssize_t ret;
1425 int i;
1426
1427 if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
1428 ret = iommu_deferred_attach(dev, domain);
1429 if (ret)
1430 goto out;
1431 }
1432
1433 if (dev_use_sg_swiotlb(dev, sg, nents, dir))
1434 return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
1435
1436 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1437 iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
1438
1439 /*
1440 * Work out how much IOVA space we need, and align the segments to
1441 * IOVA granules for the IOMMU driver to handle. With some clever
1442 * trickery we can modify the list in-place, but reversibly, by
1443 * stashing the unaligned parts in the as-yet-unused DMA fields.
1444 */
1445 for_each_sg(sg, s, nents, i) {
1446 size_t s_iova_off = iova_offset(iovad, s->offset);
1447 size_t s_length = s->length;
1448 size_t pad_len = (mask - iova_len + 1) & mask;
1449
1450 switch (pci_p2pdma_state(&p2pdma_state, dev, sg_page(s))) {
1451 case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
1452 /*
1453 * Mapping through host bridge should be mapped with
1454 * regular IOVAs, thus we do nothing here and continue
1455 * below.
1456 */
1457 break;
1458 case PCI_P2PDMA_MAP_NONE:
1459 break;
1460 case PCI_P2PDMA_MAP_BUS_ADDR:
1461 /*
1462 * iommu_map_sg() will skip this segment as it is marked
1463 * as a bus address, __finalise_sg() will copy the dma
1464 * address into the output segment.
1465 */
1466 s->dma_address = pci_p2pdma_bus_addr_map(
1467 p2pdma_state.mem, sg_phys(s));
1468 sg_dma_len(s) = sg->length;
1469 sg_dma_mark_bus_address(s);
1470 continue;
1471 default:
1472 ret = -EREMOTEIO;
1473 goto out_restore_sg;
1474 }
1475
1476 sg_dma_address(s) = s_iova_off;
1477 sg_dma_len(s) = s_length;
1478 s->offset -= s_iova_off;
1479 s_length = iova_align(iovad, s_length + s_iova_off);
1480 s->length = s_length;
1481
1482 /*
1483 * Due to the alignment of our single IOVA allocation, we can
1484 * depend on these assumptions about the segment boundary mask:
1485 * - If mask size >= IOVA size, then the IOVA range cannot
1486 * possibly fall across a boundary, so we don't care.
1487 * - If mask size < IOVA size, then the IOVA range must start
1488 * exactly on a boundary, therefore we can lay things out
1489 * based purely on segment lengths without needing to know
1490 * the actual addresses beforehand.
1491 * - The mask must be a power of 2, so pad_len == 0 if
1492 * iova_len == 0, thus we cannot dereference prev the first
1493 * time through here (i.e. before it has a meaningful value).
1494 */
1495 if (pad_len && pad_len < s_length - 1) {
1496 prev->length += pad_len;
1497 iova_len += pad_len;
1498 }
1499
1500 iova_len += s_length;
1501 prev = s;
1502 }
1503
1504 if (!iova_len)
1505 return __finalise_sg(dev, sg, nents, 0);
1506
1507 iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
1508 if (!iova) {
1509 ret = -ENOMEM;
1510 goto out_restore_sg;
1511 }
1512
1513 /*
1514 * We'll leave any physical concatenation to the IOMMU driver's
1515 * implementation - it knows better than we do.
1516 */
1517 ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
1518 if (ret < 0 || ret < iova_len)
1519 goto out_free_iova;
1520
1521 return __finalise_sg(dev, sg, nents, iova);
1522
1523 out_free_iova:
1524 iommu_dma_free_iova(domain, iova, iova_len, NULL);
1525 out_restore_sg:
1526 __invalidate_sg(sg, nents);
1527 out:
1528 if (ret != -ENOMEM && ret != -EREMOTEIO)
1529 return -EINVAL;
1530 return ret;
1531 }
1532
1533 void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1534 enum dma_data_direction dir, unsigned long attrs)
1535 {
1536 dma_addr_t end = 0, start;
1537 struct scatterlist *tmp;
1538 int i;
1539
1540 if (sg_dma_is_swiotlb(sg)) {
1541 iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
1542 return;
1543 }
1544
1545 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1546 iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
1547
1548 /*
1549 * The scatterlist segments are mapped into a single
1550 * contiguous IOVA allocation, the start and end points
1551 * just have to be determined.
1552 */
1553 for_each_sg(sg, tmp, nents, i) {
1554 if (sg_dma_is_bus_address(tmp)) {
1555 sg_dma_unmark_bus_address(tmp);
1556 continue;
1557 }
1558
1559 if (sg_dma_len(tmp) == 0)
1560 break;
1561
1562 start = sg_dma_address(tmp);
1563 break;
1564 }
1565
1566 nents -= i;
1567 for_each_sg(tmp, tmp, nents, i) {
1568 if (sg_dma_is_bus_address(tmp)) {
1569 sg_dma_unmark_bus_address(tmp);
1570 continue;
1571 }
1572
1573 if (sg_dma_len(tmp) == 0)
1574 break;
1575
1576 end = sg_dma_address(tmp) + sg_dma_len(tmp);
1577 }
1578
1579 if (end)
1580 __iommu_dma_unmap(dev, start, end - start);
1581 }
1582
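/* Free the CPU-side buffer by whichever path allocated it: atomic pool, vmalloc/remap, or contiguous/CMA pages */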
1583 static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
1584 {
1585 size_t alloc_size = PAGE_ALIGN(size);
1586 int count = alloc_size >> PAGE_SHIFT;
1587 struct page *page = NULL, **pages = NULL;
1588
1589 /* Non-coherent atomic allocation? Easy */
1590 if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1591 dma_free_from_pool(dev, cpu_addr, alloc_size))
1592 return;
1593
1594 if (is_vmalloc_addr(cpu_addr)) {
1595 /*
1596 * If the address is remapped, then it's either non-coherent
1597 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
1598 */
1599 pages = dma_common_find_pages(cpu_addr);
1600 if (!pages)
1601 page = vmalloc_to_page(cpu_addr);
1602 dma_common_free_remap(cpu_addr, alloc_size);
1603 } else {
1604 /* Lowmem means a coherent atomic or CMA allocation */
1605 page = virt_to_page(cpu_addr);
1606 }
1607
1608 if (pages)
1609 __iommu_dma_free_pages(pages, count);
1610 if (page)
1611 dma_free_contiguous(dev, page, alloc_size);
1612 }
1613
1614 void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
1615 dma_addr_t handle, unsigned long attrs)
1616 {
1617 __iommu_dma_unmap(dev, handle, size);
1618 __iommu_dma_free(dev, size, cpu_addr);
1619 }
1620
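/* Allocate a zeroed, physically contiguous buffer, remapping it when the device is non-coherent or the page is highmem */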
1621 static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
1622 struct page **pagep, gfp_t gfp, unsigned long attrs)
1623 {
1624 bool coherent = dev_is_dma_coherent(dev);
1625 size_t alloc_size = PAGE_ALIGN(size);
1626 int node = dev_to_node(dev);
1627 struct page *page = NULL;
1628 void *cpu_addr;
1629
1630 page = dma_alloc_contiguous(dev, alloc_size, gfp);
1631 if (!page)
1632 page = alloc_pages_node(node, gfp, get_order(alloc_size));
1633 if (!page)
1634 return NULL;
1635
1636 if (!coherent || PageHighMem(page)) {
1637 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
1638
1639 cpu_addr = dma_common_contiguous_remap(page, alloc_size,
1640 prot, __builtin_return_address(0));
1641 if (!cpu_addr)
1642 goto out_free_pages;
1643
1644 if (!coherent)
1645 arch_dma_prep_coherent(page, size);
1646 } else {
1647 cpu_addr = page_address(page);
1648 }
1649
1650 *pagep = page;
1651 memset(cpu_addr, 0, alloc_size);
1652 return cpu_addr;
1653 out_free_pages:
1654 dma_free_contiguous(dev, page, alloc_size);
1655 return NULL;
1656 }
1657
1658 void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
1659 gfp_t gfp, unsigned long attrs)
1660 {
1661 bool coherent = dev_is_dma_coherent(dev);
1662 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
1663 struct page *page = NULL;
1664 void *cpu_addr;
1665
1666 gfp |= __GFP_ZERO;
1667
1668 if (gfpflags_allow_blocking(gfp) &&
1669 !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
1670 return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
1671 }
1672
1673 if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1674 !gfpflags_allow_blocking(gfp) && !coherent)
1675 page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
1676 gfp, NULL);
1677 else
1678 cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
1679 if (!cpu_addr)
1680 return NULL;
1681
1682 *handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
1683 dev->coherent_dma_mask);
1684 if (*handle == DMA_MAPPING_ERROR) {
1685 __iommu_dma_free(dev, size, cpu_addr);
1686 return NULL;
1687 }
1688
1689 return cpu_addr;
1690 }
1691
1692 int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
1693 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1694 unsigned long attrs)
1695 {
1696 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1697 unsigned long pfn, off = vma->vm_pgoff;
1698 int ret;
1699
1700 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
1701
1702 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
1703 return ret;
1704
1705 if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
1706 return -ENXIO;
1707
1708 if (is_vmalloc_addr(cpu_addr)) {
1709 struct page **pages = dma_common_find_pages(cpu_addr);
1710
1711 if (pages)
1712 return vm_map_pages(vma, pages, nr_pages);
1713 pfn = vmalloc_to_pfn(cpu_addr);
1714 } else {
1715 pfn = page_to_pfn(virt_to_page(cpu_addr));
1716 }
1717
1718 return remap_pfn_range(vma, vma->vm_start, pfn + off,
1719 vma->vm_end - vma->vm_start,
1720 vma->vm_page_prot);
1721 }
1722
1723 int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
1724 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1725 unsigned long attrs)
1726 {
1727 struct page *page;
1728 int ret;
1729
1730 if (is_vmalloc_addr(cpu_addr)) {
1731 struct page **pages = dma_common_find_pages(cpu_addr);
1732
1733 if (pages) {
1734 return sg_alloc_table_from_pages(sgt, pages,
1735 PAGE_ALIGN(size) >> PAGE_SHIFT,
1736 0, size, GFP_KERNEL);
1737 }
1738
1739 page = vmalloc_to_page(cpu_addr);
1740 } else {
1741 page = virt_to_page(cpu_addr);
1742 }
1743
1744 ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
1745 if (!ret)
1746 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
1747 return ret;
1748 }
1749
1750 unsigned long iommu_dma_get_merge_boundary(struct device *dev)
1751 {
1752 struct iommu_domain *domain = iommu_get_dma_domain(dev);
1753
1754 return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
1755 }
1756
1757 size_t iommu_dma_opt_mapping_size(void)
1758 {
1759 return iova_rcache_range();
1760 }
1761
1762 size_t iommu_dma_max_mapping_size(struct device *dev)
1763 {
1764 if (dev_is_untrusted(dev))
1765 return swiotlb_max_mapping_size(dev);
1766
1767 return SIZE_MAX;
1768 }
1769
1770 /**
1771 * dma_iova_try_alloc - Try to allocate an IOVA space
1772 * @dev: Device to allocate the IOVA space for
1773 * @state: IOVA state
1774 * @phys: physical address
1775 * @size: IOVA size
1776 *
1777 * Check if @dev supports the IOVA-based DMA API and, if so, allocate IOVA
1778 * space for the given base address and size.
1779 *
1780 * Note: @phys is only used to calculate the IOVA alignment. Callers that always
1781 * do PAGE_SIZE aligned transfers can safely pass 0 here.
1782 *
1783 * Returns %true if the IOVA-based DMA API can be used and IOVA space has been
1784 * allocated, or %false if the regular DMA API should be used.
1785 */
1786 bool dma_iova_try_alloc(struct device *dev, struct dma_iova_state *state,
1787 phys_addr_t phys, size_t size)
1788 {
1789 struct iommu_dma_cookie *cookie;
1790 struct iommu_domain *domain;
1791 struct iova_domain *iovad;
1792 size_t iova_off;
1793 dma_addr_t addr;
1794
1795 memset(state, 0, sizeof(*state));
1796 if (!use_dma_iommu(dev))
1797 return false;
1798
1799 domain = iommu_get_dma_domain(dev);
1800 cookie = domain->iova_cookie;
1801 iovad = &cookie->iovad;
1802 iova_off = iova_offset(iovad, phys);
1803
1804 if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
1805 iommu_deferred_attach(dev, iommu_get_domain_for_dev(dev)))
1806 return false;
1807
1808 if (WARN_ON_ONCE(!size))
1809 return false;
1810
1811 /*
1812 * DMA_IOVA_USE_SWIOTLB is a flag set by dma-iommu internals;
1813 * make sure the caller didn't set it and/or didn't use this
1814 * interface to map SIZE_MAX.
1815 */
1816 if (WARN_ON_ONCE((u64)size & DMA_IOVA_USE_SWIOTLB))
1817 return false;
1818
1819 addr = iommu_dma_alloc_iova(domain,
1820 iova_align(iovad, size + iova_off),
1821 dma_get_mask(dev), dev);
1822 if (!addr)
1823 return false;
1824
1825 state->addr = addr + iova_off;
1826 state->__size = size;
1827 return true;
1828 }
1829 EXPORT_SYMBOL_GPL(dma_iova_try_alloc);
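/*
 * Illustrative sketch (not part of this file's API surface): a typical
 * caller probes for the IOVA-based API and falls back to the conventional
 * streaming DMA API when it is unavailable. The helper name and the
 * single-range usage below are hypothetical.
 */
#if 0	/* usage sketch only */
static dma_addr_t example_map_one(struct device *dev,
				  struct dma_iova_state *state,
				  phys_addr_t phys, size_t len,
				  enum dma_data_direction dir)
{
	int ret;

	if (!dma_iova_try_alloc(dev, state, phys, len)) {
		/* No IOVA-based API for this device: use the regular DMA API. */
		return dma_map_page(dev, pfn_to_page(PHYS_PFN(phys)),
				    offset_in_page(phys), len, dir);
	}

	ret = dma_iova_link(dev, state, phys, 0, len, dir, 0);
	if (ret) {
		/* Nothing linked yet, so only the IOVA space needs freeing. */
		dma_iova_free(dev, state);
		return DMA_MAPPING_ERROR;
	}

	ret = dma_iova_sync(dev, state, 0, len);
	if (ret) {
		/* Unlink the mapped range and free the IOVA space in one go. */
		dma_iova_destroy(dev, state, len, dir, 0);
		return DMA_MAPPING_ERROR;
	}

	return state->addr;
}
#endif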
1830
1831 /**
1832 * dma_iova_free - Free an IOVA space
1833 * @dev: Device to free the IOVA space for
1834 * @state: IOVA state
1835 *
1836 * Undoes a successful dma_iova_try_alloc().
1837 *
1838 * Note that all dma_iova_link() calls need to be undone first. For callers
1839 * that never call dma_iova_unlink(), dma_iova_destroy() can be used instead
1840 * which unlinks all ranges and frees the IOVA space in a single efficient
1841 * operation.
1842 */
1843 void dma_iova_free(struct device *dev, struct dma_iova_state *state)
1844 {
1845 struct iommu_domain *domain = iommu_get_dma_domain(dev);
1846 struct iommu_dma_cookie *cookie = domain->iova_cookie;
1847 struct iova_domain *iovad = &cookie->iovad;
1848 size_t iova_start_pad = iova_offset(iovad, state->addr);
1849 size_t size = dma_iova_size(state);
1850
1851 iommu_dma_free_iova(domain, state->addr - iova_start_pad,
1852 iova_align(iovad, size + iova_start_pad), NULL);
1853 }
1854 EXPORT_SYMBOL_GPL(dma_iova_free);
1855
1856 static int __dma_iova_link(struct device *dev, dma_addr_t addr,
1857 phys_addr_t phys, size_t size, enum dma_data_direction dir,
1858 unsigned long attrs)
1859 {
1860 bool coherent = dev_is_dma_coherent(dev);
1861 int prot = dma_info_to_prot(dir, coherent, attrs);
1862
1863 if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
1864 arch_sync_dma_for_device(phys, size, dir);
1865
1866 return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
1867 prot, GFP_ATOMIC);
1868 }
1869
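/*
 * Map a sub-granule range through a swiotlb bounce buffer and link the
 * containing, granule-aligned bounce slot into the IOVA range, so that the
 * unaligned portion does not expose neighbouring memory to the device.
 */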
1870 static int iommu_dma_iova_bounce_and_link(struct device *dev, dma_addr_t addr,
1871 phys_addr_t phys, size_t bounce_len,
1872 enum dma_data_direction dir, unsigned long attrs,
1873 size_t iova_start_pad)
1874 {
1875 struct iommu_domain *domain = iommu_get_dma_domain(dev);
1876 struct iova_domain *iovad = &domain->iova_cookie->iovad;
1877 phys_addr_t bounce_phys;
1878 int error;
1879
1880 bounce_phys = iommu_dma_map_swiotlb(dev, phys, bounce_len, dir, attrs);
1881 if (bounce_phys == DMA_MAPPING_ERROR)
1882 return -ENOMEM;
1883
1884 error = __dma_iova_link(dev, addr - iova_start_pad,
1885 bounce_phys - iova_start_pad,
1886 iova_align(iovad, bounce_len), dir, attrs);
1887 if (error)
1888 swiotlb_tbl_unmap_single(dev, bounce_phys, bounce_len, dir,
1889 attrs);
1890 return error;
1891 }
1892
1893 static int iommu_dma_iova_link_swiotlb(struct device *dev,
1894 struct dma_iova_state *state, phys_addr_t phys, size_t offset,
1895 size_t size, enum dma_data_direction dir, unsigned long attrs)
1896 {
1897 struct iommu_domain *domain = iommu_get_dma_domain(dev);
1898 struct iommu_dma_cookie *cookie = domain->iova_cookie;
1899 struct iova_domain *iovad = &cookie->iovad;
1900 size_t iova_start_pad = iova_offset(iovad, phys);
1901 size_t iova_end_pad = iova_offset(iovad, phys + size);
1902 dma_addr_t addr = state->addr + offset;
1903 size_t mapped = 0;
1904 int error;
1905
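	/*
	 * Bounce the IOVA-granule-unaligned head (and tail, below) through
	 * swiotlb and map the aligned middle directly, so that no granule is
	 * shared with unrelated data.
	 */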
1906 if (iova_start_pad) {
1907 size_t bounce_len = min(size, iovad->granule - iova_start_pad);
1908
1909 error = iommu_dma_iova_bounce_and_link(dev, addr, phys,
1910 bounce_len, dir, attrs, iova_start_pad);
1911 if (error)
1912 return error;
1913 state->__size |= DMA_IOVA_USE_SWIOTLB;
1914
1915 mapped += bounce_len;
1916 size -= bounce_len;
1917 if (!size)
1918 return 0;
1919 }
1920
1921 size -= iova_end_pad;
1922 error = __dma_iova_link(dev, addr + mapped, phys + mapped, size, dir,
1923 attrs);
1924 if (error)
1925 goto out_unmap;
1926 mapped += size;
1927
1928 if (iova_end_pad) {
1929 error = iommu_dma_iova_bounce_and_link(dev, addr + mapped,
1930 phys + mapped, iova_end_pad, dir, attrs, 0);
1931 if (error)
1932 goto out_unmap;
1933 state->__size |= DMA_IOVA_USE_SWIOTLB;
1934 }
1935
1936 return 0;
1937
1938 out_unmap:
1939 dma_iova_unlink(dev, state, 0, mapped, dir, attrs);
1940 return error;
1941 }
1942
1943 /**
1944 * dma_iova_link - Link a range of IOVA space
1945 * @dev: DMA device
1946 * @state: IOVA state
1947 * @phys: physical address to link
1948 * @offset: offset into the IOVA state to map into
1949 * @size: size of the buffer
1950 * @dir: DMA direction
1951 * @attrs: attributes of mapping properties
1952 *
1953 * Link a range of IOVA space for the given IOVA state without IOTLB sync.
1954 * This function is used to link multiple physical addresses in contiguous
1955 * IOVA space without performing a costly IOTLB sync for each link.
1956 *
1957 * The caller is responsible for calling dma_iova_sync() to sync the IOTLB
1958 * at the end of linkage.
1959 */
1960 int dma_iova_link(struct device *dev, struct dma_iova_state *state,
1961 phys_addr_t phys, size_t offset, size_t size,
1962 enum dma_data_direction dir, unsigned long attrs)
1963 {
1964 struct iommu_domain *domain = iommu_get_dma_domain(dev);
1965 struct iommu_dma_cookie *cookie = domain->iova_cookie;
1966 struct iova_domain *iovad = &cookie->iovad;
1967 size_t iova_start_pad = iova_offset(iovad, phys);
1968
1969 if (WARN_ON_ONCE(iova_start_pad && offset > 0))
1970 return -EIO;
1971
1972 /*
1973 * DMA_IOVA_USE_SWIOTLB is set on the state once some entry has
1974 * taken the SWIOTLB path, which should have been prevented when
1975 * the DMA_ATTR_REQUIRE_COHERENT attribute is used.
1976 */
1977 if (WARN_ON_ONCE((state->__size & DMA_IOVA_USE_SWIOTLB) &&
1978 (attrs & DMA_ATTR_REQUIRE_COHERENT)))
1979 return -EOPNOTSUPP;
1980
1981 if (!dev_is_dma_coherent(dev) && (attrs & DMA_ATTR_REQUIRE_COHERENT))
1982 return -EOPNOTSUPP;
1983
1984 if (dev_use_swiotlb(dev, size, dir) &&
1985 iova_unaligned(iovad, phys, size)) {
1986 if (attrs & (DMA_ATTR_MMIO | DMA_ATTR_REQUIRE_COHERENT))
1987 return -EPERM;
1988
1989 return iommu_dma_iova_link_swiotlb(dev, state, phys, offset,
1990 size, dir, attrs);
1991 }
1992
1993 return __dma_iova_link(dev, state->addr + offset - iova_start_pad,
1994 phys - iova_start_pad,
1995 iova_align(iovad, size + iova_start_pad), dir, attrs);
1996 }
1997 EXPORT_SYMBOL_GPL(dma_iova_link);
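/*
 * Illustrative sketch: several physically discontiguous, granule-aligned
 * chunks can be linked back to back into one contiguous IOVA range, with a
 * single IOTLB sync at the end. The chunk array, count and length below are
 * hypothetical; error unwinding is omitted for brevity.
 */
#if 0	/* usage sketch only */
static int example_link_chunks(struct device *dev,
			       struct dma_iova_state *state,
			       const phys_addr_t *chunks, int nr,
			       size_t chunk_len, enum dma_data_direction dir)
{
	size_t offset = 0;
	int i, ret;

	for (i = 0; i < nr; i++) {
		ret = dma_iova_link(dev, state, chunks[i], offset, chunk_len,
				    dir, 0);
		if (ret)
			return ret;
		offset += chunk_len;
	}

	/* One IOTLB sync covers everything linked above. */
	return dma_iova_sync(dev, state, 0, offset);
}
#endif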
1998
1999 /**
2000 * dma_iova_sync - Sync IOTLB
2001 * @dev: DMA device
2002 * @state: IOVA state
2003 * @offset: offset into the IOVA state to sync
2004 * @size: size of the buffer
2005 *
2006 * Sync IOTLB for the given IOVA state. This function should be called on
2007 * the IOVA-contiguous range created by one or more dma_iova_link() calls
2008 * to sync the IOTLB.
2009 */
2010 int dma_iova_sync(struct device *dev, struct dma_iova_state *state,
2011 size_t offset, size_t size)
2012 {
2013 struct iommu_domain *domain = iommu_get_dma_domain(dev);
2014 struct iommu_dma_cookie *cookie = domain->iova_cookie;
2015 struct iova_domain *iovad = &cookie->iovad;
2016 dma_addr_t addr = state->addr + offset;
2017 size_t iova_start_pad = iova_offset(iovad, addr);
2018
2019 if (!dev_is_dma_coherent(dev))
2020 arch_sync_dma_flush();
2021 return iommu_sync_map(domain, addr - iova_start_pad,
2022 iova_align(iovad, size + iova_start_pad));
2023 }
2024 EXPORT_SYMBOL_GPL(dma_iova_sync);
2025
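/*
 * Slow teardown path: walk the range one IOVA granule at a time so that CPU
 * cache maintenance and swiotlb bounce-buffer teardown can be applied to the
 * physical address backing each granule.
 */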
2026 static void iommu_dma_iova_unlink_range_slow(struct device *dev,
2027 dma_addr_t addr, size_t size, enum dma_data_direction dir,
2028 unsigned long attrs)
2029 {
2030 struct iommu_domain *domain = iommu_get_dma_domain(dev);
2031 struct iommu_dma_cookie *cookie = domain->iova_cookie;
2032 struct iova_domain *iovad = &cookie->iovad;
2033 size_t iova_start_pad = iova_offset(iovad, addr);
2034 bool need_sync_dma = !dev_is_dma_coherent(dev) &&
2035 !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO));
2036 dma_addr_t end = addr + size;
2037
2038 do {
2039 phys_addr_t phys;
2040 size_t len;
2041
2042 phys = iommu_iova_to_phys(domain, addr);
2043 if (WARN_ON(!phys))
2044 /* Something very horrible happened here */
2045 return;
2046
2047 len = min_t(size_t,
2048 end - addr, iovad->granule - iova_start_pad);
2049
2050 		if (need_sync_dma)
2052 arch_sync_dma_for_cpu(phys, len, dir);
2053
2054 swiotlb_tbl_unmap_single(dev, phys, len, dir, attrs);
2055
2056 addr += len;
2057 iova_start_pad = 0;
2058 } while (addr < end);
2059
2060 if (need_sync_dma)
2061 arch_sync_dma_flush();
2062 }
2063
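/*
 * Common unlink helper: take the slow per-granule path when bounce buffers
 * or CPU cache maintenance are involved, then unmap the IOVA range and, when
 * @free_iova is set, give the IOVA space back to the allocator.
 */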
2064 static void __iommu_dma_iova_unlink(struct device *dev,
2065 struct dma_iova_state *state, size_t offset, size_t size,
2066 enum dma_data_direction dir, unsigned long attrs,
2067 bool free_iova)
2068 {
2069 struct iommu_domain *domain = iommu_get_dma_domain(dev);
2070 struct iommu_dma_cookie *cookie = domain->iova_cookie;
2071 struct iova_domain *iovad = &cookie->iovad;
2072 dma_addr_t addr = state->addr + offset;
2073 size_t iova_start_pad = iova_offset(iovad, addr);
2074 struct iommu_iotlb_gather iotlb_gather;
2075 size_t unmapped;
2076
2077 if ((state->__size & DMA_IOVA_USE_SWIOTLB) ||
2078 (!dev_is_dma_coherent(dev) &&
2079 !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))))
2080 iommu_dma_iova_unlink_range_slow(dev, addr, size, dir, attrs);
2081
2082 iommu_iotlb_gather_init(&iotlb_gather);
2083 iotlb_gather.queued = free_iova && READ_ONCE(cookie->fq_domain);
2084
2085 size = iova_align(iovad, size + iova_start_pad);
2086 addr -= iova_start_pad;
2087 unmapped = iommu_unmap_fast(domain, addr, size, &iotlb_gather);
2088 WARN_ON(unmapped != size);
2089
2090 if (!iotlb_gather.queued)
2091 iommu_iotlb_sync(domain, &iotlb_gather);
2092 if (free_iova)
2093 iommu_dma_free_iova(domain, addr, size, &iotlb_gather);
2094 }
2095
2096 /**
2097 * dma_iova_unlink - Unlink a range of IOVA space
2098 * @dev: DMA device
2099 * @state: IOVA state
2100 * @offset: offset into the IOVA state to unlink
2101 * @size: size of the buffer
2102 * @dir: DMA direction
2103 * @attrs: attributes of mapping properties
2104 *
2105 * Unlink a range of IOVA space for the given IOVA state.
2106 */
2107 void dma_iova_unlink(struct device *dev, struct dma_iova_state *state,
2108 size_t offset, size_t size, enum dma_data_direction dir,
2109 unsigned long attrs)
2110 {
2111 __iommu_dma_iova_unlink(dev, state, offset, size, dir, attrs, false);
2112 }
2113 EXPORT_SYMBOL_GPL(dma_iova_unlink);
2114
2115 /**
2116 * dma_iova_destroy - Finish a DMA mapping transaction
2117 * @dev: DMA device
2118 * @state: IOVA state
2119 * @mapped_len: number of bytes to unmap
2120 * @dir: DMA direction
2121 * @attrs: attributes of mapping properties
2122 *
2123 * Unlink the IOVA range up to @mapped_len and free the entire IOVA space. The
2124 * IOVA range from the start of @state up to @mapped_len must all be linked,
2125 * and be the only linked range in @state.
2126 */
2127 void dma_iova_destroy(struct device *dev, struct dma_iova_state *state,
2128 size_t mapped_len, enum dma_data_direction dir,
2129 unsigned long attrs)
2130 {
2131 if (mapped_len)
2132 __iommu_dma_iova_unlink(dev, state, 0, mapped_len, dir, attrs,
2133 true);
2134 else
2135 /*
2136 		 * We can get here if the first dma_iova_link() call failed and
2137 		 * there is nothing to unlink; just free the IOVA space.
2138 */
2139 dma_iova_free(dev, state);
2140 }
2141 EXPORT_SYMBOL_GPL(dma_iova_destroy);
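/*
 * Illustrative sketch: the unmap side of the hypothetical sketches above.
 * A caller that linked @mapped bytes from offset 0 can tear everything down
 * with a single dma_iova_destroy() call; callers that need finer-grained
 * teardown unlink explicitly and then free the IOVA space.
 */
#if 0	/* usage sketch only */
static void example_unmap(struct device *dev, struct dma_iova_state *state,
			  size_t mapped, enum dma_data_direction dir,
			  bool explicit_unlink)
{
	if (!explicit_unlink) {
		/* Unlink everything and free the IOVA space in one go. */
		dma_iova_destroy(dev, state, mapped, dir, 0);
		return;
	}

	/* Otherwise unlink the range, then free the IOVA space. */
	dma_iova_unlink(dev, state, 0, mapped, dir, 0);
	dma_iova_free(dev, state);
}
#endif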
2142
2143 void iommu_setup_dma_ops(struct device *dev, struct iommu_domain *domain)
2144 {
2145 if (dev_is_pci(dev))
2146 dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac;
2147
2148 dev->dma_iommu = iommu_is_dma_domain(domain);
2149 if (dev->dma_iommu && iommu_dma_init_domain(domain, dev))
2150 goto out_err;
2151
2152 return;
2153 out_err:
2154 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
2155 dev_name(dev));
2156 dev->dma_iommu = false;
2157 }
2158
2159 static bool has_msi_cookie(const struct iommu_domain *domain)
2160 {
2161 return domain && (domain->cookie_type == IOMMU_COOKIE_DMA_IOVA ||
2162 domain->cookie_type == IOMMU_COOKIE_DMA_MSI);
2163 }
2164
2165 static size_t cookie_msi_granule(const struct iommu_domain *domain)
2166 {
2167 switch (domain->cookie_type) {
2168 case IOMMU_COOKIE_DMA_IOVA:
2169 return domain->iova_cookie->iovad.granule;
2170 case IOMMU_COOKIE_DMA_MSI:
2171 return PAGE_SIZE;
2172 default:
2173 BUG();
2174 }
2175 }
2176
2177 static struct list_head *cookie_msi_pages(const struct iommu_domain *domain)
2178 {
2179 switch (domain->cookie_type) {
2180 case IOMMU_COOKIE_DMA_IOVA:
2181 return &domain->iova_cookie->msi_page_list;
2182 case IOMMU_COOKIE_DMA_MSI:
2183 return &domain->msi_cookie->msi_page_list;
2184 default:
2185 BUG();
2186 }
2187 }
2188
2189 static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
2190 phys_addr_t msi_addr, struct iommu_domain *domain)
2191 {
2192 struct list_head *msi_page_list = cookie_msi_pages(domain);
2193 struct iommu_dma_msi_page *msi_page;
2194 dma_addr_t iova;
2195 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
2196 size_t size = cookie_msi_granule(domain);
2197
2198 msi_addr &= ~(phys_addr_t)(size - 1);
2199 list_for_each_entry(msi_page, msi_page_list, list)
2200 if (msi_page->phys == msi_addr)
2201 return msi_page;
2202
2203 	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
2204 if (!msi_page)
2205 return NULL;
2206
2207 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
2208 if (!iova)
2209 goto out_free_page;
2210
2211 if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
2212 goto out_free_iova;
2213
2214 INIT_LIST_HEAD(&msi_page->list);
2215 msi_page->phys = msi_addr;
2216 msi_page->iova = iova;
2217 list_add(&msi_page->list, msi_page_list);
2218 return msi_page;
2219
2220 out_free_iova:
2221 iommu_dma_free_iova(domain, iova, size, NULL);
2222 out_free_page:
2223 kfree(msi_page);
2224 return NULL;
2225 }
2226
2227 int iommu_dma_sw_msi(struct iommu_domain *domain, struct msi_desc *desc,
2228 phys_addr_t msi_addr)
2229 {
2230 struct device *dev = msi_desc_to_dev(desc);
2231 const struct iommu_dma_msi_page *msi_page;
2232
2233 if (!has_msi_cookie(domain)) {
2234 msi_desc_set_iommu_msi_iova(desc, 0, 0);
2235 return 0;
2236 }
2237
2238 iommu_group_mutex_assert(dev);
2239 msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
2240 if (!msi_page)
2241 return -ENOMEM;
2242
2243 msi_desc_set_iommu_msi_iova(desc, msi_page->iova,
2244 ilog2(cookie_msi_granule(domain)));
2245 return 0;
2246 }
2247
2248 static int iommu_dma_init(void)
2249 {
2250 if (is_kdump_kernel())
2251 static_branch_enable(&iommu_deferred_attach_enabled);
2252
2253 return iova_cache_get();
2254 }
2255 arch_initcall(iommu_dma_init);
2256