// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>

#include "dma-iommu.h"

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct {
			struct iova_domain	iovad;

			struct iova_fq __percpu *fq;	/* Flush queue */
			/* Number of TLB flushes that have been started */
			atomic64_t		fq_flush_start_cnt;
			/* Number of TLB flushes that have been finished */
			atomic64_t		fq_flush_finish_cnt;
			/* Timer to regularly empty the flush queues */
			struct timer_list	fq_timer;
			/* 1 when timer is active, 0 when not */
			atomic_t		fq_timer_on;
		};
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
	struct mutex			mutex;
};

static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;

static int __init iommu_dma_forcedac_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_forcedac);

	if (!ret && iommu_dma_forcedac)
		pr_info("Forcing DAC for PCI devices\n");
	return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);

/* Number of entries per flush queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	struct list_head freelist;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU flush queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned int head, tail;
	spinlock_t lock;
};

#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned int fq_ring_add(struct iova_fq *fq)
{
	unsigned int idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}
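/*
 * Note on the ring arithmetic above: head == tail means empty, and
 * (tail + 1) % IOVA_FQ_SIZE == head means full, so each queue holds at
 * most IOVA_FQ_SIZE - 1 (i.e. 255) entries at once - the usual
 * one-slot-open circular buffer convention.
 */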
static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
	unsigned int idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		put_pages_list(&fq->entries[idx].freelist);
		free_iova_fast(&cookie->iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
	atomic64_inc(&cookie->fq_flush_start_cnt);
	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
	atomic64_inc(&cookie->fq_flush_finish_cnt);
}

static void fq_flush_timeout(struct timer_list *t)
{
	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
	int cpu;

	atomic_set(&cookie->fq_timer_on, 0);
	fq_flush_iotlb(cookie);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(cookie->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(cookie, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

static void queue_iova(struct iommu_dma_cookie *cookie,
		unsigned long pfn, unsigned long pages,
		struct list_head *freelist)
{
	struct iova_fq *fq;
	unsigned long flags;
	unsigned int idx;

	/*
	 * Order against the IOMMU driver's pagetable update from unmapping
	 * @freelist, to guarantee that fq_flush_iotlb() observes that if
	 * called from a different CPU before we release the lock below. Full
	 * barrier so it also pairs with iommu_dma_init_fq() to avoid seeing
	 * partially written fq state here.
	 */
	smp_mb();

	fq = raw_cpu_ptr(cookie->fq);
	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already
	 * been flushed out on another CPU. This makes the fq_full() check
	 * below less likely to be true.
	 */
	fq_ring_free(cookie, fq);

	if (fq_full(fq)) {
		fq_flush_iotlb(cookie);
		fq_ring_free(cookie, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages = pages;
	fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
	list_splice(freelist, &fq->entries[idx].freelist);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&cookie->fq_timer_on) &&
	    !atomic_xchg(&cookie->fq_timer_on, 1))
		mod_timer(&cookie->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}
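/*
 * The counter handshake above in brief: an entry is stamped with
 * fq_flush_start_cnt when queued, and fq_ring_free() only frees entries
 * whose stamp is strictly below fq_flush_finish_cnt - i.e. a full IOTLB
 * flush both started and completed after the IOVA was queued, so the
 * hardware can no longer hold a stale translation for it.
 */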
static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{
	int cpu, idx;

	if (!cookie->fq)
		return;

	del_timer_sync(&cookie->fq_timer);
	/* The IOVAs will be torn down separately, so just free our queued pages */
	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);

		fq_ring_for_each(idx, fq)
			put_pages_list(&fq->entries[idx].freelist);
	}

	free_percpu(cookie->fq);
}

/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_fq __percpu *queue;
	int i, cpu;

	if (cookie->fq_domain)
		return 0;

	atomic64_set(&cookie->fq_flush_start_cnt, 0);
	atomic64_set(&cookie->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue) {
		pr_warn("iova flush queue initialization failed\n");
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(queue, cpu);

		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);

		for (i = 0; i < IOVA_FQ_SIZE; i++)
			INIT_LIST_HEAD(&fq->entries[i].freelist);
	}

	cookie->fq = queue;

	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
	atomic_set(&cookie->fq_timer_on, 0);
	/*
	 * Prevent incomplete fq state being observable. Pairs with path from
	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
	 */
	smp_wmb();
	WRITE_ONCE(cookie->fq_domain, domain);
	return 0;
}

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	mutex_init(&domain->iova_cookie->mutex);
	return 0;
}

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
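/*
 * A minimal usage sketch (hypothetical, for illustration only): a caller
 * managing its own unmanaged domain - e.g. a VFIO-style driver - might do:
 *
 *	domain = iommu_domain_alloc(bus);
 *	ret = iommu_get_msi_cookie(domain, base);  // base chosen by caller
 *	// ...attach devices; MSI doorbells then get mapped linearly from base
 *
 * The trivial msi_iova allocator never frees, so @base should sit in a
 * region reserved for the lifetime of the domain.
 */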
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
		iommu_dma_free_fq(cookie);
		put_iova_domain(&cookie->iovad);
	}

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_get_resv_regions(dev, list);

	if (dev->of_node)
		of_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
	struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);

	return res_a->res->start > res_b->res->start;
}
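/*
 * Note that the comparator above returns only 0 or 1 rather than a full
 * three-way result; for list_sort() that is sufficient, since "a > b" being
 * false for equal elements keeps the sort stable and still yields the
 * ascending order that the gap-walking loop below depends on.
 */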
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* DMA ranges should be non-overlapping */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}

static bool dev_use_swiotlb(struct device *dev, size_t size,
			    enum dma_data_direction dir)
{
	return IS_ENABLED(CONFIG_SWIOTLB) &&
		(dev_is_untrusted(dev) ||
		 dma_kmalloc_needs_bounce(dev, size, dir));
}

static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	if (!IS_ENABLED(CONFIG_SWIOTLB))
		return false;

	if (dev_is_untrusted(dev))
		return true;

	/*
	 * If kmalloc() buffers are not DMA-safe for this device and
	 * direction, check the individual lengths in the sg list. If any
	 * element is deemed unsafe, use the swiotlb for bouncing.
	 */
	if (!dma_kmalloc_safe(dev, dir)) {
		for_each_sg(sg, s, nents, i)
			if (!dma_kmalloc_size_aligned(s->length))
				return true;
	}

	return false;
}
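/*
 * Rationale for the checks above: bouncing through swiotlb means an
 * untrusted device only ever sees a bounce page, so it cannot read or
 * corrupt unrelated kernel data that happens to share an IOVA granule
 * with the mapped buffer; likewise, bouncing undersized kmalloc()
 * buffers avoids cache-line sharing hazards on non-coherent devices.
 */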
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @limit: Last address of the IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
				 dma_addr_t limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int ret;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    limit < domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	mutex_lock(&cookie->mutex);
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			ret = -EFAULT;
			goto done_unlock;
		}

		ret = 0;
		goto done_unlock;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		goto done_unlock;

	/* If the FQ fails we can simply fall back to strict mode */
	if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
	    (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) ||
	     iommu_dma_init_fq(domain)))
		domain->type = IOMMU_DOMAIN_DMA;

	ret = iova_reserve_iommu_regions(dev, domain);

done_unlock:
	mutex_unlock(&cookie->mutex);
	return ret;
}
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (gather && gather->queued)
		queue_iova(cookie, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				&gather->freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);
	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!iotlb_gather.queued)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}
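/*
 * A worked example of the granule rounding shared by __iommu_dma_map() below
 * and __iommu_dma_unmap() above: with a 4K IOVA granule, mapping phys 0x1234
 * for 0x100 bytes allocates one full granule, maps it at phys 0x1000, and
 * returns iova + 0x234; unmap then recovers the same granule-aligned base
 * and size from the offset, so map and unmap always cover identical ranges.
 */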
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= GENMASK(MAX_ORDER, 0);
	if (!order_mask)
		return NULL;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= GENMASK(__fls(count), 0);
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
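/*
 * To illustrate the order walk above: asked for count = 20 pages with all
 * orders allowed, the first pass clamps order_mask to GENMASK(__fls(20), 0)
 * and tries order 4 (16 pages); on success count drops to 4 and the next
 * pass tries order 2, falling back to ever smaller orders on failure, so
 * the buffer is assembled from the largest blocks the allocator will
 * cheaply provide.
 */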
862 */ 863 gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP); 864 865 if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp)) 866 goto out_free_iova; 867 868 if (!(ioprot & IOMMU_CACHE)) { 869 struct scatterlist *sg; 870 int i; 871 872 for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) 873 arch_dma_prep_coherent(sg_page(sg), sg->length); 874 } 875 876 ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot, 877 gfp); 878 if (ret < 0 || ret < size) 879 goto out_free_sg; 880 881 sgt->sgl->dma_address = iova; 882 sgt->sgl->dma_length = size; 883 return pages; 884 885 out_free_sg: 886 sg_free_table(sgt); 887 out_free_iova: 888 iommu_dma_free_iova(cookie, iova, size, NULL); 889 out_free_pages: 890 __iommu_dma_free_pages(pages, count); 891 return NULL; 892 } 893 894 static void *iommu_dma_alloc_remap(struct device *dev, size_t size, 895 dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot, 896 unsigned long attrs) 897 { 898 struct page **pages; 899 struct sg_table sgt; 900 void *vaddr; 901 902 pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot, 903 attrs); 904 if (!pages) 905 return NULL; 906 *dma_handle = sgt.sgl->dma_address; 907 sg_free_table(&sgt); 908 vaddr = dma_common_pages_remap(pages, size, prot, 909 __builtin_return_address(0)); 910 if (!vaddr) 911 goto out_unmap; 912 return vaddr; 913 914 out_unmap: 915 __iommu_dma_unmap(dev, *dma_handle, size); 916 __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT); 917 return NULL; 918 } 919 920 static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, 921 size_t size, enum dma_data_direction dir, gfp_t gfp, 922 unsigned long attrs) 923 { 924 struct dma_sgt_handle *sh; 925 926 sh = kmalloc(sizeof(*sh), gfp); 927 if (!sh) 928 return NULL; 929 930 sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp, 931 PAGE_KERNEL, attrs); 932 if (!sh->pages) { 933 kfree(sh); 934 return NULL; 935 } 936 return &sh->sgt; 937 } 938 939 static void iommu_dma_free_noncontiguous(struct device *dev, size_t size, 940 struct sg_table *sgt, enum dma_data_direction dir) 941 { 942 struct dma_sgt_handle *sh = sgt_handle(sgt); 943 944 __iommu_dma_unmap(dev, sgt->sgl->dma_address, size); 945 __iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT); 946 sg_free_table(&sh->sgt); 947 kfree(sh); 948 } 949 950 static void iommu_dma_sync_single_for_cpu(struct device *dev, 951 dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) 952 { 953 phys_addr_t phys; 954 955 if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir)) 956 return; 957 958 phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); 959 if (!dev_is_dma_coherent(dev)) 960 arch_sync_dma_for_cpu(phys, size, dir); 961 962 if (is_swiotlb_buffer(dev, phys)) 963 swiotlb_sync_single_for_cpu(dev, phys, size, dir); 964 } 965 966 static void iommu_dma_sync_single_for_device(struct device *dev, 967 dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) 968 { 969 phys_addr_t phys; 970 971 if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir)) 972 return; 973 974 phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); 975 if (is_swiotlb_buffer(dev, phys)) 976 swiotlb_sync_single_for_device(dev, phys, size, dir); 977 978 if (!dev_is_dma_coherent(dev)) 979 arch_sync_dma_for_device(phys, size, dir); 980 } 981 982 static void iommu_dma_sync_sg_for_cpu(struct device *dev, 983 struct scatterlist *sgl, int nelems, 984 enum dma_data_direction dir) 985 { 986 struct scatterlist *sg; 987 int i; 988 
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (sg_dma_is_swiotlb(sgl))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
						      sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (sg_dma_is_swiotlb(sgl))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_device(dev,
							 sg_dma_address(sg),
							 sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova, dma_mask = dma_get_mask(dev);

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (dev_use_swiotlb(dev, size, dir) &&
	    iova_offset(iovad, phys | size)) {
		void *padding_start;
		size_t padding_size, aligned_size;

		if (!is_swiotlb_active(dev)) {
			dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
			return DMA_MAPPING_ERROR;
		}

		aligned_size = iova_align(iovad, size);
		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
					      iova_mask(iovad), dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
			padding_start += size;
			padding_size -= size;
		}

		memset(padding_start, 0, padding_size);
	}

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);

	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
	return iova;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_handle);
	if (WARN_ON(!phys))
		return;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	__iommu_dma_unmap(dev, dma_handle, size);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
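/*
 * To make the bounce path above concrete: with a 4K granule, mapping a
 * 0x300-byte buffer at page offset 0x200 is unaligned in both start and
 * size, so it is copied into a granule-sized swiotlb slot and the padding
 * within the bounced granule is zeroed - otherwise stale bytes from a
 * previous bounce-buffer occupant would be exposed to the device inside
 * the same IOVA granule.
 */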
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		dma_addr_t s_dma_addr = sg_dma_address(s);
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		if (sg_dma_is_bus_address(s)) {
			if (i > 0)
				cur = sg_next(cur);

			sg_dma_unmark_bus_address(s);
			sg_dma_address(cur) = s_dma_addr;
			sg_dma_len(cur) = s_length;
			sg_dma_mark_bus_address(cur);
			count++;
			cur_len = 0;
			continue;
		}

		s->offset += s_iova_off;
		s->length = s_length;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
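/*
 * For example, two adjacent granule-aligned 4K segments whose combined
 * length fits the device's max segment size, and whose IOVA does not land
 * on a segment boundary, come back from __finalise_sg() as one 8K DMA
 * segment, while the CPU-side page/offset/length fields still describe
 * the caller's original two entries.
 */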
/*
 * If mapping failed, then just restore the original list,
 * while making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_is_bus_address(s)) {
			sg_dma_unmark_bus_address(s);
		} else {
			if (sg_dma_address(s) != DMA_MAPPING_ERROR)
				s->offset += sg_dma_address(s);
			if (sg_dma_len(s))
				s->length = sg_dma_len(s);
		}
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		iommu_dma_unmap_page(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	sg_dma_mark_swiotlb(sg);

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
				s->offset, s->length, dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	ssize_t ret;
	int i;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
		ret = iommu_deferred_attach(dev, domain);
		if (ret)
			goto out;
	}

	if (dev_use_sg_swiotlb(dev, sg, nents, dir))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		if (is_pci_p2pdma_page(sg_page(s))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				/*
				 * iommu_map_sg() will skip this segment as
				 * it is marked as a bus address,
				 * __finalise_sg() will copy the dma address
				 * into the output segment.
				 */
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Mapping through host bridge should be
				 * mapped with regular IOVAs, thus we
				 * do nothing here and continue below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_restore_sg;
			}
		}

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	if (!iova_len)
		return __finalise_sg(dev, sg, nents, 0);

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova) {
		ret = -ENOMEM;
		goto out_restore_sg;
	}

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
	if (ret < 0 || ret < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
out:
	if (ret != -ENOMEM && ret != -EREMOTEIO)
		return -EINVAL;
	return ret;
}
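/*
 * A concrete run of the stash/restore trick used above: with a 4K granule,
 * a segment at offset 0x1234 of length 0x100 stashes s_iova_off = 0x234 in
 * sg_dma_address() and the raw length in sg_dma_len(), then rounds itself
 * to offset 0x1000 and length 0x1000 for iommu_map_sg(); __finalise_sg()
 * (or __invalidate_sg() on failure) later undoes exactly this transform.
 */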
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t end = 0, start;
	struct scatterlist *tmp;
	int i;

	if (sg_dma_is_swiotlb(sg)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, the start and end points
	 * just have to be determined.
	 */
	for_each_sg(sg, tmp, nents, i) {
		if (sg_dma_is_bus_address(tmp)) {
			sg_dma_unmark_bus_address(tmp);
			continue;
		}

		if (sg_dma_len(tmp) == 0)
			break;

		start = sg_dma_address(tmp);
		break;
	}

	nents -= i;
	for_each_sg(tmp, tmp, nents, i) {
		if (sg_dma_is_bus_address(tmp)) {
			sg_dma_unmark_bus_address(tmp);
			continue;
		}

		if (sg_dma_len(tmp) == 0)
			break;

		end = sg_dma_address(tmp) + sg_dma_len(tmp);
	}

	if (end)
		__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (!coherent || PageHighMem(page)) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}
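/*
 * Allocation strategy of iommu_dma_alloc() below, in summary: if the caller
 * can sleep and doesn't demand physical contiguity, build the buffer from
 * scattered pages and vmap it; if it cannot sleep and the device is
 * non-coherent, carve from the pre-remapped atomic pool; otherwise fall
 * back to a single contiguous allocation.
 */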
static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return vm_map_pages(vma, pages, nr_pages);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static size_t iommu_dma_opt_mapping_size(void)
{
	return iova_rcache_range();
}
static const struct dma_map_ops iommu_dma_ops = {
	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
	.free_noncontiguous	= iommu_dma_free_noncontiguous,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
	.opt_mapping_size	= iommu_dma_opt_mapping_size,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	if (iommu_is_dma_domain(domain)) {
		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
	kfree(msi_page);
	return NULL;
}
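/*
 * On the split between the two helpers below: iommu_dma_prepare_msi() may
 * sleep (it allocates with GFP_KERNEL), so it runs at MSI allocation time,
 * while iommu_dma_compose_msi_msg() is called when writing the message
 * under the irq descriptor lock and therefore must not fail or block -
 * hence it only looks up the cookie that prepare already created.
 */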
1701 */ 1702 mutex_lock(&msi_prepare_lock); 1703 msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); 1704 mutex_unlock(&msi_prepare_lock); 1705 1706 msi_desc_set_iommu_cookie(desc, msi_page); 1707 1708 if (!msi_page) 1709 return -ENOMEM; 1710 return 0; 1711 } 1712 1713 /** 1714 * iommu_dma_compose_msi_msg() - Apply translation to an MSI message 1715 * @desc: MSI descriptor prepared by iommu_dma_prepare_msi() 1716 * @msg: MSI message containing target physical address 1717 */ 1718 void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg) 1719 { 1720 struct device *dev = msi_desc_to_dev(desc); 1721 const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); 1722 const struct iommu_dma_msi_page *msi_page; 1723 1724 msi_page = msi_desc_get_iommu_cookie(desc); 1725 1726 if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) 1727 return; 1728 1729 msg->address_hi = upper_32_bits(msi_page->iova); 1730 msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; 1731 msg->address_lo += lower_32_bits(msi_page->iova); 1732 } 1733 1734 static int iommu_dma_init(void) 1735 { 1736 if (is_kdump_kernel()) 1737 static_branch_enable(&iommu_deferred_attach_enabled); 1738 1739 return iova_cache_get(); 1740 } 1741 arch_initcall(iommu_dma_init); 1742