// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/cc_platform.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iommu-helper.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/set_memory.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

struct io_tlb_slot {
	phys_addr_t orig_addr;
	size_t alloc_size;
	unsigned int list;
};

static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;

struct io_tlb_mem io_tlb_default_mem;

phys_addr_t swiotlb_unencrypted_base;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
static unsigned long default_nareas;

/**
 * struct io_tlb_area - IO TLB memory area descriptor
 *
 * This is a single area with a single lock.
 *
 * @used: The number of used IO TLB blocks.
 * @index: The slot index to start searching in this area for next round.
 * @lock: The lock to protect the above data structures in the map and
 *	unmap calls.
 */
struct io_tlb_area {
	unsigned long used;
	unsigned int index;
	spinlock_t lock;
};

/*
 * Round up the number of slabs to the next power of 2.  The last area is
 * going to be smaller than the rest if default_nslabs is not a power of two.
 * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
 * otherwise a segment may span two or more areas.  That would conflict with
 * the tracking of free contiguous slots: free slots are treated as contiguous
 * no matter whether they cross an area boundary.
 *
 * Return true if default_nslabs was rounded up.
 */
static bool round_up_default_nslabs(void)
{
	if (!default_nareas)
		return false;

	if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
		default_nslabs = IO_TLB_SEGSIZE * default_nareas;
	else if (is_power_of_2(default_nslabs))
		return false;
	default_nslabs = roundup_pow_of_two(default_nslabs);
	return true;
}

static void swiotlb_adjust_nareas(unsigned int nareas)
{
	/* use a single area when none is specified */
	if (!nareas)
		nareas = 1;
	else if (!is_power_of_2(nareas))
		nareas = roundup_pow_of_two(nareas);

	default_nareas = nareas;

	pr_info("area num %d.\n", nareas);
	if (round_up_default_nslabs())
		pr_info("SWIOTLB bounce buffer size roundup to %luMB",
			(default_nslabs << IO_TLB_SHIFT) >> 20);
}
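
/*
 * Parse the "swiotlb=" kernel boot parameter.  The format accepted below is
 * roughly "swiotlb=<slabs>[,<areas>][,force|noforce]": the first number
 * selects the bounce buffer size in slabs (rounded up to a multiple of
 * IO_TLB_SEGSIZE), the optional second number selects the number of areas,
 * and a trailing "force"/"noforce" either forces all DMA through the bounce
 * buffer or disables the software IO TLB entirely.  As an illustrative
 * example, "swiotlb=32768,4,force" would request 32768 slabs split into
 * four areas with bouncing forced on.
 */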
static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (isdigit(*str))
		swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force_bounce = true;
	else if (!strcmp(str, "noforce"))
		swiotlb_force_disable = true;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);

unsigned long swiotlb_size_or_default(void)
{
	return default_nslabs << IO_TLB_SHIFT;
}

void __init swiotlb_adjust_size(unsigned long size)
{
	/*
	 * If swiotlb parameter has not been specified, give a chance to
	 * architectures such as those supporting memory encryption to
	 * adjust/expand SWIOTLB size for their use.
	 */
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;

	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	if (round_up_default_nslabs())
		size = default_nslabs << IO_TLB_SHIFT;
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

void swiotlb_print_info(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	if (!mem->nslabs) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
		(mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}
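
/*
 * Note on the two helpers above: nr_slots() converts a byte count into a
 * number of IO_TLB_SIZE slots, rounding up, while io_tlb_offset() gives a
 * slot's offset within its IO_TLB_SEGSIZE segment.  As an illustrative
 * example, assuming the usual 2 KiB IO_TLB_SIZE, nr_slots(5000) is 3, and
 * with an IO_TLB_SEGSIZE of 128, slot 130 has io_tlb_offset() == 2, i.e. it
 * is the third slot of its segment.
 */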

/*
 * Remap swiotlb memory in the unencrypted physical address space
 * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
 * Isolation VMs).
 */
#ifdef CONFIG_HAS_IOMEM
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	void *vaddr = NULL;

	if (swiotlb_unencrypted_base) {
		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;

		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
		if (!vaddr)
			pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
			       &paddr, bytes);
	}

	return vaddr;
}
#else
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	return NULL;
}
#endif

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	void *vaddr;
	unsigned long bytes;

	if (!mem->nslabs || mem->late_alloc)
		return;
	vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);

	mem->vaddr = swiotlb_mem_remap(mem, bytes);
	if (!mem->vaddr)
		mem->vaddr = vaddr;
}

static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
				    unsigned long nslabs, unsigned int flags,
				    bool late_alloc, unsigned int nareas)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->late_alloc = late_alloc;
	mem->nareas = nareas;
	mem->area_nslabs = nslabs / mem->nareas;

	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);

	for (i = 0; i < mem->nareas; i++) {
		spin_lock_init(&mem->areas[i].lock);
		mem->areas[i].index = 0;
		mem->areas[i].used = 0;
	}

	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * If swiotlb_unencrypted_base is set, the bounce buffer memory will
	 * be remapped and cleared in swiotlb_update_mem_attributes.
	 */
	if (swiotlb_unencrypted_base)
		return;

	memset(vaddr, 0, bytes);
	mem->vaddr = vaddr;
	return;
}
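
/*
 * A note on the 'list' encoding initialised above: within each
 * IO_TLB_SEGSIZE segment, slots[i].list holds the number of contiguous free
 * slots starting at slot i (bounded by the end of the segment), so a freshly
 * initialised segment counts down IO_TLB_SEGSIZE, IO_TLB_SEGSIZE - 1, ..., 1.
 * The allocator in swiotlb_do_find_slots() then only needs to check
 * slots[i].list >= nslots to know that a request fits, and
 * swiotlb_release_slots() restores the counts when a buffer is returned.
 */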

static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
		unsigned int flags,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
	void *tlb;

	/*
	 * By default allocate the bounce buffer memory from low memory, but
	 * allow to pick a location everywhere for hypervisors with guest
	 * memory encryption.
	 */
	if (flags & SWIOTLB_ANY)
		tlb = memblock_alloc(bytes, PAGE_SIZE);
	else
		tlb = memblock_alloc_low(bytes, PAGE_SIZE);

	if (!tlb) {
		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
			__func__, bytes);
		return NULL;
	}

	if (remap && remap(tlb, nslabs) < 0) {
		memblock_free(tlb, PAGE_ALIGN(bytes));
		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
		return NULL;
	}

	return tlb;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long nslabs;
	size_t alloc_size;
	void *tlb;

	if (!addressing_limit && !swiotlb_force_bounce)
		return;
	if (swiotlb_force_disable)
		return;

	/*
	 * default_nslabs may be changed when adjusting the number of areas,
	 * so allocate the bounce buffer after the area number has been
	 * adjusted.
	 */
	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	nslabs = default_nslabs;
	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
		if (nslabs <= IO_TLB_MIN_SLABS)
			return;
		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
	}

	if (default_nslabs != nslabs) {
		pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
			default_nslabs, nslabs);
		default_nslabs = nslabs;
	}

	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->slots) {
		pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
			__func__, alloc_size, PAGE_SIZE);
		return;
	}

	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
		default_nareas), SMP_CACHE_BYTES);
	if (!mem->areas) {
		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
		return;
	}

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
				default_nareas);

	if (flags & SWIOTLB_VERBOSE)
		swiotlb_print_info();
}

void __init swiotlb_init(bool addressing_limit, unsigned int flags)
{
	swiotlb_init_remap(addressing_limit, flags, NULL);
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the page allocator if needed.
 * This should be just like above, but with some error catching.
 */
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned char *vstart = NULL;
	unsigned int order, area_order;
	bool retried = false;
	int rc = 0;

	if (swiotlb_force_disable)
		return 0;

retry:
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
		nslabs = SLABS_PER_PAGE << order;
		retried = true;
	}

	if (!vstart)
		return -ENOMEM;

	if (remap)
		rc = remap(vstart, nslabs);
	if (rc) {
		free_pages((unsigned long)vstart, order);

		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
		if (nslabs < IO_TLB_MIN_SLABS)
			return rc;
		retried = true;
		goto retry;
	}

	if (retried) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
	}

	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	area_order = get_order(array_size(sizeof(*mem->areas),
		default_nareas));
	mem->areas = (struct io_tlb_area *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
	if (!mem->areas)
		goto error_area;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots)
		goto error_slots;

	set_memory_decrypted((unsigned long)vstart,
			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
				default_nareas);

	swiotlb_print_info();
	return 0;

error_slots:
	free_pages((unsigned long)mem->areas, area_order);
error_area:
	free_pages((unsigned long)vstart, order);
	return -ENOMEM;
}
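
/*
 * swiotlb_exit() tears down the default software IO TLB pool: the bounce
 * buffer is marked encrypted again and the backing memory, the slot array
 * and the area array are returned either to the page allocator or to
 * memblock, depending on how they were allocated.  It is a no-op when
 * bouncing was forced on the command line or when no pool was ever set up.
 */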
void __init swiotlb_exit(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;
	unsigned int area_order;

	if (swiotlb_force_bounce)
		return;

	if (!mem->nslabs)
		return;

	pr_info("tearing down default memory pool\n");
	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
	tbl_size = PAGE_ALIGN(mem->end - mem->start);
	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
	if (mem->late_alloc) {
		area_order = get_order(array_size(sizeof(*mem->areas),
			mem->nareas));
		free_pages((unsigned long)mem->areas, area_order);
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		memblock_free_late(__pa(mem->areas),
				   array_size(sizeof(*mem->areas), mem->nareas));
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

	memset(mem, 0, sizeof(*mem));
}

/*
 * Return the offset into an iotlb slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}
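
/*
 * For example (illustrative values): a device with a min_align_mask of 0xfff
 * requires that the bounce buffer address preserve the low 12 bits of the
 * original address.  swiotlb_align_offset() above handles the bits below the
 * IO_TLB_SIZE slot granule (for an original address ending in 0x234 it
 * returns 0x234, assuming 2 KiB slots), while the slot search in
 * swiotlb_do_find_slots() takes care of the remaining masked bits when
 * picking a slot.
 */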

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
			   enum dma_data_direction dir)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->slots[index].orig_addr;
	size_t alloc_size = mem->slots[index].alloc_size;
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
	unsigned int tlb_offset, orig_addr_offset;

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
	if (tlb_offset < orig_addr_offset) {
		dev_WARN_ONCE(dev, 1,
			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
			orig_addr_offset, tlb_offset);
		return;
	}

	tlb_offset -= orig_addr_offset;
	if (tlb_offset > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
			alloc_size, size, tlb_offset);
		return;
	}

	orig_addr += tlb_offset;
	alloc_size -= tlb_offset;

	if (size > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
			alloc_size, size);
		size = alloc_size;
	}

	if (PageHighMem(pfn_to_page(pfn))) {
		unsigned int offset = orig_addr & ~PAGE_MASK;
		struct page *page;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			page = pfn_to_page(pfn);
			if (dir == DMA_TO_DEVICE)
				memcpy_from_page(vaddr, page, offset, sz);
			else
				memcpy_to_page(page, offset, vaddr, sz);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
	return start + (idx << IO_TLB_SHIFT);
}

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	if (boundary_mask == ~0UL)
		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	return nr_slots(boundary_mask + 1);
}
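
/*
 * As an illustrative example: a device whose segment boundary mask is
 * 0xffffffff (a 4 GiB boundary) yields max_slots = nr_slots(0x100000000),
 * i.e. 2097152 slots assuming 2 KiB slots, while the all-ones mask takes the
 * special case above to avoid overflowing boundary_mask + 1.
 */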

static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
{
	if (index >= mem->area_nslabs)
		return 0;
	return index;
}

/*
 * Find a suitable number of contiguous IO TLB entries that will fit this
 * request and allocate a buffer from that IO TLB pool.
 */
static int swiotlb_do_find_slots(struct device *dev, int area_index,
		phys_addr_t orig_addr, size_t alloc_size,
		unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_area *area = mem->areas + area_index;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int index, wrap, count = 0, i;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned long flags;
	unsigned int slot_base;
	unsigned int slot_index;

	BUG_ON(!nslots);
	BUG_ON(area_index >= mem->nareas);

	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once we've found an aligned one.  For allocations
	 * of PAGE_SIZE or larger only look for page aligned allocations.
	 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
	if (alloc_size >= PAGE_SIZE)
		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);

	spin_lock_irqsave(&area->lock, flags);
	if (unlikely(nslots > mem->area_nslabs - area->used))
		goto not_found;

	slot_base = area_index * mem->area_nslabs;
	index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));

	do {
		slot_index = slot_base + index;

		if (orig_addr &&
		    (slot_addr(tbl_dma_addr, slot_index) &
		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
			index = wrap_area_index(mem, index + 1);
			continue;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (!iommu_is_span_boundary(slot_index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (mem->slots[slot_index].list >= nslots)
				goto found;
		}
		index = wrap_area_index(mem, index + stride);
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&area->lock, flags);
	return -1;

found:
	for (i = slot_index; i < slot_index + nslots; i++) {
		mem->slots[i].list = 0;
		mem->slots[i].alloc_size = alloc_size - (offset +
				((i - slot_index) << IO_TLB_SHIFT));
	}
	for (i = slot_index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
		mem->slots[i].list = ++count;

	/*
	 * Update the indices to avoid searching in the next round.
	 */
	if (index + nslots < mem->area_nslabs)
		area->index = index + nslots;
	else
		area->index = 0;
	area->used += nslots;
	spin_unlock_irqrestore(&area->lock, flags);
	return slot_index;
}

static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int start = raw_smp_processor_id() & (mem->nareas - 1);
	int i = start, index;

	do {
		index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
					      alloc_align_mask);
		if (index >= 0)
			return index;
		if (++i >= mem->nareas)
			i = 0;
	} while (i != start);

	return -1;
}

static unsigned long mem_used(struct io_tlb_mem *mem)
{
	int i;
	unsigned long used = 0;

	for (i = 0; i < mem->nareas; i++)
		used += mem->areas[i].used;
	return used;
}
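
/*
 * swiotlb_tbl_map_single() bounces the buffer at @orig_addr into the software
 * IO TLB: it reserves enough slots for @alloc_size plus the device's
 * alignment offset, records the original address for later syncing, copies
 * the data into the bounce buffer, and returns the physical address of the
 * bounce buffer, or DMA_MAPPING_ERROR on failure.
 */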
phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_align_mask, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (!mem || !mem->nslabs) {
		dev_warn_ratelimited(dev,
			"Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = swiotlb_find_slots(dev, orig_addr,
				   alloc_size + offset, alloc_align_mask);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
				"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				alloc_size, mem->nslabs, mem_used(mem));
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
	tlb_addr = slot_addr(mem->start, index) + offset;
	/*
	 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
	 * to the tlb buffer, if we knew for sure the device will
	 * overwrite the entire current content. But we don't. Thus
	 * unconditional bounce may prevent leaking swiotlb content (i.e.
	 * kernel memory) to user-space.
	 */
	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}

static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long flags;
	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
	int aindex = index / mem->area_nslabs;
	struct io_tlb_area *area = &mem->areas[aindex];
	int count, i;

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	BUG_ON(aindex >= mem->nareas);

	spin_lock_irqsave(&area->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
		count = 0;

	/*
	 * Step 1: return the slots to the free list, merging the slots with
	 * succeeding slots
	 */
	for (i = index + nslots - 1; i >= index; i--) {
		mem->slots[i].list = ++count;
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * Step 2: merge the returned slots with the preceding slots, if
	 * available (non-zero)
	 */
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;
	area->used -= nslots;
	spin_unlock_irqrestore(&area->lock, flags);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
			      size_t mapping_size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	swiotlb_release_slots(dev, tlb_addr);
}
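
/*
 * The two sync helpers below copy in one direction only: for_device pushes
 * CPU-side updates into the bounce buffer before the device reads it, and
 * for_cpu pulls device-written data back out before the CPU reads it.  They
 * are typically reached from the dma-direct sync paths once an address has
 * been identified as a software IO TLB buffer.
 */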
void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
	else
		BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}

/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
			attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* Ensure that the address returned is DMA'ble */
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}

size_t swiotlb_max_mapping_size(struct device *dev)
{
	int min_align_mask = dma_get_min_align_mask(dev);
	int min_align = 0;

	/*
	 * swiotlb_find_slots() skips slots according to
	 * min align mask. This affects max mapping size.
	 * Take it into account here.
	 */
	if (min_align_mask)
		min_align = roundup(min_align_mask, IO_TLB_SIZE);

	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
}

bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

static int io_tlb_used_get(void *data, u64 *val)
{
	*val = mem_used(&io_tlb_default_mem);
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
					 const char *dirname)
{
	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
	if (!mem->nslabs)
		return;

	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
			&fops_io_tlb_used);
}

static int __init __maybe_unused swiotlb_create_default_debugfs(void)
{
	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
	return 0;
}

#ifdef CONFIG_DEBUG_FS
late_initcall(swiotlb_create_default_debugfs);
#endif

#ifdef CONFIG_DMA_RESTRICTED_POOL

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	phys_addr_t tlb_addr;
	int index;

	if (!mem)
		return NULL;

	index = swiotlb_find_slots(dev, 0, size, 0);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(mem->start, index);

	return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);

	if (!is_swiotlb_buffer(dev, tlb_addr))
		return false;

	swiotlb_release_slots(dev, tlb_addr);

	return true;
}
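
/*
 * The functions below back the "restricted-dma-pool" reserved-memory
 * binding: a device whose device-tree node points at such a region
 * (typically via a memory-region reference) gets a dedicated io_tlb_mem that
 * all of its streaming DMA is bounced through, instead of the shared default
 * pool.
 */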

static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/* Set the per-device IO TLB area count to one. */
	unsigned int nareas = 1;

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
	 * to it.
	 */
	if (!mem) {
		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
		if (!mem->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		mem->areas = kcalloc(nareas, sizeof(*mem->areas),
				GFP_KERNEL);
		if (!mem->areas) {
			kfree(mem->slots);
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
					false, nareas);
		mem->for_alloc = true;

		rmem->priv = mem;

		swiotlb_create_debugfs_files(mem, rmem->name);
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};

static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
		return -EINVAL;
	}

	rmem->ops = &rmem_swiotlb_ops;
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif /* CONFIG_DMA_RESTRICTED_POOL */