// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/cc_platform.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iommu-helper.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/set_memory.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;

struct io_tlb_mem io_tlb_default_mem;

phys_addr_t swiotlb_unencrypted_base;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;

static int __init
setup_io_tlb_npages(char *str)
{
        if (isdigit(*str)) {
                /* avoid tail segment of size < IO_TLB_SEGSIZE */
                default_nslabs =
                        ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
        }
        if (*str == ',')
                ++str;
        if (!strcmp(str, "force"))
                swiotlb_force_bounce = true;
        else if (!strcmp(str, "noforce"))
                swiotlb_force_disable = true;

        return 0;
}
early_param("swiotlb", setup_io_tlb_npages);

unsigned int swiotlb_max_segment(void)
{
        if (!io_tlb_default_mem.nslabs)
                return 0;
        return rounddown(io_tlb_default_mem.nslabs << IO_TLB_SHIFT, PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

unsigned long swiotlb_size_or_default(void)
{
        return default_nslabs << IO_TLB_SHIFT;
}
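
/*
 * Usage note (illustrative, not part of the original source): the "swiotlb="
 * early parameter parsed by setup_io_tlb_npages() above takes an optional
 * slot count followed by an optional "force"/"noforce" keyword, e.g. on the
 * kernel command line:
 *
 *	swiotlb=65536,force
 *
 * With the usual IO_TLB_SHIFT of 11 (2 KiB slots, see <linux/swiotlb.h>)
 * this reserves 65536 slots (128 MiB) and bounces all streaming DMA through
 * them; the exact slot size may differ by configuration.
 */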

void __init swiotlb_adjust_size(unsigned long size)
{
        /*
         * If swiotlb parameter has not been specified, give a chance to
         * architectures such as those supporting memory encryption to
         * adjust/expand SWIOTLB size for their use.
         */
        if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
                return;
        size = ALIGN(size, IO_TLB_SIZE);
        default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
        pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

void swiotlb_print_info(void)
{
        struct io_tlb_mem *mem = &io_tlb_default_mem;

        if (!mem->nslabs) {
                pr_warn("No low mem\n");
                return;
        }

        pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
                (mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
        return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
        return DIV_ROUND_UP(val, IO_TLB_SIZE);
}

/*
 * Remap swiotlb memory in the unencrypted physical address space
 * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
 * Isolation VMs).
 */
#ifdef CONFIG_HAS_IOMEM
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
        void *vaddr = NULL;

        if (swiotlb_unencrypted_base) {
                phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;

                vaddr = memremap(paddr, bytes, MEMREMAP_WB);
                if (!vaddr)
                        pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
                               &paddr, bytes);
        }

        return vaddr;
}
#else
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
        return NULL;
}
#endif

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
        struct io_tlb_mem *mem = &io_tlb_default_mem;
        void *vaddr;
        unsigned long bytes;

        if (!mem->nslabs || mem->late_alloc)
                return;
        vaddr = phys_to_virt(mem->start);
        bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
        set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);

        mem->vaddr = swiotlb_mem_remap(mem, bytes);
        if (!mem->vaddr)
                mem->vaddr = vaddr;
}

static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
                                    unsigned long nslabs, bool late_alloc)
{
        void *vaddr = phys_to_virt(start);
        unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

        mem->nslabs = nslabs;
        mem->start = start;
        mem->end = mem->start + bytes;
        mem->index = 0;
        mem->late_alloc = late_alloc;

        if (swiotlb_force_bounce)
                mem->force_bounce = true;

        spin_lock_init(&mem->lock);
        for (i = 0; i < mem->nslabs; i++) {
                mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
        }

        /*
         * If swiotlb_unencrypted_base is set, the bounce buffer memory will
         * be remapped and cleared in swiotlb_update_mem_attributes.
         */
        if (swiotlb_unencrypted_base)
                return;

        memset(vaddr, 0, bytes);
        mem->vaddr = vaddr;
        return;
}
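
/*
 * Sizing sketch (illustrative, not from the original file): with the typical
 * IO_TLB_SHIFT of 11 and IO_TLB_SEGSIZE of 128, the default pool of
 * IO_TLB_DEFAULT_SIZE (64 MiB) works out to
 *
 *	64 MiB / 2 KiB per slot = 32768 slots = 256 segments of 128 slots.
 *
 * Memory-encryption code may grow that early in boot, for example
 * (hypothetical caller):
 *
 *	swiotlb_adjust_size(memblock_phys_mem_size() * 6 / 100);
 *
 * which only takes effect while default_nslabs still holds its compile-time
 * default, i.e. before any "swiotlb=" override, as the check in
 * swiotlb_adjust_size() above shows.
 */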

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
                int (*remap)(void *tlb, unsigned long nslabs))
{
        struct io_tlb_mem *mem = &io_tlb_default_mem;
        unsigned long nslabs = default_nslabs;
        size_t alloc_size;
        size_t bytes;
        void *tlb;

        if (!addressing_limit && !swiotlb_force_bounce)
                return;
        if (swiotlb_force_disable)
                return;

        /*
         * By default allocate the bounce buffer memory from low memory, but
         * allow picking a location anywhere for hypervisors with guest
         * memory encryption.
         */
retry:
        bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
        if (flags & SWIOTLB_ANY)
                tlb = memblock_alloc(bytes, PAGE_SIZE);
        else
                tlb = memblock_alloc_low(bytes, PAGE_SIZE);
        if (!tlb)
                panic("%s: failed to allocate tlb structure\n", __func__);

        if (remap && remap(tlb, nslabs) < 0) {
                memblock_free(tlb, PAGE_ALIGN(bytes));

                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
                if (nslabs < IO_TLB_MIN_SLABS)
                        panic("%s: Failed to remap %zu bytes\n",
                              __func__, bytes);
                goto retry;
        }

        alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
        mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
        if (!mem->slots)
                panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
                      __func__, alloc_size, PAGE_SIZE);

        swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);
        mem->force_bounce = flags & SWIOTLB_FORCE;

        if (flags & SWIOTLB_VERBOSE)
                swiotlb_print_info();
}

void __init swiotlb_init(bool addressing_limit, unsigned int flags)
{
        return swiotlb_init_remap(addressing_limit, flags, NULL);
}

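/*
 * Call-site sketch (illustrative, not from this file): an architecture's
 * boot code typically decides whether bouncing may ever be needed and then
 * calls something like
 *
 *	swiotlb_init(max_pfn > PFN_DOWN(DMA_BIT_MASK(32)), SWIOTLB_VERBOSE);
 *
 * passing SWIOTLB_ANY as well when the pool does not have to sit in low
 * memory, or using swiotlb_init_remap() with a remap callback when the
 * buffer must be re-mapped first (e.g. Xen or memory-encrypted guests).
 */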
/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the page allocator if needed.
 * This should be just like above, but with some error catching.
 */
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
                int (*remap)(void *tlb, unsigned long nslabs))
{
        struct io_tlb_mem *mem = &io_tlb_default_mem;
        unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
        unsigned long bytes;
        unsigned char *vstart = NULL;
        unsigned int order;
        int rc = 0;

        if (swiotlb_force_disable)
                return 0;

retry:
        order = get_order(nslabs << IO_TLB_SHIFT);
        nslabs = SLABS_PER_PAGE << order;
        bytes = nslabs << IO_TLB_SHIFT;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
                                                  order);
                if (vstart)
                        break;
                order--;
        }

        if (!vstart)
                return -ENOMEM;

        if (order != get_order(bytes)) {
                pr_warn("only able to allocate %ld MB\n",
                        (PAGE_SIZE << order) >> 20);
                nslabs = SLABS_PER_PAGE << order;
                bytes = nslabs << IO_TLB_SHIFT;
        }
        if (remap)
                rc = remap(vstart, nslabs);
        if (rc) {
                free_pages((unsigned long)vstart, order);

                nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
                if (nslabs < IO_TLB_MIN_SLABS)
                        return rc;
                goto retry;
        }

        mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                get_order(array_size(sizeof(*mem->slots), nslabs)));
        if (!mem->slots) {
                free_pages((unsigned long)vstart, order);
                return -ENOMEM;
        }

        set_memory_decrypted((unsigned long)vstart, bytes >> PAGE_SHIFT);
        swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true);

        swiotlb_print_info();
        return 0;
}

void __init swiotlb_exit(void)
{
        struct io_tlb_mem *mem = &io_tlb_default_mem;
        unsigned long tbl_vaddr;
        size_t tbl_size, slots_size;

        if (swiotlb_force_bounce)
                return;

        if (!mem->nslabs)
                return;

        pr_info("tearing down default memory pool\n");
        tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
        tbl_size = PAGE_ALIGN(mem->end - mem->start);
        slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

        set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
        if (mem->late_alloc) {
                free_pages(tbl_vaddr, get_order(tbl_size));
                free_pages((unsigned long)mem->slots, get_order(slots_size));
        } else {
                memblock_free_late(mem->start, tbl_size);
                memblock_free_late(__pa(mem->slots), slots_size);
        }

        memset(mem, 0, sizeof(*mem));
}

/*
 * Return the offset into an io_tlb slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
        return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}

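/*
 * Alignment sketch (illustrative): dma_get_min_align_mask() lets a driver
 * demand that bounce buffers preserve the low address bits of the original
 * buffer.  A hypothetical driver that needs 4 KiB-granule offsets kept
 * intact would set the mask once at probe time:
 *
 *	dma_set_min_align_mask(dev, 4096 - 1);
 *
 * after which swiotlb_align_offset() above keeps the offset within a slot
 * (the low bits below IO_TLB_SIZE) identical between the original and the
 * bounced address.
 */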
/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
                           enum dma_data_direction dir)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = mem->slots[index].orig_addr;
        size_t alloc_size = mem->slots[index].alloc_size;
        unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
        unsigned int tlb_offset, orig_addr_offset;

        if (orig_addr == INVALID_PHYS_ADDR)
                return;

        tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
        orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
        if (tlb_offset < orig_addr_offset) {
                dev_WARN_ONCE(dev, 1,
                        "Access before mapping start detected. orig offset %u, requested offset %u.\n",
                        orig_addr_offset, tlb_offset);
                return;
        }

        tlb_offset -= orig_addr_offset;
        if (tlb_offset > alloc_size) {
                dev_WARN_ONCE(dev, 1,
                        "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
                        alloc_size, size, tlb_offset);
                return;
        }

        orig_addr += tlb_offset;
        alloc_size -= tlb_offset;

        if (size > alloc_size) {
                dev_WARN_ONCE(dev, 1,
                        "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
                        alloc_size, size);
                size = alloc_size;
        }

        if (PageHighMem(pfn_to_page(pfn))) {
                /* The buffer does not have a mapping.  Map it in and copy */
                unsigned int offset = orig_addr & ~PAGE_MASK;
                char *buffer;
                unsigned int sz = 0;
                unsigned long flags;

                while (size) {
                        sz = min_t(size_t, PAGE_SIZE - offset, size);

                        local_irq_save(flags);
                        buffer = kmap_atomic(pfn_to_page(pfn));
                        if (dir == DMA_TO_DEVICE)
                                memcpy(vaddr, buffer + offset, sz);
                        else
                                memcpy(buffer + offset, vaddr, sz);
                        kunmap_atomic(buffer);
                        local_irq_restore(flags);

                        size -= sz;
                        pfn++;
                        vaddr += sz;
                        offset = 0;
                }
        } else if (dir == DMA_TO_DEVICE) {
                memcpy(vaddr, phys_to_virt(orig_addr), size);
        } else {
                memcpy(phys_to_virt(orig_addr), vaddr, size);
        }
}

#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
        if (boundary_mask == ~0UL)
                return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
        return nr_slots(boundary_mask + 1);
}

static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
{
        if (index >= mem->nslabs)
                return 0;
        return index;
}

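/*
 * Free-list sketch (illustrative, not from the original file): each
 * io_tlb_slot's "list" field records how many slots, counting itself, are
 * free up to the end of its IO_TLB_SEGSIZE segment.  With a segment size of
 * 128, a completely free segment therefore looks like
 *
 *	list: 128, 127, 126, ..., 2, 1
 *
 * so the search below only has to test a single entry (list >= nslots) to
 * know that enough contiguous slots follow, and freeing only has to walk
 * backwards re-counting until it hits a still-allocated slot or a segment
 * boundary.
 */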
/*
 * Find a suitable number of contiguous IO TLB entries that will fit this
 * request and allocate a buffer from that IO TLB pool.
 */
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
                              size_t alloc_size, unsigned int alloc_align_mask)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        unsigned long boundary_mask = dma_get_seg_boundary(dev);
        dma_addr_t tbl_dma_addr =
                phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
        unsigned long max_slots = get_max_slots(boundary_mask);
        unsigned int iotlb_align_mask =
                dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
        unsigned int nslots = nr_slots(alloc_size), stride;
        unsigned int index, wrap, count = 0, i;
        unsigned int offset = swiotlb_align_offset(dev, orig_addr);
        unsigned long flags;

        BUG_ON(!nslots);

        /*
         * For mappings with an alignment requirement don't bother looping to
         * unaligned slots once we have found an aligned one.  For allocations
         * of PAGE_SIZE or larger only look for page aligned allocations.
         */
        stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
        if (alloc_size >= PAGE_SIZE)
                stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
        stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);

        spin_lock_irqsave(&mem->lock, flags);
        if (unlikely(nslots > mem->nslabs - mem->used))
                goto not_found;

        index = wrap = wrap_index(mem, ALIGN(mem->index, stride));
        do {
                if (orig_addr &&
                    (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
                    (orig_addr & iotlb_align_mask)) {
                        index = wrap_index(mem, index + 1);
                        continue;
                }

                /*
                 * If we find a slot that indicates we have 'nslots' number of
                 * contiguous buffers, we allocate the buffers from that slot
                 * and mark the entries as '0' indicating unavailable.
                 */
                if (!iommu_is_span_boundary(index, nslots,
                                            nr_slots(tbl_dma_addr),
                                            max_slots)) {
                        if (mem->slots[index].list >= nslots)
                                goto found;
                }
                index = wrap_index(mem, index + stride);
        } while (index != wrap);

not_found:
        spin_unlock_irqrestore(&mem->lock, flags);
        return -1;

found:
        for (i = index; i < index + nslots; i++) {
                mem->slots[i].list = 0;
                mem->slots[i].alloc_size =
                        alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));
        }
        for (i = index - 1;
             io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
             mem->slots[i].list; i--)
                mem->slots[i].list = ++count;

        /*
         * Update the indices to avoid searching in the next round.
         */
        if (index + nslots < mem->nslabs)
                mem->index = index + nslots;
        else
                mem->index = 0;
        mem->used += nslots;

        spin_unlock_irqrestore(&mem->lock, flags);
        return index;
}

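/*
 * Worked example (illustrative, assuming 2 KiB slots and 4 KiB pages):
 * mapping 9000 bytes with no minimum alignment mask needs
 * nr_slots(9000) = 5 contiguous slots; because the allocation exceeds
 * PAGE_SIZE the stride above is raised to PAGE_SIZE / IO_TLB_SIZE = 2,
 * so the search only probes page-aligned starting slots.
 */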
phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
                size_t mapping_size, size_t alloc_size,
                unsigned int alloc_align_mask, enum dma_data_direction dir,
                unsigned long attrs)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        unsigned int offset = swiotlb_align_offset(dev, orig_addr);
        unsigned int i;
        int index;
        phys_addr_t tlb_addr;

        if (!mem)
                panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

        if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

        if (mapping_size > alloc_size) {
                dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
                              mapping_size, alloc_size);
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        index = swiotlb_find_slots(dev, orig_addr,
                                   alloc_size + offset, alloc_align_mask);
        if (index == -1) {
                if (!(attrs & DMA_ATTR_NO_WARN))
                        dev_warn_ratelimited(dev,
                                "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
                                alloc_size, mem->nslabs, mem->used);
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nr_slots(alloc_size + offset); i++)
                mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
        tlb_addr = slot_addr(mem->start, index) + offset;
        /*
         * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
         * to the tlb buffer, if we knew for sure the device will
         * overwrite the entire current content.  But we don't.  Thus
         * unconditional bounce may prevent leaking swiotlb content (i.e.
         * kernel memory) to user-space.
         */
        swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
        return tlb_addr;
}

static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        unsigned long flags;
        unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
        int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
        int nslots = nr_slots(mem->slots[index].alloc_size + offset);
        int count, i;

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        spin_lock_irqsave(&mem->lock, flags);
        if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
                count = mem->slots[index + nslots].list;
        else
                count = 0;

        /*
         * Step 1: return the slots to the free list, merging the slots with
         * succeeding slots
         */
        for (i = index + nslots - 1; i >= index; i--) {
                mem->slots[i].list = ++count;
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
        }

        /*
         * Step 2: merge the returned slots with the preceding slots, if
         * available (non zero)
         */
        for (i = index - 1;
             io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
             i--)
                mem->slots[i].list = ++count;
        mem->used -= nslots;
        spin_unlock_irqrestore(&mem->lock, flags);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
                              size_t mapping_size, enum dma_data_direction dir,
                              unsigned long attrs)
{
        /*
         * First, sync the memory before unmapping the entry
         */
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

        swiotlb_release_slots(dev, tlb_addr);
}

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir)
{
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
        else
                BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
                size_t size, enum dma_data_direction dir)
{
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
        else
                BUG_ON(dir != DMA_TO_DEVICE);
}

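/*
 * Caller sketch (simplified, illustrative): the dma-direct code bounces
 * through swiotlb_map() below when the device cannot address the buffer
 * directly, roughly like
 *
 *	dma_addr = phys_to_dma(dev, phys);
 *	if (is_swiotlb_force_bounce(dev) ||
 *	    unlikely(!dma_capable(dev, dma_addr, size, true)))
 *		return swiotlb_map(dev, phys, size, dir, attrs);
 *
 * so drivers never call into this file directly for streaming mappings.
 */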
/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        phys_addr_t swiotlb_addr;
        dma_addr_t dma_addr;

        trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);

        swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
                        attrs);
        if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
                return DMA_MAPPING_ERROR;

        /* Ensure that the address returned is DMA'ble */
        dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
        if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
                swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
                        attrs | DMA_ATTR_SKIP_CPU_SYNC);
                dev_WARN_ONCE(dev, 1,
                        "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                        &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                return DMA_MAPPING_ERROR;
        }

        if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                arch_sync_dma_for_device(swiotlb_addr, size, dir);
        return dma_addr;
}

size_t swiotlb_max_mapping_size(struct device *dev)
{
        return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
}

bool is_swiotlb_active(struct device *dev)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

        return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
                                         const char *dirname)
{
        mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
        if (!mem->nslabs)
                return;

        debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
        debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
}

static int __init __maybe_unused swiotlb_create_default_debugfs(void)
{
        swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
        return 0;
}

#ifdef CONFIG_DEBUG_FS
late_initcall(swiotlb_create_default_debugfs);
#endif

#ifdef CONFIG_DMA_RESTRICTED_POOL

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
        phys_addr_t tlb_addr;
        int index;

        if (!mem)
                return NULL;

        index = swiotlb_find_slots(dev, 0, size, 0);
        if (index == -1)
                return NULL;

        tlb_addr = slot_addr(mem->start, index);

        return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
        phys_addr_t tlb_addr = page_to_phys(page);

        if (!is_swiotlb_buffer(dev, tlb_addr))
                return false;

        swiotlb_release_slots(dev, tlb_addr);

        return true;
}

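/*
 * Device tree sketch (illustrative, based on the "restricted-dma-pool"
 * reserved-memory binding): a peripheral can be confined to a dedicated
 * bounce pool with something like
 *
 *	reserved-memory {
 *		restricted_dma: restricted-dma-pool@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	crypto@14000000 {
 *		memory-region = <&restricted_dma>;
 *	};
 *
 * (the node names and addresses above are hypothetical).
 * rmem_swiotlb_device_init() below is then called when the device is bound
 * to its pool, and all of its streaming DMA is bounced through that region.
 */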
static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
                                    struct device *dev)
{
        struct io_tlb_mem *mem = rmem->priv;
        unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

        /*
         * Since multiple devices can share the same pool, the private data,
         * io_tlb_mem struct, will be initialized by the first device attached
         * to it.
         */
        if (!mem) {
                mem = kzalloc(sizeof(*mem), GFP_KERNEL);
                if (!mem)
                        return -ENOMEM;

                mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
                if (!mem->slots) {
                        kfree(mem);
                        return -ENOMEM;
                }

                set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
                                     rmem->size >> PAGE_SHIFT);
                swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
                mem->force_bounce = true;
                mem->for_alloc = true;

                rmem->priv = mem;

                swiotlb_create_debugfs_files(mem, rmem->name);
        }

        dev->dma_io_tlb_mem = mem;

        return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
                                        struct device *dev)
{
        dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
        .device_init = rmem_swiotlb_device_init,
        .device_release = rmem_swiotlb_device_release,
};

static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
        unsigned long node = rmem->fdt_node;

        if (of_get_flat_dt_prop(node, "reusable", NULL) ||
            of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
            of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
            of_get_flat_dt_prop(node, "no-map", NULL))
                return -EINVAL;

        if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
                pr_err("Restricted DMA pool must be accessible within the linear mapping.");
                return -EINVAL;
        }

        rmem->ops = &rmem_swiotlb_ops;
        pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
                &rmem->base, (unsigned long)rmem->size / SZ_1M);
        return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif /* CONFIG_DMA_RESTRICTED_POOL */