// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/cc_platform.h>
#include <linux/set_memory.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/io.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

enum swiotlb_force swiotlb_force;

struct io_tlb_mem io_tlb_default_mem;

phys_addr_t swiotlb_unencrypted_base;

/*
 * Max segment that we can provide which (if pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */
static unsigned int max_segment;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = SWIOTLB_FORCE;
	else if (!strcmp(str, "noforce"))
		swiotlb_force = SWIOTLB_NO_FORCE;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
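
/*
 * Illustrative boot parameter usage (not part of this file; sizes assume the
 * usual 2 KiB IO_TLB_SIZE): "swiotlb=65536" reserves 65536 slabs (128 MiB)
 * for the bounce buffer, and "swiotlb=65536,force" additionally forces all
 * DMA mappings through the bounce buffer, which is useful for testing.
 */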

unsigned int swiotlb_max_segment(void)
{
	return io_tlb_default_mem.nslabs ? max_segment : 0;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

void swiotlb_set_max_segment(unsigned int val)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		max_segment = 1;
	else
		max_segment = rounddown(val, PAGE_SIZE);
}

unsigned long swiotlb_size_or_default(void)
{
	return default_nslabs << IO_TLB_SHIFT;
}

void __init swiotlb_adjust_size(unsigned long size)
{
	/*
	 * If the swiotlb parameter has not been specified, give a chance to
	 * architectures such as those supporting memory encryption to
	 * adjust/expand SWIOTLB size for their use.
	 */
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;
	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

void swiotlb_print_info(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	if (!mem->nslabs) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
		(mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}

/*
 * Remap swiotlb memory in the unencrypted physical address space
 * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
 * Isolation VMs).
 */
#ifdef CONFIG_HAS_IOMEM
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	void *vaddr = NULL;

	if (swiotlb_unencrypted_base) {
		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;

		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
		if (!vaddr)
			pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
			       &paddr, bytes);
	}

	return vaddr;
}
#else
static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
{
	return NULL;
}
#endif
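
/*
 * Illustrative call site (an assumption about the arch code, not defined in
 * this file): on x86 with memory encryption, mem_encrypt_init() invokes
 * swiotlb_update_mem_attributes() once set_memory_decrypted() is usable, so
 * that the early-reserved bounce buffer is shared with the hardware/host.
 */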

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	void *vaddr;
	unsigned long bytes;

	if (!mem->nslabs || mem->late_alloc)
		return;
	vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);

	mem->vaddr = swiotlb_mem_remap(mem, bytes);
	if (!mem->vaddr)
		mem->vaddr = vaddr;

	memset(mem->vaddr, 0, bytes);
}

static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
				    unsigned long nslabs, bool late_alloc)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->index = 0;
	mem->late_alloc = late_alloc;

	if (swiotlb_force == SWIOTLB_FORCE)
		mem->force_bounce = true;

	spin_lock_init(&mem->lock);
	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * If swiotlb_unencrypted_base is set, the bounce buffer memory will
	 * be remapped and cleared in swiotlb_update_mem_attributes.
	 */
	if (swiotlb_unencrypted_base)
		return;

	memset(vaddr, 0, bytes);
	mem->vaddr = vaddr;
	return;
}

int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	size_t alloc_size;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/* protect against double initialization */
	if (WARN_ON_ONCE(mem->nslabs))
		return -ENOMEM;

	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->slots)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);

	if (verbose)
		swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
	size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
	void *tlb;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return;

	/* Get IO TLB memory from the low pages */
	tlb = memblock_alloc_low(bytes, PAGE_SIZE);
	if (!tlb)
		goto fail;
	if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose))
		goto fail_free_mem;
	return;

fail_free_mem:
	memblock_free(tlb, bytes);
fail:
	pr_warn("Cannot allocate buffer");
}
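
/*
 * Illustration of the late-init path below (assuming the usual 2 KiB
 * IO_TLB_SIZE and 4 KiB pages): the request is rounded up to whole pages and
 * __get_free_pages() is retried with ever smaller orders on failure; it
 * gives up rather than proceed with roughly IO_TLB_MIN_SLABS (512 slabs,
 * about 1 MiB) or less.  A 64 MiB request that can only get 4 MiB still
 * initializes, with a warning about the reduced size.
 */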

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the page allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long nslabs =
		ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned long bytes;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;
	bytes = nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart)
		return -ENOMEM;

	if (order != get_order(bytes)) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
		nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);

	return rc;
}

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long bytes = nslabs << IO_TLB_SHIFT;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/* protect against double initialization */
	if (WARN_ON_ONCE(mem->nslabs))
		return -ENOMEM;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots)
		return -ENOMEM;

	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);

	swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}

void __init swiotlb_exit(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;

	if (!mem->nslabs)
		return;

	pr_info("tearing down default memory pool\n");
	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
	tbl_size = PAGE_ALIGN(mem->end - mem->start);
	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
	if (mem->late_alloc) {
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

	memset(mem, 0, sizeof(*mem));
}
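
/*
 * Worked example for the alignment helper below (values are illustrative):
 * a device with dma_get_min_align_mask() == 0xfff (4 KiB alignment
 * requirement) mapping the address 0x12345 gets an in-slot offset of
 * 0x12345 & 0xfff & (IO_TLB_SIZE - 1) == 0x345, i.e. the low address bits
 * the device expects to be preserved are kept when the buffer is bounced.
 */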

/*
 * Return the offset into an iotlb slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
			   enum dma_data_direction dir)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->slots[index].orig_addr;
	size_t alloc_size = mem->slots[index].alloc_size;
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
	unsigned int tlb_offset, orig_addr_offset;

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
	if (tlb_offset < orig_addr_offset) {
		dev_WARN_ONCE(dev, 1,
			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
			orig_addr_offset, tlb_offset);
		return;
	}

	tlb_offset -= orig_addr_offset;
	if (tlb_offset > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
			alloc_size, size, tlb_offset);
		return;
	}

	orig_addr += tlb_offset;
	alloc_size -= tlb_offset;

	if (size > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
			alloc_size, size);
		size = alloc_size;
	}

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	if (boundary_mask == ~0UL)
		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	return nr_slots(boundary_mask + 1);
}

static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
{
	if (index >= mem->nslabs)
		return 0;
	return index;
}
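
/*
 * Illustration (assumed values): with the default 4 GiB segment boundary
 * (dma_get_seg_boundary() == 0xffffffff) and 2 KiB slots, get_max_slots()
 * yields 0x100000000 / 0x800 == 2^21 slots, so a single bounce allocation
 * must not straddle a 4 GiB boundary in bus address space.
 */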

/*
 * Find a suitable number of IO TLB entries that will fit this request and
 * allocate a buffer from that IO TLB pool.
 */
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
			      size_t alloc_size, unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int index, wrap, count = 0, i;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned long flags;

	BUG_ON(!nslots);

	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once we found an aligned one.  For allocations of
	 * PAGE_SIZE or larger only look for page aligned allocations.
	 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
	if (alloc_size >= PAGE_SIZE)
		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);

	spin_lock_irqsave(&mem->lock, flags);
	if (unlikely(nslots > mem->nslabs - mem->used))
		goto not_found;

	index = wrap = wrap_index(mem, ALIGN(mem->index, stride));
	do {
		if (orig_addr &&
		    (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
			    (orig_addr & iotlb_align_mask)) {
			index = wrap_index(mem, index + 1);
			continue;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (!iommu_is_span_boundary(index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (mem->slots[index].list >= nslots)
				goto found;
		}
		index = wrap_index(mem, index + stride);
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&mem->lock, flags);
	return -1;

found:
	for (i = index; i < index + nslots; i++) {
		mem->slots[i].list = 0;
		mem->slots[i].alloc_size =
			alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));
	}
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
		mem->slots[i].list = ++count;
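
	/*
	 * Illustration of the free-list fixup above (assumed state): each
	 * slot's "list" value tracks how many contiguous free slots are
	 * available starting at that slot, never counting past its
	 * IO_TLB_SEGSIZE-aligned segment.  If slots index-2 and index-1 were
	 * free with list values 4 and 3 before this allocation, the loop
	 * rewrites them to 2 and 1, since the run of free slots now ends
	 * where the newly allocated region begins.
	 */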

	/*
	 * Update the indices to avoid searching in the next round.
	 */
	if (index + nslots < mem->nslabs)
		mem->index = index + nslots;
	else
		mem->index = 0;
	mem->used += nslots;

	spin_unlock_irqrestore(&mem->lock, flags);
	return index;
}

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_align_mask, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (!mem)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = swiotlb_find_slots(dev, orig_addr,
				   alloc_size + offset, alloc_align_mask);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
				"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				alloc_size, mem->nslabs, mem->used);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
	tlb_addr = slot_addr(mem->start, index) + offset;
	/*
	 * When dir == DMA_FROM_DEVICE we could omit the copy from the original
	 * buffer to the tlb buffer, if we knew for sure the device will
	 * overwrite the entire current content.  But we don't.  Thus
	 * unconditional bounce may prevent leaking swiotlb content (i.e.
	 * kernel memory) to user-space.
	 */
	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}
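
/*
 * Illustrative caller pattern (a sketch, not a copy of any in-tree user): a
 * DMA API backend that cannot use the original buffer directly bounces it
 * roughly like this, pairing the helper above with the unmap helper below:
 *
 *	swiotlb_addr = swiotlb_tbl_map_single(dev, phys, size, size, 0,
 *					      dir, attrs);
 *	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
 *		return DMA_MAPPING_ERROR;
 *	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
 *	...
 *	swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir, attrs);
 */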

static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long flags;
	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
	int count, i;

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&mem->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
		count = 0;

	/*
	 * Step 1: return the slots to the free list, merging the slots with
	 * succeeding slots
	 */
	for (i = index + nslots - 1; i >= index; i--) {
		mem->slots[i].list = ++count;
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * Step 2: merge the returned slots with the preceding slots, if
	 * available (non zero)
	 */
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;
	mem->used -= nslots;
	spin_unlock_irqrestore(&mem->lock, flags);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
			      size_t mapping_size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	swiotlb_release_slots(dev, tlb_addr);
}

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	/*
	 * Unconditional bounce is necessary to avoid corruption on
	 * sync_*_for_cpu or dma_unmap_* when the device didn't overwrite
	 * the whole length of the bounce buffer.
	 */
	swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
	BUG_ON(!valid_dma_direction(dir));
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}
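
/*
 * These sync helpers are normally reached through the DMA API rather than
 * called directly: a backend such as dma-direct checks is_swiotlb_buffer()
 * on the address being synced and, if it is a bounce buffer, forwards the
 * request here (a simplified description of the call path, not code from
 * this file).
 */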

/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
			      swiotlb_force);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
			attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* Ensure that the address returned is DMA'ble */
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}

size_t swiotlb_max_mapping_size(struct device *dev)
{
	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
}

bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

#ifdef CONFIG_DEBUG_FS
static struct dentry *debugfs_dir;

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem)
{
	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
}

static int __init swiotlb_create_default_debugfs(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	debugfs_dir = debugfs_create_dir("swiotlb", NULL);
	if (mem->nslabs) {
		mem->debugfs = debugfs_dir;
		swiotlb_create_debugfs_files(mem);
	}
	return 0;
}

late_initcall(swiotlb_create_default_debugfs);

#endif

#ifdef CONFIG_DMA_RESTRICTED_POOL

#ifdef CONFIG_DEBUG_FS
static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
{
	struct io_tlb_mem *mem = rmem->priv;

	mem->debugfs = debugfs_create_dir(rmem->name, debugfs_dir);
	swiotlb_create_debugfs_files(mem);
}
#else
static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
{
}
#endif

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	phys_addr_t tlb_addr;
	int index;

	if (!mem)
		return NULL;

	index = swiotlb_find_slots(dev, 0, size, 0);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(mem->start, index);

	return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);

	if (!is_swiotlb_buffer(dev, tlb_addr))
		return false;

	swiotlb_release_slots(dev, tlb_addr);

	return true;
}
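
/*
 * Illustrative devicetree usage (a sketch; names and addresses are made up):
 * a restricted DMA pool is declared under /reserved-memory and referenced
 * from the consuming device via memory-region, which is what causes
 * rmem_swiotlb_device_init() below to run for that device:
 *
 *	reserved-memory {
 *		restricted_dma: restricted-dma-pool@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	some-device@0 {
 *		memory-region = <&restricted_dma>;
 *	};
 */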

static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
	 * to it.
	 */
	if (!mem) {
		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs),
				     GFP_KERNEL);
		if (!mem->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
		mem->force_bounce = true;
		mem->for_alloc = true;

		rmem->priv = mem;

		rmem_swiotlb_debugfs_init(rmem);
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};

static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
		return -EINVAL;
	}

	rmem->ops = &rmem_swiotlb_ops;
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif /* CONFIG_DMA_RESTRICTED_POOL */