// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions.  In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
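
/*
 * Worked example (illustrative, assumes no bus offset): on a system whose
 * last populated page starts at physical address 0x2_3fff_f000 (roughly
 * 9 GiB of RAM), max_dma is 0x23ffff000, fls64() returns 34, and the
 * computed mask is (1ULL << 33) * 2 - 1 == 0x3_ffff_ffff, i.e. the
 * smallest all-ones mask that covers every DMA address the device may
 * need to reach.
 */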

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
		size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}
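
/*
 * Example (illustrative): a device with a 30-bit coherent_dma_mask gets
 * GFP_DMA32 from dma_direct_optimal_gfp_mask().  If the page that comes
 * back from ZONE_DMA32 sits above 1 GiB, dma_coherent_ok() fails; since
 * GFP_DMA32 is already set the first retry branch is skipped, and the
 * allocation is retried once more with GFP_DMA (ZONE_DMA, the first
 * 16 MiB by default) before giving up.
 */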

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_mask;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
					   &phys_mask);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}
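
/*
 * Usage sketch (illustrative): dma_direct_alloc_no_mapping() is reached
 * through dma_alloc_attrs() with DMA_ATTR_NO_KERNEL_MAPPING, e.g.:
 *
 *	void *cookie = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *				       DMA_ATTR_NO_KERNEL_MAPPING);
 *
 * The returned cookie is the struct page pointer cast to void *; it must
 * not be dereferenced, only passed back to dma_free_attrs().
 */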

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		/*
		 * Fall back to the arch handler if it exists.  This should
		 * eventually go away.
		 */
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					      attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise remap if the architecture is asking for it.  But
		 * given that remapping memory is a blocking operation we'll
		 * instead have to dip into the atomic pools.
		 */
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (remap) {
			if (dma_direct_use_pool(dev, gfp))
				return dma_direct_alloc_from_pool(dev, size,
						dma_handle, gfp);
		} else {
			if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
				return NULL;
			set_uncached = true;
		}
	}

	/*
	 * Decrypting memory may block, so allocate the memory from the atomic
	 * pools if we can't block.
	 */
	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup.  These need
	 * to be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size,
				dma_pgprot(dev, PAGE_KERNEL, attrs),
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		/* dma_set_encrypted() takes a size in bytes, not a page count */
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}
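
/*
 * Usage sketch (illustrative): drivers do not call dma_direct_alloc() or
 * dma_direct_free() directly; they go through the generic wrappers, e.g.:
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
 */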

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp);
	if (!page)
		return NULL;
	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_free_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	/* dma_set_encrypted() takes a size in bytes, not a page count */
	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_device(dev, paddr, sg->length,
						       dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length, dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
						    dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
				      attrs);
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}
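
/*
 * Usage sketch (illustrative): dma_direct_map_sg() is reached through the
 * generic dma_map_sg() wrapper, which reports failure as 0 rather than a
 * negative errno:
 *
 *	int nents = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -EIO;
 *	...
 *	dma_unmap_sg(dev, sgl, count, DMA_TO_DEVICE);
 */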

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}
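
/*
 * Usage sketch (illustrative): dma_direct_need_sync() backs the generic
 * dma_need_sync() helper, which lets hot paths skip per-buffer sync calls
 * on coherent setups:
 *
 *	if (dma_need_sync(dev, dma_addr))
 *		dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
 */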

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug.  The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			  dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].offset = offset;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
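
/*
 * Usage sketch (illustrative, hypothetical addresses): on a platform where
 * RAM starts at CPU physical 0x80000000 but devices see it at bus address
 * 0x0, arch or platform code (never a driver, per the note above) could
 * set up the translation with:
 *
 *	ret = dma_direct_set_offset(dev, 0x80000000, 0x0, SZ_1G);
 *
 * after which phys_to_dma() subtracts 0x80000000 for this device.
 */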