// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/hmm-dma.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/pci-p2pdma.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#include "internal.h"

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

enum {
	/* These flags are carried from input-to-output */
	HMM_PFN_INOUT_FLAGS = HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA |
			      HMM_PFN_P2PDMA_BUS,
};

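/*
 * hmm_pfns_fill() - fill a sub-range of the pfn array with @cpu_flags,
 * preserving any HMM_PFN_INOUT_FLAGS the caller already set on the entries.
 */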
static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++) {
		range->hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
		range->hmm_pfns[i] |= cpu_flags;
	}
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can be
	 * used two ways: the first one where the HMM user coalesces multiple
	 * page faults into one request and sets flags per pfn for those
	 * faults, and the second one where the HMM user wants to pre-fault a
	 * range with specific flags. For the latter it is a waste to have the
	 * user pre-fill the pfn arrays with a default flags value.
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault ? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask
	 * does not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

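/*
 * Handle a transparent huge pmd: either fault as requested or fill the pfn
 * array entry for every page covered by the pmd.
 */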
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
		hmm_pfns[i] |= pfn | cpu_flags;
	}
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		       unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = ptep_get(ptep);
	uint64_t pfn_req_flags = *hmm_pfn;
	uint64_t new_pfn_flags = 0;

	if (pte_none_mostly(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		goto out;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Don't fault in device private pages owned by the caller,
		 * just report the PFN.
		 */
		if (is_device_private_entry(entry) &&
		    page_pgmap(pfn_swap_entry_to_page(entry))->owner ==
		    range->dev_private_owner) {
			cpu_flags = HMM_PFN_VALID;
			if (is_writable_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			new_pfn_flags = swp_offset_pfn(entry) | cpu_flags;
			goto out;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault)
			goto out;

		if (!non_swap_entry(entry))
			goto fault;

		if (is_device_private_entry(entry))
			goto fault;

		if (is_device_exclusive_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (!vm_normal_page(walk->vma, addr, pte) &&
	    !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		new_pfn_flags = HMM_PFN_ERROR;
		goto out;
	}

	new_pfn_flags = pte_pfn(pte) | cpu_flags;
out:
	*hmm_pfn = (*hmm_pfn & HMM_PFN_INOUT_FLAGS) | new_pfn_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

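/*
 * Handle one pmd: wait on migration entries, delegate transparent huge pmds
 * to hmm_vma_handle_pmd(), otherwise walk the pte table one entry at a time.
 */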
static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = pmdp_get_lockless(pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * mmu_notifier callback.
		 *
		 * So just read pmd value and check again it's a transparent
		 * huge or device mapping one and compute corresponding pfn
		 * values.
		 */
		pmd = pmdp_get_lockless(pmdp);
		if (!pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above ie either none, migration,
	 * huge or transparent huge. At this point either it is a valid pmd
	 * entry pointing to pte directory or it is a bad pmd that will not
	 * recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	if (!ptep)
		goto again;
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

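/*
 * Handle a leaf (huge) pud in place; otherwise ask the walker to descend to
 * the pmd level below it.
 */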
static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (!pud_present(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_leaf(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
			hmm_pfns[i] |= pfn | cpu_flags;
		}
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

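/*
 * Handle a hugetlb entry: fill the pfn array for every base page backed by
 * the huge pte, or fault (with the vma lock dropped) when required.
 */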
#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(walk->mm, addr, pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		int ret;

		spin_unlock(ptl);
		hugetlb_vma_unlock_read(vma);
		/*
		 * Avoid deadlock: drop the vma lock before calling
		 * hmm_vma_fault(), which will itself potentially take and
		 * drop the vma lock. This is also correct from a
		 * protection point of view, because there is no further
		 * use here of either pte or ptl after dropping the vma
		 * lock.
		 */
		ret = hmm_vma_fault(addr, end, required_fault, walk);
		hugetlb_vma_lock_read(vma);
		return ret;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		range->hmm_pfns[i] &= HMM_PFN_INOUT_FLAGS;
		range->hmm_pfns[i] |= pfn | cpu_flags;
	}

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
				 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
	.walk_lock	= PGWALK_RDLOCK,
};

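/*
 * Typical caller pattern (sketch; see Documentation/mm/hmm.rst for the
 * authoritative flow): snapshot the notifier sequence with
 * mmu_interval_read_begin(), call hmm_range_fault() under the mmap read
 * lock, and, before using the returned pfns, retry the whole sequence if
 * mmu_interval_read_retry() reports that the range was invalidated.
 */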
/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an
 *		invalid vma (e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e., it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e., without causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);

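/*
 * The hmm_dma_* helpers below DMA map the pfns produced by hmm_range_fault()
 * for a device, either by linking them into one IOVA range or with per-page
 * dma_map_page() calls.
 */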
/**
 * hmm_dma_map_alloc - Allocate HMM map structure
 * @dev: device to allocate structure for
 * @map: HMM map to allocate
 * @nr_entries: number of entries in the map
 * @dma_entry_size: size of the DMA entry in the map
 *
 * Allocate the HMM map structure and all the lists it contains.
 * Return 0 on success, -ENOMEM on failure.
 */
int hmm_dma_map_alloc(struct device *dev, struct hmm_dma_map *map,
		      size_t nr_entries, size_t dma_entry_size)
{
	bool dma_need_sync = false;
	bool use_iova;

	WARN_ON_ONCE(!(nr_entries * PAGE_SIZE / dma_entry_size));

	/*
	 * The HMM API violates our normal DMA buffer ownership rules and can't
	 * transfer buffer ownership. The dma_addressing_limited() check is a
	 * best approximation to ensure no swiotlb buffering happens.
	 */
#ifdef CONFIG_DMA_NEED_SYNC
	dma_need_sync = !dev->dma_skip_sync;
#endif /* CONFIG_DMA_NEED_SYNC */
	if (dma_need_sync || dma_addressing_limited(dev))
		return -EOPNOTSUPP;

	map->dma_entry_size = dma_entry_size;
	map->pfn_list = kvcalloc(nr_entries, sizeof(*map->pfn_list),
				 GFP_KERNEL | __GFP_NOWARN);
	if (!map->pfn_list)
		return -ENOMEM;

	use_iova = dma_iova_try_alloc(dev, &map->state, 0,
				      nr_entries * PAGE_SIZE);
	if (!use_iova && dma_need_unmap(dev)) {
		map->dma_list = kvcalloc(nr_entries, sizeof(*map->dma_list),
					 GFP_KERNEL | __GFP_NOWARN);
		if (!map->dma_list)
			goto err_dma;
	}
	return 0;

err_dma:
	kvfree(map->pfn_list);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hmm_dma_map_alloc);

/**
 * hmm_dma_map_free - Free HMM map structure
 * @dev: device to free structure from
 * @map: HMM map containing the various lists and state
 *
 * Free the HMM map structure and all the lists it contains.
 */
void hmm_dma_map_free(struct device *dev, struct hmm_dma_map *map)
{
	if (dma_use_iova(&map->state))
		dma_iova_free(dev, &map->state);
	kvfree(map->pfn_list);
	kvfree(map->dma_list);
}
EXPORT_SYMBOL_GPL(hmm_dma_map_free);

/**
 * hmm_dma_map_pfn - Map a physical HMM page to DMA address
 * @dev: Device to map the page for
 * @map: HMM map
 * @idx: Index into the PFN and dma address arrays
 * @p2pdma_state: PCI P2P state.
 *
 * Map the page backing the PFN at @idx to a DMA address for @dev. If an IOVA
 * range was allocated by hmm_dma_map_alloc(), the page is linked into it at
 * the offset idx * dma_entry_size; otherwise it is mapped with
 * dma_map_page(). PCI peer-to-peer pages are handled according to
 * @p2pdma_state.
 *
 * Returns the DMA address on success, or DMA_MAPPING_ERROR on failure.
 */
dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
			   size_t idx,
			   struct pci_p2pdma_map_state *p2pdma_state)
{
	struct dma_iova_state *state = &map->state;
	dma_addr_t *dma_addrs = map->dma_list;
	unsigned long *pfns = map->pfn_list;
	struct page *page = hmm_pfn_to_page(pfns[idx]);
	phys_addr_t paddr = hmm_pfn_to_phys(pfns[idx]);
	size_t offset = idx * map->dma_entry_size;
	unsigned long attrs = 0;
	dma_addr_t dma_addr;
	int ret;

	if ((pfns[idx] & HMM_PFN_DMA_MAPPED) &&
	    !(pfns[idx] & HMM_PFN_P2PDMA_BUS)) {
		/*
		 * We are in this flow when there is a need to resync flags,
		 * for example when the page was already linked in a prefetch
		 * call with the READ flag and now we need to add the WRITE
		 * flag.
		 *
		 * This page was already programmed to HW and we don't want/need
		 * to unlink and link it again just to resync flags.
		 */
		if (dma_use_iova(state))
			return state->addr + offset;

		/*
		 * Without dma_need_unmap, the dma_addrs array is NULL, thus we
		 * need to regenerate the address below even if there already
		 * was a mapping. But !dma_need_unmap implies that the
		 * mapping is stateless, so this is fine.
		 */
		if (dma_need_unmap(dev))
			return dma_addrs[idx];

		/* Continue to remapping */
	}

	switch (pci_p2pdma_state(p2pdma_state, dev, page)) {
	case PCI_P2PDMA_MAP_NONE:
		break;
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
		pfns[idx] |= HMM_PFN_P2PDMA;
		break;
	case PCI_P2PDMA_MAP_BUS_ADDR:
		pfns[idx] |= HMM_PFN_P2PDMA_BUS | HMM_PFN_DMA_MAPPED;
		return pci_p2pdma_bus_addr_map(p2pdma_state, paddr);
	default:
		return DMA_MAPPING_ERROR;
	}

	if (dma_use_iova(state)) {
		ret = dma_iova_link(dev, state, paddr, offset,
				    map->dma_entry_size, DMA_BIDIRECTIONAL,
				    attrs);
		if (ret)
			goto error;

		ret = dma_iova_sync(dev, state, offset, map->dma_entry_size);
		if (ret) {
			dma_iova_unlink(dev, state, offset, map->dma_entry_size,
					DMA_BIDIRECTIONAL, attrs);
			goto error;
		}

		dma_addr = state->addr + offset;
	} else {
		if (WARN_ON_ONCE(dma_need_unmap(dev) && !dma_addrs))
			goto error;

		dma_addr = dma_map_page(dev, page, 0, map->dma_entry_size,
					DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr))
			goto error;

		if (dma_need_unmap(dev))
			dma_addrs[idx] = dma_addr;
	}
	pfns[idx] |= HMM_PFN_DMA_MAPPED;
	return dma_addr;
error:
	pfns[idx] &= ~HMM_PFN_P2PDMA;
	return DMA_MAPPING_ERROR;
}
EXPORT_SYMBOL_GPL(hmm_dma_map_pfn);

/**
 * hmm_dma_unmap_pfn - Unmap a physical HMM page from DMA address
 * @dev: Device to unmap the page from
 * @map: HMM map
 * @idx: Index of the PFN to unmap
 *
 * Returns true if the PFN was mapped and has been unmapped, false otherwise.
 */
bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
{
	const unsigned long valid_dma = HMM_PFN_VALID | HMM_PFN_DMA_MAPPED;
	struct dma_iova_state *state = &map->state;
	dma_addr_t *dma_addrs = map->dma_list;
	unsigned long *pfns = map->pfn_list;
	unsigned long attrs = 0;

	if ((pfns[idx] & valid_dma) != valid_dma)
		return false;

	if (pfns[idx] & HMM_PFN_P2PDMA_BUS)
		; /* no need to unmap bus address P2P mappings */
	else if (dma_use_iova(state)) {
		if (pfns[idx] & HMM_PFN_P2PDMA)
			attrs |= DMA_ATTR_SKIP_CPU_SYNC;
		dma_iova_unlink(dev, state, idx * map->dma_entry_size,
				map->dma_entry_size, DMA_BIDIRECTIONAL, attrs);
	} else if (dma_need_unmap(dev))
		dma_unmap_page(dev, dma_addrs[idx], map->dma_entry_size,
			       DMA_BIDIRECTIONAL);

	pfns[idx] &=
		~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA | HMM_PFN_P2PDMA_BUS);
	return true;
}
EXPORT_SYMBOL_GPL(hmm_dma_unmap_pfn);