// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

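/**
 * svm_migrate_gart_map - map system memory pages into GART window 0
 *
 * @ring: sdma ring used to update the GART table and copy data
 * @npages: number of pages to map
 * @addr: dma addresses of the system memory pages
 * @gart_addr: output, GART start address of the mapping
 * @flags: KFD_IOCTL_SVM_FLAG_GPU_RO to map the pages read-only
 *
 * Build the GART PTEs inside the job IB and let sdma copy them into the GART
 * table, so the system pages can be accessed through the GART aperture by the
 * following buffer copy.
 *
 * Context: called with gtt_window_lock held (see svm_migrate_copy_memory_gart),
 * GART window 0 is reused for each mapping.
 *
 * Return:
 * 0 - OK, otherwise error code
 */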
static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	r = amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	if (r)
		goto error_free;

	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: system memory dma address array
 * @vram: vram address array
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * The ram side is accessed through contiguous GART table entries that map the
 * ram pages, the vram side uses the direct mapping of the vram pages, which
 * must be npages contiguous pages.
 * GART updates and the sdma copy share the same buffer copy function ring. The
 * copy is split into transfers of at most GTT_MAX_PAGES pages, all sdma
 * operations are serialized, and the fence of the last sdma operation is
 * returned so the caller can wait for the whole copy to finish.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);

		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, false);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

/**
 * svm_migrate_copy_done - wait for the sdma memory copy to finish
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last sdma operation.
 *
 * Context: called after svm_migrate_copy_memory
 *
 * Return:
 * 0 - success
 * otherwise - error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

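/*
 * VRAM addresses used in this file are offsets into the device memory region
 * registered with devm_memremap_pages() in svm_migrate_init().
 * svm_migrate_addr_to_pfn() and svm_migrate_addr() convert between such
 * offsets and the pfn/page of the corresponding device private struct page.
 */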
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	get_page(page);
	lock_page(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.dev->pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long cpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;
	}
	return cpages;
}

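/**
 * svm_migrate_copy_to_vram - copy migrating system memory pages to vram
 *
 * @adev: destination amdgpu device
 * @prange: svm range the pages belong to
 * @migrate: migrate_vma collected by svm_migrate_vma_to_vram
 * @mfence: output, fence of the last sdma copy
 * @scratch: scratch buffer holding the source dma addresses and the
 *           destination vram addresses
 *
 * Allocate vram for the range, dma map each migrating system page, assign it a
 * destination vram page and copy contiguous runs of pages with
 * svm_migrate_copy_memory_gart(). Pages that cannot migrate keep a zero
 * destination entry.
 *
 * Return:
 * 0 - OK, otherwise error code
 */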
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch)
{
	uint64_t npages = migrate->cpages;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	r = svm_range_vram_node_new(adev, prange, true);
	if (r) {
		dev_err(adev->dev, "fail %d to alloc vram\n", r);
		goto out;
	}

	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
			 npages << PAGE_SHIFT, &cursor);
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			dst[i] = cursor.start + (j << PAGE_SHIFT);
			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
			svm_migrate_get_vram_page(prange, migrate->dst[i]);
			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
			migrate->dst[i] |= MIGRATE_PFN_LOCKED;
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(adev->dev, "fail %d dma_map_page\n", r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, j << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif
out:
	return r;
}

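/**
 * svm_migrate_vma_to_vram - migrate one vma range of system pages to vram
 *
 * @adev: destination amdgpu device
 * @prange: svm range the vma belongs to
 * @vma: the vma the pages are mapped in
 * @start: start address to migrate
 * @end: end address to migrate
 *
 * Collect the pages with migrate_vma_setup(), copy them to vram with
 * svm_migrate_copy_to_vram(), then finish the migration with
 * migrate_vma_pages()/migrate_vma_finalize() and update the pdd page_in
 * counter.
 *
 * Return:
 * the number of pages collected for migration on success, otherwise a
 * negative error code
 */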
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end)
{
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	size_t size;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
	size *= npages;
	buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
			prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
	svm_range_free_dma_mappings(prange);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			struct mm_struct *mm)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	struct amdgpu_device *adev;
	unsigned long cpages = 0;
	long r = 0;

	if (prange->actual_loc == best_loc) {
		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
			 prange->svms, prange->start, prange->last, best_loc);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
		 prange->start, prange->last, best_loc);

	/* FIXME: workaround for page locking bug with invalid pages */
	svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		addr = next;
	}

	if (cpages)
		prange->actual_loc = best_loc;

	return r < 0 ? r : 0;
}

static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref(svm_bo);
	}
}

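/**
 * svm_migrate_copy_to_ram - copy migrating vram pages back to system memory
 *
 * @adev: amdgpu device the vram pages belong to
 * @prange: svm range the pages belong to
 * @migrate: migrate_vma collected by svm_migrate_vma_to_ram
 * @mfence: output, fence of the last sdma copy
 * @scratch: scratch buffer holding the destination dma addresses and the
 *           source vram addresses
 * @npages: number of pages in the range
 *
 * Allocate and dma map a system page for each migrating vram page, then copy
 * contiguous runs of vram pages with svm_migrate_copy_memory_gart().
 *
 * Return:
 * 0 - OK, otherwise error code
 */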
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = prange->start << PAGE_SHIFT;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "fail %d dma_map_page\n", r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

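/**
 * svm_migrate_vma_to_ram - migrate one vma range of device pages to ram
 *
 * @adev: amdgpu device the device private pages belong to
 * @prange: svm range the vma belongs to
 * @vma: the vma the pages are mapped in
 * @start: start address to migrate
 * @end: end address to migrate
 *
 * Collect the device private pages with migrate_vma_setup(), copy them back to
 * system memory with svm_migrate_copy_to_ram(), then finish the migration with
 * migrate_vma_pages()/migrate_vma_finalize() and update the pdd page_out
 * counter.
 *
 * Return:
 * the number of pages collected for migration on success, otherwise a
 * negative error code
 */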
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
{
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	size_t size;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	size = 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t);
	size *= npages;
	buf = kvmalloc(size, GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
			prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);
	svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
{
	struct amdgpu_device *adev;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long cpages = 0;
	long r = 0;

	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n",
			 prange->actual_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, prange->start, prange->last,
		 prange->actual_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		addr = next;
	}

	if (cpages) {
		svm_range_vram_node_free(prange);
		prange->actual_loc = 0;
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 struct mm_struct *mm)
{
	int r;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	r = svm_migrate_vram_to_ram(prange, mm);
	if (r)
		return r;

	return svm_migrate_ram_to_vram(prange, best_loc, mm);
}

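/**
 * svm_migrate_to_vram - migrate svm range to the requested device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 *
 * If the range is still in system memory, migrate it directly to @best_loc.
 * If it already lives on another device, go through system memory first
 * (svm_migrate_vram_to_vram).
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */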
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    struct mm_struct *mm)
{
	if (!prange->actual_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, mm);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, mm);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application with a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address;
	struct vm_area_struct *vma;
	enum svm_work_list_ops op;
	struct svm_range *parent;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	vma = vmf->vma;
	mm = vma->vm_mm;

	p = kfd_lookup_process_by_mm(vma->vm_mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	addr >>= PAGE_SHIFT;
	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, &parent);
	if (!prange) {
		pr_debug("cannot find svm range at 0x%lx\n", addr);
		r = -EFAULT;
		goto out;
	}

	mutex_lock(&parent->migrate_mutex);
	if (prange != parent)
		mutex_lock_nested(&prange->migrate_mutex, 1);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	svm_range_lock(parent);
	if (prange != parent)
		mutex_lock_nested(&prange->lock, 1);
	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
	if (prange != parent)
		mutex_unlock(&prange->lock);
	svm_range_unlock(parent);
	if (r) {
		pr_debug("failed %d to split range by granularity\n", r);
		goto out_unlock_prange;
	}

	r = svm_migrate_vram_to_ram(prange, mm);
	if (r)
		pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
			 prange, prange->start, prange->last);

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && parent == prange)
		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
	else
		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
	svm_range_add_list_work(&p->svms, parent, mm, op);
	schedule_deferred_list_work(&p->svms);

out_unlock_prange:
	if (prange != parent)
		mutex_unlock(&prange->migrate_mutex);
	mutex_unlock(&parent->migrate_mutex);
out:
	mutex_unlock(&p->svms.lock);
	kfd_unref_process(p);

	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);

	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free = svm_migrate_page_free,
	.migrate_to_ram = svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

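/**
 * svm_migrate_init - register VRAM as MEMORY_DEVICE_PRIVATE with HMM
 * @adev: amdgpu device to register device memory for
 *
 * Reserve a free iomem region covering the VRAM size and remap it as device
 * private pages, so VRAM pages can be migrated with migrate_vma and CPU page
 * faults on them are handled by svm_migrate_to_ram().
 *
 * Return:
 * 0 - OK, otherwise error code
 */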
int svm_migrate_init(struct amdgpu_device *adev)
{
	struct kfd_dev *kfddev = adev->kfd.dev;
	struct dev_pagemap *pgmap;
	struct resource *res;
	unsigned long size;
	void *r;

	/* Page migration works on Vega10 or newer */
	if (kfddev->device_info->asic_family < CHIP_VEGA10)
		return -EINVAL;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
	if (IS_ERR(res))
		return -ENOMEM;

	pgmap->type = MEMORY_DEVICE_PRIVATE;
	pgmap->nr_range = 1;
	pgmap->range.start = res->start;
	pgmap->range.end = res->end;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");

		/* Disable SVM support capability */
		pgmap->type = 0;
		devm_release_mem_region(adev->dev, res->start, resource_size(res));
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}