// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_mn.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

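/*
 * svm_migrate_gart_map - map system pages into GART window 0 for sdma access
 *
 * The indirect buffer allocated below carries two things: the first num_dw
 * dwords hold the sdma copy-buffer packet, and the following num_bytes
 * (npages * 8) hold the GART PTEs built by amdgpu_gart_map() at cpu_addr.
 * The emitted copy writes those PTEs from the IB into the GART table BO, so
 * that GART window 0 (adev->gmc.gart_start, returned in *gart_addr) maps the
 * given system pages for the subsequent buffer copy.
 */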
static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED, &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, false);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_free;

	dma_fence_put(fence);

	return r;

error_free:
	amdgpu_job_free(job);
	return r;
}

/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: system DMA addresses of the pages to be copied
 * @vram: vram destination DMA addresses
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * The ram side is addressed through contiguous GART table entries mapping the
 * ram pages, the vram side through the direct mapping of the vram pages,
 * which must be npages contiguous pages.
 * GART updates and the sdma copy use the same buffer-copy function ring. The
 * copy is split into transfers of at most GTT_MAX_PAGES pages each; all sdma
 * operations are serialized, and the fence of the last sdma operation is
 * returned so the caller can check that the whole copy is done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r = 0;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);
		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, false);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

/**
 * svm_migrate_copy_done - wait for the sdma memory copy to finish
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled. If the copy was split into multiple
 * sdma operations, this is the fence of the last sdma operation.
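/*
 * The helpers below translate between VRAM byte offsets and the pfns of the
 * ZONE_DEVICE pages registered in svm_migrate_init(): the start of the pgmap
 * range is added to (or subtracted from) the offset, so VRAM offset 0
 * corresponds to the first page of the pgmap range.
 */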
 *
 * Context: called after svm_migrate_copy_memory_gart
 *
 * Return:
 * 0 - success
 * otherwise - error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	lock_page(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.dev->pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
	unsigned long cpages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;
	}
	return cpages;
}

static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
{
	unsigned long upages = 0;
	unsigned long i;

	for (i = 0; i < migrate->npages; i++) {
		if (migrate->src[i] & MIGRATE_PFN_VALID &&
		    !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
			upages++;
	}
	return upages;
}

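/*
 * Layout of the scratch buffer passed to the copy helpers: the callers
 * (svm_migrate_vma_to_vram() and svm_migrate_vma_to_ram()) allocate, per
 * page, one migrate.src and one migrate.dst pfn slot plus
 * sizeof(dma_addr_t) + sizeof(uint64_t) bytes of scratch. The copy helpers
 * split that scratch area into npages DMA addresses of the system pages
 * followed by npages VRAM offsets; which array is the copy source and which
 * the destination depends on the migration direction.
 */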
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch)
{
	uint64_t npages = migrate->npages;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	r = svm_range_vram_node_new(adev, prange, true);
	if (r) {
		dev_dbg(adev->dev, "fail %d to alloc vram\n", r);
		goto out;
	}

	amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT,
			 npages << PAGE_SHIFT, &cursor);
	for (i = j = 0; i < npages; i++) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			dst[i] = cursor.start + (j << PAGE_SHIFT);
			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
			svm_migrate_get_vram_page(prange, migrate->dst[i]);
			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_TO_DEVICE);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(adev->dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i--) {
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
		}
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif
out:
	return r;
}

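/*
 * svm_migrate_vma_to_vram() drives one migrate_vma cycle for the part of the
 * range covered by a single VMA: migrate_vma_setup() isolates the source
 * system pages, svm_migrate_copy_to_vram() allocates VRAM and issues the sdma
 * copies, migrate_vma_pages() installs the device pages, and only after the
 * last sdma fence has signaled (svm_migrate_copy_done()) does
 * migrate_vma_finalize() complete the migration. Returns the number of pages
 * collected by migrate_vma_setup() on success, or a negative error code.
 */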
static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end)
{
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	unsigned long cpages = 0;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
	migrate_vma_pages(&migrate);

	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 svm_migrate_successful_pages(&migrate), cpages,
		 migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
	svm_range_free_dma_mappings(prange);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);

		return cpages;
	}
	return r;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: the process mm structure
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			struct mm_struct *mm)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	struct amdgpu_device *adev;
	unsigned long cpages = 0;
	long r = 0;

	if (prange->actual_loc == best_loc) {
		pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
			 prange->svms, prange->start, prange->last, best_loc);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, best_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] to gpu 0x%x\n", prange->svms,
		 prange->start, prange->last, best_loc);

	/* FIXME: workaround for page locking bug with invalid pages */
	svm_range_prefault(prange, mm, SVM_ADEV_PGMAP_OWNER(adev));

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			cpages += r;
		}
		addr = next;
	}

	if (cpages)
		prange->actual_loc = best_loc;

	return r < 0 ? r : 0;
}

static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}

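/*
 * svm_migrate_copy_to_ram() mirrors svm_migrate_copy_to_vram(): it walks the
 * collected device pages, allocates and DMA-maps a system page for each one,
 * and batches the sdma copies over runs of contiguous VRAM source pages,
 * flushing the batch whenever the VRAM addresses stop being contiguous or an
 * entry turns out not to be a device page.
 */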
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = prange->start << PAGE_SHIFT;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i--) {
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
		}
	}

	return r;
}

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @adev: amdgpu device to migrate from
 * @prange: svm range structure
 * @vma: vm_area_struct that range [start, end] belongs to
 * @start: range start virtual address
 * @end: range end virtual address (exclusive)
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - success with all pages migrated
 * negative values - indicate error
 * positive values - partial migration, number of pages not migrated
 */
static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
{
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long upages = npages;
	unsigned long cpages = 0;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
	if (adev->gmc.xgmi.connected_to_cpu)
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
	else
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		upages = svm_migrate_unsuccessful_pages(&migrate);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages migrated\n", cpages);

	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
				    scratch, npages);
	migrate_vma_pages(&migrate);

	upages = svm_migrate_unsuccessful_pages(&migrate);
	pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
		 upages, cpages, migrate.npages);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);
	svm_range_dma_unmap(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
out:
	if (!r && cpages) {
		pdd = svm_range_get_pdd_by_adev(prange, adev);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
	}
	return r ? r : upages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
{
	struct amdgpu_device *adev;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long upages = 0;
	long r = 0;

	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	adev = svm_range_get_adev_by_id(prange, prange->actual_loc);
	if (!adev) {
		pr_debug("failed to get device by id 0x%x\n",
			 prange->actual_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, prange->start, prange->last,
		 prange->actual_loc);

	start = prange->start << PAGE_SHIFT;
	end = (prange->last + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start) {
			pr_debug("failed to find vma for prange %p\n", prange);
			r = -EFAULT;
			break;
		}

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
		if (r < 0) {
			pr_debug("failed %ld to migrate prange %p\n", r, prange);
			break;
		} else {
			upages += r;
		}
		addr = next;
	}

	if (r >= 0 && !upages) {
		svm_range_vram_node_free(prange);
		prange->actual_loc = 0;
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @mm: process mm, use current->mm if NULL
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 struct mm_struct *mm)
{
	int r, retries = 3;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	do {
		r = svm_migrate_vram_to_ram(prange, mm);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, mm);
}

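/*
 * svm_migrate_to_vram - migrate a range to the VRAM of GPU best_loc
 *
 * If the range currently lives in system memory it is migrated directly; if
 * it already lives in VRAM it is bounced through system memory by
 * svm_migrate_vram_to_vram() above.
 */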
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    struct mm_struct *mm)
{
	if (!prange->actual_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, mm);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, mm);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application of a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long addr = vmf->address;
	struct vm_area_struct *vma;
	enum svm_work_list_ops op;
	struct svm_range *parent;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	vma = vmf->vma;
	mm = vma->vm_mm;

	p = kfd_lookup_process_by_mm(vma->vm_mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		kfd_unref_process(p);
		return 0;
	}
	addr >>= PAGE_SHIFT;
	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, &parent);
	if (!prange) {
		pr_debug("cannot find svm range at 0x%lx\n", addr);
		r = -EFAULT;
		goto out;
	}

	mutex_lock(&parent->migrate_mutex);
	if (prange != parent)
		mutex_lock_nested(&prange->migrate_mutex, 1);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	svm_range_lock(parent);
	if (prange != parent)
		mutex_lock_nested(&prange->lock, 1);
	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
	if (prange != parent)
		mutex_unlock(&prange->lock);
	svm_range_unlock(parent);
	if (r) {
		pr_debug("failed %d to split range by granularity\n", r);
		goto out_unlock_prange;
	}

	r = svm_migrate_vram_to_ram(prange, mm);
	if (r)
		pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
			 prange, prange->start, prange->last);

	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
	if (p->xnack_enabled && parent == prange)
		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
	else
		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
	svm_range_add_list_work(&p->svms, parent, mm, op);
	schedule_deferred_list_work(&p->svms);

out_unlock_prange:
	if (prange != parent)
		mutex_unlock(&prange->migrate_mutex);
	mutex_unlock(&parent->migrate_mutex);
out:
	mutex_unlock(&p->svms.lock);
	kfd_unref_process(p);

	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);

	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free		= svm_migrate_page_free,
	.migrate_to_ram		= svm_migrate_to_ram,
};

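/*
 * Rough cost of the backing struct pages, assuming 4 KiB pages and a typical
 * 64-byte struct page: registering 16 GiB of VRAM consumes
 * 16 GiB / 4 KiB * 64 B = 256 MiB of system memory, which is the amount
 * printed and reserved via SVM_HMM_PAGE_STRUCT_SIZE() in svm_migrate_init().
 */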
/* Each VRAM page uses sizeof(struct page) of system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

int svm_migrate_init(struct amdgpu_device *adev)
{
	struct kfd_dev *kfddev = adev->kfd.dev;
	struct dev_pagemap *pgmap;
	struct resource *res = NULL;
	unsigned long size;
	void *r;

	/* Page migration works on Vega10 or newer */
	if (!KFD_IS_SOC15(kfddev))
		return -EINVAL;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	if (adev->gmc.xgmi.connected_to_cpu) {
		pgmap->range.start = adev->gmc.aper_base;
		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
		pgmap->type = MEMORY_DEVICE_COHERENT;
	} else {
		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
		if (IS_ERR(res))
			return -ENOMEM;
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
	}

	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = 0;
	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start,
						res->end - res->start + 1);
		/* Disable SVM support capability */
		pgmap->type = 0;
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}