// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_res_cursor.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt

static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
	return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}

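/*
 * svm_migrate_gart_map - map system page DMA addresses into GART window 0
 *
 * Builds the GART PTEs for @npages system pages (DMA addresses in @addr)
 * inside the copy job's IB, lets SDMA write them into the GART table and
 * returns the GART window address in @gart_addr. With KFD_IOCTL_SVM_FLAG_GPU_RO
 * set in @flags the mapping is created read-only.
 */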
static int
svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
		     dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;
	unsigned int num_dw, num_bytes;
	struct dma_fence *fence;
	uint64_t src_addr, dst_addr;
	uint64_t pte_flags;
	void *cpu_addr;
	int r;

	/* use gart window 0 */
	*gart_addr = adev->gmc.gart_start;

	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
	num_bytes = npages * 8;

	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     num_dw * 4 + num_bytes,
				     AMDGPU_IB_POOL_DELAYED,
				     &job);
	if (r)
		return r;

	src_addr = num_dw * 4;
	src_addr += job->ibs[0].gpu_addr;

	dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
				dst_addr, num_bytes, 0);

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);

	pte_flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
	pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED;
	if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
		pte_flags |= AMDGPU_PTE_WRITEABLE;
	pte_flags |= adev->gart.gart_pte_flags;

	cpu_addr = &job->ibs[0].ptr[num_dw];

	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
	fence = amdgpu_job_submit(job);
	dma_fence_put(fence);

	return r;
}

/**
 * svm_migrate_copy_memory_gart - sdma copy data between ram and vram
 *
 * @adev: amdgpu device the sdma ring is running on
 * @sys: system DMA pointer to be copied
 * @vram: vram destination DMA pointer
 * @npages: number of pages to copy
 * @direction: enum MIGRATION_COPY_DIR
 * @mfence: output, sdma fence to signal after sdma is done
 *
 * The ram address uses contiguous GART table entries mapped to the ram pages;
 * the vram address uses the direct mapping of the vram pages, which must be
 * npages contiguous pages.
 * GART updates and sdma copies use the same buffer-copy function ring. The
 * copy is split into multiple GTT_MAX_PAGES transfers and all sdma operations
 * are serialized; wait for the last sdma finish fence, which is returned, to
 * check that the memory copy is done.
 *
 * Context: Process context, takes and releases gtt_window_lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */

static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
			     uint64_t *vram, uint64_t npages,
			     enum MIGRATION_COPY_DIR direction,
			     struct dma_fence **mfence)
{
	const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	uint64_t gart_s, gart_d;
	struct dma_fence *next;
	uint64_t size;
	int r;

	mutex_lock(&adev->mman.gtt_window_lock);

	while (npages) {
		size = min(GTT_MAX_PAGES, npages);

		if (direction == FROM_VRAM_TO_RAM) {
			gart_s = svm_migrate_direct_mapping_addr(adev, *vram);
			r = svm_migrate_gart_map(ring, size, sys, &gart_d, 0);

		} else if (direction == FROM_RAM_TO_VRAM) {
			r = svm_migrate_gart_map(ring, size, sys, &gart_s,
						 KFD_IOCTL_SVM_FLAG_GPU_RO);
			gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
		}
		if (r) {
			dev_err(adev->dev, "fail %d create gart mapping\n", r);
			goto out_unlock;
		}

		r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
				       NULL, &next, false, true, 0);
		if (r) {
			dev_err(adev->dev, "fail %d to copy memory\n", r);
			goto out_unlock;
		}

		dma_fence_put(*mfence);
		*mfence = next;
		npages -= size;
		if (npages) {
			sys += size;
			vram += size;
		}
	}

out_unlock:
	mutex_unlock(&adev->mman.gtt_window_lock);

	return r;
}

/**
 * svm_migrate_copy_done - wait for the sdma memory copy to finish
 *
 * @adev: amdgpu device the sdma memory copy is executing on
 * @mfence: migrate fence
 *
 * Wait for the dma fence to be signaled; if the copy was split into multiple
 * sdma operations, this is the fence of the last sdma operation.
 *
 * Context: called after svm_migrate_copy_memory_gart
 *
 * Return:
 * 0 - success
 * otherwise - error code from dma fence signal
 */
static int
svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence)
{
	int r = 0;

	if (mfence) {
		r = dma_fence_wait(mfence, false);
		dma_fence_put(mfence);
		pr_debug("sdma copy memory fence done\n");
	}

	return r;
}

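/*
 * Helpers translating between a VRAM offset inside the device pgmap range and
 * the corresponding device page pfn, and taking/dropping the page and svm_bo
 * references that keep VRAM and system pages alive while a migration is in
 * flight.
 */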
unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr)
{
	return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT;
}

static void
svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
{
	struct page *page;

	page = pfn_to_page(pfn);
	svm_range_bo_ref(prange->svm_bo);
	page->zone_device_data = prange->svm_bo;
	zone_device_page_init(page);
}

static void
svm_migrate_put_vram_page(struct amdgpu_device *adev, unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(svm_migrate_addr_to_pfn(adev, addr));
	unlock_page(page);
	put_page(page);
}

static unsigned long
svm_migrate_addr(struct amdgpu_device *adev, struct page *page)
{
	unsigned long addr;

	addr = page_to_pfn(page) << PAGE_SHIFT;
	return (addr - adev->kfd.pgmap.range.start);
}

static struct page *
svm_migrate_get_sys_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (page)
		lock_page(page);

	return page;
}

static void svm_migrate_put_sys_page(unsigned long addr)
{
	struct page *page;

	page = pfn_to_page(addr >> PAGE_SHIFT);
	unlock_page(page);
	put_page(page);
}

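/*
 * svm_migrate_copy_to_vram - copy collected system pages into prange's VRAM
 *
 * For every migrating pfn in migrate->src, reserves a destination VRAM page
 * from the ttm resource cursor, DMA-maps the source system page and batches
 * runs of contiguous pages into svm_migrate_copy_memory_gart() RAM-to-VRAM
 * copies. Returns the number of VRAM pages set up for migration or a negative
 * error code; on error the VRAM pages already taken are released again.
 */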
static long
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
			 struct migrate_vma *migrate, struct dma_fence **mfence,
			 dma_addr_t *scratch, uint64_t ttm_res_offset)
{
	uint64_t npages = migrate->npages;
	struct amdgpu_device *adev = node->adev;
	struct device *dev = adev->dev;
	struct amdgpu_res_cursor cursor;
	long mpages;
	dma_addr_t *src;
	uint64_t *dst;
	uint64_t i, j;
	int r;

	pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
		 prange->last, ttm_res_offset);

	src = scratch;
	dst = (uint64_t *)(scratch + npages);

	amdgpu_res_first(prange->ttm_res, ttm_res_offset,
			 npages << PAGE_SHIFT, &cursor);
	mpages = 0;
	for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
		struct page *spage;

		if (migrate->src[i] & MIGRATE_PFN_MIGRATE) {
			dst[i] = cursor.start + (j << PAGE_SHIFT);
			migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
			svm_migrate_get_vram_page(prange, migrate->dst[i]);
			migrate->dst[i] = migrate_pfn(migrate->dst[i]);
			mpages++;
		}
		spage = migrate_pfn_to_page(migrate->src[i]);
		if (spage && !is_zone_device_page(spage)) {
			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
					      DMA_BIDIRECTIONAL);
			r = dma_mapping_error(dev, src[i]);
			if (r) {
				dev_err(dev, "%s: fail %d dma_map_page\n",
					__func__, r);
				goto out_free_vram_pages;
			}
		} else {
			if (j) {
				r = svm_migrate_copy_memory_gart(
						adev, src + i - j,
						dst + i - j, j,
						FROM_RAM_TO_VRAM,
						mfence);
				if (r)
					goto out_free_vram_pages;
				amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT);
				j = 0;
			} else {
				amdgpu_res_next(&cursor, PAGE_SIZE);
			}
			continue;
		}

		pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
				     src[i] >> PAGE_SHIFT, page_to_pfn(spage));

		if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
			r = svm_migrate_copy_memory_gart(adev, src + i - j,
							 dst + i - j, j + 1,
							 FROM_RAM_TO_VRAM,
							 mfence);
			if (r)
				goto out_free_vram_pages;
			amdgpu_res_next(&cursor, (j + 1) * PAGE_SIZE);
			j = 0;
		} else {
			j++;
		}
	}

	r = svm_migrate_copy_memory_gart(adev, src + i - j, dst + i - j, j,
					 FROM_RAM_TO_VRAM, mfence);

out_free_vram_pages:
	if (r) {
		pr_debug("failed %d to copy memory to vram\n", r);
		while (i-- && mpages) {
			if (!dst[i])
				continue;
			svm_migrate_put_vram_page(adev, dst[i]);
			migrate->dst[i] = 0;
			mpages--;
		}
		mpages = r;
	}

#ifdef DEBUG_FORCE_MIXED_DOMAINS
	for (i = 0, j = 0; i < npages; i += 4, j++) {
		if (j & 1)
			continue;
		svm_migrate_put_vram_page(adev, dst[i]);
		migrate->dst[i] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 1]);
		migrate->dst[i + 1] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 2]);
		migrate->dst[i + 2] = 0;
		svm_migrate_put_vram_page(adev, dst[i + 3]);
		migrate->dst[i + 3] = 0;
	}
#endif

	return mpages;
}

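/*
 * svm_migrate_vma_to_vram - migrate the part of prange covered by one vma
 * from system memory to VRAM on @node
 *
 * Sets up migrate_vma for [start, end), copies the collected pages with
 * svm_migrate_copy_to_vram(), then finalizes the migration and reports it
 * through SMI events. Returns the number of pages migrated or a negative
 * error code.
 */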
static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
			struct vm_area_struct *vma, uint64_t start,
			uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	struct amdgpu_device *adev = node->adev;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	unsigned long cpages = 0;
	long mpages = 0;
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.flags = MIGRATE_VMA_SELECT_SYSTEM;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(node, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      0, node->id, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages collected\n", cpages);

	mpages = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
	migrate_vma_pages(&migrate);

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	if (mpages >= 0)
		pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
			 mpages, cpages, migrate.npages);
	else
		r = mpages;

	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
	kfd_smi_event_migration_end(node, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    0, node->id, trigger, r);
out:
	if (!r && mpages > 0) {
		pdd = svm_range_get_pdd_by_node(prange, node);
		if (pdd)
			WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
	}

	return r ? r : mpages;
}

/**
 * svm_migrate_ram_to_vram - migrate svm range from system to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @start_mgr: start page to migrate
 * @last_mgr: last page to migrate
 * @mm: the process mm structure
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
			unsigned long start_mgr, unsigned long last_mgr,
			struct mm_struct *mm, uint32_t trigger)
{
	unsigned long addr, start, end;
	struct vm_area_struct *vma;
	uint64_t ttm_res_offset;
	struct kfd_node *node;
	unsigned long mpages = 0;
	long r = 0;

	if (start_mgr < prange->start || last_mgr > prange->last) {
		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
			 start_mgr, last_mgr, prange->start, prange->last);
		return -EFAULT;
	}

	node = svm_range_get_node_by_id(prange, best_loc);
	if (!node) {
		pr_debug("failed to get kfd node by id 0x%x\n", best_loc);
		return -ENODEV;
	}

	pr_debug("svms 0x%p [0x%lx 0x%lx] in [0x%lx 0x%lx] to gpu 0x%x\n",
		 prange->svms, start_mgr, last_mgr, prange->start, prange->last,
		 best_loc);

	start = start_mgr << PAGE_SHIFT;
	end = (last_mgr + 1) << PAGE_SHIFT;

	r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
					    prange->npages * PAGE_SIZE,
					    KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
					    node->xcp ? node->xcp->id : 0);
	if (r) {
		dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
		return -ENOSPC;
	}

	r = svm_range_vram_node_new(node, prange, true);
	if (r) {
		dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
		goto out;
	}
	ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma)
			break;

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_vram(node, prange, vma, addr, next, trigger, ttm_res_offset);
		if (r < 0) {
			pr_debug("failed %ld to migrate\n", r);
			break;
		} else {
			mpages += r;
		}
		ttm_res_offset += next - addr;
		addr = next;
	}

	if (mpages) {
		prange->actual_loc = best_loc;
		prange->vram_pages += mpages;
	} else if (!prange->actual_loc) {
		/* if no pages migrated and all pages of prange are in
		 * sys ram, drop the svm_bo obtained from svm_range_vram_node_new
		 */
		svm_range_vram_node_free(prange);
	}

out:
	amdgpu_amdkfd_unreserve_mem_limit(node->adev,
					  prange->npages * PAGE_SIZE,
					  KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
					  node->xcp ? node->xcp->id : 0);
	return r < 0 ? r : 0;
}

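/*
 * svm_migrate_page_free - dev_pagemap page_free callback for VRAM pages
 *
 * Drops the svm_bo reference taken in svm_migrate_get_vram_page(); the unref
 * is deferred to a worker via svm_range_bo_unref_async().
 */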
static void svm_migrate_page_free(struct page *page)
{
	struct svm_range_bo *svm_bo = page->zone_device_data;

	if (svm_bo) {
		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
		svm_range_bo_unref_async(svm_bo);
	}
}

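/*
 * svm_migrate_copy_to_ram - copy prange's VRAM pages back to system memory
 *
 * For every device page in migrate->src, allocates and DMA-maps a system
 * destination page, batching runs of contiguous VRAM addresses into
 * svm_migrate_copy_memory_gart() VRAM-to-RAM copies. Returns the number of
 * pages set up for migration or a negative error code; on error the system
 * pages already allocated are released again.
 */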
static long
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
			struct migrate_vma *migrate, struct dma_fence **mfence,
			dma_addr_t *scratch, uint64_t npages)
{
	struct device *dev = adev->dev;
	uint64_t *src;
	dma_addr_t *dst;
	struct page *dpage;
	long mpages;
	uint64_t i = 0, j;
	uint64_t addr;
	int r = 0;

	pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
		 prange->last);

	addr = migrate->start;

	src = (uint64_t *)(scratch + npages);
	dst = scratch;

	mpages = 0;
	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
		struct page *spage;

		spage = migrate_pfn_to_page(migrate->src[i]);
		if (!spage || !is_zone_device_page(spage)) {
			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			if (j) {
				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
								 src + i - j, j,
								 FROM_VRAM_TO_RAM,
								 mfence);
				if (r)
					goto out_oom;
				j = 0;
			}
			continue;
		}
		src[i] = svm_migrate_addr(adev, spage);
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			r = svm_migrate_copy_memory_gart(adev, dst + i - j,
							 src + i - j, j,
							 FROM_VRAM_TO_RAM,
							 mfence);
			if (r)
				goto out_oom;
			j = 0;
		}

		dpage = svm_migrate_get_sys_page(migrate->vma, addr);
		if (!dpage) {
			pr_debug("failed get page svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);
			r = -ENOMEM;
			goto out_oom;
		}

		dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		r = dma_mapping_error(dev, dst[i]);
		if (r) {
			dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
			goto out_oom;
		}

		pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
				     dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));

		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
		mpages++;
		j++;
	}

	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
					 FROM_VRAM_TO_RAM, mfence);

out_oom:
	if (r) {
		pr_debug("failed %d copy to ram\n", r);
		while (i-- && mpages) {
			if (!migrate->dst[i])
				continue;
			svm_migrate_put_sys_page(dst[i]);
			migrate->dst[i] = 0;
			mpages--;
		}
		mpages = r;
	}

	return mpages;
}

/**
 * svm_migrate_vma_to_ram - migrate range inside one vma from device to system
 *
 * @node: kfd node device to migrate from
 * @prange: svm range structure
 * @vma: vm_area_struct that range [start, end] belongs to
 * @start: range start virtual address
 * @end: range end virtual address
 * @trigger: reason of migration
 * @fault_page: the faulting page from vmf->page when called from
 *              svm_migrate_to_ram(), the CPU page fault callback
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * negative values - indicate error
 * positive values or zero - number of pages migrated
 */
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
		       struct vm_area_struct *vma, uint64_t start, uint64_t end,
		       uint32_t trigger, struct page *fault_page)
{
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint64_t npages = (end - start) >> PAGE_SHIFT;
	unsigned long cpages = 0;
	long mpages = 0;
	struct amdgpu_device *adev = node->adev;
	struct kfd_process_device *pdd;
	struct dma_fence *mfence = NULL;
	struct migrate_vma migrate = { 0 };
	dma_addr_t *scratch;
	void *buf;
	int r = -ENOMEM;

	memset(&migrate, 0, sizeof(migrate));
	migrate.vma = vma;
	migrate.start = start;
	migrate.end = end;
	migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
	if (adev->gmc.xgmi.connected_to_cpu)
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_COHERENT;
	else
		migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	buf = kvcalloc(npages,
		       2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
		       GFP_KERNEL);
	if (!buf)
		goto out;

	migrate.src = buf;
	migrate.dst = migrate.src + npages;
	migrate.fault_page = fault_page;
	scratch = (dma_addr_t *)(migrate.dst + npages);

	kfd_smi_event_migration_start(node, p->lead_thread->pid,
				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				      node->id, 0, prange->prefetch_loc,
				      prange->preferred_loc, trigger);

	r = migrate_vma_setup(&migrate);
	if (r) {
		dev_err(adev->dev, "%s: vma setup fail %d range [0x%lx 0x%lx]\n",
			__func__, r, prange->start, prange->last);
		goto out_free;
	}

	cpages = migrate.cpages;
	if (!cpages) {
		pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
			 prange->start, prange->last);
		goto out_free;
	}
	if (cpages != npages)
		pr_debug("partial migration, 0x%lx/0x%llx pages collected\n",
			 cpages, npages);
	else
		pr_debug("0x%lx pages collected\n", cpages);

	mpages = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
					 scratch, npages);
	migrate_vma_pages(&migrate);

	if (mpages >= 0)
		pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
			 mpages, cpages, migrate.npages);
	else
		r = mpages;

	svm_migrate_copy_done(adev, mfence);
	migrate_vma_finalize(&migrate);

	svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);

out_free:
	kvfree(buf);
	kfd_smi_event_migration_end(node, p->lead_thread->pid,
				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
				    node->id, 0, trigger, r);
out:
	if (!r && mpages > 0) {
		pdd = svm_range_get_pdd_by_node(prange, node);
		if (pdd)
			WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
	}

	return r ? r : mpages;
}

/**
 * svm_migrate_vram_to_ram - migrate svm range from device to system
 * @prange: range structure
 * @mm: process mm, use current->mm if NULL
 * @start_mgr: start page that needs to be migrated to sys ram
 * @last_mgr: last page that needs to be migrated to sys ram
 * @trigger: reason of migration
 * @fault_page: the faulting page from vmf->page when called from
 *              svm_migrate_to_ram(), the CPU page fault callback
 *
 * Context: Process context, caller holds mmap read lock, prange->migrate_mutex
 *
 * Return:
 * 0 - OK, otherwise error code
 */
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
			    unsigned long start_mgr, unsigned long last_mgr,
			    uint32_t trigger, struct page *fault_page)
{
	struct kfd_node *node;
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long start;
	unsigned long end;
	unsigned long mpages = 0;
	long r = 0;

	/* this prange has no vram pages to migrate to sys ram */
	if (!prange->actual_loc) {
		pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
			 prange->start, prange->last);
		return 0;
	}

	if (start_mgr < prange->start || last_mgr > prange->last) {
		pr_debug("range [0x%lx 0x%lx] out prange [0x%lx 0x%lx]\n",
			 start_mgr, last_mgr, prange->start, prange->last);
		return -EFAULT;
	}

	node = svm_range_get_node_by_id(prange, prange->actual_loc);
	if (!node) {
		pr_debug("failed to get kfd node by id 0x%x\n", prange->actual_loc);
		return -ENODEV;
	}
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] from gpu 0x%x to ram\n",
		 prange->svms, prange, start_mgr, last_mgr,
		 prange->actual_loc);

	start = start_mgr << PAGE_SHIFT;
	end = (last_mgr + 1) << PAGE_SHIFT;

	for (addr = start; addr < end;) {
		unsigned long next;

		vma = vma_lookup(mm, addr);
		if (!vma) {
			pr_debug("failed to find vma for prange %p\n", prange);
			r = -EFAULT;
			break;
		}

		next = min(vma->vm_end, end);
		r = svm_migrate_vma_to_ram(node, prange, vma, addr, next, trigger,
					   fault_page);
		if (r < 0) {
			pr_debug("failed %ld to migrate prange %p\n", r, prange);
			break;
		} else {
			mpages += r;
		}
		addr = next;
	}

	if (r >= 0) {
		WARN_ONCE(prange->vram_pages < mpages,
			  "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
			  prange->vram_pages, mpages);
		prange->vram_pages -= mpages;

		/* prange has no vram pages left, set its actual_loc to system
		 * and drop its svm_bo ref
		 */
		if (prange->vram_pages == 0 && prange->ttm_res) {
			prange->actual_loc = 0;
			svm_range_vram_node_free(prange);
		}
	}

	return r < 0 ? r : 0;
}

/**
 * svm_migrate_vram_to_vram - migrate svm range from device to device
 * @prange: range structure
 * @best_loc: the device to migrate to
 * @start: start page to migrate to gpu node best_loc
 * @last: last page to migrate to gpu node best_loc
 * @mm: process mm, use current->mm if NULL
 * @trigger: reason of migration
 *
 * Context: Process context, caller holds mmap read lock, svms lock, prange lock
 *
 * Migrate all vram pages in prange to sys ram, then migrate the
 * [start, last] pages from sys ram to gpu node best_loc.
 *
 * Return:
 * 0 - OK, otherwise error code
 */
static int
svm_migrate_vram_to_vram(struct svm_range *prange, uint32_t best_loc,
			 unsigned long start, unsigned long last,
			 struct mm_struct *mm, uint32_t trigger)
{
	int r, retries = 3;

	/*
	 * TODO: for both devices with PCIe large bar or on same xgmi hive, skip
	 * system memory as migration bridge
	 */

	pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc);

	do {
		r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
					    trigger, NULL);
		if (r)
			return r;
	} while (prange->actual_loc && --retries);

	if (prange->actual_loc)
		return -EDEADLK;

	return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
}

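/*
 * svm_migrate_to_vram - migrate [start, last] of prange to gpu node best_loc
 *
 * If the range has no VRAM pages yet, or already resides on best_loc, the
 * system pages are migrated directly; otherwise the migration bridges through
 * system memory with svm_migrate_vram_to_vram().
 */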
int
svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
		    unsigned long start, unsigned long last,
		    struct mm_struct *mm, uint32_t trigger)
{
	if (!prange->actual_loc || prange->actual_loc == best_loc)
		return svm_migrate_ram_to_vram(prange, best_loc, start, last,
					       mm, trigger);
	else
		return svm_migrate_vram_to_vram(prange, best_loc, start, last,
						mm, trigger);
}

/**
 * svm_migrate_to_ram - CPU page fault handler
 * @vmf: CPU vm fault vma, address
 *
 * Context: vm fault handler, caller holds the mmap read lock
 *
 * Return:
 * 0 - OK
 * VM_FAULT_SIGBUS - notify the application of a SIGBUS page fault
 */
static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long start, last, size;
	unsigned long addr = vmf->address;
	struct svm_range_bo *svm_bo;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

	svm_bo = vmf->page->zone_device_data;
	if (!svm_bo) {
		pr_debug("failed get device page at addr 0x%lx\n", addr);
		return VM_FAULT_SIGBUS;
	}
	if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
		pr_debug("addr 0x%lx of process mm is destroyed\n", addr);
		return VM_FAULT_SIGBUS;
	}

	mm = svm_bo->eviction_fence->mm;
	if (mm != vmf->vma->vm_mm)
		pr_debug("addr 0x%lx is COW mapping in child process\n", addr);

	p = kfd_lookup_process_by_mm(mm);
	if (!p) {
		pr_debug("failed find process at fault address 0x%lx\n", addr);
		r = VM_FAULT_SIGBUS;
		goto out_mmput;
	}
	if (READ_ONCE(p->svms.faulting_task) == current) {
		pr_debug("skipping ram migration\n");
		r = 0;
		goto out_unref_process;
	}

	pr_debug("CPU page fault svms 0x%p address 0x%lx\n", &p->svms, addr);
	addr >>= PAGE_SHIFT;

	mutex_lock(&p->svms.lock);

	prange = svm_range_from_addr(&p->svms, addr, NULL);
	if (!prange) {
		pr_debug("failed get range svms 0x%p addr 0x%lx\n", &p->svms, addr);
		r = -EFAULT;
		goto out_unlock_svms;
	}

	mutex_lock(&prange->migrate_mutex);

	if (!prange->actual_loc)
		goto out_unlock_prange;

	/* Align migration range start and size to granularity size */
	size = 1UL << prange->granularity;
	start = max(ALIGN_DOWN(addr, size), prange->start);
	last = min(ALIGN(addr + 1, size) - 1, prange->last);

	r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, start, last,
				    KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, vmf->page);
	if (r)
		pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n",
			 r, prange->svms, prange, start, last);

out_unlock_prange:
	mutex_unlock(&prange->migrate_mutex);
out_unlock_svms:
	mutex_unlock(&p->svms.lock);
out_unref_process:
	pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr);
	kfd_unref_process(p);
out_mmput:
	mmput(mm);
	return r ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
	.page_free = svm_migrate_page_free,
	.migrate_to_ram = svm_migrate_to_ram,
};

/* Each VRAM page uses sizeof(struct page) on system memory */
#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))

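/*
 * kgd2kfd_init_zone_device - register the GPU VRAM as ZONE_DEVICE memory
 *
 * Sets up a dev_pagemap covering all of VRAM so SVM can migrate pages between
 * system memory and the device: MEMORY_DEVICE_COHERENT when the GPU is XGMI
 * connected to the CPU, MEMORY_DEVICE_PRIVATE otherwise. On failure the SVM
 * capability is disabled by clearing pgmap->type.
 */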
int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
	struct amdgpu_kfd_dev *kfddev = &adev->kfd;
	struct dev_pagemap *pgmap;
	struct resource *res = NULL;
	unsigned long size;
	void *r;

	/* Page migration works on gfx9 or newer */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
		return -EINVAL;

	if (adev->apu_prefer_gtt)
		return 0;

	pgmap = &kfddev->pgmap;
	memset(pgmap, 0, sizeof(*pgmap));

	/* TODO: register all vram to HMM for now.
	 * should remove reserved size
	 */
	size = ALIGN(adev->gmc.real_vram_size, 2ULL << 20);
	if (adev->gmc.xgmi.connected_to_cpu) {
		pgmap->range.start = adev->gmc.aper_base;
		pgmap->range.end = adev->gmc.aper_base + adev->gmc.aper_size - 1;
		pgmap->type = MEMORY_DEVICE_COHERENT;
	} else {
		res = devm_request_free_mem_region(adev->dev, &iomem_resource, size);
		if (IS_ERR(res))
			return PTR_ERR(res);
		pgmap->range.start = res->start;
		pgmap->range.end = res->end;
		pgmap->type = MEMORY_DEVICE_PRIVATE;
	}

	pgmap->nr_range = 1;
	pgmap->ops = &svm_migrate_pgmap_ops;
	pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
	pgmap->flags = 0;
	/* Device manager releases device-specific resources, memory region and
	 * pgmap when driver disconnects from device.
	 */
	r = devm_memremap_pages(adev->dev, pgmap);
	if (IS_ERR(r)) {
		pr_err("failed to register HMM device memory\n");
		if (pgmap->type == MEMORY_DEVICE_PRIVATE)
			devm_release_mem_region(adev->dev, res->start, resource_size(res));
		/* Disable SVM support capability */
		pgmap->type = 0;
		return PTR_ERR(r);
	}

	pr_debug("reserve %ldMB system memory for VRAM pages struct\n",
		 SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20);

	amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size));

	pr_info("HMM registered %ldMB device memory\n", size >> 20);

	return 0;
}