// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020-2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched/task.h>
#include <linux/dynamic_debug.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>

#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
#include "amdgpu_hmm.h"
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "kfd_priv.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "kfd_smi_events.h"

#ifdef dev_fmt
#undef dev_fmt
#endif
#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__

#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1

/* Long enough to ensure no retry fault comes after svm range is restored and
 * page table is updated.
 */
#define AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING	(2UL * NSEC_PER_MSEC)
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG)
#define dynamic_svm_range_dump(svms) \
	_dynamic_func_call_no_desc("svm_range_dump", svm_range_debug_dump, svms)
#else
#define dynamic_svm_range_dump(svms) \
	do { if (0) svm_range_debug_dump(svms); } while (0)
#endif

/* A giant svm range is split into smaller ranges based on this limit. It is
 * the minimum over all dGPUs/APUs of 1/32 of the VRAM size, clamped between
 * 2MB and 1GB and aligned to a power of two.
 */
static uint64_t max_svm_range_pages;
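
/*
 * Illustrative arithmetic for the sizing rule above (an example, not taken
 * from the driver code): a system whose smallest GPU has 16GB of VRAM gives
 * 16GB / 32 = 512MB, which already lies inside the 2MB..1GB clamp and is a
 * power of two, so only ranges larger than 512MB would be split. A
 * hypothetical device with 32MB of VRAM would yield 1MB and be clamped up
 * to the 2MB minimum.
 */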

struct criu_svm_metadata {
	struct list_head list;
	struct kfd_criu_svm_range_priv_data data;
};

static void svm_range_evict_svm_bo_worker(struct work_struct *work);
static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
				    const struct mmu_notifier_range *range,
				    unsigned long cur_seq);
static int
svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
		   uint64_t *bo_s, uint64_t *bo_l);
static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
	.invalidate = svm_range_cpu_invalidate_pagetables,
};

/**
 * svm_range_unlink - unlink svm_range from lists and interval tree
 * @prange: svm range structure to be removed
 *
 * Remove the svm_range from the svms and svm_bo lists and the svms
 * interval tree.
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_unlink(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	if (prange->svm_bo) {
		spin_lock(&prange->svm_bo->list_lock);
		list_del(&prange->svm_bo_list);
		spin_unlock(&prange->svm_bo->list_lock);
	}

	list_del(&prange->list);
	if (prange->it_node.start != 0 && prange->it_node.last != 0)
		interval_tree_remove(&prange->it_node, &prange->svms->objects);
}

static void
svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	mmu_interval_notifier_insert_locked(&prange->notifier, mm,
					    prange->start << PAGE_SHIFT,
					    prange->npages << PAGE_SHIFT,
					    &svm_range_mn_ops);
}

/**
 * svm_range_add_to_svms - add svm range to svms
 * @prange: svm range structure to be added
 *
 * Add the svm range to svms interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_add_to_svms(struct svm_range *prange)
{
	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
		 prange, prange->start, prange->last);

	list_move_tail(&prange->list, &prange->svms->list);
	prange->it_node.start = prange->start;
	prange->it_node.last = prange->last;
	interval_tree_insert(&prange->it_node, &prange->svms->objects);
}

static void svm_range_remove_notifier(struct svm_range *prange)
{
	pr_debug("remove notifier svms 0x%p prange 0x%p [0x%lx 0x%lx]\n",
		 prange->svms, prange,
		 prange->notifier.interval_tree.start >> PAGE_SHIFT,
		 prange->notifier.interval_tree.last >> PAGE_SHIFT);

	if (prange->notifier.interval_tree.start != 0 &&
	    prange->notifier.interval_tree.last != 0)
		mmu_interval_notifier_remove(&prange->notifier);
}

static bool
svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr && !dma_mapping_error(dev, dma_addr) &&
	       !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
}

static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
		      unsigned long offset, unsigned long npages,
		      unsigned long *hmm_pfns, uint32_t gpuidx)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	dma_addr_t *addr = prange->dma_addr[gpuidx];
	struct device *dev = adev->dev;
	struct page *page;
	int i, r;

	if (!addr) {
		addr = kvcalloc(prange->npages, sizeof(*addr), GFP_KERNEL);
		if (!addr)
			return -ENOMEM;
		prange->dma_addr[gpuidx] = addr;
	}

	addr += offset;
	for (i = 0; i < npages; i++) {
		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
			dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);

		page = hmm_pfn_to_page(hmm_pfns[i]);
		if (is_zone_device_page(page)) {
			struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;

			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
				   bo_adev->vm_manager.vram_base_offset -
				   bo_adev->kfd.pgmap.range.start;
			addr[i] |= SVM_RANGE_VRAM_DOMAIN;
			pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
			continue;
		}
		addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		r = dma_mapping_error(dev, addr[i]);
		if (r) {
			dev_err(dev, "failed %d dma_map_page\n", r);
			return r;
		}
		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
	}

	return 0;
}

static int
svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
		  unsigned long offset, unsigned long npages,
		  unsigned long *hmm_pfns)
{
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		struct kfd_process_device *pdd;

		pr_debug("mapping to gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
					  hmm_pfns, gpuidx);
		if (r)
			break;
	}

	return r;
}

void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr,
			     unsigned long offset, unsigned long npages)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	int i;

	if (!dma_addr)
		return;

	for (i = offset; i < offset + npages; i++) {
		if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
			continue;
		pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
		dma_addr[i] = 0;
	}
}

void svm_range_dma_unmap(struct svm_range *prange)
{
	struct kfd_process_device *pdd;
	dma_addr_t *dma_addr;
	struct device *dev;
	struct kfd_process *p;
	uint32_t gpuidx;

	p = container_of(prange->svms, struct kfd_process, svms);

	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
		dma_addr = prange->dma_addr[gpuidx];
		if (!dma_addr)
			continue;

		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			continue;
		}
		dev = &pdd->dev->adev->pdev->dev;

		svm_range_dma_unmap_dev(dev, dma_addr, 0, prange->npages);
	}
}

static void svm_range_free(struct svm_range *prange, bool do_unmap)
{
	uint64_t size = (prange->last - prange->start + 1) << PAGE_SHIFT;
	struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
	uint32_t gpuidx;

	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange,
		 prange->start, prange->last);

	svm_range_vram_node_free(prange);
	if (do_unmap)
		svm_range_dma_unmap(prange);

	if (do_unmap && !p->xnack_enabled) {
		pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size);
		amdgpu_amdkfd_unreserve_mem_limit(NULL, size,
					KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0);
	}

	/* free dma_addr array for each gpu */
	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
		if (prange->dma_addr[gpuidx]) {
			kvfree(prange->dma_addr[gpuidx]);
			prange->dma_addr[gpuidx] = NULL;
		}
	}

	mutex_destroy(&prange->lock);
	mutex_destroy(&prange->migrate_mutex);
	kfree(prange);
}

static void
svm_range_set_default_attributes(int32_t *location, int32_t *prefetch_loc,
				 uint8_t *granularity, uint32_t *flags)
{
	*location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
	*granularity = 9;	/* 2^9 = 512 pages, i.e. 2MB with 4KB pages */
	*flags =
		KFD_IOCTL_SVM_FLAG_HOST_ACCESS | KFD_IOCTL_SVM_FLAG_COHERENT;
}

static struct
svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
			 uint64_t last, bool update_mem_usage)
{
	uint64_t size = last - start + 1;
	struct svm_range *prange;
	struct kfd_process *p;

	prange = kzalloc(sizeof(*prange), GFP_KERNEL);
	if (!prange)
		return NULL;

	p = container_of(svms, struct kfd_process, svms);
	if (!p->xnack_enabled && update_mem_usage &&
	    amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT,
				    KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) {
		pr_info("SVM mapping failed, exceeds resident system memory limit\n");
		kfree(prange);
		return NULL;
	}
	prange->npages = size;
	prange->svms = svms;
	prange->start = start;
	prange->last = last;
	INIT_LIST_HEAD(&prange->list);
	INIT_LIST_HEAD(&prange->update_list);
	INIT_LIST_HEAD(&prange->svm_bo_list);
	INIT_LIST_HEAD(&prange->deferred_list);
	INIT_LIST_HEAD(&prange->child_list);
	atomic_set(&prange->invalid, 0);
	prange->validate_timestamp = 0;
	prange->vram_pages = 0;
	mutex_init(&prange->migrate_mutex);
	mutex_init(&prange->lock);

	if (p->xnack_enabled)
		bitmap_copy(prange->bitmap_access, svms->bitmap_supported,
			    MAX_GPU_INSTANCE);

	svm_range_set_default_attributes(&prange->preferred_loc,
					 &prange->prefetch_loc,
					 &prange->granularity, &prange->flags);

	pr_debug("svms 0x%p [0x%llx 0x%llx]\n", svms, start, last);

	return prange;
}

static bool svm_bo_ref_unless_zero(struct svm_range_bo *svm_bo)
{
	if (!svm_bo || !kref_get_unless_zero(&svm_bo->kref))
		return false;

	return true;
}

static void svm_range_bo_release(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);

	spin_lock(&svm_bo->list_lock);
	while (!list_empty(&svm_bo->range_list)) {
		struct svm_range *prange =
				list_first_entry(&svm_bo->range_list,
						 struct svm_range, svm_bo_list);
		/* list_del_init tells a concurrent svm_range_vram_node_new when
		 * it's safe to reuse the svm_bo pointer and svm_bo_list head.
		 */
		list_del_init(&prange->svm_bo_list);
		spin_unlock(&svm_bo->list_lock);

		pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
			 prange->start, prange->last);
		mutex_lock(&prange->lock);
		prange->svm_bo = NULL;
		/* prange should not hold vram page now */
		WARN_ONCE(prange->actual_loc, "prange should not hold vram page");
		mutex_unlock(&prange->lock);

		spin_lock(&svm_bo->list_lock);
	}
	spin_unlock(&svm_bo->list_lock);
	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
		/* We're not in the eviction worker. Signal the fence. */
		dma_fence_signal(&svm_bo->eviction_fence->base);
	dma_fence_put(&svm_bo->eviction_fence->base);
	amdgpu_bo_unref(&svm_bo->bo);
	kfree(svm_bo);
}

static void svm_range_bo_wq_release(struct work_struct *work)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(work, struct svm_range_bo, release_work);
	svm_range_bo_release(&svm_bo->kref);
}

static void svm_range_bo_release_async(struct kref *kref)
{
	struct svm_range_bo *svm_bo;

	svm_bo = container_of(kref, struct svm_range_bo, kref);
	pr_debug("svm_bo 0x%p\n", svm_bo);
	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
	schedule_work(&svm_bo->release_work);
}

void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
{
	kref_put(&svm_bo->kref, svm_range_bo_release_async);
}

static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
{
	if (svm_bo)
		kref_put(&svm_bo->kref, svm_range_bo_release);
}

static bool
svm_range_validate_svm_bo(struct kfd_node *node, struct svm_range *prange)
{
	mutex_lock(&prange->lock);
	if (!prange->svm_bo) {
		mutex_unlock(&prange->lock);
		return false;
	}
	if (prange->ttm_res) {
		/* We still have a reference, all is well */
		mutex_unlock(&prange->lock);
		return true;
	}
	if (svm_bo_ref_unless_zero(prange->svm_bo)) {
		/*
		 * Migrate from GPU to GPU, remove range from source svm_bo->node
		 * range list, and return false to allocate svm_bo from destination
		 * node.
		 */
		if (prange->svm_bo->node != node) {
			mutex_unlock(&prange->lock);

			spin_lock(&prange->svm_bo->list_lock);
			list_del_init(&prange->svm_bo_list);
			spin_unlock(&prange->svm_bo->list_lock);

			svm_range_bo_unref(prange->svm_bo);
			return false;
		}
		if (READ_ONCE(prange->svm_bo->evicting)) {
			struct dma_fence *f;
			struct svm_range_bo *svm_bo;
			/* The BO is getting evicted,
			 * we need to get a new one
			 */
			mutex_unlock(&prange->lock);
			svm_bo = prange->svm_bo;
			f = dma_fence_get(&svm_bo->eviction_fence->base);
			svm_range_bo_unref(prange->svm_bo);
			/* wait for the fence to avoid long spin-loop
			 * at list_empty_careful
			 */
			dma_fence_wait(f, false);
			dma_fence_put(f);
		} else {
			/* The BO was still around and we got
			 * a new reference to it
			 */
			mutex_unlock(&prange->lock);
			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
				 prange->svms, prange->start, prange->last);

			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
			return true;
		}

	} else {
		mutex_unlock(&prange->lock);
	}

	/* We need a new svm_bo. Spin-loop to wait for concurrent
	 * svm_range_bo_release to finish removing this range from
	 * its range list and set prange->svm_bo to null. After this,
	 * it is safe to reuse the svm_bo pointer and svm_bo_list head.
	 */
	while (!list_empty_careful(&prange->svm_bo_list) || prange->svm_bo)
		cond_resched();

	return false;
}

static struct svm_range_bo *svm_range_bo_new(void)
{
	struct svm_range_bo *svm_bo;

	svm_bo = kzalloc(sizeof(*svm_bo), GFP_KERNEL);
	if (!svm_bo)
		return NULL;

	kref_init(&svm_bo->kref);
	INIT_LIST_HEAD(&svm_bo->range_list);
	spin_lock_init(&svm_bo->list_lock);

	return svm_bo;
}

int
svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
			bool clear)
{
	struct amdgpu_bo_param bp;
	struct svm_range_bo *svm_bo;
	struct amdgpu_bo_user *ubo;
	struct amdgpu_bo *bo;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);
	pr_debug("pasid: %x svms 0x%p [0x%lx 0x%lx]\n", p->pasid, prange->svms,
		 prange->start, prange->last);

	if (svm_range_validate_svm_bo(node, prange))
		return 0;

	svm_bo = svm_range_bo_new();
	if (!svm_bo) {
		pr_debug("failed to alloc svm bo\n");
		return -ENOMEM;
	}
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("failed to get mm\n");
		kfree(svm_bo);
		return -ESRCH;
	}
	svm_bo->node = node;
	svm_bo->eviction_fence =
		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
					   mm,
					   svm_bo);
	mmput(mm);
	INIT_WORK(&svm_bo->eviction_work, svm_range_evict_svm_bo_worker);
	svm_bo->evicting = 0;
	memset(&bp, 0, sizeof(bp));
	bp.size = prange->npages * PAGE_SIZE;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.flags |= clear ? AMDGPU_GEM_CREATE_VRAM_CLEARED : 0;
	bp.flags |= AMDGPU_GEM_CREATE_DISCARDABLE;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;
	if (node->xcp)
		bp.xcp_id_plus1 = node->xcp->id + 1;

	r = amdgpu_bo_create_user(node->adev, &bp, &ubo);
	if (r) {
		pr_debug("failed %d to create bo\n", r);
		goto create_bo_failed;
	}
	bo = &ubo->bo;

	pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n",
		 bo->tbo.resource->start << PAGE_SHIFT, bp.size,
		 bp.xcp_id_plus1 - 1);

	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		goto reserve_bo_failed;
	}

	if (clear) {
		r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
		if (r) {
			pr_debug("failed %d to sync bo\n", r);
			amdgpu_bo_unreserve(bo);
			goto reserve_bo_failed;
		}
	}

	r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
	if (r) {
		pr_debug("failed %d to reserve bo\n", r);
		amdgpu_bo_unreserve(bo);
		goto reserve_bo_failed;
	}
	amdgpu_bo_fence(bo, &svm_bo->eviction_fence->base, true);

	amdgpu_bo_unreserve(bo);

	svm_bo->bo = bo;
	prange->svm_bo = svm_bo;
	prange->ttm_res = bo->tbo.resource;
	prange->offset = 0;

	spin_lock(&svm_bo->list_lock);
	list_add(&prange->svm_bo_list, &svm_bo->range_list);
	spin_unlock(&svm_bo->list_lock);

	return 0;

reserve_bo_failed:
	amdgpu_bo_unref(&bo);
create_bo_failed:
	dma_fence_put(&svm_bo->eviction_fence->base);
	kfree(svm_bo);
	prange->ttm_res = NULL;

	return r;
}

void svm_range_vram_node_free(struct svm_range *prange)
{
	/* serialize prange->svm_bo unref */
	mutex_lock(&prange->lock);
	/* prange->svm_bo has not been unref */
	if (prange->ttm_res) {
		prange->ttm_res = NULL;
		mutex_unlock(&prange->lock);
		svm_range_bo_unref(prange->svm_bo);
	} else
		mutex_unlock(&prange->lock);
}

struct kfd_node *
svm_range_get_node_by_id(struct svm_range *prange, uint32_t gpu_id)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;

	p = container_of(prange->svms, struct kfd_process, svms);
	pdd = kfd_process_device_data_by_id(p, gpu_id);
	if (!pdd) {
		pr_debug("failed to get kfd process device by id 0x%x\n", gpu_id);
		return NULL;
	}

	return pdd->dev;
}

struct kfd_process_device *
svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node)
{
	struct kfd_process *p;

	p = container_of(prange->svms, struct kfd_process, svms);

	return kfd_get_process_device_data(node, p);
}

static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };

	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);

	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

static int
svm_range_check_attr(struct kfd_process *p,
		     uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;

	for (i = 0; i < nattr; i++) {
		uint32_t val = attrs[i].value;
		int gpuidx = MAX_GPU_INSTANCE;

		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM &&
			    val != KFD_IOCTL_SVM_LOCATION_UNDEFINED)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			if (val != KFD_IOCTL_SVM_LOCATION_SYSMEM)
				gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p, val);
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			break;
		default:
			pr_debug("unknown attr type 0x%x\n", attrs[i].type);
			return -EINVAL;
		}

		if (gpuidx < 0) {
			pr_debug("no GPU 0x%x found\n", val);
			return -EINVAL;
		} else if (gpuidx < MAX_GPU_INSTANCE &&
			   !test_bit(gpuidx, p->svms.bitmap_supported)) {
			pr_debug("GPU 0x%x not supported\n", val);
			return -EINVAL;
		}
	}

	return 0;
}

static void
svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
		      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
		      bool *update_mapping)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			prange->preferred_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			prange->prefetch_loc = attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			if (!p->xnack_enabled)
				*update_mapping = true;

			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				bitmap_set(prange->bitmap_access, gpuidx, 1);
				bitmap_clear(prange->bitmap_aip, gpuidx, 1);
			} else {
				bitmap_clear(prange->bitmap_access, gpuidx, 1);
				bitmap_set(prange->bitmap_aip, gpuidx, 1);
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			*update_mapping = true;
			prange->flags |= attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			*update_mapping = true;
			prange->flags &= ~attrs[i].value;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			prange->granularity = min_t(uint32_t, attrs[i].value, 0x3F);
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}
}

static bool
svm_range_is_same_attrs(struct kfd_process *p, struct svm_range *prange,
			uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
	uint32_t i;
	int gpuidx;

	for (i = 0; i < nattr; i++) {
		switch (attrs[i].type) {
		case KFD_IOCTL_SVM_ATTR_PREFERRED_LOC:
			if (prange->preferred_loc != attrs[i].value)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
			/* Prefetch should always trigger a migration even
			 * if the value of the attribute didn't change.
			 */
			return false;
		case KFD_IOCTL_SVM_ATTR_ACCESS:
		case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
		case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
			gpuidx = kfd_process_gpuidx_from_gpuid(p,
							       attrs[i].value);
			if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
				if (test_bit(gpuidx, prange->bitmap_access) ||
				    test_bit(gpuidx, prange->bitmap_aip))
					return false;
			} else if (attrs[i].type == KFD_IOCTL_SVM_ATTR_ACCESS) {
				if (!test_bit(gpuidx, prange->bitmap_access))
					return false;
			} else {
				if (!test_bit(gpuidx, prange->bitmap_aip))
					return false;
			}
			break;
		case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
			if ((prange->flags & attrs[i].value) != attrs[i].value)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
			if ((prange->flags & attrs[i].value) != 0)
				return false;
			break;
		case KFD_IOCTL_SVM_ATTR_GRANULARITY:
			if (prange->granularity != attrs[i].value)
				return false;
			break;
		default:
			WARN_ONCE(1, "svm_range_check_attrs wasn't called?");
		}
	}

	return true;
}

/**
 * svm_range_debug_dump - print all range information from svms
 * @svms: svm range list header
 *
 * debug output svm range start, end, prefetch location from svms
 * interval tree and link list
 *
 * Context: The caller must hold svms->lock
 */
static void svm_range_debug_dump(struct svm_range_list *svms)
{
	struct interval_tree_node *node;
	struct svm_range *prange;

	pr_debug("dump svms 0x%p list\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");

	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
	}

	pr_debug("dump svms 0x%p interval tree\n", svms);
	pr_debug("range\tstart\tpage\tend\t\tlocation\n");
	node = interval_tree_iter_first(&svms->objects, 0, ~0ULL);
	while (node) {
		prange = container_of(node, struct svm_range, it_node);
		pr_debug("0x%p 0x%lx\t0x%llx\t0x%llx\t0x%x\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->actual_loc);
		node = interval_tree_iter_next(node, 0, ~0ULL);
	}
}

static void *
svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
		     uint64_t offset, uint64_t *vram_pages)
{
	unsigned char *src = (unsigned char *)psrc + offset;
	unsigned char *dst;
	uint64_t i;

	dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
	if (!dst)
		return NULL;

	if (!vram_pages) {
		memcpy(dst, src, num_elements * size);
		return (void *)dst;
	}

	*vram_pages = 0;
	for (i = 0; i < num_elements; i++) {
		dma_addr_t *temp;
		temp = (dma_addr_t *)dst + i;
		*temp = *((dma_addr_t *)src + i);
		if (*temp & SVM_RANGE_VRAM_DOMAIN)
			(*vram_pages)++;
	}

	return (void *)dst;
}

static int
svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
{
	int i;

	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
		if (!src->dma_addr[i])
			continue;
		dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
					sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
		if (!dst->dma_addr[i])
			return -ENOMEM;
	}

	return 0;
}

static int
svm_range_split_array(void *ppnew, void *ppold, size_t size,
		      uint64_t old_start, uint64_t old_n,
		      uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
{
	unsigned char *new, *old, *pold;
	uint64_t d;

	if (!ppold)
		return 0;
	pold = *(unsigned char **)ppold;
	if (!pold)
		return 0;

	d = (new_start - old_start) * size;
	/* get dma addr array for new range and calculate its vram page number */
	new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
	if (!new)
		return -ENOMEM;
	d = (new_start == old_start) ? new_n * size : 0;
	old = svm_range_copy_array(pold, size, old_n, d, NULL);
	if (!old) {
		kvfree(new);
		return -ENOMEM;
	}
	kvfree(pold);
	*(void **)ppold = old;
	*(void **)ppnew = new;

	return 0;
}
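
/*
 * A hypothetical example of the offset math in svm_range_split_array() above
 * (dma_addr_t elements assumed, values chosen for illustration): splitting
 * prange [0x1000 0x1fff] so that the old range keeps [0x1000 0x17ff] creates
 * a new tail range [0x1800 0x1fff]. For the new array,
 * d = (0x1800 - 0x1000) * sizeof(dma_addr_t), so the copy starts at element
 * 0x800 of the old array. Because new_start != old_start, the surviving old
 * array is re-copied from offset 0. In the head-split case
 * (new_start == old_start), the new array copies from offset 0 and the old
 * array from new_n * size onward.
 */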

static int
svm_range_split_pages(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;
	int i, r;

	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
					  sizeof(*old->dma_addr[i]), old->start,
					  npages, new->start, new->npages,
					  old->actual_loc ? &new->vram_pages : NULL);
		if (r)
			return r;
	}
	if (old->actual_loc)
		old->vram_pages -= new->vram_pages;

	return 0;
}

static int
svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
		      uint64_t start, uint64_t last)
{
	uint64_t npages = last - start + 1;

	pr_debug("svms 0x%p new prange 0x%p start 0x%lx [0x%llx 0x%llx]\n",
		 new->svms, new, new->start, start, last);

	if (new->start == old->start) {
		new->offset = old->offset;
		old->offset += new->npages;
	} else {
		new->offset = old->offset + npages;
	}

	new->svm_bo = svm_range_bo_ref(old->svm_bo);
	new->ttm_res = old->ttm_res;

	spin_lock(&new->svm_bo->list_lock);
	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
	spin_unlock(&new->svm_bo->list_lock);

	return 0;
}

/**
 * svm_range_split_adjust - split range and adjust
 *
 * @new: new range
 * @old: the old range
 * @start: the old range adjust to start address in pages
 * @last: the old range adjust to last address in pages
 *
 * Copy system memory dma_addr or vram ttm_res in the old range to the new
 * range, from new_start up to size new->npages; the remaining old range is
 * from start to last.
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory
 */
static int
svm_range_split_adjust(struct svm_range *new, struct svm_range *old,
		       uint64_t start, uint64_t last)
{
	int r;

	pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n",
		 new->svms, new->start, old->start, old->last, start, last);

	if (new->start < old->start ||
	    new->last > old->last) {
		WARN_ONCE(1, "invalid new range start or last\n");
		return -EINVAL;
	}

	r = svm_range_split_pages(new, old, start, last);
	if (r)
		return r;

	if (old->actual_loc && old->ttm_res) {
		r = svm_range_split_nodes(new, old, start, last);
		if (r)
			return r;
	}

	old->npages = last - start + 1;
	old->start = start;
	old->last = last;
	new->flags = old->flags;
	new->preferred_loc = old->preferred_loc;
	new->prefetch_loc = old->prefetch_loc;
	new->actual_loc = old->actual_loc;
	new->granularity = old->granularity;
	new->mapped_to_gpu = old->mapped_to_gpu;
	bitmap_copy(new->bitmap_access, old->bitmap_access, MAX_GPU_INSTANCE);
	bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE);

	return 0;
}

/**
 * svm_range_split - split a range in 2 ranges
 *
 * @prange: the svm range to split
 * @start: the remaining range start address in pages
 * @last: the remaining range last address in pages
 * @new: the result new range generated
 *
 * Two cases only:
 * case 1: if start == prange->start
 *         prange ==> prange[start, last]
 *         new range [last + 1, prange->last]
 *
 * case 2: if last == prange->last
 *         prange ==> prange[start, last]
 *         new range [prange->start, start - 1]
 *
 * Return:
 * 0 - OK, -ENOMEM - out of memory, -EINVAL - invalid start, last
 */
static int
svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
		struct svm_range **new)
{
	uint64_t old_start = prange->start;
	uint64_t old_last = prange->last;
	struct svm_range_list *svms;
	int r = 0;

	pr_debug("svms 0x%p [0x%llx 0x%llx] to [0x%llx 0x%llx]\n", prange->svms,
		 old_start, old_last, start, last);

	if (old_start != start && old_last != last)
		return -EINVAL;
	if (start < old_start || last > old_last)
		return -EINVAL;

	svms = prange->svms;
	if (old_start == start)
		*new = svm_range_new(svms, last + 1, old_last, false);
	else
		*new = svm_range_new(svms, old_start, start - 1, false);
	if (!*new)
		return -ENOMEM;

	r = svm_range_split_adjust(*new, prange, start, last);
	if (r) {
		pr_debug("failed %d split [0x%llx 0x%llx] to [0x%llx 0x%llx]\n",
			 r, old_start, old_last, start, last);
		svm_range_free(*new, false);
		*new = NULL;
	}

	return r;
}

static int
svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
		     struct list_head *insert_list, struct list_head *remap_list)
{
	struct svm_range *tail = NULL;
	int r = svm_range_split(prange, prange->start, new_last, &tail);

	if (!r) {
		list_add(&tail->list, insert_list);
		if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
			list_add(&tail->update_list, remap_list);
	}
	return r;
}

static int
svm_range_split_head(struct svm_range *prange, uint64_t new_start,
		     struct list_head *insert_list, struct list_head *remap_list)
{
	struct svm_range *head = NULL;
	int r = svm_range_split(prange, new_start, prange->last, &head);

	if (!r) {
		list_add(&head->list, insert_list);
		if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
			list_add(&head->update_list, remap_list);
	}
	return r;
}

static void
svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
		    struct svm_range *pchild, enum svm_work_list_ops op)
{
	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n",
		 pchild, pchild->start, pchild->last, prange, op);

	pchild->work_item.mm = mm;
	pchild->work_item.op = op;
	list_add_tail(&pchild->child_list, &prange->child_list);
}

static bool
svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
{
	return (node_a->adev == node_b->adev ||
		amdgpu_xgmi_same_hive(node_a->adev, node_b->adev));
}

static uint64_t
svm_range_get_pte_flags(struct kfd_node *node,
			struct svm_range *prange, int domain)
{
	struct kfd_node *bo_node;
	uint32_t flags = prange->flags;
	uint32_t mapping_flags = 0;
	uint64_t pte_flags;
	bool snoop = (domain != SVM_RANGE_VRAM_DOMAIN);
	bool coherent = flags & (KFD_IOCTL_SVM_FLAG_COHERENT | KFD_IOCTL_SVM_FLAG_EXT_COHERENT);
	bool ext_coherent = flags & KFD_IOCTL_SVM_FLAG_EXT_COHERENT;
	bool uncached = false; /*flags & KFD_IOCTL_SVM_FLAG_UNCACHED;*/
	unsigned int mtype_local;

	if (domain == SVM_RANGE_VRAM_DOMAIN)
		bo_node = prange->svm_bo->node;

	switch (amdgpu_ip_version(node->adev, GC_HWIP, 0)) {
	case IP_VERSION(9, 4, 1):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_node == node) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (svm_nodes_in_same_hive(node, bo_node))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case IP_VERSION(9, 4, 2):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_node == node) {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
				if (node->adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				mapping_flags |= coherent ?
					AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
				if (svm_nodes_in_same_hive(node, bo_node))
					snoop = true;
			}
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	case IP_VERSION(9, 4, 3):
	case IP_VERSION(9, 4, 4):
		if (ext_coherent)
			mtype_local = node->adev->rev_id ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_UC;
		else
			mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
				amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
		snoop = true;
		if (uncached) {
			mapping_flags |= AMDGPU_VM_MTYPE_UC;
		} else if (domain == SVM_RANGE_VRAM_DOMAIN) {
			/* local HBM region close to partition */
			if (bo_node->adev == node->adev &&
			    (!bo_node->xcp || !node->xcp || bo_node->xcp->mem_id == node->xcp->mem_id))
				mapping_flags |= mtype_local;
			/* local HBM region far from partition or remote XGMI GPU
			 * with regular system scope coherence
			 */
			else if (svm_nodes_in_same_hive(bo_node, node) && !ext_coherent)
				mapping_flags |= AMDGPU_VM_MTYPE_NC;
			/* PCIe P2P or extended system scope coherence */
			else
				mapping_flags |= AMDGPU_VM_MTYPE_UC;
		/* system memory accessed by the APU */
		} else if (node->adev->flags & AMD_IS_APU) {
			/* On NUMA systems, locality is determined per-page
			 * in amdgpu_gmc_override_vm_pte_flags
			 */
			if (num_possible_nodes() <= 1)
				mapping_flags |= mtype_local;
			else
				mapping_flags |= ext_coherent ? AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		/* system memory accessed by the dGPU */
		} else {
			mapping_flags |= AMDGPU_VM_MTYPE_UC;
		}
		break;
	case IP_VERSION(12, 0, 0):
		if (domain == SVM_RANGE_VRAM_DOMAIN) {
			if (bo_node != node)
				mapping_flags |= AMDGPU_VM_MTYPE_NC;
		} else {
			mapping_flags |= coherent ?
				AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
		}
		break;
	default:
		mapping_flags |= coherent ?
			AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
	}

	mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;

	if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
		mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
	if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
		mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;

	pte_flags = AMDGPU_PTE_VALID;
	pte_flags |= (domain == SVM_RANGE_VRAM_DOMAIN) ? 0 : AMDGPU_PTE_SYSTEM;
	pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
	if (KFD_GC_VERSION(node) >= IP_VERSION(12, 0, 0))
		pte_flags |= AMDGPU_PTE_IS_PTE;

	pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
	return pte_flags;
}

static int
svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			 uint64_t start, uint64_t last,
			 struct dma_fence **fence)
{
	uint64_t init_pte_value = 0;

	pr_debug("[0x%llx 0x%llx]\n", start, last);

	return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
				      last, init_pte_value, 0, 0, NULL, NULL,
				      fence);
}

static int
svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
			  unsigned long last, uint32_t trigger)
{
	DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
	struct kfd_process_device *pdd;
	struct dma_fence *fence = NULL;
	struct kfd_process *p;
	uint32_t gpuidx;
	int r = 0;

	if (!prange->mapped_to_gpu) {
		pr_debug("prange 0x%p [0x%lx 0x%lx] not mapped to GPU\n",
			 prange, prange->start, prange->last);
		return 0;
	}

	if (prange->start == start && prange->last == last) {
		pr_debug("unmap svms 0x%p prange 0x%p\n", prange->svms, prange);
		prange->mapped_to_gpu = false;
	}

	bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
		  MAX_GPU_INSTANCE);
	p = container_of(prange->svms, struct kfd_process, svms);

	for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
		pr_debug("unmap from gpu idx 0x%x\n", gpuidx);
		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			return -EINVAL;
		}

		kfd_smi_event_unmap_from_gpu(pdd->dev, p->lead_thread->pid,
					     start, last, trigger);

		r = svm_range_unmap_from_gpu(pdd->dev->adev,
					     drm_priv_to_vm(pdd->drm_priv),
					     start, last, &fence);
		if (r)
			break;

		if (fence) {
			r = dma_fence_wait(fence, false);
			dma_fence_put(fence);
			fence = NULL;
			if (r)
				break;
		}
		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
	}

	return r;
}

static int
svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
		     unsigned long offset, unsigned long npages, bool readonly,
		     dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
		     struct dma_fence **fence, bool flush_tlb)
{
	struct amdgpu_device *adev = pdd->dev->adev;
	struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
	uint64_t pte_flags;
	unsigned long last_start;
	int last_domain;
	int r = 0;
	int64_t i, j;

	last_start = prange->start + offset;

	pr_debug("svms 0x%p [0x%lx 0x%lx] readonly %d\n", prange->svms,
		 last_start, last_start + npages - 1, readonly);

	for (i = offset; i < offset + npages; i++) {
		last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
		dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;

		/* Collect all pages in the same address range and memory domain
		 * that can be mapped with a single call to update mapping.
		 */
		if (i < offset + npages - 1 &&
		    last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
			continue;

		pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
			 last_start, prange->start + i, last_domain ? "GPU" : "CPU");
"GPU" : "CPU"); 1385 1386 pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain); 1387 if (readonly) 1388 pte_flags &= ~AMDGPU_PTE_WRITEABLE; 1389 1390 pr_debug("svms 0x%p map [0x%lx 0x%llx] vram %d PTE 0x%llx\n", 1391 prange->svms, last_start, prange->start + i, 1392 (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0, 1393 pte_flags); 1394 1395 /* For dGPU mode, we use same vm_manager to allocate VRAM for 1396 * different memory partition based on fpfn/lpfn, we should use 1397 * same vm_manager.vram_base_offset regardless memory partition. 1398 */ 1399 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true, 1400 NULL, last_start, prange->start + i, 1401 pte_flags, 1402 (last_start - prange->start) << PAGE_SHIFT, 1403 bo_adev ? bo_adev->vm_manager.vram_base_offset : 0, 1404 NULL, dma_addr, &vm->last_update); 1405 1406 for (j = last_start - prange->start; j <= i; j++) 1407 dma_addr[j] |= last_domain; 1408 1409 if (r) { 1410 pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start); 1411 goto out; 1412 } 1413 last_start = prange->start + i + 1; 1414 } 1415 1416 r = amdgpu_vm_update_pdes(adev, vm, false); 1417 if (r) { 1418 pr_debug("failed %d to update directories 0x%lx\n", r, 1419 prange->start); 1420 goto out; 1421 } 1422 1423 if (fence) 1424 *fence = dma_fence_get(vm->last_update); 1425 1426 out: 1427 return r; 1428 } 1429 1430 static int 1431 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, 1432 unsigned long npages, bool readonly, 1433 unsigned long *bitmap, bool wait, bool flush_tlb) 1434 { 1435 struct kfd_process_device *pdd; 1436 struct amdgpu_device *bo_adev = NULL; 1437 struct kfd_process *p; 1438 struct dma_fence *fence = NULL; 1439 uint32_t gpuidx; 1440 int r = 0; 1441 1442 if (prange->svm_bo && prange->ttm_res) 1443 bo_adev = prange->svm_bo->node->adev; 1444 1445 p = container_of(prange->svms, struct kfd_process, svms); 1446 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { 1447 pr_debug("mapping to gpu idx 0x%x\n", gpuidx); 1448 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 1449 if (!pdd) { 1450 pr_debug("failed to find device idx %d\n", gpuidx); 1451 return -EINVAL; 1452 } 1453 1454 pdd = kfd_bind_process_to_device(pdd->dev, p); 1455 if (IS_ERR(pdd)) 1456 return -EINVAL; 1457 1458 if (bo_adev && pdd->dev->adev != bo_adev && 1459 !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { 1460 pr_debug("cannot map to device idx %d\n", gpuidx); 1461 continue; 1462 } 1463 1464 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly, 1465 prange->dma_addr[gpuidx], 1466 bo_adev, wait ? &fence : NULL, 1467 flush_tlb); 1468 if (r) 1469 break; 1470 1471 if (fence) { 1472 r = dma_fence_wait(fence, false); 1473 dma_fence_put(fence); 1474 fence = NULL; 1475 if (r) { 1476 pr_debug("failed %d to dma fence wait\n", r); 1477 break; 1478 } 1479 } 1480 1481 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY); 1482 } 1483 1484 return r; 1485 } 1486 1487 struct svm_validate_context { 1488 struct kfd_process *process; 1489 struct svm_range *prange; 1490 bool intr; 1491 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); 1492 struct drm_exec exec; 1493 }; 1494 1495 static int svm_range_reserve_bos(struct svm_validate_context *ctx, bool intr) 1496 { 1497 struct kfd_process_device *pdd; 1498 struct amdgpu_vm *vm; 1499 uint32_t gpuidx; 1500 int r; 1501 1502 drm_exec_init(&ctx->exec, intr ? 
	drm_exec_until_all_locked(&ctx->exec) {
		for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
			pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
			if (!pdd) {
				pr_debug("failed to find device idx %d\n", gpuidx);
				r = -EINVAL;
				goto unreserve_out;
			}
			vm = drm_priv_to_vm(pdd->drm_priv);

			r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
			drm_exec_retry_on_contention(&ctx->exec);
			if (unlikely(r)) {
				pr_debug("failed %d to reserve bo\n", r);
				goto unreserve_out;
			}
		}
	}

	for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) {
		pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx);
		if (!pdd) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			r = -EINVAL;
			goto unreserve_out;
		}

		r = amdgpu_vm_validate(pdd->dev->adev,
				       drm_priv_to_vm(pdd->drm_priv), NULL,
				       svm_range_bo_validate, NULL);
		if (r) {
			pr_debug("failed %d validate pt bos\n", r);
			goto unreserve_out;
		}
	}

	return 0;

unreserve_out:
	drm_exec_fini(&ctx->exec);
	return r;
}

static void svm_range_unreserve_bos(struct svm_validate_context *ctx)
{
	drm_exec_fini(&ctx->exec);
}

static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
{
	struct kfd_process_device *pdd;

	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
	if (!pdd)
		return NULL;

	return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev);
}

/*
 * Validation+GPU mapping with concurrent invalidation (MMU notifiers)
 *
 * To prevent concurrent destruction or change of range attributes, the
 * svm_read_lock must be held. The caller must not hold the svm_write_lock
 * because that would block concurrent evictions and lead to deadlocks. To
 * serialize concurrent migrations or validations of the same range, the
 * prange->migrate_mutex must be held.
 *
 * For VRAM ranges, the SVM BO must be allocated and valid (protected by its
 * eviction fence).
 *
 * The following sequence ensures race-free validation and GPU mapping:
 *
 * 1. Reserve page table (and SVM BO if range is in VRAM)
 * 2. hmm_range_fault to get page addresses (if system memory)
 * 3. DMA-map pages (if system memory)
 * 4-a. Take notifier lock
 * 4-b. Check that pages still valid (mmu_interval_read_retry)
 * 4-c. Check that the range was not split or otherwise invalidated
 * 4-d. Update GPU page table
 * 4-e. Release notifier lock
 * 5. Release page table (and SVM BO) reservation
 */
static int svm_range_validate_and_map(struct mm_struct *mm,
				      unsigned long map_start, unsigned long map_last,
				      struct svm_range *prange, int32_t gpuidx,
				      bool intr, bool wait, bool flush_tlb)
{
	struct svm_validate_context *ctx;
	unsigned long start, end, addr;
	struct kfd_process *p;
	void *owner;
	int32_t idx;
	int r = 0;

	ctx = kzalloc(sizeof(struct svm_validate_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->process = container_of(prange->svms, struct kfd_process, svms);
	ctx->prange = prange;
	ctx->intr = intr;

	if (gpuidx < MAX_GPU_INSTANCE) {
		bitmap_zero(ctx->bitmap, MAX_GPU_INSTANCE);
		bitmap_set(ctx->bitmap, gpuidx, 1);
	} else if (ctx->process->xnack_enabled) {
		bitmap_copy(ctx->bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);

		/* If the range is prefetched to a GPU, or a GPU retry fault
		 * migrates the range to a GPU that has the ACCESS attribute
		 * for the range, create the mapping on that GPU.
		 */
		if (prange->actual_loc) {
			gpuidx = kfd_process_gpuidx_from_gpuid(ctx->process,
							       prange->actual_loc);
			if (gpuidx < 0) {
				WARN_ONCE(1, "failed get device by id 0x%x\n",
					  prange->actual_loc);
				r = -EINVAL;
				goto free_ctx;
			}
			if (test_bit(gpuidx, prange->bitmap_access))
				bitmap_set(ctx->bitmap, gpuidx, 1);
		}

		/*
		 * If prange is already mapped or has the always-mapped flag,
		 * update the mapping on GPUs with the ACCESS attribute
		 */
		if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
			if (prange->mapped_to_gpu ||
			    prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
				bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
		}
	} else {
		bitmap_or(ctx->bitmap, prange->bitmap_access,
			  prange->bitmap_aip, MAX_GPU_INSTANCE);
	}

	if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
		r = 0;
		goto free_ctx;
	}

	if (prange->actual_loc && !prange->ttm_res) {
		/* This should never happen. actual_loc gets set by
		 * svm_migrate_ram_to_vram after allocating a BO.
		 */
		WARN_ONCE(1, "VRAM BO missing during validation\n");
		r = -EINVAL;
		goto free_ctx;
	}

	r = svm_range_reserve_bos(ctx, intr);
	if (r)
		goto free_ctx;

	p = container_of(prange->svms, struct kfd_process, svms);
	owner = kfd_svm_page_owner(p, find_first_bit(ctx->bitmap,
						     MAX_GPU_INSTANCE));
	for_each_set_bit(idx, ctx->bitmap, MAX_GPU_INSTANCE) {
		if (kfd_svm_page_owner(p, idx) != owner) {
			owner = NULL;
			break;
		}
	}

	start = map_start << PAGE_SHIFT;
	end = (map_last + 1) << PAGE_SHIFT;
	for (addr = start; !r && addr < end; ) {
		struct hmm_range *hmm_range = NULL;
		unsigned long map_start_vma;
		unsigned long map_last_vma;
		struct vm_area_struct *vma;
		unsigned long next = 0;
		unsigned long offset;
		unsigned long npages;
		bool readonly;

		vma = vma_lookup(mm, addr);
		if (vma) {
			readonly = !(vma->vm_flags & VM_WRITE);

			next = min(vma->vm_end, end);
			npages = (next - addr) >> PAGE_SHIFT;
			WRITE_ONCE(p->svms.faulting_task, current);
			r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
						       readonly, owner, NULL,
						       &hmm_range);
			WRITE_ONCE(p->svms.faulting_task, NULL);
			if (r)
				pr_debug("failed %d to get svm range pages\n", r);
		} else {
			r = -EFAULT;
		}

		if (!r) {
			offset = (addr >> PAGE_SHIFT) - prange->start;
			r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
					      hmm_range->hmm_pfns);
			if (r)
				pr_debug("failed %d to dma map range\n", r);
		}

		svm_range_lock(prange);

		/* Free backing memory of hmm_range if it was initialized.
		 * Override the return value to TRY AGAIN only if prior returns
		 * were successful.
		 */
		if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
			pr_debug("hmm update the range, need validate again\n");
			r = -EAGAIN;
		}

		if (!r && !list_empty(&prange->child_list)) {
			pr_debug("range split by unmap in parallel, validate again\n");
			r = -EAGAIN;
		}

		if (!r) {
			map_start_vma = max(map_start, prange->start + offset);
			map_last_vma = min(map_last, prange->start + offset + npages - 1);
			if (map_start_vma <= map_last_vma) {
				offset = map_start_vma - prange->start;
				npages = map_last_vma - map_start_vma + 1;
				r = svm_range_map_to_gpus(prange, offset, npages, readonly,
							  ctx->bitmap, wait, flush_tlb);
			}
		}

		if (!r && next == end)
			prange->mapped_to_gpu = true;

		svm_range_unlock(prange);

		addr = next;
	}

	svm_range_unreserve_bos(ctx);
	if (!r)
		prange->validate_timestamp = ktime_get_boottime();

free_ctx:
	kfree(ctx);

	return r;
}

/**
 * svm_range_list_lock_and_flush_work - flush pending deferred work
 *
 * @svms: the svm range list
 * @mm: the mm structure
 *
 * Context: Returns with mmap write lock held, pending deferred work flushed
 *
 */
void
svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
				   struct mm_struct *mm)
{
retry_flush_work:
	flush_work(&svms->deferred_list_work);
	mmap_write_lock(mm);

	if (list_empty(&svms->deferred_range_list))
		return;
	/* more deferred work was queued while we waited; drop the lock and retry */
	mmap_write_unlock(mm);
	pr_debug("retry flush\n");
	goto retry_flush_work;
}

static void svm_range_restore_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct amdkfd_process_info *process_info;
	struct svm_range_list *svms;
	struct svm_range *prange;
	struct kfd_process *p;
	struct mm_struct *mm;
	int evicted_ranges;
	int invalid;
	int r;

	svms = container_of(dwork, struct svm_range_list, restore_work);
	evicted_ranges = atomic_read(&svms->evicted_ranges);
	if (!evicted_ranges)
		return;

	pr_debug("restore svm ranges\n");

	p = container_of(svms, struct kfd_process, svms);
	process_info = p->kgd_process_info;

	/* Keep mm reference while svm_range_validate_and_map maps ranges */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_debug("svms 0x%p process mm gone\n", svms);
		return;
	}

	mutex_lock(&process_info->lock);
	svm_range_list_lock_and_flush_work(svms, mm);
	mutex_lock(&svms->lock);

	evicted_ranges = atomic_read(&svms->evicted_ranges);

	list_for_each_entry(prange, &svms->list, list) {
		invalid = atomic_read(&prange->invalid);
		if (!invalid)
			continue;

		pr_debug("restoring svms 0x%p prange 0x%p [0x%lx %lx] inv %d\n",
			 prange->svms, prange, prange->start, prange->last,
			 invalid);

		/*
		 * If the range is migrating, wait for the migration to finish.
		 */
		mutex_lock(&prange->migrate_mutex);

		r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
					       MAX_GPU_INSTANCE, false, true, false);
		if (r)
			pr_debug("failed %d to map 0x%lx to gpus\n", r,
				 prange->start);

		mutex_unlock(&prange->migrate_mutex);
		if (r)
			goto out_reschedule;

		if (atomic_cmpxchg(&prange->invalid, invalid, 0) != invalid)
			goto out_reschedule;
	}

	if (atomic_cmpxchg(&svms->evicted_ranges, evicted_ranges, 0) !=
	    evicted_ranges)
		goto out_reschedule;

	evicted_ranges = 0;

	r = kgd2kfd_resume_mm(mm);
	if (r) {
		/* No recovery from this failure. Probably the CP is
		 * hanging. No point trying again.
		 */
		pr_debug("failed %d to resume KFD\n", r);
	}

	pr_debug("restore svm ranges successfully\n");

out_reschedule:
	mutex_unlock(&svms->lock);
	mmap_write_unlock(mm);
	mutex_unlock(&process_info->lock);

	/* If validation failed, reschedule another attempt */
	if (evicted_ranges) {
		pr_debug("reschedule to restore svm range\n");
		queue_delayed_work(system_freezable_wq, &svms->restore_work,
				   msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS));

		kfd_smi_event_queue_restore_rescheduled(mm);
	}
	mmput(mm);
}

/**
 * svm_range_evict - evict svm range
 * @prange: svm range structure
 * @mm: current process mm_struct
 * @start: first page of the invalidated address range, in pages
 * @last: last page of the invalidated address range, in pages
 * @event: mmu notifier event when range is evicted or migrated
 *
 * Stop all queues of the process to ensure GPU doesn't access the memory, then
 * return to let the CPU evict the buffer and proceed with the CPU pagetable
 * update.
 *
 * There is no need for a lock to sync CPU pagetable invalidation with GPU
 * execution. If invalidation happens while restore work is running, restore
 * work will restart to ensure it maps the latest CPU pages to the GPU, then
 * start the queues.
 */
1886 */ 1887 static int 1888 svm_range_evict(struct svm_range *prange, struct mm_struct *mm, 1889 unsigned long start, unsigned long last, 1890 enum mmu_notifier_event event) 1891 { 1892 struct svm_range_list *svms = prange->svms; 1893 struct svm_range *pchild; 1894 struct kfd_process *p; 1895 int r = 0; 1896 1897 p = container_of(svms, struct kfd_process, svms); 1898 1899 pr_debug("invalidate svms 0x%p prange [0x%lx 0x%lx] [0x%lx 0x%lx]\n", 1900 svms, prange->start, prange->last, start, last); 1901 1902 if (!p->xnack_enabled || 1903 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) { 1904 int evicted_ranges; 1905 bool mapped = prange->mapped_to_gpu; 1906 1907 list_for_each_entry(pchild, &prange->child_list, child_list) { 1908 if (!pchild->mapped_to_gpu) 1909 continue; 1910 mapped = true; 1911 mutex_lock_nested(&pchild->lock, 1); 1912 if (pchild->start <= last && pchild->last >= start) { 1913 pr_debug("increment pchild invalid [0x%lx 0x%lx]\n", 1914 pchild->start, pchild->last); 1915 atomic_inc(&pchild->invalid); 1916 } 1917 mutex_unlock(&pchild->lock); 1918 } 1919 1920 if (!mapped) 1921 return r; 1922 1923 if (prange->start <= last && prange->last >= start) 1924 atomic_inc(&prange->invalid); 1925 1926 evicted_ranges = atomic_inc_return(&svms->evicted_ranges); 1927 if (evicted_ranges != 1) 1928 return r; 1929 1930 pr_debug("evicting svms 0x%p range [0x%lx 0x%lx]\n", 1931 prange->svms, prange->start, prange->last); 1932 1933 /* First eviction, stop the queues */ 1934 r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM); 1935 if (r) 1936 pr_debug("failed to quiesce KFD\n"); 1937 1938 pr_debug("schedule to restore svm %p ranges\n", svms); 1939 queue_delayed_work(system_freezable_wq, &svms->restore_work, 1940 msecs_to_jiffies(AMDGPU_SVM_RANGE_RESTORE_DELAY_MS)); 1941 } else { 1942 unsigned long s, l; 1943 uint32_t trigger; 1944 1945 if (event == MMU_NOTIFY_MIGRATE) 1946 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE; 1947 else 1948 trigger = KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY; 1949 1950 pr_debug("invalidate unmap svms 0x%p [0x%lx 0x%lx] from GPUs\n", 1951 prange->svms, start, last); 1952 list_for_each_entry(pchild, &prange->child_list, child_list) { 1953 mutex_lock_nested(&pchild->lock, 1); 1954 s = max(start, pchild->start); 1955 l = min(last, pchild->last); 1956 if (l >= s) 1957 svm_range_unmap_from_gpus(pchild, s, l, trigger); 1958 mutex_unlock(&pchild->lock); 1959 } 1960 s = max(start, prange->start); 1961 l = min(last, prange->last); 1962 if (l >= s) 1963 svm_range_unmap_from_gpus(prange, s, l, trigger); 1964 } 1965 1966 return r; 1967 } 1968 1969 static struct svm_range *svm_range_clone(struct svm_range *old) 1970 { 1971 struct svm_range *new; 1972 1973 new = svm_range_new(old->svms, old->start, old->last, false); 1974 if (!new) 1975 return NULL; 1976 if (svm_range_copy_dma_addrs(new, old)) { 1977 svm_range_free(new, false); 1978 return NULL; 1979 } 1980 if (old->svm_bo) { 1981 new->ttm_res = old->ttm_res; 1982 new->offset = old->offset; 1983 new->svm_bo = svm_range_bo_ref(old->svm_bo); 1984 spin_lock(&new->svm_bo->list_lock); 1985 list_add(&new->svm_bo_list, &new->svm_bo->range_list); 1986 spin_unlock(&new->svm_bo->list_lock); 1987 } 1988 new->flags = old->flags; 1989 new->preferred_loc = old->preferred_loc; 1990 new->prefetch_loc = old->prefetch_loc; 1991 new->actual_loc = old->actual_loc; 1992 new->granularity = old->granularity; 1993 new->mapped_to_gpu = old->mapped_to_gpu; 1994 new->vram_pages = old->vram_pages; 1995 bitmap_copy(new->bitmap_access, old->bitmap_access, 
MAX_GPU_INSTANCE); 1996 bitmap_copy(new->bitmap_aip, old->bitmap_aip, MAX_GPU_INSTANCE); 1997 1998 return new; 1999 } 2000 2001 void svm_range_set_max_pages(struct amdgpu_device *adev) 2002 { 2003 uint64_t max_pages; 2004 uint64_t pages, _pages; 2005 uint64_t min_pages = 0; 2006 int i, id; 2007 2008 for (i = 0; i < adev->kfd.dev->num_nodes; i++) { 2009 if (adev->kfd.dev->nodes[i]->xcp) 2010 id = adev->kfd.dev->nodes[i]->xcp->id; 2011 else 2012 id = -1; 2013 pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17; 2014 pages = clamp(pages, 1ULL << 9, 1ULL << 18); 2015 pages = rounddown_pow_of_two(pages); 2016 min_pages = min_not_zero(min_pages, pages); 2017 } 2018 2019 do { 2020 max_pages = READ_ONCE(max_svm_range_pages); 2021 _pages = min_not_zero(max_pages, min_pages); 2022 } while (cmpxchg(&max_svm_range_pages, max_pages, _pages) != max_pages); 2023 } 2024 2025 static int 2026 svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last, 2027 uint64_t max_pages, struct list_head *insert_list, 2028 struct list_head *update_list) 2029 { 2030 struct svm_range *prange; 2031 uint64_t l; 2032 2033 pr_debug("max_svm_range_pages 0x%llx adding [0x%llx 0x%llx]\n", 2034 max_pages, start, last); 2035 2036 while (last >= start) { 2037 l = min(last, ALIGN_DOWN(start + max_pages, max_pages) - 1); 2038 2039 prange = svm_range_new(svms, start, l, true); 2040 if (!prange) 2041 return -ENOMEM; 2042 list_add(&prange->list, insert_list); 2043 list_add(&prange->update_list, update_list); 2044 2045 start = l + 1; 2046 } 2047 return 0; 2048 } 2049 2050 /** 2051 * svm_range_add - add svm range and handle overlap 2052 * @p: the range add to this process svms 2053 * @start: page size aligned 2054 * @size: page size aligned 2055 * @nattr: number of attributes 2056 * @attrs: array of attributes 2057 * @update_list: output, the ranges need validate and update GPU mapping 2058 * @insert_list: output, the ranges need insert to svms 2059 * @remove_list: output, the ranges are replaced and need remove from svms 2060 * @remap_list: output, remap unaligned svm ranges 2061 * 2062 * Check if the virtual address range has overlap with any existing ranges, 2063 * split partly overlapping ranges and add new ranges in the gaps. All changes 2064 * should be applied to the range_list and interval tree transactionally. If 2065 * any range split or allocation fails, the entire update fails. Therefore any 2066 * existing overlapping svm_ranges are cloned and the original svm_ranges left 2067 * unchanged. 2068 * 2069 * If the transaction succeeds, the caller can update and insert clones and 2070 * new ranges, then free the originals. 2071 * 2072 * Otherwise the caller can free the clones and new ranges, while the old 2073 * svm_ranges remain unchanged. 
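 *
 * Hypothetical example (page numbers): an existing range [0x1000 0x4fff]
 * whose attributes will change, updated over [0x3000 0x5fff]. The original is
 * put on remove_list and a clone is added to insert_list and update_list; the
 * clone is then split so that only [0x3000 0x4fff] carries the new attributes
 * while the split-off head [0x1000 0x2fff] keeps the old ones; and a
 * brand-new range [0x5000 0x5fff] is created for the uncovered tail and added
 * to both insert_list and update_list.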
2074 * 2075 * Context: Process context, caller must hold svms->lock 2076 * 2077 * Return: 2078 * 0 - OK, otherwise error code 2079 */ 2080 static int 2081 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size, 2082 uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs, 2083 struct list_head *update_list, struct list_head *insert_list, 2084 struct list_head *remove_list, struct list_head *remap_list) 2085 { 2086 unsigned long last = start + size - 1UL; 2087 struct svm_range_list *svms = &p->svms; 2088 struct interval_tree_node *node; 2089 struct svm_range *prange; 2090 struct svm_range *tmp; 2091 struct list_head new_list; 2092 int r = 0; 2093 2094 pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last); 2095 2096 INIT_LIST_HEAD(update_list); 2097 INIT_LIST_HEAD(insert_list); 2098 INIT_LIST_HEAD(remove_list); 2099 INIT_LIST_HEAD(&new_list); 2100 INIT_LIST_HEAD(remap_list); 2101 2102 node = interval_tree_iter_first(&svms->objects, start, last); 2103 while (node) { 2104 struct interval_tree_node *next; 2105 unsigned long next_start; 2106 2107 pr_debug("found overlap node [0x%lx 0x%lx]\n", node->start, 2108 node->last); 2109 2110 prange = container_of(node, struct svm_range, it_node); 2111 next = interval_tree_iter_next(node, start, last); 2112 next_start = min(node->last, last) + 1; 2113 2114 if (svm_range_is_same_attrs(p, prange, nattr, attrs) && 2115 prange->mapped_to_gpu) { 2116 /* nothing to do */ 2117 } else if (node->start < start || node->last > last) { 2118 /* node intersects the update range and its attributes 2119 * will change. Clone and split it, apply updates only 2120 * to the overlapping part 2121 */ 2122 struct svm_range *old = prange; 2123 2124 prange = svm_range_clone(old); 2125 if (!prange) { 2126 r = -ENOMEM; 2127 goto out; 2128 } 2129 2130 list_add(&old->update_list, remove_list); 2131 list_add(&prange->list, insert_list); 2132 list_add(&prange->update_list, update_list); 2133 2134 if (node->start < start) { 2135 pr_debug("change old range start\n"); 2136 r = svm_range_split_head(prange, start, 2137 insert_list, remap_list); 2138 if (r) 2139 goto out; 2140 } 2141 if (node->last > last) { 2142 pr_debug("change old range last\n"); 2143 r = svm_range_split_tail(prange, last, 2144 insert_list, remap_list); 2145 if (r) 2146 goto out; 2147 } 2148 } else { 2149 /* The node is contained within start..last, 2150 * just update it 2151 */ 2152 list_add(&prange->update_list, update_list); 2153 } 2154 2155 /* insert a new node if needed */ 2156 if (node->start > start) { 2157 r = svm_range_split_new(svms, start, node->start - 1, 2158 READ_ONCE(max_svm_range_pages), 2159 &new_list, update_list); 2160 if (r) 2161 goto out; 2162 } 2163 2164 node = next; 2165 start = next_start; 2166 } 2167 2168 /* add a final range at the end if needed */ 2169 if (start <= last) 2170 r = svm_range_split_new(svms, start, last, 2171 READ_ONCE(max_svm_range_pages), 2172 &new_list, update_list); 2173 2174 out: 2175 if (r) { 2176 list_for_each_entry_safe(prange, tmp, insert_list, list) 2177 svm_range_free(prange, false); 2178 list_for_each_entry_safe(prange, tmp, &new_list, list) 2179 svm_range_free(prange, true); 2180 } else { 2181 list_splice(&new_list, insert_list); 2182 } 2183 2184 return r; 2185 } 2186 2187 static void 2188 svm_range_update_notifier_and_interval_tree(struct mm_struct *mm, 2189 struct svm_range *prange) 2190 { 2191 unsigned long start; 2192 unsigned long last; 2193 2194 start = prange->notifier.interval_tree.start >> PAGE_SHIFT; 2195 last = 
prange->notifier.interval_tree.last >> PAGE_SHIFT; 2196 2197 if (prange->start == start && prange->last == last) 2198 return; 2199 2200 pr_debug("up notifier 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", 2201 prange->svms, prange, start, last, prange->start, 2202 prange->last); 2203 2204 if (start != 0 && last != 0) { 2205 interval_tree_remove(&prange->it_node, &prange->svms->objects); 2206 svm_range_remove_notifier(prange); 2207 } 2208 prange->it_node.start = prange->start; 2209 prange->it_node.last = prange->last; 2210 2211 interval_tree_insert(&prange->it_node, &prange->svms->objects); 2212 svm_range_add_notifier_locked(mm, prange); 2213 } 2214 2215 static void 2216 svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange, 2217 struct mm_struct *mm) 2218 { 2219 switch (prange->work_item.op) { 2220 case SVM_OP_NULL: 2221 pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2222 svms, prange, prange->start, prange->last); 2223 break; 2224 case SVM_OP_UNMAP_RANGE: 2225 pr_debug("remove 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2226 svms, prange, prange->start, prange->last); 2227 svm_range_unlink(prange); 2228 svm_range_remove_notifier(prange); 2229 svm_range_free(prange, true); 2230 break; 2231 case SVM_OP_UPDATE_RANGE_NOTIFIER: 2232 pr_debug("update notifier 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2233 svms, prange, prange->start, prange->last); 2234 svm_range_update_notifier_and_interval_tree(mm, prange); 2235 break; 2236 case SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP: 2237 pr_debug("update and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", 2238 svms, prange, prange->start, prange->last); 2239 svm_range_update_notifier_and_interval_tree(mm, prange); 2240 /* TODO: implement deferred validation and mapping */ 2241 break; 2242 case SVM_OP_ADD_RANGE: 2243 pr_debug("add 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, prange, 2244 prange->start, prange->last); 2245 svm_range_add_to_svms(prange); 2246 svm_range_add_notifier_locked(mm, prange); 2247 break; 2248 case SVM_OP_ADD_RANGE_AND_MAP: 2249 pr_debug("add and map 0x%p prange 0x%p [0x%lx 0x%lx]\n", svms, 2250 prange, prange->start, prange->last); 2251 svm_range_add_to_svms(prange); 2252 svm_range_add_notifier_locked(mm, prange); 2253 /* TODO: implement deferred validation and mapping */ 2254 break; 2255 default: 2256 WARN_ONCE(1, "Unknown prange 0x%p work op %d\n", prange, 2257 prange->work_item.op); 2258 } 2259 } 2260 2261 static void svm_range_drain_retry_fault(struct svm_range_list *svms) 2262 { 2263 struct kfd_process_device *pdd; 2264 struct kfd_process *p; 2265 int drain; 2266 uint32_t i; 2267 2268 p = container_of(svms, struct kfd_process, svms); 2269 2270 restart: 2271 drain = atomic_read(&svms->drain_pagefaults); 2272 if (!drain) 2273 return; 2274 2275 for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { 2276 pdd = p->pdds[i]; 2277 if (!pdd) 2278 continue; 2279 2280 pr_debug("drain retry fault gpu %d svms %p\n", i, svms); 2281 2282 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev, 2283 pdd->dev->adev->irq.retry_cam_enabled ? 
2284 &pdd->dev->adev->irq.ih : 2285 &pdd->dev->adev->irq.ih1); 2286 2287 if (pdd->dev->adev->irq.retry_cam_enabled) 2288 amdgpu_ih_wait_on_checkpoint_process_ts(pdd->dev->adev, 2289 &pdd->dev->adev->irq.ih_soft); 2290 2291 2292 pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); 2293 } 2294 if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain) 2295 goto restart; 2296 } 2297 2298 static void svm_range_deferred_list_work(struct work_struct *work) 2299 { 2300 struct svm_range_list *svms; 2301 struct svm_range *prange; 2302 struct mm_struct *mm; 2303 2304 svms = container_of(work, struct svm_range_list, deferred_list_work); 2305 pr_debug("enter svms 0x%p\n", svms); 2306 2307 spin_lock(&svms->deferred_list_lock); 2308 while (!list_empty(&svms->deferred_range_list)) { 2309 prange = list_first_entry(&svms->deferred_range_list, 2310 struct svm_range, deferred_list); 2311 spin_unlock(&svms->deferred_list_lock); 2312 2313 pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange, 2314 prange->start, prange->last, prange->work_item.op); 2315 2316 mm = prange->work_item.mm; 2317 retry: 2318 mmap_write_lock(mm); 2319 2320 /* Checking for the need to drain retry faults must be inside 2321 * mmap write lock to serialize with munmap notifiers. 2322 */ 2323 if (unlikely(atomic_read(&svms->drain_pagefaults))) { 2324 mmap_write_unlock(mm); 2325 svm_range_drain_retry_fault(svms); 2326 goto retry; 2327 } 2328 2329 /* Remove from deferred_list must be inside mmap write lock, for 2330 * two race cases: 2331 * 1. unmap_from_cpu may change work_item.op and add the range 2332 * to deferred_list again, cause use after free bug. 2333 * 2. svm_range_list_lock_and_flush_work may hold mmap write 2334 * lock and continue because deferred_list is empty, but 2335 * deferred_list work is actually waiting for mmap lock. 2336 */ 2337 spin_lock(&svms->deferred_list_lock); 2338 list_del_init(&prange->deferred_list); 2339 spin_unlock(&svms->deferred_list_lock); 2340 2341 mutex_lock(&svms->lock); 2342 mutex_lock(&prange->migrate_mutex); 2343 while (!list_empty(&prange->child_list)) { 2344 struct svm_range *pchild; 2345 2346 pchild = list_first_entry(&prange->child_list, 2347 struct svm_range, child_list); 2348 pr_debug("child prange 0x%p op %d\n", pchild, 2349 pchild->work_item.op); 2350 list_del_init(&pchild->child_list); 2351 svm_range_handle_list_op(svms, pchild, mm); 2352 } 2353 mutex_unlock(&prange->migrate_mutex); 2354 2355 svm_range_handle_list_op(svms, prange, mm); 2356 mutex_unlock(&svms->lock); 2357 mmap_write_unlock(mm); 2358 2359 /* Pairs with mmget in svm_range_add_list_work. 
If dropping the 2360 * last mm refcount, schedule release work to avoid circular locking 2361 */ 2362 mmput_async(mm); 2363 2364 spin_lock(&svms->deferred_list_lock); 2365 } 2366 spin_unlock(&svms->deferred_list_lock); 2367 pr_debug("exit svms 0x%p\n", svms); 2368 } 2369 2370 void 2371 svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, 2372 struct mm_struct *mm, enum svm_work_list_ops op) 2373 { 2374 spin_lock(&svms->deferred_list_lock); 2375 /* if prange is on the deferred list */ 2376 if (!list_empty(&prange->deferred_list)) { 2377 pr_debug("update exist prange 0x%p work op %d\n", prange, op); 2378 WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n"); 2379 if (op != SVM_OP_NULL && 2380 prange->work_item.op != SVM_OP_UNMAP_RANGE) 2381 prange->work_item.op = op; 2382 } else { 2383 prange->work_item.op = op; 2384 2385 /* Pairs with mmput in deferred_list_work */ 2386 mmget(mm); 2387 prange->work_item.mm = mm; 2388 list_add_tail(&prange->deferred_list, 2389 &prange->svms->deferred_range_list); 2390 pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n", 2391 prange, prange->start, prange->last, op); 2392 } 2393 spin_unlock(&svms->deferred_list_lock); 2394 } 2395 2396 void schedule_deferred_list_work(struct svm_range_list *svms) 2397 { 2398 spin_lock(&svms->deferred_list_lock); 2399 if (!list_empty(&svms->deferred_range_list)) 2400 schedule_work(&svms->deferred_list_work); 2401 spin_unlock(&svms->deferred_list_lock); 2402 } 2403 2404 static void 2405 svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent, 2406 struct svm_range *prange, unsigned long start, 2407 unsigned long last) 2408 { 2409 struct svm_range *head; 2410 struct svm_range *tail; 2411 2412 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) { 2413 pr_debug("prange 0x%p [0x%lx 0x%lx] is already freed\n", prange, 2414 prange->start, prange->last); 2415 return; 2416 } 2417 if (start > prange->last || last < prange->start) 2418 return; 2419 2420 head = tail = prange; 2421 if (start > prange->start) 2422 svm_range_split(prange, prange->start, start - 1, &tail); 2423 if (last < tail->last) 2424 svm_range_split(tail, last + 1, tail->last, &head); 2425 2426 if (head != prange && tail != prange) { 2427 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); 2428 svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); 2429 } else if (tail != prange) { 2430 svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE); 2431 } else if (head != prange) { 2432 svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); 2433 } else if (parent != prange) { 2434 prange->work_item.op = SVM_OP_UNMAP_RANGE; 2435 } 2436 } 2437 2438 static void 2439 svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, 2440 unsigned long start, unsigned long last) 2441 { 2442 uint32_t trigger = KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU; 2443 struct svm_range_list *svms; 2444 struct svm_range *pchild; 2445 struct kfd_process *p; 2446 unsigned long s, l; 2447 bool unmap_parent; 2448 2449 p = kfd_lookup_process_by_mm(mm); 2450 if (!p) 2451 return; 2452 svms = &p->svms; 2453 2454 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms, 2455 prange, prange->start, prange->last, start, last); 2456 2457 /* Make sure pending page faults are drained in the deferred worker 2458 * before the range is freed to avoid straggler interrupts on 2459 * unmapped memory causing "phantom faults". 
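 * The drain request is the svms->drain_pagefaults counter bumped just below;
 * svm_range_drain_retry_fault() in the deferred worker flushes the IH rings
 * of every supported GPU and then resets the counter.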
2460 */ 2461 atomic_inc(&svms->drain_pagefaults); 2462 2463 unmap_parent = start <= prange->start && last >= prange->last; 2464 2465 list_for_each_entry(pchild, &prange->child_list, child_list) { 2466 mutex_lock_nested(&pchild->lock, 1); 2467 s = max(start, pchild->start); 2468 l = min(last, pchild->last); 2469 if (l >= s) 2470 svm_range_unmap_from_gpus(pchild, s, l, trigger); 2471 svm_range_unmap_split(mm, prange, pchild, start, last); 2472 mutex_unlock(&pchild->lock); 2473 } 2474 s = max(start, prange->start); 2475 l = min(last, prange->last); 2476 if (l >= s) 2477 svm_range_unmap_from_gpus(prange, s, l, trigger); 2478 svm_range_unmap_split(mm, prange, prange, start, last); 2479 2480 if (unmap_parent) 2481 svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE); 2482 else 2483 svm_range_add_list_work(svms, prange, mm, 2484 SVM_OP_UPDATE_RANGE_NOTIFIER); 2485 schedule_deferred_list_work(svms); 2486 2487 kfd_unref_process(p); 2488 } 2489 2490 /** 2491 * svm_range_cpu_invalidate_pagetables - interval notifier callback 2492 * @mni: mmu_interval_notifier struct 2493 * @range: mmu_notifier_range struct 2494 * @cur_seq: value to pass to mmu_interval_set_seq() 2495 * 2496 * If event is MMU_NOTIFY_UNMAP, this is from CPU unmap range, otherwise, it 2497 * is from migration, or CPU page invalidation callback. 2498 * 2499 * For unmap event, unmap range from GPUs, remove prange from svms in a delayed 2500 * work thread, and split prange if only part of prange is unmapped. 2501 * 2502 * For invalidation event, if GPU retry fault is not enabled, evict the queues, 2503 * then schedule svm_range_restore_work to update GPU mapping and resume queues. 2504 * If GPU retry fault is enabled, unmap the svm range from GPU, retry fault will 2505 * update GPU mapping to recover. 
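 *
 * In short (see the switch below): MMU_NOTIFY_RELEASE is ignored,
 * MMU_NOTIFY_UNMAP is handled by svm_range_unmap_from_cpu(), and every other
 * event (migration, protection change, ...) is handled by svm_range_evict().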
2506 * 2507 * Context: mmap lock, notifier_invalidate_start lock are held 2508 * for invalidate event, prange lock is held if this is from migration 2509 */ 2510 static bool 2511 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, 2512 const struct mmu_notifier_range *range, 2513 unsigned long cur_seq) 2514 { 2515 struct svm_range *prange; 2516 unsigned long start; 2517 unsigned long last; 2518 2519 if (range->event == MMU_NOTIFY_RELEASE) 2520 return true; 2521 if (!mmget_not_zero(mni->mm)) 2522 return true; 2523 2524 start = mni->interval_tree.start; 2525 last = mni->interval_tree.last; 2526 start = max(start, range->start) >> PAGE_SHIFT; 2527 last = min(last, range->end - 1) >> PAGE_SHIFT; 2528 pr_debug("[0x%lx 0x%lx] range[0x%lx 0x%lx] notifier[0x%lx 0x%lx] %d\n", 2529 start, last, range->start >> PAGE_SHIFT, 2530 (range->end - 1) >> PAGE_SHIFT, 2531 mni->interval_tree.start >> PAGE_SHIFT, 2532 mni->interval_tree.last >> PAGE_SHIFT, range->event); 2533 2534 prange = container_of(mni, struct svm_range, notifier); 2535 2536 svm_range_lock(prange); 2537 mmu_interval_set_seq(mni, cur_seq); 2538 2539 switch (range->event) { 2540 case MMU_NOTIFY_UNMAP: 2541 svm_range_unmap_from_cpu(mni->mm, prange, start, last); 2542 break; 2543 default: 2544 svm_range_evict(prange, mni->mm, start, last, range->event); 2545 break; 2546 } 2547 2548 svm_range_unlock(prange); 2549 mmput(mni->mm); 2550 2551 return true; 2552 } 2553 2554 /** 2555 * svm_range_from_addr - find svm range from fault address 2556 * @svms: svm range list header 2557 * @addr: address to search range interval tree, in pages 2558 * @parent: parent range if range is on child list 2559 * 2560 * Context: The caller must hold svms->lock 2561 * 2562 * Return: the svm_range found or NULL 2563 */ 2564 struct svm_range * 2565 svm_range_from_addr(struct svm_range_list *svms, unsigned long addr, 2566 struct svm_range **parent) 2567 { 2568 struct interval_tree_node *node; 2569 struct svm_range *prange; 2570 struct svm_range *pchild; 2571 2572 node = interval_tree_iter_first(&svms->objects, addr, addr); 2573 if (!node) 2574 return NULL; 2575 2576 prange = container_of(node, struct svm_range, it_node); 2577 pr_debug("address 0x%lx prange [0x%lx 0x%lx] node [0x%lx 0x%lx]\n", 2578 addr, prange->start, prange->last, node->start, node->last); 2579 2580 if (addr >= prange->start && addr <= prange->last) { 2581 if (parent) 2582 *parent = prange; 2583 return prange; 2584 } 2585 list_for_each_entry(pchild, &prange->child_list, child_list) 2586 if (addr >= pchild->start && addr <= pchild->last) { 2587 pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n", 2588 addr, pchild->start, pchild->last); 2589 if (parent) 2590 *parent = prange; 2591 return pchild; 2592 } 2593 2594 return NULL; 2595 } 2596 2597 /* svm_range_best_restore_location - decide the best fault restore location 2598 * @prange: svm range structure 2599 * @adev: the GPU on which vm fault happened 2600 * 2601 * This is only called when xnack is on, to decide the best location to restore 2602 * the range mapping after GPU vm fault. Caller uses the best location to do 2603 * migration if actual loc is not best location, then update GPU page table 2604 * mapping to the best location. 2605 * 2606 * If the preferred loc is accessible by faulting GPU, use preferred loc. 
2607 * If vm fault gpu idx is on range ACCESSIBLE bitmap, best_loc is vm fault gpu 2608 * If vm fault gpu idx is on range ACCESSIBLE_IN_PLACE bitmap, then 2609 * if range actual loc is cpu, best_loc is cpu 2610 * if vm fault gpu is on xgmi same hive of range actual loc gpu, best_loc is 2611 * range actual loc. 2612 * Otherwise, GPU no access, best_loc is -1. 2613 * 2614 * Return: 2615 * -1 means vm fault GPU no access 2616 * 0 for CPU or GPU id 2617 */ 2618 static int32_t 2619 svm_range_best_restore_location(struct svm_range *prange, 2620 struct kfd_node *node, 2621 int32_t *gpuidx) 2622 { 2623 struct kfd_node *bo_node, *preferred_node; 2624 struct kfd_process *p; 2625 uint32_t gpuid; 2626 int r; 2627 2628 p = container_of(prange->svms, struct kfd_process, svms); 2629 2630 r = kfd_process_gpuid_from_node(p, node, &gpuid, gpuidx); 2631 if (r < 0) { 2632 pr_debug("failed to get gpuid from kgd\n"); 2633 return -1; 2634 } 2635 2636 if (node->adev->gmc.is_app_apu || 2637 node->adev->flags & AMD_IS_APU) 2638 return 0; 2639 2640 if (prange->preferred_loc == gpuid || 2641 prange->preferred_loc == KFD_IOCTL_SVM_LOCATION_SYSMEM) { 2642 return prange->preferred_loc; 2643 } else if (prange->preferred_loc != KFD_IOCTL_SVM_LOCATION_UNDEFINED) { 2644 preferred_node = svm_range_get_node_by_id(prange, prange->preferred_loc); 2645 if (preferred_node && svm_nodes_in_same_hive(node, preferred_node)) 2646 return prange->preferred_loc; 2647 /* fall through */ 2648 } 2649 2650 if (test_bit(*gpuidx, prange->bitmap_access)) 2651 return gpuid; 2652 2653 if (test_bit(*gpuidx, prange->bitmap_aip)) { 2654 if (!prange->actual_loc) 2655 return 0; 2656 2657 bo_node = svm_range_get_node_by_id(prange, prange->actual_loc); 2658 if (bo_node && svm_nodes_in_same_hive(node, bo_node)) 2659 return prange->actual_loc; 2660 else 2661 return 0; 2662 } 2663 2664 return -1; 2665 } 2666 2667 static int 2668 svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr, 2669 unsigned long *start, unsigned long *last, 2670 bool *is_heap_stack) 2671 { 2672 struct vm_area_struct *vma; 2673 struct interval_tree_node *node; 2674 struct rb_node *rb_node; 2675 unsigned long start_limit, end_limit; 2676 2677 vma = vma_lookup(p->mm, addr << PAGE_SHIFT); 2678 if (!vma) { 2679 pr_debug("VMA does not exist in address [0x%llx]\n", addr); 2680 return -EFAULT; 2681 } 2682 2683 *is_heap_stack = vma_is_initial_heap(vma) || vma_is_initial_stack(vma); 2684 2685 start_limit = max(vma->vm_start >> PAGE_SHIFT, 2686 (unsigned long)ALIGN_DOWN(addr, 2UL << 8)); 2687 end_limit = min(vma->vm_end >> PAGE_SHIFT, 2688 (unsigned long)ALIGN(addr + 1, 2UL << 8)); 2689 /* First range that starts after the fault address */ 2690 node = interval_tree_iter_first(&p->svms.objects, addr + 1, ULONG_MAX); 2691 if (node) { 2692 end_limit = min(end_limit, node->start); 2693 /* Last range that ends before the fault address */ 2694 rb_node = rb_prev(&node->rb); 2695 } else { 2696 /* Last range must end before addr because 2697 * there was no range after addr 2698 */ 2699 rb_node = rb_last(&p->svms.objects.rb_root); 2700 } 2701 if (rb_node) { 2702 node = container_of(rb_node, struct interval_tree_node, rb); 2703 if (node->last >= addr) { 2704 WARN(1, "Overlap with prev node and page fault addr\n"); 2705 return -EFAULT; 2706 } 2707 start_limit = max(start_limit, node->last + 1); 2708 } 2709 2710 *start = start_limit; 2711 *last = end_limit - 1; 2712 2713 pr_debug("vma [0x%lx 0x%lx] range [0x%lx 0x%lx] is_heap_stack %d\n", 2714 vma->vm_start >> PAGE_SHIFT, vma->vm_end >> 
PAGE_SHIFT, 2715 *start, *last, *is_heap_stack); 2716 2717 return 0; 2718 } 2719 2720 static int 2721 svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last, 2722 uint64_t *bo_s, uint64_t *bo_l) 2723 { 2724 struct amdgpu_bo_va_mapping *mapping; 2725 struct interval_tree_node *node; 2726 struct amdgpu_bo *bo = NULL; 2727 unsigned long userptr; 2728 uint32_t i; 2729 int r; 2730 2731 for (i = 0; i < p->n_pdds; i++) { 2732 struct amdgpu_vm *vm; 2733 2734 if (!p->pdds[i]->drm_priv) 2735 continue; 2736 2737 vm = drm_priv_to_vm(p->pdds[i]->drm_priv); 2738 r = amdgpu_bo_reserve(vm->root.bo, false); 2739 if (r) 2740 return r; 2741 2742 /* Check userptr by searching entire vm->va interval tree */ 2743 node = interval_tree_iter_first(&vm->va, 0, ~0ULL); 2744 while (node) { 2745 mapping = container_of((struct rb_node *)node, 2746 struct amdgpu_bo_va_mapping, rb); 2747 bo = mapping->bo_va->base.bo; 2748 2749 if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, 2750 start << PAGE_SHIFT, 2751 last << PAGE_SHIFT, 2752 &userptr)) { 2753 node = interval_tree_iter_next(node, 0, ~0ULL); 2754 continue; 2755 } 2756 2757 pr_debug("[0x%llx 0x%llx] already userptr mapped\n", 2758 start, last); 2759 if (bo_s && bo_l) { 2760 *bo_s = userptr >> PAGE_SHIFT; 2761 *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1; 2762 } 2763 amdgpu_bo_unreserve(vm->root.bo); 2764 return -EADDRINUSE; 2765 } 2766 amdgpu_bo_unreserve(vm->root.bo); 2767 } 2768 return 0; 2769 } 2770 2771 static struct 2772 svm_range *svm_range_create_unregistered_range(struct kfd_node *node, 2773 struct kfd_process *p, 2774 struct mm_struct *mm, 2775 int64_t addr) 2776 { 2777 struct svm_range *prange = NULL; 2778 unsigned long start, last; 2779 uint32_t gpuid, gpuidx; 2780 bool is_heap_stack; 2781 uint64_t bo_s = 0; 2782 uint64_t bo_l = 0; 2783 int r; 2784 2785 if (svm_range_get_range_boundaries(p, addr, &start, &last, 2786 &is_heap_stack)) 2787 return NULL; 2788 2789 r = svm_range_check_vm(p, start, last, &bo_s, &bo_l); 2790 if (r != -EADDRINUSE) 2791 r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l); 2792 2793 if (r == -EADDRINUSE) { 2794 if (addr >= bo_s && addr <= bo_l) 2795 return NULL; 2796 2797 /* Create one page svm range if 2MB range overlapping */ 2798 start = addr; 2799 last = addr; 2800 } 2801 2802 prange = svm_range_new(&p->svms, start, last, true); 2803 if (!prange) { 2804 pr_debug("Failed to create prange in address [0x%llx]\n", addr); 2805 return NULL; 2806 } 2807 if (kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx)) { 2808 pr_debug("failed to get gpuid from kgd\n"); 2809 svm_range_free(prange, true); 2810 return NULL; 2811 } 2812 2813 if (is_heap_stack) 2814 prange->preferred_loc = KFD_IOCTL_SVM_LOCATION_SYSMEM; 2815 2816 svm_range_add_to_svms(prange); 2817 svm_range_add_notifier_locked(mm, prange); 2818 2819 return prange; 2820 } 2821 2822 /* svm_range_skip_recover - decide if prange can be recovered 2823 * @prange: svm range structure 2824 * 2825 * GPU vm retry fault handle skip recover the range for cases: 2826 * 1. prange is on deferred list to be removed after unmap, it is stale fault, 2827 * deferred list work will drain the stale fault before free the prange. 2828 * 2. prange is on deferred list to add interval notifier after split, or 2829 * 3. prange is child range, it is split from parent prange, recover later 2830 * after interval notifier is added. 
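 *
 * In all three cases the fault is treated as stale or premature: the caller
 * (svm_range_restore_pages) drops it and clears the fault filter entry with
 * amdgpu_gmc_filter_faults_remove() instead of recovering the range here.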
2831 * 2832 * Return: true to skip recover, false to recover 2833 */ 2834 static bool svm_range_skip_recover(struct svm_range *prange) 2835 { 2836 struct svm_range_list *svms = prange->svms; 2837 2838 spin_lock(&svms->deferred_list_lock); 2839 if (list_empty(&prange->deferred_list) && 2840 list_empty(&prange->child_list)) { 2841 spin_unlock(&svms->deferred_list_lock); 2842 return false; 2843 } 2844 spin_unlock(&svms->deferred_list_lock); 2845 2846 if (prange->work_item.op == SVM_OP_UNMAP_RANGE) { 2847 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] unmapped\n", 2848 svms, prange, prange->start, prange->last); 2849 return true; 2850 } 2851 if (prange->work_item.op == SVM_OP_ADD_RANGE_AND_MAP || 2852 prange->work_item.op == SVM_OP_ADD_RANGE) { 2853 pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] not added yet\n", 2854 svms, prange, prange->start, prange->last); 2855 return true; 2856 } 2857 return false; 2858 } 2859 2860 static void 2861 svm_range_count_fault(struct kfd_node *node, struct kfd_process *p, 2862 int32_t gpuidx) 2863 { 2864 struct kfd_process_device *pdd; 2865 2866 /* fault is on different page of same range 2867 * or fault is skipped to recover later 2868 * or fault is on invalid virtual address 2869 */ 2870 if (gpuidx == MAX_GPU_INSTANCE) { 2871 uint32_t gpuid; 2872 int r; 2873 2874 r = kfd_process_gpuid_from_node(p, node, &gpuid, &gpuidx); 2875 if (r < 0) 2876 return; 2877 } 2878 2879 /* fault is recovered 2880 * or fault cannot recover because GPU no access on the range 2881 */ 2882 pdd = kfd_process_device_from_gpuidx(p, gpuidx); 2883 if (pdd) 2884 WRITE_ONCE(pdd->faults, pdd->faults + 1); 2885 } 2886 2887 static bool 2888 svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) 2889 { 2890 unsigned long requested = VM_READ; 2891 2892 if (write_fault) 2893 requested |= VM_WRITE; 2894 2895 pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested, 2896 vma->vm_flags); 2897 return (vma->vm_flags & requested) == requested; 2898 } 2899 2900 int 2901 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, 2902 uint32_t vmid, uint32_t node_id, 2903 uint64_t addr, bool write_fault) 2904 { 2905 unsigned long start, last, size; 2906 struct mm_struct *mm = NULL; 2907 struct svm_range_list *svms; 2908 struct svm_range *prange; 2909 struct kfd_process *p; 2910 ktime_t timestamp = ktime_get_boottime(); 2911 struct kfd_node *node; 2912 int32_t best_loc; 2913 int32_t gpuidx = MAX_GPU_INSTANCE; 2914 bool write_locked = false; 2915 struct vm_area_struct *vma; 2916 bool migration = false; 2917 int r = 0; 2918 2919 if (!KFD_IS_SVM_API_SUPPORTED(adev)) { 2920 pr_debug("device does not support SVM\n"); 2921 return -EFAULT; 2922 } 2923 2924 p = kfd_lookup_process_by_pasid(pasid); 2925 if (!p) { 2926 pr_debug("kfd process not founded pasid 0x%x\n", pasid); 2927 return 0; 2928 } 2929 svms = &p->svms; 2930 2931 pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr); 2932 2933 if (atomic_read(&svms->drain_pagefaults)) { 2934 pr_debug("draining retry fault, drop fault 0x%llx\n", addr); 2935 r = 0; 2936 goto out; 2937 } 2938 2939 if (!p->xnack_enabled) { 2940 pr_debug("XNACK not enabled for pasid 0x%x\n", pasid); 2941 r = -EFAULT; 2942 goto out; 2943 } 2944 2945 /* p->lead_thread is available as kfd_process_wq_release flush the work 2946 * before releasing task ref. 
2947 */ 2948 mm = get_task_mm(p->lead_thread); 2949 if (!mm) { 2950 pr_debug("svms 0x%p failed to get mm\n", svms); 2951 r = 0; 2952 goto out; 2953 } 2954 2955 node = kfd_node_by_irq_ids(adev, node_id, vmid); 2956 if (!node) { 2957 pr_debug("kfd node does not exist node_id: %d, vmid: %d\n", node_id, 2958 vmid); 2959 r = -EFAULT; 2960 goto out; 2961 } 2962 mmap_read_lock(mm); 2963 retry_write_locked: 2964 mutex_lock(&svms->lock); 2965 prange = svm_range_from_addr(svms, addr, NULL); 2966 if (!prange) { 2967 pr_debug("failed to find prange svms 0x%p address [0x%llx]\n", 2968 svms, addr); 2969 if (!write_locked) { 2970 /* Need the write lock to create new range with MMU notifier. 2971 * Also flush pending deferred work to make sure the interval 2972 * tree is up to date before we add a new range 2973 */ 2974 mutex_unlock(&svms->lock); 2975 mmap_read_unlock(mm); 2976 mmap_write_lock(mm); 2977 write_locked = true; 2978 goto retry_write_locked; 2979 } 2980 prange = svm_range_create_unregistered_range(node, p, mm, addr); 2981 if (!prange) { 2982 pr_debug("failed to create unregistered range svms 0x%p address [0x%llx]\n", 2983 svms, addr); 2984 mmap_write_downgrade(mm); 2985 r = -EFAULT; 2986 goto out_unlock_svms; 2987 } 2988 } 2989 if (write_locked) 2990 mmap_write_downgrade(mm); 2991 2992 mutex_lock(&prange->migrate_mutex); 2993 2994 if (svm_range_skip_recover(prange)) { 2995 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid); 2996 r = 0; 2997 goto out_unlock_range; 2998 } 2999 3000 /* skip duplicate vm fault on different pages of same range */ 3001 if (ktime_before(timestamp, ktime_add_ns(prange->validate_timestamp, 3002 AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING))) { 3003 pr_debug("svms 0x%p [0x%lx %lx] already restored\n", 3004 svms, prange->start, prange->last); 3005 r = 0; 3006 goto out_unlock_range; 3007 } 3008 3009 /* __do_munmap removed VMA, return success as we are handling stale 3010 * retry fault. 3011 */ 3012 vma = vma_lookup(mm, addr << PAGE_SHIFT); 3013 if (!vma) { 3014 pr_debug("address 0x%llx VMA is removed\n", addr); 3015 r = 0; 3016 goto out_unlock_range; 3017 } 3018 3019 if (!svm_fault_allowed(vma, write_fault)) { 3020 pr_debug("fault addr 0x%llx no %s permission\n", addr, 3021 write_fault ? 
"write" : "read"); 3022 r = -EPERM; 3023 goto out_unlock_range; 3024 } 3025 3026 best_loc = svm_range_best_restore_location(prange, node, &gpuidx); 3027 if (best_loc == -1) { 3028 pr_debug("svms %p failed get best restore loc [0x%lx 0x%lx]\n", 3029 svms, prange->start, prange->last); 3030 r = -EACCES; 3031 goto out_unlock_range; 3032 } 3033 3034 pr_debug("svms %p [0x%lx 0x%lx] best restore 0x%x, actual loc 0x%x\n", 3035 svms, prange->start, prange->last, best_loc, 3036 prange->actual_loc); 3037 3038 kfd_smi_event_page_fault_start(node, p->lead_thread->pid, addr, 3039 write_fault, timestamp); 3040 3041 /* Align migration range start and size to granularity size */ 3042 size = 1UL << prange->granularity; 3043 start = max_t(unsigned long, ALIGN_DOWN(addr, size), prange->start); 3044 last = min_t(unsigned long, ALIGN(addr + 1, size) - 1, prange->last); 3045 if (prange->actual_loc != 0 || best_loc != 0) { 3046 migration = true; 3047 3048 if (best_loc) { 3049 r = svm_migrate_to_vram(prange, best_loc, start, last, 3050 mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); 3051 if (r) { 3052 pr_debug("svm_migrate_to_vram failed (%d) at %llx, falling back to system memory\n", 3053 r, addr); 3054 /* Fallback to system memory if migration to 3055 * VRAM failed 3056 */ 3057 if (prange->actual_loc && prange->actual_loc != best_loc) 3058 r = svm_migrate_vram_to_ram(prange, mm, start, last, 3059 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL); 3060 else 3061 r = 0; 3062 } 3063 } else { 3064 r = svm_migrate_vram_to_ram(prange, mm, start, last, 3065 KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, NULL); 3066 } 3067 if (r) { 3068 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", 3069 r, svms, start, last); 3070 goto out_unlock_range; 3071 } 3072 } 3073 3074 r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false, 3075 false, false); 3076 if (r) 3077 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n", 3078 r, svms, start, last); 3079 3080 kfd_smi_event_page_fault_end(node, p->lead_thread->pid, addr, 3081 migration); 3082 3083 out_unlock_range: 3084 mutex_unlock(&prange->migrate_mutex); 3085 out_unlock_svms: 3086 mutex_unlock(&svms->lock); 3087 mmap_read_unlock(mm); 3088 3089 svm_range_count_fault(node, p, gpuidx); 3090 3091 mmput(mm); 3092 out: 3093 kfd_unref_process(p); 3094 3095 if (r == -EAGAIN) { 3096 pr_debug("recover vm fault later\n"); 3097 amdgpu_gmc_filter_faults_remove(node->adev, addr, pasid); 3098 r = 0; 3099 } 3100 return r; 3101 } 3102 3103 int 3104 svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled) 3105 { 3106 struct svm_range *prange, *pchild; 3107 uint64_t reserved_size = 0; 3108 uint64_t size; 3109 int r = 0; 3110 3111 pr_debug("switching xnack from %d to %d\n", p->xnack_enabled, xnack_enabled); 3112 3113 mutex_lock(&p->svms.lock); 3114 3115 list_for_each_entry(prange, &p->svms.list, list) { 3116 svm_range_lock(prange); 3117 list_for_each_entry(pchild, &prange->child_list, child_list) { 3118 size = (pchild->last - pchild->start + 1) << PAGE_SHIFT; 3119 if (xnack_enabled) { 3120 amdgpu_amdkfd_unreserve_mem_limit(NULL, size, 3121 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); 3122 } else { 3123 r = amdgpu_amdkfd_reserve_mem_limit(NULL, size, 3124 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); 3125 if (r) 3126 goto out_unlock; 3127 reserved_size += size; 3128 } 3129 } 3130 3131 size = (prange->last - prange->start + 1) << PAGE_SHIFT; 3132 if (xnack_enabled) { 3133 amdgpu_amdkfd_unreserve_mem_limit(NULL, size, 3134 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); 3135 } else { 3136 r = 
amdgpu_amdkfd_reserve_mem_limit(NULL, size, 3137 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); 3138 if (r) 3139 goto out_unlock; 3140 reserved_size += size; 3141 } 3142 out_unlock: 3143 svm_range_unlock(prange); 3144 if (r) 3145 break; 3146 } 3147 3148 if (r) 3149 amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size, 3150 KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); 3151 else 3152 /* Change xnack mode must be inside svms lock, to avoid race with 3153 * svm_range_deferred_list_work unreserve memory in parallel. 3154 */ 3155 p->xnack_enabled = xnack_enabled; 3156 3157 mutex_unlock(&p->svms.lock); 3158 return r; 3159 } 3160 3161 void svm_range_list_fini(struct kfd_process *p) 3162 { 3163 struct svm_range *prange; 3164 struct svm_range *next; 3165 3166 pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms); 3167 3168 cancel_delayed_work_sync(&p->svms.restore_work); 3169 3170 /* Ensure list work is finished before process is destroyed */ 3171 flush_work(&p->svms.deferred_list_work); 3172 3173 /* 3174 * Ensure no retry fault comes in afterwards, as page fault handler will 3175 * not find kfd process and take mm lock to recover fault. 3176 */ 3177 atomic_inc(&p->svms.drain_pagefaults); 3178 svm_range_drain_retry_fault(&p->svms); 3179 3180 list_for_each_entry_safe(prange, next, &p->svms.list, list) { 3181 svm_range_unlink(prange); 3182 svm_range_remove_notifier(prange); 3183 svm_range_free(prange, true); 3184 } 3185 3186 mutex_destroy(&p->svms.lock); 3187 3188 pr_debug("pasid 0x%x svms 0x%p done\n", p->pasid, &p->svms); 3189 } 3190 3191 int svm_range_list_init(struct kfd_process *p) 3192 { 3193 struct svm_range_list *svms = &p->svms; 3194 int i; 3195 3196 svms->objects = RB_ROOT_CACHED; 3197 mutex_init(&svms->lock); 3198 INIT_LIST_HEAD(&svms->list); 3199 atomic_set(&svms->evicted_ranges, 0); 3200 atomic_set(&svms->drain_pagefaults, 0); 3201 INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work); 3202 INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work); 3203 INIT_LIST_HEAD(&svms->deferred_range_list); 3204 INIT_LIST_HEAD(&svms->criu_svm_metadata_list); 3205 spin_lock_init(&svms->deferred_list_lock); 3206 3207 for (i = 0; i < p->n_pdds; i++) 3208 if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev)) 3209 bitmap_set(svms->bitmap_supported, i, 1); 3210 3211 return 0; 3212 } 3213 3214 /** 3215 * svm_range_check_vm - check if virtual address range mapped already 3216 * @p: current kfd_process 3217 * @start: range start address, in pages 3218 * @last: range last address, in pages 3219 * @bo_s: mapping start address in pages if address range already mapped 3220 * @bo_l: mapping last address in pages if address range already mapped 3221 * 3222 * The purpose is to avoid virtual address ranges already allocated by 3223 * kfd_ioctl_alloc_memory_of_gpu ioctl. 3224 * It looks for each pdd in the kfd_process. 3225 * 3226 * Context: Process context 3227 * 3228 * Return 0 - OK, if the range is not mapped. 3229 * Otherwise error code: 3230 * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu 3231 * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by 3232 * a signal. Release all buffer reservations and return to user-space. 
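 *
 * Called from svm_range_is_valid() and from
 * svm_range_create_unregistered_range() when sizing a range for a retry-fault
 * address.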
3233 */ 3234 static int 3235 svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last, 3236 uint64_t *bo_s, uint64_t *bo_l) 3237 { 3238 struct amdgpu_bo_va_mapping *mapping; 3239 struct interval_tree_node *node; 3240 uint32_t i; 3241 int r; 3242 3243 for (i = 0; i < p->n_pdds; i++) { 3244 struct amdgpu_vm *vm; 3245 3246 if (!p->pdds[i]->drm_priv) 3247 continue; 3248 3249 vm = drm_priv_to_vm(p->pdds[i]->drm_priv); 3250 r = amdgpu_bo_reserve(vm->root.bo, false); 3251 if (r) 3252 return r; 3253 3254 node = interval_tree_iter_first(&vm->va, start, last); 3255 if (node) { 3256 pr_debug("range [0x%llx 0x%llx] already TTM mapped\n", 3257 start, last); 3258 mapping = container_of((struct rb_node *)node, 3259 struct amdgpu_bo_va_mapping, rb); 3260 if (bo_s && bo_l) { 3261 *bo_s = mapping->start; 3262 *bo_l = mapping->last; 3263 } 3264 amdgpu_bo_unreserve(vm->root.bo); 3265 return -EADDRINUSE; 3266 } 3267 amdgpu_bo_unreserve(vm->root.bo); 3268 } 3269 3270 return 0; 3271 } 3272 3273 /** 3274 * svm_range_is_valid - check if virtual address range is valid 3275 * @p: current kfd_process 3276 * @start: range start address, in pages 3277 * @size: range size, in pages 3278 * 3279 * Valid virtual address range means it belongs to one or more VMAs 3280 * 3281 * Context: Process context 3282 * 3283 * Return: 3284 * 0 - OK, otherwise error code 3285 */ 3286 static int 3287 svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size) 3288 { 3289 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP; 3290 struct vm_area_struct *vma; 3291 unsigned long end; 3292 unsigned long start_unchg = start; 3293 3294 start <<= PAGE_SHIFT; 3295 end = start + (size << PAGE_SHIFT); 3296 do { 3297 vma = vma_lookup(p->mm, start); 3298 if (!vma || (vma->vm_flags & device_vma)) 3299 return -EFAULT; 3300 start = min(end, vma->vm_end); 3301 } while (start < end); 3302 3303 return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL, 3304 NULL); 3305 } 3306 3307 /** 3308 * svm_range_best_prefetch_location - decide the best prefetch location 3309 * @prange: svm range structure 3310 * 3311 * For xnack off: 3312 * If range map to single GPU, the best prefetch location is prefetch_loc, which 3313 * can be CPU or GPU. 3314 * 3315 * If range is ACCESS or ACCESS_IN_PLACE by mGPUs, only if mGPU connection on 3316 * XGMI same hive, the best prefetch location is prefetch_loc GPU, othervise 3317 * the best prefetch location is always CPU, because GPU can not have coherent 3318 * mapping VRAM of other GPUs even with large-BAR PCIe connection. 3319 * 3320 * For xnack on: 3321 * If range is not ACCESS_IN_PLACE by mGPUs, the best prefetch location is 3322 * prefetch_loc, other GPU access will generate vm fault and trigger migration. 3323 * 3324 * If range is ACCESS_IN_PLACE by mGPUs, only if mGPU connection on XGMI same 3325 * hive, the best prefetch location is prefetch_loc GPU, otherwise the best 3326 * prefetch location is always CPU. 
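 *
 * Hypothetical example with xnack on: prefetch_loc is GPU A and the range is
 * ACCESS_IN_PLACE on GPUs A and B. If A and B are in the same XGMI hive the
 * result is A; otherwise the result is 0 (CPU).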
3327 *
3328 * Context: Process context
3329 *
3330 * Return:
3331 * 0 for CPU or GPU id
3332 */
3333 static uint32_t
3334 svm_range_best_prefetch_location(struct svm_range *prange)
3335 {
3336 DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
3337 uint32_t best_loc = prange->prefetch_loc;
3338 struct kfd_process_device *pdd;
3339 struct kfd_node *bo_node;
3340 struct kfd_process *p;
3341 uint32_t gpuidx;
3342
3343 p = container_of(prange->svms, struct kfd_process, svms);
3344
3345 if (!best_loc || best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED)
3346 goto out;
3347
3348 bo_node = svm_range_get_node_by_id(prange, best_loc);
3349 if (!bo_node) {
3350 WARN_ONCE(1, "failed to get valid kfd node at id%x\n", best_loc);
3351 best_loc = 0;
3352 goto out;
3353 }
3354
3355 if (bo_node->adev->gmc.is_app_apu ||
3356 bo_node->adev->flags & AMD_IS_APU) {
3357 best_loc = 0;
3358 goto out;
3359 }
3360
3361 if (p->xnack_enabled)
3362 bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
3363 else
3364 bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
3365 MAX_GPU_INSTANCE);
3366
3367 for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
3368 pdd = kfd_process_device_from_gpuidx(p, gpuidx);
3369 if (!pdd) {
3370 pr_debug("failed to get device by idx 0x%x\n", gpuidx);
3371 continue;
3372 }
3373
3374 if (pdd->dev->adev == bo_node->adev)
3375 continue;
3376
3377 if (!svm_nodes_in_same_hive(pdd->dev, bo_node)) {
3378 best_loc = 0;
3379 break;
3380 }
3381 }
3382
3383 out:
3384 pr_debug("xnack %d svms 0x%p [0x%lx 0x%lx] best loc 0x%x\n",
3385 p->xnack_enabled, &p->svms, prange->start, prange->last,
3386 best_loc);
3387
3388 return best_loc;
3389 }
3390
3391 /* svm_range_trigger_migration - start page migration if prefetch loc changed
3392 * @mm: current process mm_struct
3393 * @prange: svm range structure
3394 * @migrated: output, true if migration is triggered
3395 *
3396 * If the range prefetch_loc is a GPU and the actual loc is CPU (0), migrate the
3397 * range from ram to vram.
3398 * If the range prefetch_loc is CPU (0) and the actual loc is a GPU, migrate the
3399 * range from vram to ram.
3400 *
3401 * If GPU vm fault retry is not enabled, migration interacts with the MMU
3402 * notifier and the restore work:
3403 * 1. migrate_vma_setup invalidates pages, the MMU notifier callback svm_range_evict
3404 * stops all queues and schedules the restore work
3405 * 2. svm_range_restore_work waits for the migration to finish by
3406 * a. svm_range_validate_vram taking prange->migrate_mutex
3407 * b. svm_range_validate_ram HMM get pages waiting for the CPU fault handler to return
3408 * 3. the restore work updates the GPU mappings and resumes all queues.
3409 *
3410 * Context: Process context
3411 *
3412 * Return:
3413 * 0 - OK, otherwise - error code of migration
3414 */
3415 static int
3416 svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
3417 bool *migrated)
3418 {
3419 uint32_t best_loc;
3420 int r = 0;
3421
3422 *migrated = false;
3423 best_loc = svm_range_best_prefetch_location(prange);
3424
3425 /* Even when best_loc is a GPU node and equal to prange->actual_loc, we
3426 * still need to migrate, because prange->actual_loc != 0 does not mean
3427 * all pages in prange are in VRAM. hmm migrate will pick
3428 * up the right pages during migration.
3429 */ 3430 if ((best_loc == KFD_IOCTL_SVM_LOCATION_UNDEFINED) || 3431 (best_loc == 0 && prange->actual_loc == 0)) 3432 return 0; 3433 3434 if (!best_loc) { 3435 r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last, 3436 KFD_MIGRATE_TRIGGER_PREFETCH, NULL); 3437 *migrated = !r; 3438 return r; 3439 } 3440 3441 r = svm_migrate_to_vram(prange, best_loc, prange->start, prange->last, 3442 mm, KFD_MIGRATE_TRIGGER_PREFETCH); 3443 *migrated = !r; 3444 3445 return 0; 3446 } 3447 3448 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence) 3449 { 3450 /* Dereferencing fence->svm_bo is safe here because the fence hasn't 3451 * signaled yet and we're under the protection of the fence->lock. 3452 * After the fence is signaled in svm_range_bo_release, we cannot get 3453 * here any more. 3454 * 3455 * Reference is dropped in svm_range_evict_svm_bo_worker. 3456 */ 3457 if (svm_bo_ref_unless_zero(fence->svm_bo)) { 3458 WRITE_ONCE(fence->svm_bo->evicting, 1); 3459 schedule_work(&fence->svm_bo->eviction_work); 3460 } 3461 3462 return 0; 3463 } 3464 3465 static void svm_range_evict_svm_bo_worker(struct work_struct *work) 3466 { 3467 struct svm_range_bo *svm_bo; 3468 struct mm_struct *mm; 3469 int r = 0; 3470 3471 svm_bo = container_of(work, struct svm_range_bo, eviction_work); 3472 3473 if (mmget_not_zero(svm_bo->eviction_fence->mm)) { 3474 mm = svm_bo->eviction_fence->mm; 3475 } else { 3476 svm_range_bo_unref(svm_bo); 3477 return; 3478 } 3479 3480 mmap_read_lock(mm); 3481 spin_lock(&svm_bo->list_lock); 3482 while (!list_empty(&svm_bo->range_list) && !r) { 3483 struct svm_range *prange = 3484 list_first_entry(&svm_bo->range_list, 3485 struct svm_range, svm_bo_list); 3486 int retries = 3; 3487 3488 list_del_init(&prange->svm_bo_list); 3489 spin_unlock(&svm_bo->list_lock); 3490 3491 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, 3492 prange->start, prange->last); 3493 3494 mutex_lock(&prange->migrate_mutex); 3495 do { 3496 /* migrate all vram pages in this prange to sys ram 3497 * after that prange->actual_loc should be zero 3498 */ 3499 r = svm_migrate_vram_to_ram(prange, mm, 3500 prange->start, prange->last, 3501 KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL); 3502 } while (!r && prange->actual_loc && --retries); 3503 3504 if (!r && prange->actual_loc) 3505 pr_info_once("Migration failed during eviction"); 3506 3507 if (!prange->actual_loc) { 3508 mutex_lock(&prange->lock); 3509 prange->svm_bo = NULL; 3510 mutex_unlock(&prange->lock); 3511 } 3512 mutex_unlock(&prange->migrate_mutex); 3513 3514 spin_lock(&svm_bo->list_lock); 3515 } 3516 spin_unlock(&svm_bo->list_lock); 3517 mmap_read_unlock(mm); 3518 mmput(mm); 3519 3520 dma_fence_signal(&svm_bo->eviction_fence->base); 3521 3522 /* This is the last reference to svm_bo, after svm_range_vram_node_free 3523 * has been called in svm_migrate_vram_to_ram 3524 */ 3525 WARN_ONCE(!r && kref_read(&svm_bo->kref) != 1, "This was not the last reference\n"); 3526 svm_range_bo_unref(svm_bo); 3527 } 3528 3529 static int 3530 svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm, 3531 uint64_t start, uint64_t size, uint32_t nattr, 3532 struct kfd_ioctl_svm_attribute *attrs) 3533 { 3534 struct amdkfd_process_info *process_info = p->kgd_process_info; 3535 struct list_head update_list; 3536 struct list_head insert_list; 3537 struct list_head remove_list; 3538 struct list_head remap_list; 3539 struct svm_range_list *svms; 3540 struct svm_range *prange; 3541 struct svm_range *next; 3542 bool update_mapping = false; 3543 bool flush_tlb; 3544 int 
r, ret = 0; 3545 3546 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n", 3547 p->pasid, &p->svms, start, start + size - 1, size); 3548 3549 r = svm_range_check_attr(p, nattr, attrs); 3550 if (r) 3551 return r; 3552 3553 svms = &p->svms; 3554 3555 mutex_lock(&process_info->lock); 3556 3557 svm_range_list_lock_and_flush_work(svms, mm); 3558 3559 r = svm_range_is_valid(p, start, size); 3560 if (r) { 3561 pr_debug("invalid range r=%d\n", r); 3562 mmap_write_unlock(mm); 3563 goto out; 3564 } 3565 3566 mutex_lock(&svms->lock); 3567 3568 /* Add new range and split existing ranges as needed */ 3569 r = svm_range_add(p, start, size, nattr, attrs, &update_list, 3570 &insert_list, &remove_list, &remap_list); 3571 if (r) { 3572 mutex_unlock(&svms->lock); 3573 mmap_write_unlock(mm); 3574 goto out; 3575 } 3576 /* Apply changes as a transaction */ 3577 list_for_each_entry_safe(prange, next, &insert_list, list) { 3578 svm_range_add_to_svms(prange); 3579 svm_range_add_notifier_locked(mm, prange); 3580 } 3581 list_for_each_entry(prange, &update_list, update_list) { 3582 svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping); 3583 /* TODO: unmap ranges from GPU that lost access */ 3584 } 3585 list_for_each_entry_safe(prange, next, &remove_list, update_list) { 3586 pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n", 3587 prange->svms, prange, prange->start, 3588 prange->last); 3589 svm_range_unlink(prange); 3590 svm_range_remove_notifier(prange); 3591 svm_range_free(prange, false); 3592 } 3593 3594 mmap_write_downgrade(mm); 3595 /* Trigger migrations and revalidate and map to GPUs as needed. If 3596 * this fails we may be left with partially completed actions. There 3597 * is no clean way of rolling back to the previous state in such a 3598 * case because the rollback wouldn't be guaranteed to work either. 
3599 */ 3600 list_for_each_entry(prange, &update_list, update_list) { 3601 bool migrated; 3602 3603 mutex_lock(&prange->migrate_mutex); 3604 3605 r = svm_range_trigger_migration(mm, prange, &migrated); 3606 if (r) 3607 goto out_unlock_range; 3608 3609 if (migrated && (!p->xnack_enabled || 3610 (prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)) && 3611 prange->mapped_to_gpu) { 3612 pr_debug("restore_work will update mappings of GPUs\n"); 3613 mutex_unlock(&prange->migrate_mutex); 3614 continue; 3615 } 3616 3617 if (!migrated && !update_mapping) { 3618 mutex_unlock(&prange->migrate_mutex); 3619 continue; 3620 } 3621 3622 flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu; 3623 3624 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, 3625 MAX_GPU_INSTANCE, true, true, flush_tlb); 3626 if (r) 3627 pr_debug("failed %d to map svm range\n", r); 3628 3629 out_unlock_range: 3630 mutex_unlock(&prange->migrate_mutex); 3631 if (r) 3632 ret = r; 3633 } 3634 3635 list_for_each_entry(prange, &remap_list, update_list) { 3636 pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n", 3637 prange, prange->start, prange->last); 3638 mutex_lock(&prange->migrate_mutex); 3639 r = svm_range_validate_and_map(mm, prange->start, prange->last, prange, 3640 MAX_GPU_INSTANCE, true, true, prange->mapped_to_gpu); 3641 if (r) 3642 pr_debug("failed %d on remap svm range\n", r); 3643 mutex_unlock(&prange->migrate_mutex); 3644 if (r) 3645 ret = r; 3646 } 3647 3648 dynamic_svm_range_dump(svms); 3649 3650 mutex_unlock(&svms->lock); 3651 mmap_read_unlock(mm); 3652 out: 3653 mutex_unlock(&process_info->lock); 3654 3655 pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid, 3656 &p->svms, start, start + size - 1, r); 3657 3658 return ret ? ret : r; 3659 } 3660 3661 static int 3662 svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm, 3663 uint64_t start, uint64_t size, uint32_t nattr, 3664 struct kfd_ioctl_svm_attribute *attrs) 3665 { 3666 DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE); 3667 DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE); 3668 bool get_preferred_loc = false; 3669 bool get_prefetch_loc = false; 3670 bool get_granularity = false; 3671 bool get_accessible = false; 3672 bool get_flags = false; 3673 uint64_t last = start + size - 1UL; 3674 uint8_t granularity = 0xff; 3675 struct interval_tree_node *node; 3676 struct svm_range_list *svms; 3677 struct svm_range *prange; 3678 uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 3679 uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED; 3680 uint32_t flags_and = 0xffffffff; 3681 uint32_t flags_or = 0; 3682 int gpuidx; 3683 uint32_t i; 3684 int r = 0; 3685 3686 pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start, 3687 start + size - 1, nattr); 3688 3689 /* Flush pending deferred work to avoid racing with deferred actions from 3690 * previous memory map changes (e.g. munmap). Concurrent memory map changes 3691 * can still race with get_attr because we don't hold the mmap lock. But that 3692 * would be a race condition in the application anyway, and undefined 3693 * behaviour is acceptable in that case. 
int kfd_criu_resume_svm(struct kfd_process *p)
{
	struct kfd_ioctl_svm_attribute *set_attr_new, *set_attr = NULL;
	int nattr_common = 4, nattr_accessibility = 1;
	struct criu_svm_metadata *criu_svm_md = NULL;
	struct svm_range_list *svms = &p->svms;
	struct criu_svm_metadata *next = NULL;
	uint32_t set_flags = 0xffffffff;
	int i, j, num_attrs, ret = 0;
	uint64_t set_attr_size;
	struct mm_struct *mm;

	if (list_empty(&svms->criu_svm_metadata_list)) {
		pr_debug("No SVM data from CRIU restore stage 2\n");
		return ret;
	}

	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_err("failed to get mm for the target process\n");
		return -ESRCH;
	}

	num_attrs = nattr_common + (nattr_accessibility * p->n_pdds);

	i = j = 0;
	list_for_each_entry(criu_svm_md, &svms->criu_svm_metadata_list, list) {
		pr_debug("criu_svm_md[%d]\n\tstart: 0x%llx size: 0x%llx (npages)\n",
			 i, criu_svm_md->data.start_addr, criu_svm_md->data.size);

		for (j = 0; j < num_attrs; j++) {
			pr_debug("\ncriu_svm_md[%d]->attrs[%d].type : 0x%x\ncriu_svm_md[%d]->attrs[%d].value : 0x%x\n",
				 i, j, criu_svm_md->data.attrs[j].type,
				 i, j, criu_svm_md->data.attrs[j].value);
			switch (criu_svm_md->data.attrs[j].type) {
			/* During checkpoint, the query for the
			 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC attribute may return
			 * KFD_IOCTL_SVM_LOCATION_UNDEFINED if it was not used
			 * by the range that was checkpointed. Take care not to
			 * restore such an invalid value, otherwise the gpuidx
			 * would be invalid and set_attr would eventually fail;
			 * replace it with a harmless dummy attribute such as
			 * KFD_IOCTL_SVM_ATTR_SET_FLAGS.
			 */
			case KFD_IOCTL_SVM_ATTR_PREFETCH_LOC:
				if (criu_svm_md->data.attrs[j].value ==
				    KFD_IOCTL_SVM_LOCATION_UNDEFINED) {
					criu_svm_md->data.attrs[j].type =
						KFD_IOCTL_SVM_ATTR_SET_FLAGS;
					criu_svm_md->data.attrs[j].value = 0;
				}
				break;
			case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
				set_flags = criu_svm_md->data.attrs[j].value;
				break;
			default:
				break;
			}
		}

		/* CLR_FLAGS is not available via get_attr during checkpoint,
		 * but it needs to be restored together with the other
		 * attributes, so allocate extra space for it before calling
		 * set_attr.
		 */
		set_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
						(num_attrs + 1);
		set_attr_new = krealloc(set_attr, set_attr_size,
					GFP_KERNEL);
		if (!set_attr_new) {
			ret = -ENOMEM;
			goto exit;
		}
		set_attr = set_attr_new;

		memcpy(set_attr, criu_svm_md->data.attrs, num_attrs *
					sizeof(struct kfd_ioctl_svm_attribute));
		set_attr[num_attrs].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;
		set_attr[num_attrs].value = ~set_flags;

		ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
					 criu_svm_md->data.size, num_attrs + 1,
					 set_attr);
		if (ret) {
			pr_err("CRIU: failed to set range attributes\n");
			goto exit;
		}

		i++;
	}
exit:
	kfree(set_attr);
	list_for_each_entry_safe(criu_svm_md, next, &svms->criu_svm_metadata_list, list) {
		pr_debug("freeing criu_svm_md[]\n\tstart: 0x%llx\n",
			 criu_svm_md->data.start_addr);
		kfree(criu_svm_md);
	}

	mmput(mm);
	return ret;
}

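/**
 * kfd_criu_restore_svm - collect one checkpointed SVM range from CRIU data
 * @p: the process being restored
 * @user_priv_ptr: user pointer to the CRIU private data blob
 * @priv_data_offset: current read offset into the blob, advanced on success
 * @max_priv_data_size: total size of the private data blob
 *
 * Copy a single kfd_criu_svm_range_priv_data record (including its
 * attributes) from user space and queue it on svms->criu_svm_metadata_list.
 * The attributes are applied later by kfd_criu_resume_svm().
 *
 * Return: 0 on success, negative errno on failure.
 */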
int kfd_criu_restore_svm(struct kfd_process *p,
			 uint8_t __user *user_priv_ptr,
			 uint64_t *priv_data_offset,
			 uint64_t max_priv_data_size)
{
	uint64_t svm_priv_data_size, svm_object_md_size, svm_attrs_size;
	int nattr_common = 4, nattr_accessibility = 1;
	struct criu_svm_metadata *criu_svm_md = NULL;
	struct svm_range_list *svms = &p->svms;
	uint32_t num_devices;
	int ret = 0;

	num_devices = p->n_pdds;
	/* Handle one SVM range object at a time. The number of GPUs is
	 * assumed to be the same on the restore node; this must be checked
	 * earlier while evaluating the topology.
	 */

	svm_attrs_size = sizeof(struct kfd_ioctl_svm_attribute) *
		(nattr_common + nattr_accessibility * num_devices);
	svm_object_md_size = sizeof(struct criu_svm_metadata) + svm_attrs_size;

	svm_priv_data_size = sizeof(struct kfd_criu_svm_range_priv_data) +
							 svm_attrs_size;

	criu_svm_md = kzalloc(svm_object_md_size, GFP_KERNEL);
	if (!criu_svm_md) {
		pr_err("failed to allocate memory to store svm metadata\n");
		return -ENOMEM;
	}
	if (*priv_data_offset + svm_priv_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	ret = copy_from_user(&criu_svm_md->data, user_priv_ptr + *priv_data_offset,
			     svm_priv_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}
	*priv_data_offset += svm_priv_data_size;

	list_add_tail(&criu_svm_md->list, &svms->criu_svm_metadata_list);

	return 0;

exit:
	kfree(criu_svm_md);
	return ret;
}

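/**
 * svm_range_get_info - report the checkpoint footprint of the SVM list
 * @p: the process to be checkpointed
 * @num_svm_ranges: returns the number of SVM ranges in the process
 * @svm_priv_data_size: returns the buffer size needed to checkpoint them
 *
 * Each range is stored as one kfd_criu_svm_range_priv_data record followed
 * by the common attributes and one accessibility attribute per GPU.
 *
 * Return: 0 on success, negative errno on failure.
 */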
int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges,
		       uint64_t *svm_priv_data_size)
{
	uint64_t total_size, accessibility_size, common_attr_size;
	int nattr_common = 4, nattr_accessibility = 1;
	int num_devices = p->n_pdds;
	struct svm_range_list *svms;
	struct svm_range *prange;
	uint32_t count = 0;

	*svm_priv_data_size = 0;

	svms = &p->svms;
	if (!svms)
		return -EINVAL;

	mutex_lock(&svms->lock);
	list_for_each_entry(prange, &svms->list, list) {
		pr_debug("prange: 0x%p start: 0x%lx\t npages: 0x%llx\t end: 0x%llx\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1);
		count++;
	}
	mutex_unlock(&svms->lock);

	*num_svm_ranges = count;
	/* Only the accessibility attributes need to be queried for all the
	 * GPUs individually; the remaining ones span the entire process
	 * regardless of the various GPU nodes. Of the remaining attributes,
	 * KFD_IOCTL_SVM_ATTR_CLR_FLAGS need not be saved.
	 *
	 * KFD_IOCTL_SVM_ATTR_PREFERRED_LOC
	 * KFD_IOCTL_SVM_ATTR_PREFETCH_LOC
	 * KFD_IOCTL_SVM_ATTR_SET_FLAGS
	 * KFD_IOCTL_SVM_ATTR_GRANULARITY
	 *
	 * ** ACCESSIBILITY ATTRIBUTES **
	 * (Considered as one, type is altered during query, value is gpuid)
	 * KFD_IOCTL_SVM_ATTR_ACCESS
	 * KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE
	 * KFD_IOCTL_SVM_ATTR_NO_ACCESS
	 */
	if (*num_svm_ranges > 0) {
		common_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
			nattr_common;
		accessibility_size = sizeof(struct kfd_ioctl_svm_attribute) *
			nattr_accessibility * num_devices;

		total_size = sizeof(struct kfd_criu_svm_range_priv_data) +
			common_attr_size + accessibility_size;

		*svm_priv_data_size = *num_svm_ranges * total_size;
	}

	pr_debug("num_svm_ranges %u total_priv_size %llu\n", *num_svm_ranges,
		 *svm_priv_data_size);
	return 0;
}

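/**
 * kfd_criu_checkpoint_svm - write SVM range state into the CRIU data blob
 * @p: the process being checkpointed
 * @user_priv_data: user pointer to the CRIU private data blob
 * @priv_data_offset: current write offset into the blob, advanced per range
 *
 * For every SVM range in the process, query the current attributes with
 * svm_range_get_attr() and copy one kfd_criu_svm_range_priv_data record
 * to user space.
 *
 * Return: 0 on success, negative errno on failure.
 */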
int kfd_criu_checkpoint_svm(struct kfd_process *p,
			    uint8_t __user *user_priv_data,
			    uint64_t *priv_data_offset)
{
	struct kfd_criu_svm_range_priv_data *svm_priv = NULL;
	struct kfd_ioctl_svm_attribute *query_attr = NULL;
	uint64_t svm_priv_data_size, query_attr_size = 0;
	int index, nattr_common = 4, ret = 0;
	struct svm_range_list *svms;
	int num_devices = p->n_pdds;
	struct svm_range *prange;
	struct mm_struct *mm;

	svms = &p->svms;
	if (!svms)
		return -EINVAL;

	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		pr_err("failed to get mm for the target process\n");
		return -ESRCH;
	}

	query_attr_size = sizeof(struct kfd_ioctl_svm_attribute) *
				(nattr_common + num_devices);

	query_attr = kzalloc(query_attr_size, GFP_KERNEL);
	if (!query_attr) {
		ret = -ENOMEM;
		goto exit;
	}

	query_attr[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
	query_attr[1].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
	query_attr[2].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
	query_attr[3].type = KFD_IOCTL_SVM_ATTR_GRANULARITY;

	for (index = 0; index < num_devices; index++) {
		struct kfd_process_device *pdd = p->pdds[index];

		query_attr[index + nattr_common].type =
			KFD_IOCTL_SVM_ATTR_ACCESS;
		query_attr[index + nattr_common].value = pdd->user_gpu_id;
	}

	svm_priv_data_size = sizeof(*svm_priv) + query_attr_size;

	svm_priv = kzalloc(svm_priv_data_size, GFP_KERNEL);
	if (!svm_priv) {
		ret = -ENOMEM;
		goto exit_query;
	}

	index = 0;
	list_for_each_entry(prange, &svms->list, list) {
		svm_priv->object_type = KFD_CRIU_OBJECT_TYPE_SVM_RANGE;
		svm_priv->start_addr = prange->start;
		svm_priv->size = prange->npages;
		memcpy(&svm_priv->attrs, query_attr, query_attr_size);
		pr_debug("CRIU: prange: 0x%p start: 0x%lx\t npages: 0x%llx end: 0x%llx\t size: 0x%llx\n",
			 prange, prange->start, prange->npages,
			 prange->start + prange->npages - 1,
			 prange->npages * PAGE_SIZE);

		ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
					 svm_priv->size,
					 (nattr_common + num_devices),
					 svm_priv->attrs);
		if (ret) {
			pr_err("CRIU: failed to obtain range attributes\n");
			goto exit_priv;
		}

		if (copy_to_user(user_priv_data + *priv_data_offset, svm_priv,
				 svm_priv_data_size)) {
			pr_err("Failed to copy svm priv to user\n");
			ret = -EFAULT;
			goto exit_priv;
		}

		*priv_data_offset += svm_priv_data_size;
	}

exit_priv:
	kfree(svm_priv);
exit_query:
	kfree(query_attr);
exit:
	mmput(mm);
	return ret;
}

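/**
 * svm_ioctl - dispatch a KFD SVM ioctl operation
 * @p: the calling process
 * @op: KFD_IOCTL_SVM_OP_SET_ATTR or KFD_IOCTL_SVM_OP_GET_ATTR
 * @start: start address of the range, in bytes
 * @size: size of the range, in bytes
 * @nattrs: number of attributes in @attrs
 * @attrs: attributes to set or query
 *
 * Convert @start and @size to pages and forward the request to
 * svm_range_set_attr() or svm_range_get_attr().
 *
 * Return: 0 on success, negative errno on failure.
 */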
int
svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
	  uint64_t size, uint32_t nattrs, struct kfd_ioctl_svm_attribute *attrs)
{
	struct mm_struct *mm = current->mm;
	int r;

	start >>= PAGE_SHIFT;
	size >>= PAGE_SHIFT;

	switch (op) {
	case KFD_IOCTL_SVM_OP_SET_ATTR:
		r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
		break;
	case KFD_IOCTL_SVM_OP_GET_ATTR:
		r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}