/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/drm_exec.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_res_cursor.h"
#include "kfd_svm.h"

/**
 * DOC: GPUVM
 *
 * GPUVM is the MMU functionality provided on the GPU.
 * GPUVM is similar to the legacy GART on older asics, however
 * rather than there being a single global GART table
 * for the entire GPU, there can be multiple GPUVM page tables active
 * at any given time.  The GPUVM page tables can contain a mix of
 * VRAM pages and system pages (both memory and MMIO) and system pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 *
 * Each active GPUVM has an ID associated with it and there is a page table
 * linked with each VMID.  When executing a command buffer,
 * the kernel tells the engine what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * The hardware supports up to 16 active GPUVMs at any given time.
 *
 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
 * on the ASIC family.  GPUVM supports RWX attributes on each page as well
 * as other features such as encryption and caching attributes.
 *
 * VMID 0 is special.  It is the GPUVM used for the kernel driver.  In
 * addition to an aperture managed by a page table, VMID 0 also has
 * several other apertures.  There is an aperture for direct access to VRAM
 * and there is a legacy AGP aperture which just forwards accesses directly
 * to the matching system physical addresses (or IOVAs when an IOMMU is
 * present).
 * These apertures provide direct access to these memories without
 * incurring the overhead of a page table.  VMID 0 is used by the kernel
 * driver for tasks like memory management.
 *
 * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
 * For user applications, each application can have their own unique GPUVM
 * address space.  The application manages the address space and the kernel
 * driver manages the GPUVM page tables for each process.  If a GPU client
 * accesses an invalid page, it will generate a GPU page fault, similar to
 * accessing an invalid page on a CPU.
 */

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)

INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
		     START, LAST, static, amdgpu_vm_it)

#undef START
#undef LAST

/**
 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
 */
struct amdgpu_prt_cb {

	/**
	 * @adev: amdgpu device
	 */
	struct amdgpu_device *adev;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
 */
struct amdgpu_vm_tlb_seq_struct {
	/**
	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
	 */
	struct amdgpu_vm *vm;

	/**
	 * @cb: callback
	 */
	struct dma_fence_cb cb;
};

/**
 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: amdgpu_vm pointer
 * @pasid: the pasid the VM is using on this GPU
 *
 * Set the pasid this VM is using on this GPU, can also be used to remove the
 * pasid by passing in zero.
 */
int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			u32 pasid)
{
	int r;

	if (vm->pasid == pasid)
		return 0;

	if (vm->pasid) {
		r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
		if (r < 0)
			return r;

		vm->pasid = 0;
	}

	if (pasid) {
		r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
					GFP_KERNEL));
		if (r < 0)
			return r;

		vm->pasid = pasid;
	}

	return 0;
}

/**
 * amdgpu_vm_bo_evicted - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for PDs/PTs and per VM BOs which are not at the location they should
 * be.
 */
static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
	struct amdgpu_vm *vm = vm_bo->vm;
	struct amdgpu_bo *bo = vm_bo->bo;

	vm_bo->moved = true;
	spin_lock(&vm_bo->vm->status_lock);
	if (bo->tbo.type == ttm_bo_type_kernel)
		list_move(&vm_bo->vm_status, &vm->evicted);
	else
		list_move_tail(&vm_bo->vm_status, &vm->evicted);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_moved - vm_bo is moved
 *
 * @vm_bo: vm_bo which is moved
 *
 * State for per VM BOs which are moved, but that change is not yet reflected
 * in the page tables.
 */
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_idle - vm_bo is idle
 *
 * @vm_bo: vm_bo which is now idle
 *
 * State for PDs/PTs and per VM BOs which have gone through the state machine
 * and are now idle.
 */
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
	spin_unlock(&vm_bo->vm->status_lock);
	vm_bo->moved = false;
}

/**
 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
 *
 * @vm_bo: vm_bo which is now invalidated
 *
 * State for normal BOs which are invalidated and whose change is not yet
 * reflected in the PTs.
 */
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_evicted_user - vm_bo is evicted
 *
 * @vm_bo: vm_bo which is evicted
 *
 * State for BOs used by user mode queues which are not at the location they
 * should be.
 */
static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
{
	vm_bo->moved = true;
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_relocated - vm_bo is relocated
 *
 * @vm_bo: vm_bo which is relocated
 *
 * State for PDs/PTs which need to update their parent PD.
 * For the root PD, just move to idle state.
 */
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
	if (vm_bo->bo->parent) {
		spin_lock(&vm_bo->vm->status_lock);
		list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
		spin_unlock(&vm_bo->vm->status_lock);
	} else {
		amdgpu_vm_bo_idle(vm_bo);
	}
}

/**
 * amdgpu_vm_bo_done - vm_bo is done
 *
 * @vm_bo: vm_bo which is now done
 *
 * State for normal BOs which are invalidated and whose change has been updated
 * in the PTs.
 */
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, &vm_bo->vm->done);
	spin_unlock(&vm_bo->vm->status_lock);
}

/**
 * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
 * @vm: the VM whose state machine to reset
 *
 * Move all vm_bo objects in the VM into a state where they will be updated
 * again during validation.
 */
static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
	struct amdgpu_vm_bo_base *vm_bo, *tmp;

	spin_lock(&vm->status_lock);
	list_splice_init(&vm->done, &vm->invalidated);
	list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
		vm_bo->moved = true;
	list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
		struct amdgpu_bo *bo = vm_bo->bo;

		vm_bo->moved = true;
		if (!bo || bo->tbo.type != ttm_bo_type_kernel)
			list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
		else if (bo->parent)
			list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
	}
	spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
 *
 * @base: base structure for tracking BO usage in a VM
 * @vm: vm to which bo is to be added
 * @bo: amdgpu buffer object
 *
 * Initialize a bo_va_base structure and add it to the appropriate lists
 *
 */
void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
			    struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
	base->vm = vm;
	base->bo = bo;
	base->next = NULL;
	INIT_LIST_HEAD(&base->vm_status);

	if (!bo)
		return;
	base->next = bo->vm_bo;
	bo->vm_bo = base;

	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
		return;

	dma_resv_assert_held(vm->root.bo->tbo.base.resv);

	ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
	if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
		amdgpu_vm_bo_relocated(base);
	else
		amdgpu_vm_bo_idle(base);

	if (bo->preferred_domains &
	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
		return;

	/*
	 * We checked all the prerequisites, but it looks like this per VM BO
	 * is currently evicted.  Add the BO to the evicted list to make sure
	 * it is validated on next VM use to avoid a fault.
	 */
	amdgpu_vm_bo_evicted(base);
}

/**
 * amdgpu_vm_lock_pd - lock PD in drm_exec
 *
 * @vm: vm providing the BOs
 * @exec: drm execution context
 * @num_fences: number of extra fences to reserve
 *
 * Lock the VM root PD in the DRM execution context.
 */
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
		      unsigned int num_fences)
{
	/* We need at least two fences for the VM PD/PT updates */
	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
				    2 + num_fences);
}

/**
 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 *
 * Move all BOs to the end of LRU and remember their positions to put them
 * together.
 */
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
	spin_lock(&adev->mman.bdev.lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&adev->mman.bdev.lru_lock);
}

/* Create scheduler entities for page table updates */
static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm)
{
	int r;

	r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
				  adev->vm_manager.vm_pte_scheds,
				  adev->vm_manager.vm_pte_num_scheds, NULL);
	if (r)
		goto error;

	return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
				     adev->vm_manager.vm_pte_scheds,
				     adev->vm_manager.vm_pte_num_scheds, NULL);

error:
	drm_sched_entity_destroy(&vm->immediate);
	return r;
}

/* Destroy the entities for page table updates again */
static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
{
	drm_sched_entity_destroy(&vm->immediate);
	drm_sched_entity_destroy(&vm->delayed);
}

/**
 * amdgpu_vm_generation - return the page table re-generation counter
 * @adev: the amdgpu_device
 * @vm: optional VM to check, might be NULL
 *
 * Returns a page table re-generation token to allow checking if submissions
 * are still valid to use this VM.  The VM parameter might be NULL in which
 * case just the VRAM lost counter will be used.
 */
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;

	if (!vm)
		return result;

	result += vm->generation;
	/* Add one if the page tables will be re-generated on next CS */
	if (drm_sched_entity_error(&vm->delayed))
		++result;

	return result;
}

/**
 * amdgpu_vm_validate - validate evicted BOs tracked in the VM
 *
 * @adev: amdgpu device pointer
 * @vm: vm providing the BOs
 * @ticket: optional reservation ticket used to reserve the VM
 * @validate: callback to do the validation
 * @param: parameter for the validation callback
 *
 * Validate the page table BOs and per-VM BOs on command submission if
 * necessary.  If a ticket is given, also try to validate evicted user queue
 * BOs.  They must already be reserved with the given ticket.
 *
 * Returns:
 * Validation result.
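 *
 * Illustrative sketch (hypothetical caller, error handling omitted; not taken
 * verbatim from the CS code): the actual validation work is supplied through
 * the @validate callback:
 *
 *	static int my_validate(void *p, struct amdgpu_bo *bo)
 *	{
 *		(move @bo into one of its allowed domains here)
 *		return 0;
 *	}
 *
 *	r = amdgpu_vm_validate(adev, vm, NULL, my_validate, NULL);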
 */
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		       struct ww_acquire_ctx *ticket,
		       int (*validate)(void *p, struct amdgpu_bo *bo),
		       void *param)
{
	struct amdgpu_vm_bo_base *bo_base;
	struct amdgpu_bo *shadow;
	struct amdgpu_bo *bo;
	int r;

	if (drm_sched_entity_error(&vm->delayed)) {
		++vm->generation;
		amdgpu_vm_bo_reset_state_machine(vm);
		amdgpu_vm_fini_entities(vm);
		r = amdgpu_vm_init_entities(adev, vm);
		if (r)
			return r;
	}

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->evicted)) {
		bo_base = list_first_entry(&vm->evicted,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_base->bo;
		shadow = amdgpu_bo_shadowed(bo);

		r = validate(param, bo);
		if (r)
			return r;
		if (shadow) {
			r = validate(param, shadow);
			if (r)
				return r;
		}

		if (bo->tbo.type != ttm_bo_type_kernel) {
			amdgpu_vm_bo_moved(bo_base);
		} else {
			vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
			amdgpu_vm_bo_relocated(bo_base);
		}
		spin_lock(&vm->status_lock);
	}
	while (ticket && !list_empty(&vm->evicted_user)) {
		bo_base = list_first_entry(&vm->evicted_user,
					   struct amdgpu_vm_bo_base,
					   vm_status);
		spin_unlock(&vm->status_lock);

		bo = bo_base->bo;

		if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
			pr_warn_ratelimited("Evicted user BO is not reserved in pid %d\n",
					    vm->task_info.pid);
			return -EINVAL;
		}

		r = validate(param, bo);
		if (r)
			return r;

		amdgpu_vm_bo_invalidated(bo_base);

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	amdgpu_vm_eviction_lock(vm);
	vm->evicting = false;
	amdgpu_vm_eviction_unlock(vm);

	return 0;
}

/**
 * amdgpu_vm_ready - check VM is ready for updates
 *
 * @vm: VM to check
 *
 * Check if all VM PDs/PTs are ready for updates
 *
 * Returns:
 * True if VM is not evicting.
 */
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
	bool empty;
	bool ret;

	amdgpu_vm_eviction_lock(vm);
	ret = !vm->evicting;
	amdgpu_vm_eviction_unlock(vm);

	spin_lock(&vm->status_lock);
	empty = list_empty(&vm->evicted);
	spin_unlock(&vm->status_lock);

	return ret && empty;
}

/**
 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
 *
 * @adev: amdgpu_device pointer
 */
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
{
	const struct amdgpu_ip_block *ip_block;
	bool has_compute_vm_bug;
	struct amdgpu_ring *ring;
	int i;

	has_compute_vm_bug = false;

	ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
	if (ip_block) {
		/* Compute has a VM bug for GFX version < 7.
		 * Compute has a VM bug for GFX 8 MEC firmware version < 673.
		 */
		if (ip_block->version->major <= 7)
			has_compute_vm_bug = true;
		else if (ip_block->version->major == 8)
			if (adev->gfx.mec_fw_version < 673)
				has_compute_vm_bug = true;
	}

	for (i = 0; i < adev->num_rings; i++) {
		ring = adev->rings[i];
		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
			/* only compute rings */
			ring->has_compute_vm_bug = has_compute_vm_bug;
		else
			ring->has_compute_vm_bug = false;
	}
}

/**
 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
 *
 * @ring: ring on which the job will be submitted
 * @job: job to submit
 *
 * Returns:
 * True if sync is needed.
 */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	if (job->vmid == 0)
		return false;

	if (job->vm_needs_flush || ring->has_compute_vm_bug)
		return true;

	if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
		return true;

	if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
		return true;

	return false;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @job: related job
 * @need_pipe_sync: is pipe sync needed
 *
 * Emit a VM flush when it is necessary.
 *
 * Returns:
 * 0 on success, errno otherwise.
 */
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
		    bool need_pipe_sync)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
	bool spm_update_needed = job->spm_update_needed;
	bool gds_switch_needed = ring->funcs->emit_gds_switch &&
		job->gds_switch_needed;
	bool vm_flush_needed = job->vm_needs_flush;
	struct dma_fence *fence = NULL;
	bool pasid_mapping_needed = false;
	unsigned patch_offset = 0;
	int r;

	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
		gds_switch_needed = true;
		vm_flush_needed = true;
		pasid_mapping_needed = true;
		spm_update_needed = true;
	}

	mutex_lock(&id_mgr->lock);
	if (id->pasid != job->pasid || !id->pasid_mapping ||
	    !dma_fence_is_signaled(id->pasid_mapping))
		pasid_mapping_needed = true;
	mutex_unlock(&id_mgr->lock);

	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
	vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
			job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
		ring->funcs->emit_wreg;

	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
		return 0;

	amdgpu_ring_ib_begin(ring);
	if (ring->funcs->init_cond_exec)
		patch_offset = amdgpu_ring_init_cond_exec(ring);

	if (need_pipe_sync)
		amdgpu_ring_emit_pipeline_sync(ring);

	if (vm_flush_needed) {
		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
	}

	if (pasid_mapping_needed)
		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);

	if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
		adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);

	if (!ring->is_mes_queue && ring->funcs->emit_gds_switch &&
	    gds_switch_needed) {
		amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
					    job->gds_size, job->gws_base,
					    job->gws_size, job->oa_base,
					    job->oa_size);
	}

	if (vm_flush_needed || pasid_mapping_needed) {
		r = amdgpu_fence_emit(ring, &fence, NULL, 0);
		if (r)
			return r;
	}

	if (vm_flush_needed) {
		mutex_lock(&id_mgr->lock);
		dma_fence_put(id->last_flush);
		id->last_flush = dma_fence_get(fence);
		id->current_gpu_reset_count =
			atomic_read(&adev->gpu_reset_counter);
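		/*
		 * amdgpu_vmid_had_gpu_reset() later compares this snapshot
		 * with the global reset counter to spot VMIDs whose last
		 * flush predates a GPU reset.
		 */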
		mutex_unlock(&id_mgr->lock);
	}

	if (pasid_mapping_needed) {
		mutex_lock(&id_mgr->lock);
		id->pasid = job->pasid;
		dma_fence_put(id->pasid_mapping);
		id->pasid_mapping = dma_fence_get(fence);
		mutex_unlock(&id_mgr->lock);
	}
	dma_fence_put(fence);

	if (ring->funcs->patch_cond_exec)
		amdgpu_ring_patch_cond_exec(ring, patch_offset);

	/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
	if (ring->funcs->emit_switch_buffer) {
		amdgpu_ring_emit_switch_buffer(ring);
		amdgpu_ring_emit_switch_buffer(ring);
	}
	amdgpu_ring_ib_end(ring);
	return 0;
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm.
 * Search inside the @bo's vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 *
 * Returns:
 * Found bo_va or NULL.
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_vm_bo_base *base;

	for (base = bo->vm_bo; base; base = base->next) {
		if (base->vm != vm)
			continue;

		return container_of(base, struct amdgpu_bo_va, base);
	}
	return NULL;
}

/**
 * amdgpu_vm_map_gart - Resolve gart mapping of addr
 *
 * @pages_addr: optional DMA address to use for lookup
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to.
 *
 * Returns:
 * The address value to store in the page table entry.
 */
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	result &= 0xFFFFFFFFFFFFF000ULL;

	return result;
}
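
/*
 * Worked example for amdgpu_vm_map_gart() (illustrative numbers, assuming
 * 64 KiB CPU pages and 4 KiB GPU pages): for addr = 0x15000, pages_addr[1]
 * supplies the DMA address of the containing CPU page, the in-page offset
 * 0x5000 is OR'ed back in, and the final mask truncates the result to the
 * 4 KiB alignment a PTE requires.
 */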

/**
 * amdgpu_vm_update_pdes - make sure that all directories are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @immediate: submit immediately to the paging queue
 *
 * Makes sure all directories are up to date.
 *
 * Returns:
 * 0 for success, error for failure.
 */
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate)
{
	struct amdgpu_vm_update_params params;
	struct amdgpu_vm_bo_base *entry;
	bool flush_tlb_needed = false;
	LIST_HEAD(relocated);
	int r, idx;

	spin_lock(&vm->status_lock);
	list_splice_init(&vm->relocated, &relocated);
	spin_unlock(&vm->status_lock);

	if (list_empty(&relocated))
		return 0;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;

	r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
	if (r)
		goto error;

	list_for_each_entry(entry, &relocated, vm_status) {
		/* vm_flush_needed after updating moved PDEs */
		flush_tlb_needed |= entry->moved;

		r = amdgpu_vm_pde_update(&params, entry);
		if (r)
			goto error;
	}

	r = vm->update_funcs->commit(&params, &vm->last_update);
	if (r)
		goto error;

	if (flush_tlb_needed)
		atomic64_inc(&vm->tlb_seq);

	while (!list_empty(&relocated)) {
		entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
					 vm_status);
		amdgpu_vm_bo_idle(entry);
	}

error:
	drm_dev_exit(idx);
	return r;
}

/**
 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
 * @fence: unused
 * @cb: the callback structure
 *
 * Increments the tlb sequence to make sure that future CS execute a VM flush.
 */
static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
				 struct dma_fence_cb *cb)
{
	struct amdgpu_vm_tlb_seq_struct *tlb_cb;

	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
	atomic64_inc(&tlb_cb->vm->tlb_seq);
	kfree(tlb_cb);
}

/**
 * amdgpu_vm_update_range - update a range in the vm page table
 *
 * @adev: amdgpu_device pointer to use for commands
 * @vm: the VM to update the range
 * @immediate: immediate submission in a page fault
 * @unlocked: unlocked invalidation during MM callback
 * @flush_tlb: trigger tlb invalidation after update completed
 * @allow_override: change MTYPE for local NUMA nodes
 * @resv: fences we need to sync to
 * @start: start of mapped range
 * @last: last mapped entry
 * @flags: flags for the entries
 * @offset: offset into nodes and pages_addr
 * @vram_base: base for vram mappings
 * @res: ttm_resource to map
 * @pages_addr: DMA addresses to use for mapping
 * @fence: optional resulting fence
 *
 * Fill in the page table entries between @start and @last.
 *
 * Returns:
 * 0 for success, negative error code for failure.
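 *
 * For instance, amdgpu_vm_clear_freed() below uses this helper to clear the
 * PTEs of a freed mapping (no resource and no DMA addresses):
 *
 *	r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
 *				   resv, mapping->start, mapping->last,
 *				   init_pte_value, 0, 0, NULL, NULL, &f);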
 */
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			   bool immediate, bool unlocked, bool flush_tlb,
			   bool allow_override, struct dma_resv *resv,
			   uint64_t start, uint64_t last, uint64_t flags,
			   uint64_t offset, uint64_t vram_base,
			   struct ttm_resource *res, dma_addr_t *pages_addr,
			   struct dma_fence **fence)
{
	struct amdgpu_vm_update_params params;
	struct amdgpu_vm_tlb_seq_struct *tlb_cb;
	struct amdgpu_res_cursor cursor;
	enum amdgpu_sync_mode sync_mode;
	int r, idx;

	if (!drm_dev_enter(adev_to_drm(adev), &idx))
		return -ENODEV;

	tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
	if (!tlb_cb) {
		r = -ENOMEM;
		goto error_unlock;
	}

	/* On Vega20+XGMI, PTEs can get inadvertently cached in the L2 texture
	 * cache, so do a heavy-weight TLB flush unconditionally.
	 */
	flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
		     amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);

	/*
	 * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
	 */
	flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;
	params.immediate = immediate;
	params.pages_addr = pages_addr;
	params.unlocked = unlocked;
	params.allow_override = allow_override;

	/* Implicitly sync to command submissions in the same VM before
	 * unmapping.  Sync to moving fences before mapping.
	 */
	if (!(flags & AMDGPU_PTE_VALID))
		sync_mode = AMDGPU_SYNC_EQ_OWNER;
	else
		sync_mode = AMDGPU_SYNC_EXPLICIT;

	amdgpu_vm_eviction_lock(vm);
	if (vm->evicting) {
		r = -EBUSY;
		goto error_free;
	}

	if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
		struct dma_fence *tmp = dma_fence_get_stub();

		amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
		swap(vm->last_unlocked, tmp);
		dma_fence_put(tmp);
	}

	r = vm->update_funcs->prepare(&params, resv, sync_mode);
	if (r)
		goto error_free;
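
	/* Walk the resource with a cursor, updating the PTEs one physically
	 * contiguous run at a time.
	 */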
	amdgpu_res_first(pages_addr ? NULL : res, offset,
			 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
	while (cursor.remaining) {
		uint64_t tmp, num_entries, addr;

		num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
		if (pages_addr) {
			bool contiguous = true;

			if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
				uint64_t pfn = cursor.start >> PAGE_SHIFT;
				uint64_t count;

				contiguous = pages_addr[pfn + 1] ==
					pages_addr[pfn] + PAGE_SIZE;

				tmp = num_entries /
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
				for (count = 2; count < tmp; ++count) {
					uint64_t idx = pfn + count;

					if (contiguous != (pages_addr[idx] ==
					    pages_addr[idx - 1] + PAGE_SIZE))
						break;
				}
				if (!contiguous)
					count--;
				num_entries = count *
					AMDGPU_GPU_PAGES_IN_CPU_PAGE;
			}

			if (!contiguous) {
				addr = cursor.start;
				params.pages_addr = pages_addr;
			} else {
				addr = pages_addr[cursor.start >> PAGE_SHIFT];
				params.pages_addr = NULL;
			}

		} else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
			addr = vram_base + cursor.start;
		} else {
			addr = 0;
		}

		tmp = start + num_entries;
		r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
		if (r)
			goto error_free;

		amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
		start = tmp;
	}

	r = vm->update_funcs->commit(&params, fence);

	if (flush_tlb || params.table_freed) {
		tlb_cb->vm = vm;
		if (fence && *fence &&
		    !dma_fence_add_callback(*fence, &tlb_cb->cb,
					    amdgpu_vm_tlb_seq_cb)) {
			dma_fence_put(vm->last_tlb_flush);
			vm->last_tlb_flush = dma_fence_get(*fence);
		} else {
			amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
		}
		tlb_cb = NULL;
	}

error_free:
	kfree(tlb_cb);

error_unlock:
	amdgpu_vm_eviction_unlock(vm);
	drm_dev_exit(idx);
	return r;
}

static void amdgpu_vm_bo_get_memory(struct amdgpu_bo_va *bo_va,
				    struct amdgpu_mem_stats *stats)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	if (!bo)
		return;

	/*
	 * For now ignore BOs which are currently locked and potentially
	 * changing their location.
	 */
	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
	    !dma_resv_trylock(bo->tbo.base.resv))
		return;

	amdgpu_bo_get_memory(bo, stats);
	if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
		dma_resv_unlock(bo->tbo.base.resv);
}

void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
			  struct amdgpu_mem_stats *stats)
{
	struct amdgpu_bo_va *bo_va, *tmp;

	spin_lock(&vm->status_lock);
	list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);

	list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);

	list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);

	list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);

	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);

	list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status)
		amdgpu_vm_bo_get_memory(bo_va, stats);
	spin_unlock(&vm->status_lock);
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @clear: if true clear the entries
 *
 * Fill in the page table entries for @bo_va.
 *
 * Returns:
 * 0 for success, -EINVAL for failure.
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
			bool clear)
{
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo_va_mapping *mapping;
	dma_addr_t *pages_addr = NULL;
	struct ttm_resource *mem;
	struct dma_fence **last_update;
	bool flush_tlb = clear;
	bool uncached;
	struct dma_resv *resv;
	uint64_t vram_base;
	uint64_t flags;
	int r;

	if (clear || !bo) {
		mem = NULL;
		resv = vm->root.bo->tbo.base.resv;
	} else {
		struct drm_gem_object *obj = &bo->tbo.base;

		resv = bo->tbo.base.resv;
		if (obj->import_attach && bo_va->is_xgmi) {
			struct dma_buf *dma_buf = obj->import_attach->dmabuf;
			struct drm_gem_object *gobj = dma_buf->priv;
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			if (abo->tbo.resource &&
			    abo->tbo.resource->mem_type == TTM_PL_VRAM)
				bo = gem_to_amdgpu_bo(gobj);
		}
		mem = bo->tbo.resource;
		if (mem && (mem->mem_type == TTM_PL_TT ||
			    mem->mem_type == AMDGPU_PL_PREEMPT))
			pages_addr = bo->tbo.ttm->dma_address;
	}

	if (bo) {
		struct amdgpu_device *bo_adev;

		flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);

		if (amdgpu_bo_encrypted(bo))
			flags |= AMDGPU_PTE_TMZ;

		bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
		vram_base = bo_adev->vm_manager.vram_base_offset;
		uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
	} else {
		flags = 0x0;
		vram_base = 0;
		uncached = false;
	}

	if (clear || (bo && bo->tbo.base.resv ==
		      vm->root.bo->tbo.base.resv))
		last_update = &vm->last_update;
	else
		last_update = &bo_va->last_pt_update;

	if (!clear && bo_va->base.moved) {
		flush_tlb = true;
		list_splice_init(&bo_va->valids, &bo_va->invalids);

	} else if (bo_va->cleared != clear) {
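		/* The previous update and this one differ in whether they
		 * clear or map the entries, so all mappings have to be
		 * rewritten.
		 */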
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	}

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		uint64_t update_flags = flags;

		/* Normally the mapping flags should only contain the READABLE
		 * and WRITEABLE bits here, but filter them in the first place
		 * just in case.
		 */
		if (!(mapping->flags & AMDGPU_PTE_READABLE))
			update_flags &= ~AMDGPU_PTE_READABLE;
		if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
			update_flags &= ~AMDGPU_PTE_WRITEABLE;

		/* Apply ASIC specific mapping flags */
		amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);

		trace_amdgpu_vm_bo_update(mapping);

		r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
					   !uncached, resv, mapping->start,
					   mapping->last, update_flags,
					   mapping->offset, vram_base, mem,
					   pages_addr, last_update);
		if (r)
			return r;
	}

	/* If the BO is not in its preferred location add it back to
	 * the evicted list so that it gets validated again on the
	 * next command submission.
	 */
	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) {
		uint32_t mem_type = bo->tbo.resource->mem_type;

		if (!(bo->preferred_domains &
		      amdgpu_mem_type_to_domain(mem_type)))
			amdgpu_vm_bo_evicted(&bo_va->base);
		else
			amdgpu_vm_bo_idle(&bo_va->base);
	} else {
		amdgpu_vm_bo_done(&bo_va->base);
	}

	list_splice_init(&bo_va->invalids, &bo_va->valids);
	bo_va->cleared = clear;
	bo_va->base.moved = false;

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	return 0;
}

/**
 * amdgpu_vm_update_prt_state - update the global PRT state
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
	unsigned long flags;
	bool enable;

	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
	adev->gmc.gmc_funcs->set_prt(adev, enable);
	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}

/**
 * amdgpu_vm_prt_get - add a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_put - drop a PRT user
 *
 * @adev: amdgpu_device pointer
 */
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
		amdgpu_vm_update_prt_state(adev);
}

/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 *
 * @fence: fence for the callback
 * @_cb: the callback function
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}

/**
 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
 *
 * @adev: amdgpu_device pointer
 * @fence: fence for the callback
 */
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
				 struct dma_fence *fence)
{
	struct amdgpu_prt_cb *cb;

	if (!adev->gmc.gmc_funcs->set_prt)
		return;

	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		if (fence)
			dma_fence_wait(fence, false);

		amdgpu_vm_prt_put(adev);
	} else {
		cb->adev = adev;
		if (!fence || dma_fence_add_callback(fence, &cb->cb,
						     amdgpu_vm_prt_cb))
			amdgpu_vm_prt_cb(fence, &cb->cb);
	}
}

/**
 * amdgpu_vm_free_mapping - free a mapping
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapping to be freed
 * @fence: fence of the unmap operation
 *
 * Free a mapping and make sure we decrease the PRT usage count if applicable.
 */
static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm,
				   struct amdgpu_bo_va_mapping *mapping,
				   struct dma_fence *fence)
{
	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_add_prt_cb(adev, fence);
	kfree(mapping);
}

/**
 * amdgpu_vm_prt_fini - finish all prt mappings
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Register a cleanup callback to disable PRT support after VM dies.
 */
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
		/* Add a callback for each fence in the reservation object */
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, fence);
	}
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @fence: optional resulting fence (unchanged if no work needed to be done
 * or if an error occurred)
 *
 * Make sure all freed BOs are cleared in the PT.
 * PTs have to be reserved and mutex must be locked!
 *
 * Returns:
 * 0 for success.
 *
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence)
{
	struct dma_resv *resv = vm->root.bo->tbo.base.resv;
	struct amdgpu_bo_va_mapping *mapping;
	uint64_t init_pte_value = 0;
	struct dma_fence *f = NULL;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
					   struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		if (vm->pte_support_ats &&
		    mapping->start < AMDGPU_GMC_HOLE_START)
			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;

		r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
					   resv, mapping->start, mapping->last,
					   init_pte_value, 0, 0, NULL, NULL,
					   &f);
		amdgpu_vm_free_mapping(adev, vm, mapping, f);
		if (r) {
			dma_fence_put(f);
			return r;
		}
	}

	if (fence && f) {
		dma_fence_put(*fence);
		*fence = f;
	} else {
		dma_fence_put(f);
	}

	return 0;
}

/**
 * amdgpu_vm_handle_moved - handle moved BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @ticket: optional reservation ticket used to reserve the VM
 *
 * Make sure all BOs which are moved are updated in the PTs.
 *
 * Returns:
 * 0 for success.
 *
 * PTs have to be reserved!
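 *
 * Illustrative sketch (hypothetical caller): a submission path that reserved
 * the VM with drm_exec can pass its ticket so per-BO reservations taken
 * under the same acquire context are recognized:
 *
 *	r = amdgpu_vm_handle_moved(adev, vm, &exec.ticket);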
 */
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm,
			   struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo_va *bo_va;
	struct dma_resv *resv;
	bool clear, unlock;
	int r;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->moved)) {
		bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
					 base.vm_status);
		spin_unlock(&vm->status_lock);

		/* Per VM BOs never need to be cleared in the page tables */
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;
		spin_lock(&vm->status_lock);
	}

	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
					 base.vm_status);
		resv = bo_va->base.bo->tbo.base.resv;
		spin_unlock(&vm->status_lock);

		/* Try to reserve the BO to avoid clearing its ptes */
		if (!adev->debug_vm && dma_resv_trylock(resv)) {
			clear = false;
			unlock = true;
		/* The caller is already holding the reservation lock */
		} else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
			clear = false;
			unlock = false;
		/* Somebody else is using the BO right now */
		} else {
			clear = true;
			unlock = false;
		}

		r = amdgpu_vm_bo_update(adev, bo_va, clear);

		if (unlock)
			dma_resv_unlock(resv);
		if (r)
			return r;

		/* Remember evicted DMABuf imports in compute VMs for later
		 * validation
		 */
		if (vm->is_compute_context &&
		    bo_va->base.bo->tbo.base.import_attach &&
		    (!bo_va->base.bo->tbo.resource ||
		     bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
			amdgpu_vm_bo_evicted_user(&bo_va->base);

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @flush_type: flush type
 * @xcc_mask: mask of XCCs that belong to the compute partition in need of a
 * TLB flush.
 *
 * Flush TLB if needed for a compute VM.
 *
 * Returns:
 * 0 for success.
 */
int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint32_t flush_type,
				uint32_t xcc_mask)
{
	uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
	bool all_hub = false;
	int xcc = 0, r = 0;

	WARN_ON_ONCE(!vm->is_compute_context);

	/*
	 * It can be that we race and lose here, but that is extremely unlikely
	 * and the worst thing which could happen is that we flush the changes
	 * into the TLB once more which is harmless.
	 */
	if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
		return 0;

	if (adev->family == AMDGPU_FAMILY_AI ||
	    adev->family == AMDGPU_FAMILY_RV)
		all_hub = true;

	for_each_inst(xcc, xcc_mask) {
		r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
						   all_hub, xcc);
		if (r)
			break;
	}
	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm.
 * Add @bo to the list of bos associated with the vm
 *
 * Returns:
 * Newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL)
		return NULL;

	amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);

	bo_va->ref_count = 1;
	bo_va->last_pt_update = dma_fence_get_stub();
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);

	if (!bo)
		return bo_va;

	dma_resv_assert_held(bo->tbo.base.resv);
	if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
		bo_va->is_xgmi = true;
		/* Power up XGMI if it can be potentially used */
		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
	}

	return bo_va;
}

/**
 * amdgpu_vm_bo_insert_map - insert a new mapping
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @mapping: the mapping to insert
 *
 * Insert a new mapping into all structures.
 */
static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va,
				    struct amdgpu_bo_va_mapping *mapping)
{
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_bo *bo = bo_va->base.bo;

	mapping->bo_va = bo_va;
	list_add(&mapping->list, &bo_va->invalids);
	amdgpu_vm_it_insert(mapping, &vm->va);

	if (mapping->flags & AMDGPU_PTE_PRT)
		amdgpu_vm_prt_get(adev);

	if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
	    !bo_va->base.moved) {
		amdgpu_vm_bo_moved(&bo_va->base);
	}
	trace_amdgpu_vm_bo_map(bo_va, mapping);
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
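 *
 * Illustrative sketch (hypothetical, page-aligned values): after creating a
 * bo_va with amdgpu_vm_bo_add(), map the first 1 MiB of the BO at GPU
 * address 0x100000:
 *
 *	r = amdgpu_vm_bo_map(adev, bo_va, 0x100000, 0, 0x100000,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);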
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	uint64_t eaddr;

	/* validate the parameters */
	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
		return -EINVAL;
	if (saddr + size <= saddr || offset + size <= offset)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
		return -EINVAL;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	if (tmp) {
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
			tmp->start, tmp->last + 1);
		return -EINVAL;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}

/**
 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: BO size in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.  Replace existing
 * mappings as we do so.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t offset,
			     uint64_t size, uint64_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo = bo_va->base.bo;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK)
		return -EINVAL;
	if (saddr + size <= saddr || offset + size <= offset)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size - 1;
	if ((bo && offset + size > amdgpu_bo_size(bo)) ||
	    (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT))
		return -EINVAL;

	/* Allocate all the needed memory */
	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
	if (r) {
		kfree(mapping);
		return r;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	mapping->start = saddr;
	mapping->last = eaddr;
	mapping->offset = offset;
	mapping->flags = flags;

	amdgpu_vm_bo_insert_map(adev, bo_va, mapping);

	return 0;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 *
 * Returns:
 * 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->base.vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids)
			return -ENOENT;
	}

	list_del(&mapping->list);
	amdgpu_vm_it_remove(mapping, &vm->va);
	mapping->bo_va = NULL;
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid)
		list_add(&mapping->list, &vm->freed);
	else
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);

	return 0;
}

/**
 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
 *
 * @adev: amdgpu_device pointer
 * @vm: VM structure to use
 * @saddr: start of the range
 * @size: size of the range
 *
 * Remove all mappings in a range, split them as appropriate.
 *
 * Returns:
 * 0 for success, error for failure.
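 *
 * For example (hypothetical, page-aligned byte addresses): clearing
 * [0x2000, 0x3000) out of an existing mapping covering [0x1000, 0x5000)
 * keeps a "before" remainder for [0x1000, 0x2000) and an "after" remainder
 * for [0x3000, 0x5000), with the after-remainder's BO offset advanced by
 * the distance from the original start.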
 */
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size)
{
	struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
	LIST_HEAD(removed);
	uint64_t eaddr;

	eaddr = saddr + size - 1;
	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	/* Allocate all the needed memory */
	before = kzalloc(sizeof(*before), GFP_KERNEL);
	if (!before)
		return -ENOMEM;
	INIT_LIST_HEAD(&before->list);

	after = kzalloc(sizeof(*after), GFP_KERNEL);
	if (!after) {
		kfree(before);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&after->list);

	/* Now gather all removed mappings */
	tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
	while (tmp) {
		/* Remember mapping split at the start */
		if (tmp->start < saddr) {
			before->start = tmp->start;
			before->last = saddr - 1;
			before->offset = tmp->offset;
			before->flags = tmp->flags;
			before->bo_va = tmp->bo_va;
			list_add(&before->list, &tmp->bo_va->invalids);
		}

		/* Remember mapping split at the end */
		if (tmp->last > eaddr) {
			after->start = eaddr + 1;
			after->last = tmp->last;
			after->offset = tmp->offset;
			after->offset += (after->start - tmp->start) << PAGE_SHIFT;
			after->flags = tmp->flags;
			after->bo_va = tmp->bo_va;
			list_add(&after->list, &tmp->bo_va->invalids);
		}

		list_del(&tmp->list);
		list_add(&tmp->list, &removed);

		tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
	}

	/* And free them up */
	list_for_each_entry_safe(tmp, next, &removed, list) {
		amdgpu_vm_it_remove(tmp, &vm->va);
		list_del(&tmp->list);

		if (tmp->start < saddr)
			tmp->start = saddr;
		if (tmp->last > eaddr)
			tmp->last = eaddr;

		tmp->bo_va = NULL;
		list_add(&tmp->list, &vm->freed);
		trace_amdgpu_vm_bo_unmap(NULL, tmp);
	}

	/* Insert partial mapping before the range */
	if (!list_empty(&before->list)) {
		struct amdgpu_bo *bo = before->bo_va->base.bo;

		amdgpu_vm_it_insert(before, &vm->va);
		if (before->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);

		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
		    !before->bo_va->base.moved)
			amdgpu_vm_bo_moved(&before->bo_va->base);
	} else {
		kfree(before);
	}

	/* Insert partial mapping after the range */
	if (!list_empty(&after->list)) {
		struct amdgpu_bo *bo = after->bo_va->base.bo;

		amdgpu_vm_it_insert(after, &vm->va);
		if (after->flags & AMDGPU_PTE_PRT)
			amdgpu_vm_prt_get(adev);

		if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv &&
		    !after->bo_va->base.moved)
			amdgpu_vm_bo_moved(&after->bo_va->base);
	} else {
		kfree(after);
	}

	return 0;
}

/**
 * amdgpu_vm_bo_lookup_mapping - find mapping by address
 *
 * @vm: the requested VM
 * @addr: the address
 *
 * Find a mapping by its address.
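 *
 * Note that @addr is in GPU page units; a hypothetical caller translating a
 * GPU virtual address given in bytes would first divide it down:
 *
 *	mapping = amdgpu_vm_bo_lookup_mapping(vm, va / AMDGPU_GPU_PAGE_SIZE);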
 *
 * Returns:
 * The amdgpu_bo_va_mapping matching for addr or NULL
 *
 */
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr)
{
	return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
}

/**
 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
 *
 * @vm: the requested vm
 * @ticket: CS ticket
 *
 * Trace all mappings of BOs reserved during a command submission.
 */
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
{
	struct amdgpu_bo_va_mapping *mapping;

	if (!trace_amdgpu_vm_bo_cs_enabled())
		return;

	for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
	     mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
		if (mapping->bo_va && mapping->bo_va->base.bo) {
			struct amdgpu_bo *bo;

			bo = mapping->bo_va->base.bo;
			if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
			    ticket)
				continue;
		}

		trace_amdgpu_vm_bo_cs(mapping);
	}
}

/**
 * amdgpu_vm_bo_del - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm.
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_del(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_bo *bo = bo_va->base.bo;
	struct amdgpu_vm *vm = bo_va->base.vm;
	struct amdgpu_vm_bo_base **base;

	dma_resv_assert_held(vm->root.bo->tbo.base.resv);

	if (bo) {
		dma_resv_assert_held(bo->tbo.base.resv);
		if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv)
			ttm_bo_set_bulk_move(&bo->tbo, NULL);

		for (base = &bo_va->base.bo->vm_bo; *base;
		     base = &(*base)->next) {
			if (*base != &bo_va->base)
				continue;

			*base = bo_va->base.next;
			break;
		}
	}

	spin_lock(&vm->status_lock);
	list_del(&bo_va->base.vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		mapping->bo_va = NULL;
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		list_add(&mapping->list, &vm->freed);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		amdgpu_vm_it_remove(mapping, &vm->va);
		amdgpu_vm_free_mapping(adev, vm, mapping,
				       bo_va->last_pt_update);
	}

	dma_fence_put(bo_va->last_pt_update);

	if (bo && bo_va->is_xgmi)
		amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);

	kfree(bo_va);
}

/**
 * amdgpu_vm_evictable - check if we can evict a VM
 *
 * @bo: A page table of the VM.
 *
 * Check if it is possible to evict a VM.
2029 */ 2030 bool amdgpu_vm_evictable(struct amdgpu_bo *bo) 2031 { 2032 struct amdgpu_vm_bo_base *bo_base = bo->vm_bo; 2033 2034 /* Page tables of a destroyed VM can go away immediately */ 2035 if (!bo_base || !bo_base->vm) 2036 return true; 2037 2038 /* Don't evict VM page tables while they are busy */ 2039 if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP)) 2040 return false; 2041 2042 /* Try to block ongoing updates */ 2043 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) 2044 return false; 2045 2046 /* Don't evict VM page tables while they are updated */ 2047 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { 2048 amdgpu_vm_eviction_unlock(bo_base->vm); 2049 return false; 2050 } 2051 2052 bo_base->vm->evicting = true; 2053 amdgpu_vm_eviction_unlock(bo_base->vm); 2054 return true; 2055 } 2056 2057 /** 2058 * amdgpu_vm_bo_invalidate - mark the bo as invalid 2059 * 2060 * @adev: amdgpu_device pointer 2061 * @bo: amdgpu buffer object 2062 * @evicted: is the BO evicted 2063 * 2064 * Mark @bo as invalid. 2065 */ 2066 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 2067 struct amdgpu_bo *bo, bool evicted) 2068 { 2069 struct amdgpu_vm_bo_base *bo_base; 2070 2071 /* shadow bo doesn't have bo base, its validation needs its parent */ 2072 if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo)) 2073 bo = bo->parent; 2074 2075 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { 2076 struct amdgpu_vm *vm = bo_base->vm; 2077 2078 if (evicted && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { 2079 amdgpu_vm_bo_evicted(bo_base); 2080 continue; 2081 } 2082 2083 if (bo_base->moved) 2084 continue; 2085 bo_base->moved = true; 2086 2087 if (bo->tbo.type == ttm_bo_type_kernel) 2088 amdgpu_vm_bo_relocated(bo_base); 2089 else if (bo->tbo.base.resv == vm->root.bo->tbo.base.resv) 2090 amdgpu_vm_bo_moved(bo_base); 2091 else 2092 amdgpu_vm_bo_invalidated(bo_base); 2093 } 2094 } 2095 2096 /** 2097 * amdgpu_vm_get_block_size - calculate VM page table size as power of two 2098 * 2099 * @vm_size: VM size 2100 * 2101 * Returns: 2102 * VM page table as power of two 2103 */ 2104 static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size) 2105 { 2106 /* Total bits covered by PD + PTs */ 2107 unsigned bits = ilog2(vm_size) + 18; 2108 2109 /* Make sure the PD is 4K in size up to 8GB address space. 2110 Above that split equal between PD and PTs */ 2111 if (vm_size <= 8) 2112 return (bits - 9); 2113 else 2114 return ((bits + 3) / 2); 2115 } 2116 2117 /** 2118 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size 2119 * 2120 * @adev: amdgpu_device pointer 2121 * @min_vm_size: the minimum vm size in GB if it's set auto 2122 * @fragment_size_default: Default PTE fragment size 2123 * @max_level: max VMPT level 2124 * @max_bits: max address space size in bits 2125 * 2126 */ 2127 void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, 2128 uint32_t fragment_size_default, unsigned max_level, 2129 unsigned max_bits) 2130 { 2131 unsigned int max_size = 1 << (max_bits - 30); 2132 unsigned int vm_size; 2133 uint64_t tmp; 2134 2135 /* adjust vm size first */ 2136 if (amdgpu_vm_size != -1) { 2137 vm_size = amdgpu_vm_size; 2138 if (vm_size > max_size) { 2139 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", 2140 amdgpu_vm_size, max_size); 2141 vm_size = max_size; 2142 } 2143 } else { 2144 struct sysinfo si; 2145 unsigned int phys_ram_gb; 2146 2147 /* Optimal VM size depends on the amount of physical 2148 * RAM available. 
Underlying requirements and 2149 * assumptions: 2150 * 2151 * - Need to map system memory and VRAM from all GPUs 2152 * - VRAM from other GPUs not known here 2153 * - Assume VRAM <= system memory 2154 * - On GFX8 and older, VM space can be segmented for 2155 * different MTYPEs 2156 * - Need to allow room for fragmentation, guard pages etc. 2157 * 2158 * This adds up to a rough guess of system memory x3. 2159 * Round up to power of two to maximize the available 2160 * VM size with the given page table size. 2161 */ 2162 si_meminfo(&si); 2163 phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit + 2164 (1 << 30) - 1) >> 30; 2165 vm_size = roundup_pow_of_two( 2166 min(max(phys_ram_gb * 3, min_vm_size), max_size)); 2167 } 2168 2169 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18; 2170 2171 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn); 2172 if (amdgpu_vm_block_size != -1) 2173 tmp >>= amdgpu_vm_block_size - 9; 2174 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1; 2175 adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp); 2176 switch (adev->vm_manager.num_level) { 2177 case 3: 2178 adev->vm_manager.root_level = AMDGPU_VM_PDB2; 2179 break; 2180 case 2: 2181 adev->vm_manager.root_level = AMDGPU_VM_PDB1; 2182 break; 2183 case 1: 2184 adev->vm_manager.root_level = AMDGPU_VM_PDB0; 2185 break; 2186 default: 2187 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n"); 2188 } 2189 /* block size depends on vm size and hw setup*/ 2190 if (amdgpu_vm_block_size != -1) 2191 adev->vm_manager.block_size = 2192 min((unsigned)amdgpu_vm_block_size, max_bits 2193 - AMDGPU_GPU_PAGE_SHIFT 2194 - 9 * adev->vm_manager.num_level); 2195 else if (adev->vm_manager.num_level > 1) 2196 adev->vm_manager.block_size = 9; 2197 else 2198 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp); 2199 2200 if (amdgpu_vm_fragment_size == -1) 2201 adev->vm_manager.fragment_size = fragment_size_default; 2202 else 2203 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; 2204 2205 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", 2206 vm_size, adev->vm_manager.num_level + 1, 2207 adev->vm_manager.block_size, 2208 adev->vm_manager.fragment_size); 2209 } 2210 2211 /** 2212 * amdgpu_vm_wait_idle - wait for the VM to become idle 2213 * 2214 * @vm: VM object to wait for 2215 * @timeout: timeout to wait for VM to become idle 2216 */ 2217 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) 2218 { 2219 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, 2220 DMA_RESV_USAGE_BOOKKEEP, 2221 true, timeout); 2222 if (timeout <= 0) 2223 return timeout; 2224 2225 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); 2226 } 2227 2228 /** 2229 * amdgpu_vm_init - initialize a vm instance 2230 * 2231 * @adev: amdgpu_device pointer 2232 * @vm: requested vm 2233 * @xcp_id: GPU partition selection id 2234 * 2235 * Init @vm fields. 2236 * 2237 * Returns: 2238 * 0 for success, error for failure. 
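 *
 * Typical use, as an illustrative sketch only (real callers pass their own
 * partition id and PASID and handle errors; AMDGPU_XCP_NO_PARTITION here
 * just stands in for "no specific partition"):
 *
 *	r = amdgpu_vm_init(adev, vm, AMDGPU_XCP_NO_PARTITION);
 *	if (!r)
 *		r = amdgpu_vm_set_pasid(adev, vm, pasid);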
2239 */ 2240 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, 2241 int32_t xcp_id) 2242 { 2243 struct amdgpu_bo *root_bo; 2244 struct amdgpu_bo_vm *root; 2245 int r, i; 2246 2247 vm->va = RB_ROOT_CACHED; 2248 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) 2249 vm->reserved_vmid[i] = NULL; 2250 INIT_LIST_HEAD(&vm->evicted); 2251 INIT_LIST_HEAD(&vm->evicted_user); 2252 INIT_LIST_HEAD(&vm->relocated); 2253 INIT_LIST_HEAD(&vm->moved); 2254 INIT_LIST_HEAD(&vm->idle); 2255 INIT_LIST_HEAD(&vm->invalidated); 2256 spin_lock_init(&vm->status_lock); 2257 INIT_LIST_HEAD(&vm->freed); 2258 INIT_LIST_HEAD(&vm->done); 2259 INIT_LIST_HEAD(&vm->pt_freed); 2260 INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work); 2261 INIT_KFIFO(vm->faults); 2262 2263 r = amdgpu_vm_init_entities(adev, vm); 2264 if (r) 2265 return r; 2266 2267 vm->pte_support_ats = false; 2268 vm->is_compute_context = false; 2269 2270 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & 2271 AMDGPU_VM_USE_CPU_FOR_GFX); 2272 2273 DRM_DEBUG_DRIVER("VM update mode is %s\n", 2274 vm->use_cpu_for_update ? "CPU" : "SDMA"); 2275 WARN_ONCE((vm->use_cpu_for_update && 2276 !amdgpu_gmc_vram_full_visible(&adev->gmc)), 2277 "CPU update of VM recommended only for large BAR system\n"); 2278 2279 if (vm->use_cpu_for_update) 2280 vm->update_funcs = &amdgpu_vm_cpu_funcs; 2281 else 2282 vm->update_funcs = &amdgpu_vm_sdma_funcs; 2283 2284 vm->last_update = dma_fence_get_stub(); 2285 vm->last_unlocked = dma_fence_get_stub(); 2286 vm->last_tlb_flush = dma_fence_get_stub(); 2287 vm->generation = 0; 2288 2289 mutex_init(&vm->eviction_lock); 2290 vm->evicting = false; 2291 2292 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, 2293 false, &root, xcp_id); 2294 if (r) 2295 goto error_free_delayed; 2296 2297 root_bo = amdgpu_bo_ref(&root->bo); 2298 r = amdgpu_bo_reserve(root_bo, true); 2299 if (r) { 2300 amdgpu_bo_unref(&root->shadow); 2301 amdgpu_bo_unref(&root_bo); 2302 goto error_free_delayed; 2303 } 2304 2305 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); 2306 r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1); 2307 if (r) 2308 goto error_free_root; 2309 2310 r = amdgpu_vm_pt_clear(adev, vm, root, false); 2311 if (r) 2312 goto error_free_root; 2313 2314 amdgpu_bo_unreserve(vm->root.bo); 2315 amdgpu_bo_unref(&root_bo); 2316 2317 return 0; 2318 2319 error_free_root: 2320 amdgpu_vm_pt_free_root(adev, vm); 2321 amdgpu_bo_unreserve(vm->root.bo); 2322 amdgpu_bo_unref(&root_bo); 2323 2324 error_free_delayed: 2325 dma_fence_put(vm->last_tlb_flush); 2326 dma_fence_put(vm->last_unlocked); 2327 amdgpu_vm_fini_entities(vm); 2328 2329 return r; 2330 } 2331 2332 /** 2333 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM 2334 * 2335 * @adev: amdgpu_device pointer 2336 * @vm: requested vm 2337 * 2338 * This only works on GFX VMs that don't have any BOs added and no 2339 * page tables allocated yet. 2340 * 2341 * Changes the following VM parameters: 2342 * - use_cpu_for_update 2343 * - pte_supports_ats 2344 * 2345 * Reinitializes the page directory to reflect the changed ATS 2346 * setting. 2347 * 2348 * Returns: 2349 * 0 for success, -errno for errors. 2350 */ 2351 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) 2352 { 2353 bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); 2354 int r; 2355 2356 r = amdgpu_bo_reserve(vm->root.bo, true); 2357 if (r) 2358 return r; 2359 2360 /* Check if PD needs to be reinitialized and do it before 2361 * changing any other state, in case it fails. 
2362          */
2363         if (pte_support_ats != vm->pte_support_ats) {
2364                 /* Sanity checks */
2365                 if (!amdgpu_vm_pt_is_root_clean(adev, vm)) {
2366                         r = -EINVAL;
2367                         goto unreserve_bo;
2368                 }
2369
2370                 vm->pte_support_ats = pte_support_ats;
2371                 r = amdgpu_vm_pt_clear(adev, vm, to_amdgpu_bo_vm(vm->root.bo),
2372                                        false);
2373                 if (r)
2374                         goto unreserve_bo;
2375         }
2376
2377         /* Update VM state */
2378         vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2379                                     AMDGPU_VM_USE_CPU_FOR_COMPUTE);
2380         DRM_DEBUG_DRIVER("VM update mode is %s\n",
2381                          vm->use_cpu_for_update ? "CPU" : "SDMA");
2382         WARN_ONCE((vm->use_cpu_for_update &&
2383                    !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2384                   "CPU update of VM recommended only for large BAR system\n");
2385
2386         if (vm->use_cpu_for_update) {
2387                 /* Sync with last SDMA update/clear before switching to CPU */
2388                 r = amdgpu_bo_sync_wait(vm->root.bo,
2389                                         AMDGPU_FENCE_OWNER_UNDEFINED, true);
2390                 if (r)
2391                         goto unreserve_bo;
2392
2393                 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2394                 r = amdgpu_vm_pt_map_tables(adev, vm);
2395                 if (r)
2396                         goto unreserve_bo;
2397
2398         } else {
2399                 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2400         }
2401
2402         dma_fence_put(vm->last_update);
2403         vm->last_update = dma_fence_get_stub();
2404         vm->is_compute_context = true;
2405
2406         /* Free the shadow bo for compute VM */
2407         amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
2408
2409         goto unreserve_bo;
2410
2411 unreserve_bo:
2412         amdgpu_bo_unreserve(vm->root.bo);
2413         return r;
2414 }
2415
2416 /**
2417  * amdgpu_vm_release_compute - release a compute vm
2418  * @adev: amdgpu_device pointer
2419  * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
2420  *
2421  * This is the counterpart of amdgpu_vm_make_compute. It decouples the
2422  * compute pasid from the vm. Compute should stop using the vm after this call.
2423  */
2424 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
2425 {
2426         amdgpu_vm_set_pasid(adev, vm, 0);
2427         vm->is_compute_context = false;
2428 }
2429
2430 /**
2431  * amdgpu_vm_fini - tear down a vm instance
2432  *
2433  * @adev: amdgpu_device pointer
2434  * @vm: requested vm
2435  *
2436  * Tear down @vm.
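 * Counterpart of amdgpu_vm_init(). Flushes pending page table frees, waits
 * for the last unlocked update and TLB flush fences, drops the PASID
 * binding and then releases the root page directory.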
2437 * Unbind the VM and remove all bos from the vm bo list 2438 */ 2439 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) 2440 { 2441 struct amdgpu_bo_va_mapping *mapping, *tmp; 2442 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt; 2443 struct amdgpu_bo *root; 2444 unsigned long flags; 2445 int i; 2446 2447 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); 2448 2449 flush_work(&vm->pt_free_work); 2450 2451 root = amdgpu_bo_ref(vm->root.bo); 2452 amdgpu_bo_reserve(root, true); 2453 amdgpu_vm_set_pasid(adev, vm, 0); 2454 dma_fence_wait(vm->last_unlocked, false); 2455 dma_fence_put(vm->last_unlocked); 2456 dma_fence_wait(vm->last_tlb_flush, false); 2457 /* Make sure that all fence callbacks have completed */ 2458 spin_lock_irqsave(vm->last_tlb_flush->lock, flags); 2459 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags); 2460 dma_fence_put(vm->last_tlb_flush); 2461 2462 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { 2463 if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { 2464 amdgpu_vm_prt_fini(adev, vm); 2465 prt_fini_needed = false; 2466 } 2467 2468 list_del(&mapping->list); 2469 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); 2470 } 2471 2472 amdgpu_vm_pt_free_root(adev, vm); 2473 amdgpu_bo_unreserve(root); 2474 amdgpu_bo_unref(&root); 2475 WARN_ON(vm->root.bo); 2476 2477 amdgpu_vm_fini_entities(vm); 2478 2479 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { 2480 dev_err(adev->dev, "still active bo inside vm\n"); 2481 } 2482 rbtree_postorder_for_each_entry_safe(mapping, tmp, 2483 &vm->va.rb_root, rb) { 2484 /* Don't remove the mapping here, we don't want to trigger a 2485 * rebalance and the tree is about to be destroyed anyway. 2486 */ 2487 list_del(&mapping->list); 2488 kfree(mapping); 2489 } 2490 2491 dma_fence_put(vm->last_update); 2492 2493 for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) { 2494 if (vm->reserved_vmid[i]) { 2495 amdgpu_vmid_free_reserved(adev, i); 2496 vm->reserved_vmid[i] = false; 2497 } 2498 } 2499 2500 } 2501 2502 /** 2503 * amdgpu_vm_manager_init - init the VM manager 2504 * 2505 * @adev: amdgpu_device pointer 2506 * 2507 * Initialize the VM manager structures 2508 */ 2509 void amdgpu_vm_manager_init(struct amdgpu_device *adev) 2510 { 2511 unsigned i; 2512 2513 /* Concurrent flushes are only possible starting with Vega10 and 2514 * are broken on Navi10 and Navi14. 
2515 */ 2516 adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 || 2517 adev->asic_type == CHIP_NAVI10 || 2518 adev->asic_type == CHIP_NAVI14); 2519 amdgpu_vmid_mgr_init(adev); 2520 2521 adev->vm_manager.fence_context = 2522 dma_fence_context_alloc(AMDGPU_MAX_RINGS); 2523 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) 2524 adev->vm_manager.seqno[i] = 0; 2525 2526 spin_lock_init(&adev->vm_manager.prt_lock); 2527 atomic_set(&adev->vm_manager.num_prt_users, 0); 2528 2529 /* If not overridden by the user, by default, only in large BAR systems 2530 * Compute VM tables will be updated by CPU 2531 */ 2532 #ifdef CONFIG_X86_64 2533 if (amdgpu_vm_update_mode == -1) { 2534 /* For asic with VF MMIO access protection 2535 * avoid using CPU for VM table updates 2536 */ 2537 if (amdgpu_gmc_vram_full_visible(&adev->gmc) && 2538 !amdgpu_sriov_vf_mmio_access_protection(adev)) 2539 adev->vm_manager.vm_update_mode = 2540 AMDGPU_VM_USE_CPU_FOR_COMPUTE; 2541 else 2542 adev->vm_manager.vm_update_mode = 0; 2543 } else 2544 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode; 2545 #else 2546 adev->vm_manager.vm_update_mode = 0; 2547 #endif 2548 2549 xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ); 2550 } 2551 2552 /** 2553 * amdgpu_vm_manager_fini - cleanup VM manager 2554 * 2555 * @adev: amdgpu_device pointer 2556 * 2557 * Cleanup the VM manager and free resources. 2558 */ 2559 void amdgpu_vm_manager_fini(struct amdgpu_device *adev) 2560 { 2561 WARN_ON(!xa_empty(&adev->vm_manager.pasids)); 2562 xa_destroy(&adev->vm_manager.pasids); 2563 2564 amdgpu_vmid_mgr_fini(adev); 2565 } 2566 2567 /** 2568 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs. 2569 * 2570 * @dev: drm device pointer 2571 * @data: drm_amdgpu_vm 2572 * @filp: drm file pointer 2573 * 2574 * Returns: 2575 * 0 for success, -errno for errors. 2576 */ 2577 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 2578 { 2579 union drm_amdgpu_vm *args = data; 2580 struct amdgpu_device *adev = drm_to_adev(dev); 2581 struct amdgpu_fpriv *fpriv = filp->driver_priv; 2582 2583 /* No valid flags defined yet */ 2584 if (args->in.flags) 2585 return -EINVAL; 2586 2587 switch (args->in.op) { 2588 case AMDGPU_VM_OP_RESERVE_VMID: 2589 /* We only have requirement to reserve vmid from gfxhub */ 2590 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { 2591 amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0)); 2592 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true; 2593 } 2594 2595 break; 2596 case AMDGPU_VM_OP_UNRESERVE_VMID: 2597 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) { 2598 amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0)); 2599 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false; 2600 } 2601 break; 2602 default: 2603 return -EINVAL; 2604 } 2605 2606 return 0; 2607 } 2608 2609 /** 2610 * amdgpu_vm_get_task_info - Extracts task info for a PASID. 2611 * 2612 * @adev: drm device pointer 2613 * @pasid: PASID identifier for VM 2614 * @task_info: task_info to fill. 2615 */ 2616 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, 2617 struct amdgpu_task_info *task_info) 2618 { 2619 struct amdgpu_vm *vm; 2620 unsigned long flags; 2621 2622 xa_lock_irqsave(&adev->vm_manager.pasids, flags); 2623 2624 vm = xa_load(&adev->vm_manager.pasids, pasid); 2625 if (vm) 2626 *task_info = vm->task_info; 2627 2628 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags); 2629 } 2630 2631 /** 2632 * amdgpu_vm_set_task_info - Sets VMs task info. 
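 * Records the pid and task name of the current task (and of the group
 * leader's process when they share an mm) in @vm the first time it is
 * called, so that fault handling can later report which process owned the
 * VM through amdgpu_vm_get_task_info().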
2633 * 2634 * @vm: vm for which to set the info 2635 */ 2636 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) 2637 { 2638 if (vm->task_info.pid) 2639 return; 2640 2641 vm->task_info.pid = current->pid; 2642 get_task_comm(vm->task_info.task_name, current); 2643 2644 if (current->group_leader->mm != current->mm) 2645 return; 2646 2647 vm->task_info.tgid = current->group_leader->pid; 2648 get_task_comm(vm->task_info.process_name, current->group_leader); 2649 } 2650 2651 /** 2652 * amdgpu_vm_handle_fault - graceful handling of VM faults. 2653 * @adev: amdgpu device pointer 2654 * @pasid: PASID of the VM 2655 * @vmid: VMID, only used for GFX 9.4.3. 2656 * @node_id: Node_id received in IH cookie. Only applicable for 2657 * GFX 9.4.3. 2658 * @addr: Address of the fault 2659 * @write_fault: true is write fault, false is read fault 2660 * 2661 * Try to gracefully handle a VM fault. Return true if the fault was handled and 2662 * shouldn't be reported any more. 2663 */ 2664 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, 2665 u32 vmid, u32 node_id, uint64_t addr, 2666 bool write_fault) 2667 { 2668 bool is_compute_context = false; 2669 struct amdgpu_bo *root; 2670 unsigned long irqflags; 2671 uint64_t value, flags; 2672 struct amdgpu_vm *vm; 2673 int r; 2674 2675 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags); 2676 vm = xa_load(&adev->vm_manager.pasids, pasid); 2677 if (vm) { 2678 root = amdgpu_bo_ref(vm->root.bo); 2679 is_compute_context = vm->is_compute_context; 2680 } else { 2681 root = NULL; 2682 } 2683 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags); 2684 2685 if (!root) 2686 return false; 2687 2688 addr /= AMDGPU_GPU_PAGE_SIZE; 2689 2690 if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid, 2691 node_id, addr, write_fault)) { 2692 amdgpu_bo_unref(&root); 2693 return true; 2694 } 2695 2696 r = amdgpu_bo_reserve(root, true); 2697 if (r) 2698 goto error_unref; 2699 2700 /* Double check that the VM still exists */ 2701 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags); 2702 vm = xa_load(&adev->vm_manager.pasids, pasid); 2703 if (vm && vm->root.bo != root) 2704 vm = NULL; 2705 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags); 2706 if (!vm) 2707 goto error_unlock; 2708 2709 flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | 2710 AMDGPU_PTE_SYSTEM; 2711 2712 if (is_compute_context) { 2713 /* Intentionally setting invalid PTE flag 2714 * combination to force a no-retry-fault 2715 */ 2716 flags = AMDGPU_VM_NORETRY_FLAGS; 2717 value = 0; 2718 } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { 2719 /* Redirect the access to the dummy page */ 2720 value = adev->dummy_page_addr; 2721 flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE | 2722 AMDGPU_PTE_WRITEABLE; 2723 2724 } else { 2725 /* Let the hw retry silently on the PTE */ 2726 value = 0; 2727 } 2728 2729 r = dma_resv_reserve_fences(root->tbo.base.resv, 1); 2730 if (r) { 2731 pr_debug("failed %d to reserve fence slot\n", r); 2732 goto error_unlock; 2733 } 2734 2735 r = amdgpu_vm_update_range(adev, vm, true, false, false, false, 2736 NULL, addr, addr, flags, value, 0, NULL, NULL, NULL); 2737 if (r) 2738 goto error_unlock; 2739 2740 r = amdgpu_vm_update_pdes(adev, vm, true); 2741 2742 error_unlock: 2743 amdgpu_bo_unreserve(root); 2744 if (r < 0) 2745 DRM_ERROR("Can't handle page fault (%d)\n", r); 2746 2747 error_unref: 2748 amdgpu_bo_unref(&root); 2749 2750 return false; 2751 } 2752 2753 #if defined(CONFIG_DEBUG_FS) 2754 /** 2755 * amdgpu_debugfs_vm_bo_info - print BO info for the 
VM
2756  *
2757  * @vm: Requested VM for printing BO info
2758  * @m: debugfs file
2759  *
2760  * Print BO information in debugfs file for the VM
2761  */
2762 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
2763 {
2764         struct amdgpu_bo_va *bo_va, *tmp;
2765         u64 total_idle = 0;
2766         u64 total_evicted = 0;
2767         u64 total_relocated = 0;
2768         u64 total_moved = 0;
2769         u64 total_invalidated = 0;
2770         u64 total_done = 0;
2771         unsigned int total_idle_objs = 0;
2772         unsigned int total_evicted_objs = 0;
2773         unsigned int total_relocated_objs = 0;
2774         unsigned int total_moved_objs = 0;
2775         unsigned int total_invalidated_objs = 0;
2776         unsigned int total_done_objs = 0;
2777         unsigned int id = 0;
2778
2779         spin_lock(&vm->status_lock);
2780         seq_puts(m, "\tIdle BOs:\n");
2781         list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
2782                 if (!bo_va->base.bo)
2783                         continue;
2784                 total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2785         }
2786         total_idle_objs = id;
2787         id = 0;
2788
2789         seq_puts(m, "\tEvicted BOs:\n");
2790         list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
2791                 if (!bo_va->base.bo)
2792                         continue;
2793                 total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2794         }
2795         total_evicted_objs = id;
2796         id = 0;
2797
2798         seq_puts(m, "\tRelocated BOs:\n");
2799         list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
2800                 if (!bo_va->base.bo)
2801                         continue;
2802                 total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2803         }
2804         total_relocated_objs = id;
2805         id = 0;
2806
2807         seq_puts(m, "\tMoved BOs:\n");
2808         list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
2809                 if (!bo_va->base.bo)
2810                         continue;
2811                 total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2812         }
2813         total_moved_objs = id;
2814         id = 0;
2815
2816         seq_puts(m, "\tInvalidated BOs:\n");
2817         list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
2818                 if (!bo_va->base.bo)
2819                         continue;
2820                 total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2821         }
2822         total_invalidated_objs = id;
2823         id = 0;
2824
2825         seq_puts(m, "\tDone BOs:\n");
2826         list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
2827                 if (!bo_va->base.bo)
2828                         continue;
2829                 total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
2830         }
2831         spin_unlock(&vm->status_lock);
2832         total_done_objs = id;
2833
2834         seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
2835                    total_idle_objs);
2836         seq_printf(m, "\tTotal evicted size: %12lld\tobjs:\t%d\n", total_evicted,
2837                    total_evicted_objs);
2838         seq_printf(m, "\tTotal relocated size: %12lld\tobjs:\t%d\n", total_relocated,
2839                    total_relocated_objs);
2840         seq_printf(m, "\tTotal moved size: %12lld\tobjs:\t%d\n", total_moved,
2841                    total_moved_objs);
2842         seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
2843                    total_invalidated_objs);
2844         seq_printf(m, "\tTotal done size: %12lld\tobjs:\t%d\n", total_done,
2845                    total_done_objs);
2846 }
2847 #endif
2848
2849 /**
2850  * amdgpu_vm_update_fault_cache - update cached fault info.
2851  * @adev: amdgpu device pointer
2852  * @pasid: PASID of the VM
2853  * @addr: Address of the fault
2854  * @status: GPUVM fault status register
2855  * @vmhub: which vmhub got the fault
2856  *
2857  * Cache the fault info for later use by userspace in debugging.
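 *
 * The cached vmhub value packs the hub type and the hub instance: the type
 * (AMDGPU_VMHUB_TYPE_GFX/MM0/MM1) sits in the low bits and the instance
 * index is shifted up by AMDGPU_VMHUB_IDX_SHIFT. For example, a fault on
 * the second MMHUB0 instance is stored as
 * AMDGPU_VMHUB_TYPE_MM0 | (1 << AMDGPU_VMHUB_IDX_SHIFT).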
2858 */ 2859 void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev, 2860 unsigned int pasid, 2861 uint64_t addr, 2862 uint32_t status, 2863 unsigned int vmhub) 2864 { 2865 struct amdgpu_vm *vm; 2866 unsigned long flags; 2867 2868 xa_lock_irqsave(&adev->vm_manager.pasids, flags); 2869 2870 vm = xa_load(&adev->vm_manager.pasids, pasid); 2871 /* Don't update the fault cache if status is 0. In the multiple 2872 * fault case, subsequent faults will return a 0 status which is 2873 * useless for userspace and replaces the useful fault status, so 2874 * only update if status is non-0. 2875 */ 2876 if (vm && status) { 2877 vm->fault_info.addr = addr; 2878 vm->fault_info.status = status; 2879 if (AMDGPU_IS_GFXHUB(vmhub)) { 2880 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX; 2881 vm->fault_info.vmhub |= 2882 (vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT; 2883 } else if (AMDGPU_IS_MMHUB0(vmhub)) { 2884 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0; 2885 vm->fault_info.vmhub |= 2886 (vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT; 2887 } else if (AMDGPU_IS_MMHUB1(vmhub)) { 2888 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1; 2889 vm->fault_info.vmhub |= 2890 (vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT; 2891 } else { 2892 WARN_ONCE(1, "Invalid vmhub %u\n", vmhub); 2893 } 2894 } 2895 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags); 2896 } 2897 2898