Lines Matching full:vm

120 	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
122 struct amdgpu_vm *vm; member
131 * amdgpu_vm_assert_locked - check if VM is correctly locked
132 	 * @vm: the VM which should be tested
134 * Asserts that the VM root PD is locked.
136 static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm) in amdgpu_vm_assert_locked() argument
138 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_assert_locked()
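For orientation, a minimal sketch of the caller contract that makes this assert hold, assuming the usual amdgpu_bo_reserve()/amdgpu_bo_unreserve() helpers; the flow is illustrative, not lifted from this file:

	int r = amdgpu_bo_reserve(vm->root.bo, true);	/* takes the root PD resv */

	if (r)
		return r;
	amdgpu_vm_assert_locked(vm);	/* dma_resv_assert_held() is now satisfied */
	/* ... manipulate VM state ... */
	amdgpu_bo_unreserve(vm->root.bo);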
146 * State for PDs/PTs and per VM BOs which are not at the location they should
151 struct amdgpu_vm *vm = vm_bo->vm; in amdgpu_vm_bo_evicted() local
155 amdgpu_vm_assert_locked(vm); in amdgpu_vm_bo_evicted()
156 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
158 list_move(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
160 list_move_tail(&vm_bo->vm_status, &vm->evicted); in amdgpu_vm_bo_evicted()
161 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted()
168 * State for per VM BOs which are moved, but that change is not yet reflected
173 amdgpu_vm_assert_locked(vm_bo->vm); in amdgpu_vm_bo_moved()
174 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
175 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_moved()
176 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_moved()
184 * State for PDs/PTs and per VM BOs which have gone through the state machine
189 amdgpu_vm_assert_locked(vm_bo->vm); in amdgpu_vm_bo_idle()
190 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
191 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); in amdgpu_vm_bo_idle()
192 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_idle()
206 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
207 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); in amdgpu_vm_bo_invalidated()
208 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_invalidated()
222 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted_user()
223 list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user); in amdgpu_vm_bo_evicted_user()
224 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_evicted_user()
237 amdgpu_vm_assert_locked(vm_bo->vm); in amdgpu_vm_bo_relocated()
239 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
240 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_relocated()
241 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_relocated()
257 amdgpu_vm_assert_locked(vm_bo->vm); in amdgpu_vm_bo_done()
258 spin_lock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
259 list_move(&vm_bo->vm_status, &vm_bo->vm->done); in amdgpu_vm_bo_done()
260 spin_unlock(&vm_bo->vm->status_lock); in amdgpu_vm_bo_done()
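All six state helpers above share one shape: take vm->status_lock, list_move() the vm_status node onto the target list, drop the lock. Condensed into a single hypothetical helper (the real code open-codes this per state so each transition point stays greppable):

static void amdgpu_vm_bo_set_state(struct amdgpu_vm_bo_base *vm_bo,
				   struct list_head *target)
{
	/* hypothetical helper, shown only to illustrate the shared pattern */
	spin_lock(&vm_bo->vm->status_lock);
	list_move(&vm_bo->vm_status, target);
	spin_unlock(&vm_bo->vm->status_lock);
}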
265 	 * @vm: the VM whose state machine to reset
267 	 * Move all vm_bo objects in the VM into a state where they will be updated
270 static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm) in amdgpu_vm_bo_reset_state_machine() argument
274 amdgpu_vm_assert_locked(vm); in amdgpu_vm_bo_reset_state_machine()
276 spin_lock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
277 list_splice_init(&vm->done, &vm->invalidated); in amdgpu_vm_bo_reset_state_machine()
278 list_for_each_entry(vm_bo, &vm->invalidated, vm_status) in amdgpu_vm_bo_reset_state_machine()
281 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { in amdgpu_vm_bo_reset_state_machine()
286 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); in amdgpu_vm_bo_reset_state_machine()
288 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); in amdgpu_vm_bo_reset_state_machine()
290 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_reset_state_machine()
295 * @base: base structure for tracking BO usage in a VM
297 * Takes the vm status_lock and updates the shared memory stat. If the basic
303 struct amdgpu_vm *vm = base->vm; in amdgpu_vm_update_shared() local
310 spin_lock(&vm->status_lock); in amdgpu_vm_update_shared()
315 vm->stats[bo_memtype].drm.shared += size; in amdgpu_vm_update_shared()
316 vm->stats[bo_memtype].drm.private -= size; in amdgpu_vm_update_shared()
318 vm->stats[bo_memtype].drm.shared -= size; in amdgpu_vm_update_shared()
319 vm->stats[bo_memtype].drm.private += size; in amdgpu_vm_update_shared()
322 spin_unlock(&vm->status_lock); in amdgpu_vm_update_shared()
329 	 * Update the per-VM stats for all the VMs of the BO if needed, from private to shared or
342 * @base: base structure for tracking BO usage in a VM
347 	 * Caller needs to hold the vm status_lock. Useful for when multiple updates
353 struct amdgpu_vm *vm = base->vm; in amdgpu_vm_update_stats_locked() local
362 vm->stats[bo_memtype].drm.shared += size; in amdgpu_vm_update_stats_locked()
364 vm->stats[bo_memtype].drm.private += size; in amdgpu_vm_update_stats_locked()
369 vm->stats[res_memtype].drm.resident += size; in amdgpu_vm_update_stats_locked()
374 vm->stats[res_memtype].drm.purgeable += size; in amdgpu_vm_update_stats_locked()
376 vm->stats[bo_memtype].evicted += size; in amdgpu_vm_update_stats_locked()
382 * @base: base structure for tracking BO usage in a VM
392 struct amdgpu_vm *vm = base->vm; in amdgpu_vm_update_stats() local
394 spin_lock(&vm->status_lock); in amdgpu_vm_update_stats()
396 spin_unlock(&vm->status_lock); in amdgpu_vm_update_stats()
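The _locked suffix follows the usual kernel convention: amdgpu_vm_update_stats() takes the lock itself, while the _locked variant lets a caller batch several adjustments under one acquisition. A hedged sketch (the sign parameter and variable names are assumptions):

	spin_lock(&vm->status_lock);
	amdgpu_vm_update_stats_locked(&bo_va_a->base, res_a, +1); /* account */
	amdgpu_vm_update_stats_locked(&bo_va_b->base, res_b, -1); /* unaccount */
	spin_unlock(&vm->status_lock);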
400 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
402 * @base: base structure for tracking BO usage in a VM
403 * @vm: vm to which bo is to be added
410 struct amdgpu_vm *vm, struct amdgpu_bo *bo) in amdgpu_vm_bo_base_init() argument
412 base->vm = vm; in amdgpu_vm_bo_base_init()
422 spin_lock(&vm->status_lock); in amdgpu_vm_bo_base_init()
425 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_base_init()
427 if (!amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_base_init()
430 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_base_init()
432 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move); in amdgpu_vm_bo_base_init()
443 	 * we checked all the prerequisites, but it looks like this per-VM BO in amdgpu_vm_bo_base_init()
445 	 * is validated on next VM use to avoid a fault. in amdgpu_vm_bo_base_init()
453 * @vm: vm providing the BOs
457 * Lock the VM root PD in the DRM execution context.
459 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, in amdgpu_vm_lock_pd() argument
462 /* We need at least two fences for the VM PD/PT updates */ in amdgpu_vm_lock_pd()
463 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, in amdgpu_vm_lock_pd()
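A hedged usage sketch: callers drive this from a drm_exec retry loop, reserving two fence slots for the immediate and delayed PD/PT updates. The surrounding flow is illustrative; on recent kernels drm_exec_init() takes an object-count hint as the third argument:

	struct drm_exec exec;
	int r;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 2);
		drm_exec_retry_on_contention(&exec);
		if (r)
			goto error;
	}
	/* ... root PD is locked, update page tables ... */
error:
	drm_exec_fini(&exec);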
469 * @vm: vm providing the BOs
475 int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec, in amdgpu_vm_lock_done_list() argument
478 struct list_head *prev = &vm->done; in amdgpu_vm_lock_done_list()
484 spin_lock(&vm->status_lock); in amdgpu_vm_lock_done_list()
485 while (!list_is_head(prev->next, &vm->done)) { in amdgpu_vm_lock_done_list()
487 spin_unlock(&vm->status_lock); in amdgpu_vm_lock_done_list()
495 spin_lock(&vm->status_lock); in amdgpu_vm_lock_done_list()
498 spin_unlock(&vm->status_lock); in amdgpu_vm_lock_done_list()
507 * @vm: vm providing the BOs
513 struct amdgpu_vm *vm) in amdgpu_vm_move_to_lru_tail() argument
516 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in amdgpu_vm_move_to_lru_tail()
522 struct amdgpu_vm *vm) in amdgpu_vm_init_entities() argument
526 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
532 return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, in amdgpu_vm_init_entities()
537 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_init_entities()
542 static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm) in amdgpu_vm_fini_entities() argument
544 drm_sched_entity_destroy(&vm->immediate); in amdgpu_vm_fini_entities()
545 drm_sched_entity_destroy(&vm->delayed); in amdgpu_vm_fini_entities()
551 * @vm: optional VM to check, might be NULL
554 * are still valid to use this VM. The VM parameter might be NULL in which case
557 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_generation() argument
561 if (!vm) in amdgpu_vm_generation()
564 result += lower_32_bits(vm->generation); in amdgpu_vm_generation()
566 if (drm_sched_entity_error(&vm->delayed)) in amdgpu_vm_generation()
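A sketch of the intended consumer pattern, assuming the caller may sleep between the two samples; if the generation changed, any cached VM state has to be rebuilt:

	uint64_t gen = amdgpu_vm_generation(adev, vm);

	/* ... prepare the submission, possibly dropping locks ... */

	if (gen != amdgpu_vm_generation(adev, vm))
		return -EAGAIN;	/* reset or entity error in between, redo */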
573 * amdgpu_vm_validate - validate evicted BOs tracked in the VM
576 * @vm: vm providing the BOs
577 * @ticket: optional reservation ticket used to reserve the VM
581 * Validate the page table BOs and per-VM BOs on command submission if
588 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_validate() argument
593 uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); in amdgpu_vm_validate()
598 if (vm->generation != new_vm_generation) { in amdgpu_vm_validate()
599 vm->generation = new_vm_generation; in amdgpu_vm_validate()
600 amdgpu_vm_bo_reset_state_machine(vm); in amdgpu_vm_validate()
601 amdgpu_vm_fini_entities(vm); in amdgpu_vm_validate()
602 r = amdgpu_vm_init_entities(adev, vm); in amdgpu_vm_validate()
607 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
608 while (!list_empty(&vm->evicted)) { in amdgpu_vm_validate()
609 bo_base = list_first_entry(&vm->evicted, in amdgpu_vm_validate()
612 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
623 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); in amdgpu_vm_validate()
626 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
628 while (ticket && !list_empty(&vm->evicted_user)) { in amdgpu_vm_validate()
629 bo_base = list_first_entry(&vm->evicted_user, in amdgpu_vm_validate()
632 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
643 spin_lock(&vm->status_lock); in amdgpu_vm_validate()
645 spin_unlock(&vm->status_lock); in amdgpu_vm_validate()
647 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_validate()
648 vm->evicting = false; in amdgpu_vm_validate()
649 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_validate()
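The validate callback elided by the matcher is invoked once per evicted BO. A hedged sketch of a minimal caller, assuming the upstream amdgpu_vm_validate(adev, vm, ticket, callback, param) prototype:

static int example_validate_cb(void *param, struct amdgpu_bo *bo)
{
	/* hypothetical callback: move the BO back into an allowed domain */
	struct ttm_operation_ctx ctx = { .interruptible = true };

	amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}

	/* later, at the call site, with the VM locked via drm_exec: */
	r = amdgpu_vm_validate(adev, vm, NULL, example_validate_cb, NULL);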
655 * amdgpu_vm_ready - check VM is ready for updates
657 * @vm: VM to check
659 * Check if all VM PDs/PTs are ready for updates
662 * True if VM is not evicting and all VM entities are not stopped
664 bool amdgpu_vm_ready(struct amdgpu_vm *vm) in amdgpu_vm_ready() argument
668 amdgpu_vm_assert_locked(vm); in amdgpu_vm_ready()
670 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_ready()
671 ret = !vm->evicting; in amdgpu_vm_ready()
672 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_ready()
674 spin_lock(&vm->status_lock); in amdgpu_vm_ready()
675 ret &= list_empty(&vm->evicted); in amdgpu_vm_ready()
676 spin_unlock(&vm->status_lock); in amdgpu_vm_ready()
678 spin_lock(&vm->immediate.lock); in amdgpu_vm_ready()
679 ret &= !vm->immediate.stopped; in amdgpu_vm_ready()
680 spin_unlock(&vm->immediate.lock); in amdgpu_vm_ready()
682 spin_lock(&vm->delayed.lock); in amdgpu_vm_ready()
683 ret &= !vm->delayed.stopped; in amdgpu_vm_ready()
684 spin_unlock(&vm->delayed.lock); in amdgpu_vm_ready()
690 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
705 /* Compute has a VM bug for GFX version < 7. in amdgpu_vm_check_compute_bug()
706 	   Compute has a VM bug for GFX 8 MEC firmware version < 673. */ in amdgpu_vm_check_compute_bug()
756 * amdgpu_vm_flush - hardware flush the vm
762 * Emit a VM flush when it is necessary.
896 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
898 * @vm: requested vm
901 * Find @bo inside the requested vm.
902 	 * Search inside the @bo's vm list for the requested vm
910 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, in amdgpu_vm_bo_find() argument
916 if (base->vm != vm) in amdgpu_vm_bo_find()
955 * @vm: requested vm
964 struct amdgpu_vm *vm, bool immediate) in amdgpu_vm_update_pdes() argument
972 amdgpu_vm_assert_locked(vm); in amdgpu_vm_update_pdes()
974 spin_lock(&vm->status_lock); in amdgpu_vm_update_pdes()
975 list_splice_init(&vm->relocated, &relocated); in amdgpu_vm_update_pdes()
976 spin_unlock(&vm->status_lock); in amdgpu_vm_update_pdes()
986 params.vm = vm; in amdgpu_vm_update_pdes()
989 r = vm->update_funcs->prepare(&params, NULL, in amdgpu_vm_update_pdes()
1003 r = vm->update_funcs->commit(&params, &vm->last_update); in amdgpu_vm_update_pdes()
1008 atomic64_inc(&vm->tlb_seq); in amdgpu_vm_update_pdes()
1026 * Increments the tlb sequence to make sure that future CS execute a VM flush.
1034 atomic64_inc(&tlb_cb->vm->tlb_seq); in amdgpu_vm_tlb_seq_cb()
1045 * Increments the tlb sequence to make sure that future CS execute a VM flush.
1052 struct amdgpu_vm *vm = params->vm; in amdgpu_vm_tlb_flush() local
1054 tlb_cb->vm = vm; in amdgpu_vm_tlb_flush()
1062 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_tlb_flush()
1063 vm->last_tlb_flush = dma_fence_get(*fence); in amdgpu_vm_tlb_flush()
1069 if (!params->unlocked && vm->is_compute_context) { in amdgpu_vm_tlb_flush()
1070 amdgpu_vm_tlb_fence_create(params->adev, vm, fence); in amdgpu_vm_tlb_flush()
1073 dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence, in amdgpu_vm_tlb_flush()
1079 * amdgpu_vm_update_range - update a range in the vm page table
1082 * @vm: the VM to update the range
1102 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_update_range() argument
1137 params.vm = vm; in amdgpu_vm_update_range()
1145 amdgpu_vm_eviction_lock(vm); in amdgpu_vm_update_range()
1146 if (vm->evicting) { in amdgpu_vm_update_range()
1151 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) { in amdgpu_vm_update_range()
1154 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true); in amdgpu_vm_update_range()
1155 swap(vm->last_unlocked, tmp); in amdgpu_vm_update_range()
1159 r = vm->update_funcs->prepare(&params, sync, in amdgpu_vm_update_range()
1218 r = vm->update_funcs->commit(&params, fence); in amdgpu_vm_update_range()
1231 amdgpu_vm_eviction_unlock(vm); in amdgpu_vm_update_range()
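Most parameters are trimmed away by the matcher; a hedged call sketch mirroring how amdgpu_vm_clear_freed() (further down) unmaps a freed range. The argument order follows the current upstream prototype and is an assumption here:

	/* immediate=false, unlocked=false, flush_tlb=true, allow_override=false;
	 * a NULL ttm_resource plus NULL pages_addr turns the update into a clear
	 */
	r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
				   &sync, mapping->start, mapping->last,
				   init_pte_value, 0, 0, NULL, NULL, &fence);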
1236 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, in amdgpu_vm_get_memory() argument
1239 spin_lock(&vm->status_lock); in amdgpu_vm_get_memory()
1240 memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM); in amdgpu_vm_get_memory()
1241 spin_unlock(&vm->status_lock); in amdgpu_vm_get_memory()
1245 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1248 * @bo_va: requested BO and VM object
1260 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_update() local
1276 /* Implicitly sync to command submissions in the same VM before in amdgpu_vm_bo_update()
1279 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv, in amdgpu_vm_bo_update()
1280 AMDGPU_SYNC_EQ_OWNER, vm); in amdgpu_vm_bo_update()
1312 AMDGPU_SYNC_EXPLICIT, vm); in amdgpu_vm_bo_update()
1334 if (clear || amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_update()
1335 last_update = &vm->last_update; in amdgpu_vm_bo_update()
1359 amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags, in amdgpu_vm_bo_update()
1364 r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, in amdgpu_vm_bo_update()
1377 if (amdgpu_vm_is_bo_always_valid(vm, bo)) { in amdgpu_vm_bo_update()
1490 * @vm: requested vm
1497 struct amdgpu_vm *vm, in amdgpu_vm_free_mapping() argument
1510 * @vm: requested vm
1512 	 * Register a cleanup callback to disable PRT support after the VM dies.
1514 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_prt_fini() argument
1516 struct dma_resv *resv = vm->root.bo->tbo.base.resv; in amdgpu_vm_prt_fini()
1531 * @vm: requested vm
1543 struct amdgpu_vm *vm, in amdgpu_vm_clear_freed() argument
1553 * Implicitly sync to command submissions in the same VM before in amdgpu_vm_clear_freed()
1557 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv, in amdgpu_vm_clear_freed()
1558 AMDGPU_SYNC_EQ_OWNER, vm); in amdgpu_vm_clear_freed()
1562 while (!list_empty(&vm->freed)) { in amdgpu_vm_clear_freed()
1563 mapping = list_first_entry(&vm->freed, in amdgpu_vm_clear_freed()
1567 r = amdgpu_vm_update_range(adev, vm, false, false, true, false, in amdgpu_vm_clear_freed()
1570 amdgpu_vm_free_mapping(adev, vm, mapping, f); in amdgpu_vm_clear_freed()
1594 * @vm: requested vm
1595 * @ticket: optional reservation ticket used to reserve the VM
1605 struct amdgpu_vm *vm, in amdgpu_vm_handle_moved() argument
1613 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1614 while (!list_empty(&vm->moved)) { in amdgpu_vm_handle_moved()
1615 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1617 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1619 		/* Per VM BOs never need to be cleared in the page tables */ in amdgpu_vm_handle_moved()
1623 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1626 while (!list_empty(&vm->invalidated)) { in amdgpu_vm_handle_moved()
1627 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, in amdgpu_vm_handle_moved()
1630 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1656 if (vm->is_compute_context && in amdgpu_vm_handle_moved()
1662 spin_lock(&vm->status_lock); in amdgpu_vm_handle_moved()
1664 spin_unlock(&vm->status_lock); in amdgpu_vm_handle_moved()
1670 * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1673 * @vm: requested vm
1677 * Flush TLB if needed for a compute VM.
1683 struct amdgpu_vm *vm, in amdgpu_vm_flush_compute_tlb() argument
1687 uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); in amdgpu_vm_flush_compute_tlb()
1691 WARN_ON_ONCE(!vm->is_compute_context); in amdgpu_vm_flush_compute_tlb()
1698 if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq) in amdgpu_vm_flush_compute_tlb()
1706 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type, in amdgpu_vm_flush_compute_tlb()
1715 * amdgpu_vm_bo_add - add a bo to a specific vm
1718 * @vm: requested vm
1721 * Add @bo into the requested vm.
1722 * Add @bo to the list of bos associated with the vm
1730 struct amdgpu_vm *vm, in amdgpu_vm_bo_add() argument
1739 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); in amdgpu_vm_bo_add()
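Combined with amdgpu_vm_bo_map() below, the usual lifecycle is add, map, then update. A hedged sketch with error handling elided; the PTE flags and variables are illustrative:

	struct amdgpu_bo_va *bo_va;

	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!bo_va)
		return -ENOMEM;
	r = amdgpu_vm_bo_map(adev, bo_va, saddr, 0 /* offset */, size,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
	if (!r)
		r = amdgpu_vm_bo_update(adev, bo_va, false);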
1773 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_insert_map() local
1778 amdgpu_vm_it_insert(mapping, &vm->va); in amdgpu_vm_bo_insert_map()
1783 if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved) in amdgpu_vm_bo_insert_map()
1821 * amdgpu_vm_bo_map - map bo inside a vm
1830 	 * Add a mapping of the BO at the specified addr into the VM.
1844 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_map() local
1855 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_map()
1879 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1888 	 * Add a mapping of the BO at the specified addr into the VM. Replace existing
1915 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size); in amdgpu_vm_bo_replace_map()
1935 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1941 	 * Remove a mapping of the BO at the specified addr from the VM.
1953 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_unmap() local
1976 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_unmap()
1981 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_unmap()
1983 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_unmap()
1993 * @vm: VM structure to use
2003 struct amdgpu_vm *vm, in amdgpu_vm_bo_clear_mappings() argument
2032 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); in amdgpu_vm_bo_clear_mappings()
2063 amdgpu_vm_it_remove(tmp, &vm->va); in amdgpu_vm_bo_clear_mappings()
2072 list_add(&tmp->list, &vm->freed); in amdgpu_vm_bo_clear_mappings()
2080 amdgpu_vm_it_insert(before, &vm->va); in amdgpu_vm_bo_clear_mappings()
2084 if (amdgpu_vm_is_bo_always_valid(vm, bo) && in amdgpu_vm_bo_clear_mappings()
2095 amdgpu_vm_it_insert(after, &vm->va); in amdgpu_vm_bo_clear_mappings()
2099 if (amdgpu_vm_is_bo_always_valid(vm, bo) && in amdgpu_vm_bo_clear_mappings()
2112 * @vm: the requested VM
2121 struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, in amdgpu_vm_bo_lookup_mapping() argument
2124 return amdgpu_vm_it_iter_first(&vm->va, addr, addr); in amdgpu_vm_bo_lookup_mapping()
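A small usage sketch; the interval tree is keyed in GPU pages, so byte addresses are shifted down first (AMDGPU_GPU_PAGE_SHIFT is the real constant, the surrounding flow is illustrative):

	struct amdgpu_bo_va_mapping *mapping;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr >> AMDGPU_GPU_PAGE_SHIFT);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;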
2130 * @vm: the requested vm
2135 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket) in amdgpu_vm_bo_trace_cs() argument
2142 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping; in amdgpu_vm_bo_trace_cs()
2158 * amdgpu_vm_bo_del - remove a bo from a specific vm
2163 * Remove @bo_va->bo from the requested vm.
2172 struct amdgpu_vm *vm = bo_va->base.vm; in amdgpu_vm_bo_del() local
2175 dma_resv_assert_held(vm->root.bo->tbo.base.resv); in amdgpu_vm_bo_del()
2179 if (amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_del()
2193 spin_lock(&vm->status_lock); in amdgpu_vm_bo_del()
2195 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_del()
2199 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
2202 list_add(&mapping->list, &vm->freed); in amdgpu_vm_bo_del()
2206 amdgpu_vm_it_remove(mapping, &vm->va); in amdgpu_vm_bo_del()
2207 amdgpu_vm_free_mapping(adev, vm, mapping, in amdgpu_vm_bo_del()
2220 * amdgpu_vm_evictable - check if we can evict a VM
2222 * @bo: A page table of the VM.
2224 * Check if it is possible to evict a VM.
2230 /* Page tables of a destroyed VM can go away immediately */ in amdgpu_vm_evictable()
2231 if (!bo_base || !bo_base->vm) in amdgpu_vm_evictable()
2234 /* Don't evict VM page tables while they are busy */ in amdgpu_vm_evictable()
2239 if (!amdgpu_vm_eviction_trylock(bo_base->vm)) in amdgpu_vm_evictable()
2242 /* Don't evict VM page tables while they are updated */ in amdgpu_vm_evictable()
2243 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) { in amdgpu_vm_evictable()
2244 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2248 bo_base->vm->evicting = true; in amdgpu_vm_evictable()
2249 amdgpu_vm_eviction_unlock(bo_base->vm); in amdgpu_vm_evictable()
2266 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_invalidate() local
2268 if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) { in amdgpu_vm_bo_invalidate()
2279 else if (amdgpu_vm_is_bo_always_valid(vm, bo)) in amdgpu_vm_bo_invalidate()
2301 struct amdgpu_vm *vm = bo_base->vm; in amdgpu_vm_bo_move() local
2303 spin_lock(&vm->status_lock); in amdgpu_vm_bo_move()
2306 spin_unlock(&vm->status_lock); in amdgpu_vm_bo_move()
2313 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2315 * @vm_size: VM size
2318 * VM page table as power of two
2334 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2337 * @min_vm_size: the minimum vm size in GB if it's set auto
2351 /* adjust vm size first */ in amdgpu_vm_adjust_size()
2355 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n", in amdgpu_vm_adjust_size()
2363 /* Optimal VM size depends on the amount of physical in amdgpu_vm_adjust_size()
2370 * - On GFX8 and older, VM space can be segmented for in amdgpu_vm_adjust_size()
2376 * VM size with the given page table size. in amdgpu_vm_adjust_size()
2405 	/* block size depends on vm size and hw setup */ in amdgpu_vm_adjust_size()
2423 "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", in amdgpu_vm_adjust_size()
2429 * amdgpu_vm_wait_idle - wait for the VM to become idle
2431 * @vm: VM object to wait for
2432 * @timeout: timeout to wait for VM to become idle
2434 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) in amdgpu_vm_wait_idle() argument
2436 timeout = drm_sched_entity_flush(&vm->immediate, timeout); in amdgpu_vm_wait_idle()
2440 return drm_sched_entity_flush(&vm->delayed, timeout); in amdgpu_vm_wait_idle()
2453 struct amdgpu_vm *vm; in amdgpu_vm_get_vm_from_pasid() local
2457 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_get_vm_from_pasid()
2460 return vm; in amdgpu_vm_get_vm_from_pasid()
2464 * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2468 * frees the vm task_info ptr at the last put
2477 * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2479 * @vm: VM to get info from
2485 amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm) in amdgpu_vm_get_task_info_vm() argument
2489 if (vm) { in amdgpu_vm_get_task_info_vm()
2490 ti = vm->task_info; in amdgpu_vm_get_task_info_vm()
2491 kref_get(&vm->task_info->refcount); in amdgpu_vm_get_task_info_vm()
2501 * @pasid: PASID identifier for VM
2513 static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm) in amdgpu_vm_create_task_info() argument
2515 vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL); in amdgpu_vm_create_task_info()
2516 if (!vm->task_info) in amdgpu_vm_create_task_info()
2519 kref_init(&vm->task_info->refcount); in amdgpu_vm_create_task_info()
2526 * @vm: vm for which to set the info
2528 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) in amdgpu_vm_set_task_info() argument
2530 if (!vm->task_info) in amdgpu_vm_set_task_info()
2533 if (vm->task_info->task.pid == current->pid) in amdgpu_vm_set_task_info()
2536 vm->task_info->task.pid = current->pid; in amdgpu_vm_set_task_info()
2537 get_task_comm(vm->task_info->task.comm, current); in amdgpu_vm_set_task_info()
2542 vm->task_info->tgid = current->group_leader->pid; in amdgpu_vm_set_task_info()
2543 get_task_comm(vm->task_info->process_name, current->group_leader); in amdgpu_vm_set_task_info()
2547 * amdgpu_vm_init - initialize a vm instance
2550 * @vm: requested vm
2552 * @pasid: the pasid the VM is using on this GPU
2554 * Init @vm fields.
2559 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, in amdgpu_vm_init() argument
2566 vm->va = RB_ROOT_CACHED; in amdgpu_vm_init()
2568 vm->reserved_vmid[i] = NULL; in amdgpu_vm_init()
2569 INIT_LIST_HEAD(&vm->evicted); in amdgpu_vm_init()
2570 INIT_LIST_HEAD(&vm->evicted_user); in amdgpu_vm_init()
2571 INIT_LIST_HEAD(&vm->relocated); in amdgpu_vm_init()
2572 INIT_LIST_HEAD(&vm->moved); in amdgpu_vm_init()
2573 INIT_LIST_HEAD(&vm->idle); in amdgpu_vm_init()
2574 INIT_LIST_HEAD(&vm->invalidated); in amdgpu_vm_init()
2575 spin_lock_init(&vm->status_lock); in amdgpu_vm_init()
2576 INIT_LIST_HEAD(&vm->freed); in amdgpu_vm_init()
2577 INIT_LIST_HEAD(&vm->done); in amdgpu_vm_init()
2578 INIT_KFIFO(vm->faults); in amdgpu_vm_init()
2580 r = amdgpu_vm_init_entities(adev, vm); in amdgpu_vm_init()
2584 ttm_lru_bulk_move_init(&vm->lru_bulk_move); in amdgpu_vm_init()
2586 vm->is_compute_context = false; in amdgpu_vm_init()
2588 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_init()
2591 dev_dbg(adev->dev, "VM update mode is %s\n", in amdgpu_vm_init()
2592 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_init()
2593 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_init()
2595 "CPU update of VM recommended only for large BAR system\n"); in amdgpu_vm_init()
2597 if (vm->use_cpu_for_update) in amdgpu_vm_init()
2598 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_init()
2600 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_init()
2602 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_init()
2603 vm->last_unlocked = dma_fence_get_stub(); in amdgpu_vm_init()
2604 vm->last_tlb_flush = dma_fence_get_stub(); in amdgpu_vm_init()
2605 vm->generation = amdgpu_vm_generation(adev, NULL); in amdgpu_vm_init()
2607 mutex_init(&vm->eviction_lock); in amdgpu_vm_init()
2608 vm->evicting = false; in amdgpu_vm_init()
2609 vm->tlb_fence_context = dma_fence_context_alloc(1); in amdgpu_vm_init()
2611 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, in amdgpu_vm_init()
2623 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo); in amdgpu_vm_init()
2628 r = amdgpu_vm_pt_clear(adev, vm, root, false); in amdgpu_vm_init()
2632 r = amdgpu_vm_create_task_info(vm); in amdgpu_vm_init()
2634 dev_dbg(adev->dev, "Failed to create task info for VM\n"); in amdgpu_vm_init()
2638 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL)); in amdgpu_vm_init()
2642 vm->pasid = pasid; in amdgpu_vm_init()
2645 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2652 if (vm->pasid != 0) { in amdgpu_vm_init()
2653 xa_erase_irq(&adev->vm_manager.pasids, vm->pasid); in amdgpu_vm_init()
2654 vm->pasid = 0; in amdgpu_vm_init()
2656 amdgpu_vm_pt_free_root(adev, vm); in amdgpu_vm_init()
2657 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_init()
2661 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_init()
2662 dma_fence_put(vm->last_unlocked); in amdgpu_vm_init()
2663 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move); in amdgpu_vm_init()
2664 amdgpu_vm_fini_entities(vm); in amdgpu_vm_init()
2670 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2673 * @vm: requested vm
2678 * Changes the following VM parameters:
2688 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_make_compute() argument
2692 r = amdgpu_bo_reserve(vm->root.bo, true); in amdgpu_vm_make_compute()
2696 /* Update VM state */ in amdgpu_vm_make_compute()
2697 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & in amdgpu_vm_make_compute()
2699 dev_dbg(adev->dev, "VM update mode is %s\n", in amdgpu_vm_make_compute()
2700 vm->use_cpu_for_update ? "CPU" : "SDMA"); in amdgpu_vm_make_compute()
2701 WARN_ONCE((vm->use_cpu_for_update && in amdgpu_vm_make_compute()
2703 "CPU update of VM recommended only for large BAR system\n"); in amdgpu_vm_make_compute()
2705 if (vm->use_cpu_for_update) { in amdgpu_vm_make_compute()
2707 r = amdgpu_bo_sync_wait(vm->root.bo, in amdgpu_vm_make_compute()
2712 vm->update_funcs = &amdgpu_vm_cpu_funcs; in amdgpu_vm_make_compute()
2713 r = amdgpu_vm_pt_map_tables(adev, vm); in amdgpu_vm_make_compute()
2718 vm->update_funcs = &amdgpu_vm_sdma_funcs; in amdgpu_vm_make_compute()
2721 dma_fence_put(vm->last_update); in amdgpu_vm_make_compute()
2722 vm->last_update = dma_fence_get_stub(); in amdgpu_vm_make_compute()
2723 vm->is_compute_context = true; in amdgpu_vm_make_compute()
2726 amdgpu_bo_unreserve(vm->root.bo); in amdgpu_vm_make_compute()
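A hedged sketch of the GFX-to-compute handover as the KFD side drives it; reservation of the root PD happens inside the function, so the caller flow is short (illustrative):

	/* VM was created earlier through amdgpu_vm_init() on the GFX path */
	r = amdgpu_vm_make_compute(adev, vm);
	if (r)
		return r;
	/* vm->is_compute_context is now true; later TLB maintenance goes
	 * through amdgpu_vm_flush_compute_tlb() (see above)
	 */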
2730 static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm) in amdgpu_vm_stats_is_zero() argument
2733 if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) && in amdgpu_vm_stats_is_zero()
2734 vm->stats[i].evicted == 0)) in amdgpu_vm_stats_is_zero()
2741 * amdgpu_vm_fini - tear down a vm instance
2744 * @vm: requested vm
2746 * Tear down @vm.
2747 * Unbind the VM and remove all bos from the vm bo list
2749 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) in amdgpu_vm_fini() argument
2757 amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); in amdgpu_vm_fini()
2759 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_fini()
2761 /* Remove PASID mapping before destroying VM */ in amdgpu_vm_fini()
2762 if (vm->pasid != 0) { in amdgpu_vm_fini()
2763 xa_erase_irq(&adev->vm_manager.pasids, vm->pasid); in amdgpu_vm_fini()
2764 vm->pasid = 0; in amdgpu_vm_fini()
2766 dma_fence_wait(vm->last_unlocked, false); in amdgpu_vm_fini()
2767 dma_fence_put(vm->last_unlocked); in amdgpu_vm_fini()
2768 dma_fence_wait(vm->last_tlb_flush, false); in amdgpu_vm_fini()
2770 spin_lock_irqsave(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2771 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags); in amdgpu_vm_fini()
2772 dma_fence_put(vm->last_tlb_flush); in amdgpu_vm_fini()
2774 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { in amdgpu_vm_fini()
2776 amdgpu_vm_prt_fini(adev, vm); in amdgpu_vm_fini()
2781 amdgpu_vm_free_mapping(adev, vm, mapping, NULL); in amdgpu_vm_fini()
2784 amdgpu_vm_pt_free_root(adev, vm); in amdgpu_vm_fini()
2787 WARN_ON(vm->root.bo); in amdgpu_vm_fini()
2789 amdgpu_vm_fini_entities(vm); in amdgpu_vm_fini()
2791 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { in amdgpu_vm_fini()
2792 dev_err(adev->dev, "still active bo inside vm\n"); in amdgpu_vm_fini()
2795 &vm->va.rb_root, rb) { in amdgpu_vm_fini()
2803 dma_fence_put(vm->last_update); in amdgpu_vm_fini()
2806 amdgpu_vmid_free_reserved(adev, vm, i); in amdgpu_vm_fini()
2809 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move); in amdgpu_vm_fini()
2811 if (!amdgpu_vm_stats_is_zero(vm)) { in amdgpu_vm_fini()
2812 struct amdgpu_task_info *ti = vm->task_info; in amdgpu_vm_fini()
2815 "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n", in amdgpu_vm_fini()
2819 amdgpu_vm_put_task_info(vm->task_info); in amdgpu_vm_fini()
2823 * amdgpu_vm_manager_init - init the VM manager
2827 * Initialize the VM manager structures
2850 * Compute VM tables will be updated by CPU in amdgpu_vm_manager_init()
2855 * avoid using CPU for VM table updates in amdgpu_vm_manager_init()
2873 * amdgpu_vm_manager_fini - cleanup VM manager
2877 * Cleanup the VM manager and free resources.
2888 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2902 struct amdgpu_vm *vm = &fpriv->vm; in amdgpu_vm_ioctl() local
2911 amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0)); in amdgpu_vm_ioctl()
2914 amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0)); in amdgpu_vm_ioctl()
2924 * amdgpu_vm_handle_fault - graceful handling of VM faults.
2926 * @pasid: PASID of the VM
2934 * Try to gracefully handle a VM fault. Return true if the fault was handled and
2945 struct amdgpu_vm *vm; in amdgpu_vm_handle_fault() local
2949 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2950 if (vm) { in amdgpu_vm_handle_fault()
2951 root = amdgpu_bo_ref(vm->root.bo); in amdgpu_vm_handle_fault()
2952 is_compute_context = vm->is_compute_context; in amdgpu_vm_handle_fault()
2973 /* Double check that the VM still exists */ in amdgpu_vm_handle_fault()
2975 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_handle_fault()
2976 if (vm && vm->root.bo != root) in amdgpu_vm_handle_fault()
2977 vm = NULL; in amdgpu_vm_handle_fault()
2979 if (!vm) in amdgpu_vm_handle_fault()
3008 r = amdgpu_vm_update_range(adev, vm, true, false, false, false, in amdgpu_vm_handle_fault()
3013 r = amdgpu_vm_update_pdes(adev, vm, true); in amdgpu_vm_handle_fault()
3028 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
3030 * @vm: Requested VM for printing BO info
3033 * Print BO information in debugfs file for the VM
3035 void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m) in amdgpu_debugfs_vm_bo_info() argument
3052 amdgpu_vm_assert_locked(vm); in amdgpu_debugfs_vm_bo_info()
3054 spin_lock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
3056 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3065 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3074 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3083 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3092 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3101 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) { in amdgpu_debugfs_vm_bo_info()
3106 spin_unlock(&vm->status_lock); in amdgpu_debugfs_vm_bo_info()
3127 * @pasid: PASID of the VM
3140 struct amdgpu_vm *vm; in amdgpu_vm_update_fault_cache() local
3145 vm = xa_load(&adev->vm_manager.pasids, pasid); in amdgpu_vm_update_fault_cache()
3151 if (vm && status) { in amdgpu_vm_update_fault_cache()
3152 vm->fault_info.addr = addr; in amdgpu_vm_update_fault_cache()
3153 vm->fault_info.status = status; in amdgpu_vm_update_fault_cache()
3156 * when vm could be stale or freed. in amdgpu_vm_update_fault_cache()
3163 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX; in amdgpu_vm_update_fault_cache()
3164 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3167 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0; in amdgpu_vm_update_fault_cache()
3168 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3171 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1; in amdgpu_vm_update_fault_cache()
3172 vm->fault_info.vmhub |= in amdgpu_vm_update_fault_cache()
3182 * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3184 * @vm: VM to test against.
3188 * always guaranteed to be valid inside the VM.
3190 bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo) in amdgpu_vm_is_bo_always_valid() argument
3192 return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv; in amdgpu_vm_is_bo_always_valid()
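The check works because "always valid" per-VM BOs are created sharing the root PD's reservation object, so they can never be reserved or evicted independently of the page tables. A hedged creation sketch; the amdgpu_bo_param fields follow upstream to the best of our knowledge:

	struct amdgpu_bo_param bp = {
		.size = size,
		.byte_align = PAGE_SIZE,
		.domain = AMDGPU_GEM_DOMAIN_VRAM,
		.type = ttm_bo_type_kernel,
		/* sharing the root PD resv is what makes the BO "always valid" */
		.resv = vm->root.bo->tbo.base.resv,
	};
	struct amdgpu_bo *bo;

	r = amdgpu_bo_create(adev, &bp, &bo);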