Lines Matching +full:compute +full:- +full:cb

29 #include <linux/dma-fence-array.h>
32 #include <linux/dma-buf.h>
69 * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
90 #define START(node) ((node)->start)
91 #define LAST(node) ((node)->last)
100 * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
110 * @cb: callback
112 struct dma_fence_cb cb;
116 * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
125 * @cb: callback
127 struct dma_fence_cb cb;
131 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
146 if (vm->pasid == pasid)
149 if (vm->pasid) {
150 r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
154 vm->pasid = 0;
158 r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
163 vm->pasid = pasid;
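
The pasid plumbing above keeps adev->vm_manager.pasids as the single source of truth: the old slot is erased before a new one is stored, so a VM is never reachable under two PASIDs at once, and pasid 0 means "no mapping". A compact userspace model of that invariant, assuming a toy fixed-size table in place of the kernel xarray (all names hypothetical):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PASID 16

struct toy_vm { uint32_t pasid; };
static struct toy_vm *pasid_table[MAX_PASID]; /* stands in for adev->vm_manager.pasids */

static int toy_vm_set_pasid(struct toy_vm *vm, uint32_t pasid)
{
	if (vm->pasid == pasid)
		return 0;                       /* nothing to do */
	if (vm->pasid) {
		pasid_table[vm->pasid] = NULL;  /* xa_erase_irq() */
		vm->pasid = 0;
	}
	if (pasid) {
		if (pasid_table[pasid])
			return -1;              /* slot already taken */
		pasid_table[pasid] = vm;        /* xa_store_irq() */
		vm->pasid = pasid;
	}
	return 0;
}

int main(void)
{
	struct toy_vm vm = { 0 };

	assert(toy_vm_set_pasid(&vm, 3) == 0 && pasid_table[3] == &vm);
	assert(toy_vm_set_pasid(&vm, 5) == 0 && !pasid_table[3] && pasid_table[5] == &vm);
	assert(toy_vm_set_pasid(&vm, 0) == 0 && !pasid_table[5]);
	puts("ok");
	return 0;
}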
171 * amdgpu_vm_bo_evicted - vm_bo is evicted
180 struct amdgpu_vm *vm = vm_bo->vm;
181 struct amdgpu_bo *bo = vm_bo->bo;
183 vm_bo->moved = true;
184 spin_lock(&vm_bo->vm->status_lock);
185 if (bo->tbo.type == ttm_bo_type_kernel)
186 list_move(&vm_bo->vm_status, &vm->evicted);
188 list_move_tail(&vm_bo->vm_status, &vm->evicted);
189 spin_unlock(&vm_bo->vm->status_lock);
192 * amdgpu_vm_bo_moved - vm_bo is moved
201 spin_lock(&vm_bo->vm->status_lock);
202 list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
203 spin_unlock(&vm_bo->vm->status_lock);
207 * amdgpu_vm_bo_idle - vm_bo is idle
216 spin_lock(&vm_bo->vm->status_lock);
217 list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
218 spin_unlock(&vm_bo->vm->status_lock);
219 vm_bo->moved = false;
223 * amdgpu_vm_bo_invalidated - vm_bo is invalidated
232 spin_lock(&vm_bo->vm->status_lock);
233 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
234 spin_unlock(&vm_bo->vm->status_lock);
238 * amdgpu_vm_bo_evicted_user - a user BO's vm_bo is evicted
247 vm_bo->moved = true;
248 spin_lock(&vm_bo->vm->status_lock);
249 list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
250 spin_unlock(&vm_bo->vm->status_lock);
254 * amdgpu_vm_bo_relocated - vm_bo is relocated
263 if (vm_bo->bo->parent) {
264 spin_lock(&vm_bo->vm->status_lock);
265 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
266 spin_unlock(&vm_bo->vm->status_lock);
273 * amdgpu_vm_bo_done - vm_bo is done
282 spin_lock(&vm_bo->vm->status_lock);
283 list_move(&vm_bo->vm_status, &vm_bo->vm->done);
284 spin_unlock(&vm_bo->vm->status_lock);
288 * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
298 spin_lock(&vm->status_lock);
299 list_splice_init(&vm->done, &vm->invalidated);
300 list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
301 vm_bo->moved = true;
302 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
303 struct amdgpu_bo *bo = vm_bo->bo;
305 vm_bo->moved = true;
306 if (!bo || bo->tbo.type != ttm_bo_type_kernel)
307 list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
308 else if (bo->parent)
309 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
311 spin_unlock(&vm->status_lock);
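
Everything from amdgpu_vm_bo_evicted() down to amdgpu_vm_bo_reset_state_machine() is one state machine: each state is a per-VM list, every transition is a list_move under vm->status_lock, and the moved flag tracks whether the BO's page-table entries are stale. A minimal sketch of those transitions under that reading (toy types, not the driver's):

#include <stdbool.h>
#include <stdio.h>

enum vm_bo_state {
	VM_BO_EVICTED, VM_BO_MOVED, VM_BO_IDLE, VM_BO_INVALIDATED,
	VM_BO_EVICTED_USER, VM_BO_RELOCATED, VM_BO_DONE,
};

struct toy_vm_bo {
	enum vm_bo_state state; /* which per-VM list the BO sits on */
	bool moved;             /* page tables need updating        */
};

/* every transition is "lock status_lock, list_move, unlock"; the
 * moved flag changes only where the snippets above change it */
static void vm_bo_set_state(struct toy_vm_bo *bo, enum vm_bo_state next)
{
	bo->state = next;
	if (next == VM_BO_EVICTED || next == VM_BO_EVICTED_USER)
		bo->moved = true;
	else if (next == VM_BO_IDLE)
		bo->moved = false;
}

int main(void)
{
	struct toy_vm_bo bo = { VM_BO_IDLE, false };

	vm_bo_set_state(&bo, VM_BO_EVICTED);  /* lost backing store */
	vm_bo_set_state(&bo, VM_BO_MOVED);    /* revalidated        */
	vm_bo_set_state(&bo, VM_BO_IDLE);     /* PTEs up to date    */
	printf("moved=%d\n", bo.moved);       /* 0 */
	return 0;
}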
315 * amdgpu_vm_update_shared - helper to update shared memory stat
324 struct amdgpu_vm *vm = base->vm;
325 struct amdgpu_bo *bo = base->bo;
330 spin_lock(&vm->status_lock);
331 shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
332 if (base->shared != shared) {
333 base->shared = shared;
335 vm->stats[bo_memtype].drm.shared += size;
336 vm->stats[bo_memtype].drm.private -= size;
338 vm->stats[bo_memtype].drm.shared -= size;
339 vm->stats[bo_memtype].drm.private += size;
342 spin_unlock(&vm->status_lock);
346 * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
356 for (base = bo->vm_bo; base; base = base->next)
361 * amdgpu_vm_update_stats_locked - helper to update normal memory stat
364 * be bo->tbo.resource
365 * @sign: whether to add (+1) to or subtract (-1) from the stat
373 struct amdgpu_vm *vm = base->vm;
374 struct amdgpu_bo *bo = base->bo;
378 /* For drm-total- and drm-shared-, BOs are accounted by their preferred
381 if (base->shared)
382 vm->stats[bo_memtype].drm.shared += size;
384 vm->stats[bo_memtype].drm.private += size;
386 if (res && res->mem_type < __AMDGPU_PL_NUM) {
387 uint32_t res_memtype = res->mem_type;
389 vm->stats[res_memtype].drm.resident += size;
393 if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
394 vm->stats[res_memtype].drm.purgeable += size;
395 if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
396 vm->stats[bo_memtype].evicted += size;
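
The accounting above splits three ways: drm-total-/drm-shared- are charged to the BO's preferred placement, drm-resident- to where it actually sits, and evicted when the two disagree. A sketch of that bookkeeping, assuming two placements and treating the preferred-domain bitmask test as simple equality (the DISCARDABLE/purgeable case is omitted):

#include <stdint.h>
#include <stdio.h>

enum { PL_VRAM, PL_TT, PL_NUM };

struct mem_stats { int64_t shared, private_, resident, evicted; };

static void update_stats(struct mem_stats *stats, int pref_type,
			 int res_type, int shared, int64_t size, int sign)
{
	size *= sign;
	/* drm-total-/drm-shared- go by preferred placement */
	if (shared)
		stats[pref_type].shared += size;
	else
		stats[pref_type].private_ += size;
	/* drm-resident- goes by where the BO actually is */
	stats[res_type].resident += size;
	/* resident outside the preferred placement counts as evicted */
	if (res_type != pref_type)
		stats[pref_type].evicted += size;
}

int main(void)
{
	struct mem_stats stats[PL_NUM] = { { 0 } };

	update_stats(stats, PL_VRAM, PL_TT, 0, 4096, +1); /* evicted to GTT */
	update_stats(stats, PL_VRAM, PL_TT, 0, 4096, -1); /* and unmapped   */
	printf("vram evicted: %lld\n", (long long)stats[PL_VRAM].evicted);
	return 0;
}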
401 * amdgpu_vm_update_stats - helper to update normal memory stat
404 * be bo->tbo.resource
405 * @sign: whether to add (+1) to or subtract (-1) from the stat
412 struct amdgpu_vm *vm = base->vm;
414 spin_lock(&vm->status_lock);
416 spin_unlock(&vm->status_lock);
420 * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
432 base->vm = vm;
433 base->bo = bo;
434 base->next = NULL;
435 INIT_LIST_HEAD(&base->vm_status);
439 base->next = bo->vm_bo;
440 bo->vm_bo = base;
442 spin_lock(&vm->status_lock);
443 base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
444 amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
445 spin_unlock(&vm->status_lock);
450 dma_resv_assert_held(vm->root.bo->tbo.base.resv);
452 ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
453 if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
458 if (bo->preferred_domains &
459 amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
471 * amdgpu_vm_lock_pd - lock PD in drm_exec
483 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
488 * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
499 spin_lock(&adev->mman.bdev.lru_lock);
500 ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
501 spin_unlock(&adev->mman.bdev.lru_lock);
510 r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
511 adev->vm_manager.vm_pte_scheds,
512 adev->vm_manager.vm_pte_num_scheds, NULL);
516 return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
517 adev->vm_manager.vm_pte_scheds,
518 adev->vm_manager.vm_pte_num_scheds, NULL);
521 drm_sched_entity_destroy(&vm->immediate);
528 drm_sched_entity_destroy(&vm->immediate);
529 drm_sched_entity_destroy(&vm->delayed);
533 * amdgpu_vm_generation - return the page table re-generation counter
537 * Returns a page table re-generation token to allow checking if submissions
543 uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
548 result += lower_32_bits(vm->generation);
549 /* Add one if the page tables will be re-generated on next CS */
550 if (drm_sched_entity_error(&vm->delayed))
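
The generation token packs vram_lost_counter into the upper 32 bits and the VM's own generation into the lower 32, plus one when the delayed entity carries an error, so the next CS sees a different value. A worked model of that layout (hypothetical helper):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t vm_generation(uint32_t vram_lost, uint32_t vm_gen, int entity_error)
{
	uint64_t token = (uint64_t)vram_lost << 32;

	token += vm_gen;    /* lower_32_bits(vm->generation) */
	if (entity_error)
		token += 1; /* page tables rebuilt on next CS */
	return token;
}

int main(void)
{
	printf("0x%016" PRIx64 "\n", vm_generation(2, 7, 1)); /* 0x0000000200000008 */
	return 0;
}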
557 * amdgpu_vm_validate - validate evicted BOs tracked in the VM
565 * Validate the page table BOs and per-VM BOs on command submission if
582 if (vm->generation != new_vm_generation) {
583 vm->generation = new_vm_generation;
591 spin_lock(&vm->status_lock);
592 while (!list_empty(&vm->evicted)) {
593 bo_base = list_first_entry(&vm->evicted,
596 spin_unlock(&vm->status_lock);
598 bo = bo_base->bo;
604 if (bo->tbo.type != ttm_bo_type_kernel) {
607 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
610 spin_lock(&vm->status_lock);
612 while (ticket && !list_empty(&vm->evicted_user)) {
613 bo_base = list_first_entry(&vm->evicted_user,
616 spin_unlock(&vm->status_lock);
618 bo = bo_base->bo;
620 if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
625 pr_warn_ratelimited("pid %d\n", ti->pid);
629 return -EINVAL;
638 spin_lock(&vm->status_lock);
640 spin_unlock(&vm->status_lock);
643 vm->evicting = false;
650 * amdgpu_vm_ready - check VM is ready for updates
665 ret = !vm->evicting;
668 spin_lock(&vm->status_lock);
669 empty = list_empty(&vm->evicted);
670 spin_unlock(&vm->status_lock);
676 * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
691 /* Compute has a VM bug for GFX version < 7.
692  * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
693 if (ip_block->version->major <= 7)
695 else if (ip_block->version->major == 8)
696 if (adev->gfx.mec_fw_version < 673)
700 for (i = 0; i < adev->num_rings; i++) {
701 ring = adev->rings[i];
702 if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
703 /* only compute rings */
704 ring->has_compute_vm_bug = has_compute_vm_bug;
706 ring->has_compute_vm_bug = false;
711 * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
722 struct amdgpu_device *adev = ring->adev;
723 unsigned vmhub = ring->vm_hub;
724 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
726 if (job->vmid == 0)
729 if (job->vm_needs_flush || ring->has_compute_vm_bug)
732 if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
735 if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
742 * amdgpu_vm_flush - hardware flush the vm
756 struct amdgpu_device *adev = ring->adev;
757 struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
758 unsigned vmhub = ring->vm_hub;
759 struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
760 struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
761 bool spm_update_needed = job->spm_update_needed;
762 bool gds_switch_needed = ring->funcs->emit_gds_switch &&
763 job->gds_switch_needed;
764 bool vm_flush_needed = job->vm_needs_flush;
778 mutex_lock(&id_mgr->lock);
779 if (id->pasid != job->pasid || !id->pasid_mapping ||
780 !dma_fence_is_signaled(id->pasid_mapping))
782 mutex_unlock(&id_mgr->lock);
784 gds_switch_needed &= !!ring->funcs->emit_gds_switch;
785 vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
786 job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
787 pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
788 ring->funcs->emit_wreg;
790 cleaner_shader_needed = job->run_cleaner_shader &&
791 adev->gfx.enable_cleaner_shader &&
792 ring->funcs->emit_cleaner_shader && job->base.s_fence &&
793 &job->base.s_fence->scheduled == isolation->spearhead;
800 if (ring->funcs->init_cond_exec)
802 ring->cond_exe_gpu_addr);
808 ring->funcs->emit_cleaner_shader(ring);
811 trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
812 amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
816 amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
818 if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
819 adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
821 if (ring->funcs->emit_gds_switch &&
823 amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
824 job->gds_size, job->gws_base,
825 job->gws_size, job->oa_base,
826 job->oa_size);
836 mutex_lock(&id_mgr->lock);
837 dma_fence_put(id->last_flush);
838 id->last_flush = dma_fence_get(fence);
839 id->current_gpu_reset_count =
840 atomic_read(&adev->gpu_reset_counter);
841 mutex_unlock(&id_mgr->lock);
845 mutex_lock(&id_mgr->lock);
846 id->pasid = job->pasid;
847 dma_fence_put(id->pasid_mapping);
848 id->pasid_mapping = dma_fence_get(fence);
849 mutex_unlock(&id_mgr->lock);
858 mutex_lock(&adev->enforce_isolation_mutex);
859 dma_fence_put(isolation->spearhead);
860 isolation->spearhead = dma_fence_get(fence);
861 mutex_unlock(&adev->enforce_isolation_mutex);
868 if (ring->funcs->emit_switch_buffer) {
878 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
897 for (base = bo->vm_bo; base; base = base->next) {
898 if (base->vm != vm)
907 * amdgpu_vm_map_gart - Resolve gart mapping of addr
934 * amdgpu_vm_update_pdes - make sure that all directories are valid
954 spin_lock(&vm->status_lock);
955 list_splice_init(&vm->relocated, &relocated);
956 spin_unlock(&vm->status_lock);
962 return -ENODEV;
969 r = vm->update_funcs->prepare(&params, NULL);
975 flush_tlb_needed |= entry->moved;
982 r = vm->update_funcs->commit(&params, &vm->last_update);
987 atomic64_inc(&vm->tlb_seq);
1001 * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
1003 * @cb: the callback structure
1008 struct dma_fence_cb *cb)
1012 tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
1013 atomic64_inc(&tlb_cb->vm->tlb_seq);
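
amdgpu_vm_tlb_seq_cb() is the classic container_of pattern: the fence core hands back only the embedded dma_fence_cb, and the callback recovers the enclosing structure to bump vm->tlb_seq. The same pattern in plain, runnable C with toy types:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_fence_cb { void (*func)(struct toy_fence_cb *cb); };

struct toy_tlb_cb {
	atomic_uint_fast64_t *tlb_seq; /* stands in for &vm->tlb_seq */
	struct toy_fence_cb cb;
};

static void tlb_seq_cb(struct toy_fence_cb *cb)
{
	struct toy_tlb_cb *tlb_cb = container_of(cb, struct toy_tlb_cb, cb);

	atomic_fetch_add(tlb_cb->tlb_seq, 1); /* atomic64_inc(&vm->tlb_seq) */
}

int main(void)
{
	atomic_uint_fast64_t seq = 0;
	struct toy_tlb_cb tlb_cb = { .tlb_seq = &seq, .cb = { tlb_seq_cb } };

	tlb_cb.cb.func(&tlb_cb.cb); /* fence signals: callback runs */
	printf("tlb_seq = %llu\n", (unsigned long long)atomic_load(&seq));
	return 0;
}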
1018 * amdgpu_vm_tlb_flush - prepare TLB flush
1031 struct amdgpu_vm *vm = params->vm;
1033 tlb_cb->vm = vm;
1035 amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1039 if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
1041 dma_fence_put(vm->last_tlb_flush);
1042 vm->last_tlb_flush = dma_fence_get(*fence);
1044 amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
1048 if (!params->unlocked && vm->is_compute_context) {
1049 amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
1052 dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
1058 * amdgpu_vm_update_range - update a range in the vm page table
1095 return -ENODEV;
1100 return -ENOMEM;
1104 * do a heavy-weight TLB flush unconditionally.
1106 flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
1125 if (vm->evicting) {
1126 r = -EBUSY;
1130 if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
1133 amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
1134 swap(vm->last_unlocked, tmp);
1138 r = vm->update_funcs->prepare(&params, sync);
1143 (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
1164 pages_addr[idx - 1] + PAGE_SIZE))
1168 count--;
1196 r = vm->update_funcs->commit(&params, fence);
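
The fragments around line 1164 above are the contiguity scan: while walking pages_addr[], runs whose DMA addresses advance by exactly PAGE_SIZE are folded into a single mapping operation. A sketch of that idea with a hypothetical helper:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static unsigned int contiguous_run(const uint64_t *pages_addr,
				   unsigned int start, unsigned int count)
{
	unsigned int idx;

	for (idx = start + 1; idx < start + count; idx++)
		if (pages_addr[idx] != pages_addr[idx - 1] + PAGE_SIZE)
			break; /* run ends here */
	return idx - start;
}

int main(void)
{
	uint64_t pages[] = { 0x1000, 0x2000, 0x3000, 0x9000 };

	/* first three pages are contiguous, the fourth is not */
	printf("run = %u\n", contiguous_run(pages, 0, 4)); /* 3 */
	return 0;
}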
1217 spin_lock(&vm->status_lock);
1218 memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
1219 spin_unlock(&vm->status_lock);
1223 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
1232 * 0 for success, -EINVAL for failure.
1237 struct amdgpu_bo *bo = bo_va->base.bo;
1238 struct amdgpu_vm *vm = bo_va->base.vm;
1257 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1262 r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
1272 struct drm_gem_object *obj = &bo->tbo.base;
1274 if (obj->import_attach && bo_va->is_xgmi) {
1275 struct dma_buf *dma_buf = obj->import_attach->dmabuf;
1276 struct drm_gem_object *gobj = dma_buf->priv;
1279 if (abo->tbo.resource &&
1280 abo->tbo.resource->mem_type == TTM_PL_VRAM)
1283 mem = bo->tbo.resource;
1284 if (mem && (mem->mem_type == TTM_PL_TT ||
1285 mem->mem_type == AMDGPU_PL_PREEMPT))
1286 pages_addr = bo->tbo.ttm->dma_address;
1289 r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
1298 flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
1303 bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
1304 vram_base = bo_adev->vm_manager.vram_base_offset;
1305 uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
1313 last_update = &vm->last_update;
1315 last_update = &bo_va->last_pt_update;
1317 if (!clear && bo_va->base.moved) {
1319 list_splice_init(&bo_va->valids, &bo_va->invalids);
1321 } else if (bo_va->cleared != clear) {
1322 list_splice_init(&bo_va->valids, &bo_va->invalids);
1325 list_for_each_entry(mapping, &bo_va->invalids, list) {
1328 /* Normally, bo_va->flags only contains the READABLE and WRITEABLE bits here
1331 if (!(mapping->flags & AMDGPU_PTE_READABLE))
1333 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
1342 !uncached, &sync, mapping->start,
1343 mapping->last, update_flags,
1344 mapping->offset, vram_base, mem,
1355 if (bo->tbo.resource &&
1356 !(bo->preferred_domains &
1357 amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
1358 amdgpu_vm_bo_evicted(&bo_va->base);
1360 amdgpu_vm_bo_idle(&bo_va->base);
1362 amdgpu_vm_bo_done(&bo_va->base);
1365 list_splice_init(&bo_va->invalids, &bo_va->valids);
1366 bo_va->cleared = clear;
1367 bo_va->base.moved = false;
1370 list_for_each_entry(mapping, &bo_va->valids, list)
1380 * amdgpu_vm_update_prt_state - update the global PRT state
1389 spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
1390 enable = !!atomic_read(&adev->vm_manager.num_prt_users);
1391 adev->gmc.gmc_funcs->set_prt(adev, enable);
1392 spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
1396 * amdgpu_vm_prt_get - add a PRT user
1402 if (!adev->gmc.gmc_funcs->set_prt)
1405 if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
1410 * amdgpu_vm_prt_put - drop a PRT user
1416 if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
1421 * amdgpu_vm_prt_cb - callback for updating the PRT status
1428 struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);
1430 amdgpu_vm_prt_put(cb->adev);
1431 kfree(cb);
1435 * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
1443 struct amdgpu_prt_cb *cb;
1445 if (!adev->gmc.gmc_funcs->set_prt)
1448 cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
1449 if (!cb) {
1456 cb->adev = adev;
1457 if (!fence || dma_fence_add_callback(fence, &cb->cb,
1459 amdgpu_vm_prt_cb(fence, &cb->cb);
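
amdgpu_vm_add_prt_cb() has to drop the PRT user count even when there is no fence or the fence already signaled, which is why the callback is invoked directly when dma_fence_add_callback() refuses to queue it. The same add-or-run-now pattern in miniature (toy fence, hypothetical names):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_fence { bool signaled; };

/* returns false when the callback could NOT be queued (no fence, or
 * the fence already signaled), mirroring dma_fence_add_callback() */
static bool fence_add_callback(struct toy_fence *f, void (*cb)(void))
{
	if (!f || f->signaled)
		return false;
	(void)cb; /* a real implementation would queue cb here */
	return true;
}

static void prt_cb(void) { puts("PRT user dropped"); }

static void add_prt_cb(struct toy_fence *fence)
{
	if (!fence_add_callback(fence, prt_cb))
		prt_cb(); /* run immediately, as the driver does */
}

int main(void)
{
	struct toy_fence done = { .signaled = true };

	add_prt_cb(&done); /* already signaled: fires synchronously */
	add_prt_cb(NULL);  /* no fence at all: also fires           */
	return 0;
}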
1464 * amdgpu_vm_free_mapping - free a mapping
1478 if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1484 * amdgpu_vm_prt_fini - finish all prt mappings
1493 struct dma_resv *resv = vm->root.bo->tbo.base.resv;
1505 * amdgpu_vm_clear_freed - clear freed BOs in the PT
1534 r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
1539 while (!list_empty(&vm->freed)) {
1540 mapping = list_first_entry(&vm->freed,
1542 list_del(&mapping->list);
1545 &sync, mapping->start, mapping->last,
1568 * amdgpu_vm_handle_moved - handle moved BOs in the PT
1590 spin_lock(&vm->status_lock);
1591 while (!list_empty(&vm->moved)) {
1592 bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
1594 spin_unlock(&vm->status_lock);
1600 spin_lock(&vm->status_lock);
1603 while (!list_empty(&vm->invalidated)) {
1604 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
1606 resv = bo_va->base.bo->tbo.base.resv;
1607 spin_unlock(&vm->status_lock);
1610 if (!adev->debug_vm && dma_resv_trylock(resv)) {
1630 /* Remember evicted DMABuf imports in compute VMs for later
1633 if (vm->is_compute_context &&
1634 bo_va->base.bo->tbo.base.import_attach &&
1635 (!bo_va->base.bo->tbo.resource ||
1636 bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
1637 amdgpu_vm_bo_evicted_user(&bo_va->base);
1639 spin_lock(&vm->status_lock);
1641 spin_unlock(&vm->status_lock);
1647 * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
1652 * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
1654 * Flush TLB if needed for a compute VM.
1668 WARN_ON_ONCE(!vm->is_compute_context);
1675 if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
1678 if (adev->family == AMDGPU_FAMILY_AI ||
1679 adev->family == AMDGPU_FAMILY_RV)
1683 r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
1692 * amdgpu_vm_bo_add - add a bo to a specific vm
1716 amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
1718 bo_va->ref_count = 1;
1719 bo_va->last_pt_update = dma_fence_get_stub();
1720 INIT_LIST_HEAD(&bo_va->valids);
1721 INIT_LIST_HEAD(&bo_va->invalids);
1726 dma_resv_assert_held(bo->tbo.base.resv);
1728 bo_va->is_xgmi = true;
1738 * amdgpu_vm_bo_insert_map - insert a new mapping
1750 struct amdgpu_vm *vm = bo_va->base.vm;
1751 struct amdgpu_bo *bo = bo_va->base.bo;
1753 mapping->bo_va = bo_va;
1754 list_add(&mapping->list, &bo_va->invalids);
1755 amdgpu_vm_it_insert(mapping, &vm->va);
1757 if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
1760 if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
1761 amdgpu_vm_bo_moved(&bo_va->base);
1778 return -EINVAL;
1783 return -EINVAL;
1787 return -EINVAL;
1790 lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
1791 if (lpfn >= adev->vm_manager.max_pfn)
1792 return -EINVAL;
1798 * amdgpu_vm_bo_map - map bo inside a vm
1820 struct amdgpu_bo *bo = bo_va->base.bo;
1821 struct amdgpu_vm *vm = bo_va->base.vm;
1830 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1832 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
1835 dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
1836 "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
1837 tmp->start, tmp->last + 1);
1838 return -EINVAL;
1843 return -ENOMEM;
1845 mapping->start = saddr;
1846 mapping->last = eaddr;
1847 mapping->offset = offset;
1848 mapping->flags = flags;
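
mapping->start and mapping->last above are inclusive page-frame numbers: saddr is divided down by the GPU page size and eaddr is saddr plus (size - 1) in pages. A worked example of that arithmetic, assuming 4 KiB GPU pages:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SIZE 4096ULL

int main(void)
{
	uint64_t va = 0x100000;            /* byte address, page aligned */
	uint64_t size = 3 * GPU_PAGE_SIZE; /* mapping covers three pages */
	uint64_t saddr = va / GPU_PAGE_SIZE;
	uint64_t eaddr = saddr + (size - 1) / GPU_PAGE_SIZE;

	/* inclusive range: pages 256..258, i.e. mapping->start..->last */
	printf("start=%llu last=%llu\n",
	       (unsigned long long)saddr, (unsigned long long)eaddr);
	return 0;
}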
1856 * amdgpu_vm_bo_replace_map - map bo inside a vm, replacing existing mappings
1879 struct amdgpu_bo *bo = bo_va->base.bo;
1890 return -ENOMEM;
1892 r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
1899 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1901 mapping->start = saddr;
1902 mapping->last = eaddr;
1903 mapping->offset = offset;
1904 mapping->flags = flags;
1912 * amdgpu_vm_bo_unmap - remove bo mapping from vm
1930 struct amdgpu_vm *vm = bo_va->base.vm;
1935 list_for_each_entry(mapping, &bo_va->valids, list) {
1936 if (mapping->start == saddr)
1940 if (&mapping->list == &bo_va->valids) {
1943 list_for_each_entry(mapping, &bo_va->invalids, list) {
1944 if (mapping->start == saddr)
1948 if (&mapping->list == &bo_va->invalids)
1949 return -ENOENT;
1952 list_del(&mapping->list);
1953 amdgpu_vm_it_remove(mapping, &vm->va);
1954 mapping->bo_va = NULL;
1958 list_add(&mapping->list, &vm->freed);
1961 bo_va->last_pt_update);
1967 * amdgpu_vm_bo_clear_mappings - remove all mappings in a specific range
1993 eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
1998 return -ENOMEM;
1999 INIT_LIST_HEAD(&before->list);
2004 return -ENOMEM;
2006 INIT_LIST_HEAD(&after->list);
2009 tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
2012 if (tmp->start < saddr) {
2013 before->start = tmp->start;
2014 before->last = saddr - 1;
2015 before->offset = tmp->offset;
2016 before->flags = tmp->flags;
2017 before->bo_va = tmp->bo_va;
2018 list_add(&before->list, &tmp->bo_va->invalids);
2022 if (tmp->last > eaddr) {
2023 after->start = eaddr + 1;
2024 after->last = tmp->last;
2025 after->offset = tmp->offset;
2026 after->offset += (after->start - tmp->start) << PAGE_SHIFT;
2027 after->flags = tmp->flags;
2028 after->bo_va = tmp->bo_va;
2029 list_add(&after->list, &tmp->bo_va->invalids);
2032 list_del(&tmp->list);
2033 list_add(&tmp->list, &removed);
2040 amdgpu_vm_it_remove(tmp, &vm->va);
2041 list_del(&tmp->list);
2043 if (tmp->start < saddr)
2044 tmp->start = saddr;
2045 if (tmp->last > eaddr)
2046 tmp->last = eaddr;
2048 tmp->bo_va = NULL;
2049 list_add(&tmp->list, &vm->freed);
2054 if (!list_empty(&before->list)) {
2055 struct amdgpu_bo *bo = before->bo_va->base.bo;
2057 amdgpu_vm_it_insert(before, &vm->va);
2058 if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
2062 !before->bo_va->base.moved)
2063 amdgpu_vm_bo_moved(&before->bo_va->base);
2069 if (!list_empty(&after->list)) {
2070 struct amdgpu_bo *bo = after->bo_va->base.bo;
2072 amdgpu_vm_it_insert(after, &vm->va);
2073 if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
2077 !after->bo_va->base.moved)
2078 amdgpu_vm_bo_moved(&after->bo_va->base);
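
amdgpu_vm_bo_clear_mappings() above performs interval surgery: clearing [saddr, eaddr] out of an existing mapping can leave a "before" remainder, an "after" remainder with its offset advanced, or both, while the covered middle goes to vm->freed. A sketch of just the split math, with inclusive bounds and a 12-bit page shift as in the driver's offset calculation:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct map { uint64_t start, last, offset; };

static void split(const struct map *tmp, uint64_t saddr, uint64_t eaddr,
		  struct map *before, struct map *after,
		  int *has_before, int *has_after)
{
	*has_before = tmp->start < saddr;
	if (*has_before) {
		before->start = tmp->start;
		before->last = saddr - 1;
		before->offset = tmp->offset;
	}
	*has_after = tmp->last > eaddr;
	if (*has_after) {
		after->start = eaddr + 1;
		after->last = tmp->last;
		after->offset = tmp->offset +
				((after->start - tmp->start) << PAGE_SHIFT);
	}
}

int main(void)
{
	struct map m = { 100, 199, 0 }, b, a;
	int hb, ha;

	split(&m, 120, 149, &b, &a, &hb, &ha);
	if (hb)
		printf("before: %llu-%llu\n", (unsigned long long)b.start,
		       (unsigned long long)b.last);  /* 100-119 */
	if (ha)
		printf("after: %llu-%llu off=0x%llx\n",
		       (unsigned long long)a.start,
		       (unsigned long long)a.last,
		       (unsigned long long)a.offset); /* 150-199 off=0x32000 */
	return 0;
}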
2087 * amdgpu_vm_bo_lookup_mapping - find mapping by address
2101 return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
2105 * amdgpu_vm_bo_trace_cs - trace all reserved mappings
2119 for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
2121 if (mapping->bo_va && mapping->bo_va->base.bo) {
2124 bo = mapping->bo_va->base.bo;
2125 if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
2135 * amdgpu_vm_bo_del - remove a bo from a specific vm
2140 * Remove @bo_va->bo from the requested vm.
2148 struct amdgpu_bo *bo = bo_va->base.bo;
2149 struct amdgpu_vm *vm = bo_va->base.vm;
2152 dma_resv_assert_held(vm->root.bo->tbo.base.resv);
2155 dma_resv_assert_held(bo->tbo.base.resv);
2157 ttm_bo_set_bulk_move(&bo->tbo, NULL);
2159 for (base = &bo_va->base.bo->vm_bo; *base;
2160 base = &(*base)->next) {
2161 if (*base != &bo_va->base)
2164 amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
2165 *base = bo_va->base.next;
2170 spin_lock(&vm->status_lock);
2171 list_del(&bo_va->base.vm_status);
2172 spin_unlock(&vm->status_lock);
2174 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
2175 list_del(&mapping->list);
2176 amdgpu_vm_it_remove(mapping, &vm->va);
2177 mapping->bo_va = NULL;
2179 list_add(&mapping->list, &vm->freed);
2181 list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
2182 list_del(&mapping->list);
2183 amdgpu_vm_it_remove(mapping, &vm->va);
2185 bo_va->last_pt_update);
2188 dma_fence_put(bo_va->last_pt_update);
2190 if (bo && bo_va->is_xgmi)
2197 * amdgpu_vm_evictable - check if we can evict a VM
2205 struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
2208 if (!bo_base || !bo_base->vm)
2212 if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
2216 if (!amdgpu_vm_eviction_trylock(bo_base->vm))
2220 if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
2221 amdgpu_vm_eviction_unlock(bo_base->vm);
2225 bo_base->vm->evicting = true;
2226 amdgpu_vm_eviction_unlock(bo_base->vm);
2231 * amdgpu_vm_bo_invalidate - mark the bo as invalid
2242 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2243 struct amdgpu_vm *vm = bo_base->vm;
2250 if (bo_base->moved)
2252 bo_base->moved = true;
2254 if (bo->tbo.type == ttm_bo_type_kernel)
2264 * amdgpu_vm_bo_move - handle BO move
2277 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
2278 struct amdgpu_vm *vm = bo_base->vm;
2280 spin_lock(&vm->status_lock);
2281 amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
2283 spin_unlock(&vm->status_lock);
2290 * amdgpu_vm_get_block_size - calculate VM page table size as power of two
2305 return (bits - 9);
2311 * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
2324 unsigned int max_size = 1 << (max_bits - 30);
2329 if (amdgpu_vm_size != -1) {
2332 dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
2344 * - Need to map system memory and VRAM from all GPUs
2345 * - VRAM from other GPUs not known here
2346 * - Assume VRAM <= system memory
2347 * - On GFX8 and older, VM space can be segmented for
2349 * - Need to allow room for fragmentation, guard pages etc.
2357 (1 << 30) - 1) >> 30;
2362 adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
2364 tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
2365 if (amdgpu_vm_block_size != -1)
2366 tmp >>= amdgpu_vm_block_size - 9;
2367 tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
2368 adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
2369 switch (adev->vm_manager.num_level) {
2371 adev->vm_manager.root_level = AMDGPU_VM_PDB2;
2374 adev->vm_manager.root_level = AMDGPU_VM_PDB1;
2377 adev->vm_manager.root_level = AMDGPU_VM_PDB0;
2380 dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
2383 if (amdgpu_vm_block_size != -1)
2384 adev->vm_manager.block_size =
2386 - AMDGPU_GPU_PAGE_SHIFT
2387 - 9 * adev->vm_manager.num_level);
2388 else if (adev->vm_manager.num_level > 1)
2389 adev->vm_manager.block_size = 9;
2391 adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
2393 if (amdgpu_vm_fragment_size == -1)
2394 adev->vm_manager.fragment_size = fragment_size_default;
2396 adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
2398 DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
2399 vm_size, adev->vm_manager.num_level + 1,
2400 adev->vm_manager.block_size,
2401 adev->vm_manager.fragment_size);
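
The sizing math above converts the VM size in GB to a page-frame count (1 GB is 1 << 18 four-KiB pages), then derives how many 9-bit translation levels are needed. A worked model under those assumptions (hypothetical helper; the reported level count is num_level + 1, matching the DRM_INFO line):

#include <stdint.h>
#include <stdio.h>

static int fls64_(uint64_t x) { int n = 0; while (x) { n++; x >>= 1; } return n; }

static unsigned int num_levels(unsigned int vm_size_gb)
{
	uint64_t max_pfn = (uint64_t)vm_size_gb << 18;
	uint64_t tmp = max_pfn; /* assume it is already a power of two */

	/* each level resolves 9 bits of the page-frame number */
	return (fls64_(tmp) - 1 + 8) / 9 - 1; /* DIV_ROUND_UP(fls64 - 1, 9) - 1 */
}

int main(void)
{
	/* 256 GB -> 2^26 pages -> 26 PFN bits -> DIV_ROUND_UP(26, 9) = 3
	 * translation steps, so num_level = 2 (printed as 3 levels). */
	printf("256 GB: num_level = %u\n", num_levels(256));
	return 0;
}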
2405 * amdgpu_vm_wait_idle - wait for the VM to become idle
2412 timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
2418 return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
2434 xa_lock_irqsave(&adev->vm_manager.pasids, flags);
2435 vm = xa_load(&adev->vm_manager.pasids, pasid);
2436 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
2442 * amdgpu_vm_put_task_info - reference down the vm task_info ptr
2450 kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
2454 * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
2467 ti = vm->task_info;
2468 kref_get(&vm->task_info->refcount);
2475 * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
2492 vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
2493 if (!vm->task_info)
2494 return -ENOMEM;
2496 kref_init(&vm->task_info->refcount);
2501 * amdgpu_vm_set_task_info - Sets VMs task info.
2507 if (!vm->task_info)
2510 if (vm->task_info->pid == current->pid)
2513 vm->task_info->pid = current->pid;
2514 get_task_comm(vm->task_info->task_name, current);
2516 if (current->group_leader->mm != current->mm)
2519 vm->task_info->tgid = current->group_leader->pid;
2520 get_task_comm(vm->task_info->process_name, current->group_leader);
2524 * amdgpu_vm_init - initialize a vm instance
2542 vm->va = RB_ROOT_CACHED;
2544 vm->reserved_vmid[i] = NULL;
2545 INIT_LIST_HEAD(&vm->evicted);
2546 INIT_LIST_HEAD(&vm->evicted_user);
2547 INIT_LIST_HEAD(&vm->relocated);
2548 INIT_LIST_HEAD(&vm->moved);
2549 INIT_LIST_HEAD(&vm->idle);
2550 INIT_LIST_HEAD(&vm->invalidated);
2551 spin_lock_init(&vm->status_lock);
2552 INIT_LIST_HEAD(&vm->freed);
2553 INIT_LIST_HEAD(&vm->done);
2554 INIT_KFIFO(vm->faults);
2560 ttm_lru_bulk_move_init(&vm->lru_bulk_move);
2562 vm->is_compute_context = false;
2564 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2568 vm->use_cpu_for_update ? "CPU" : "SDMA");
2569 WARN_ONCE((vm->use_cpu_for_update &&
2570 !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2573 if (vm->use_cpu_for_update)
2574 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2576 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2578 vm->last_update = dma_fence_get_stub();
2579 vm->last_unlocked = dma_fence_get_stub();
2580 vm->last_tlb_flush = dma_fence_get_stub();
2581 vm->generation = amdgpu_vm_generation(adev, NULL);
2583 mutex_init(&vm->eviction_lock);
2584 vm->evicting = false;
2585 vm->tlb_fence_context = dma_fence_context_alloc(1);
2587 r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
2592 root_bo = amdgpu_bo_ref(&root->bo);
2599 amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
2600 r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
2612 amdgpu_bo_unreserve(vm->root.bo);
2619 amdgpu_bo_unreserve(vm->root.bo);
2623 dma_fence_put(vm->last_tlb_flush);
2624 dma_fence_put(vm->last_unlocked);
2625 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2632 * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
2641 * - use_cpu_for_update
2642 * - pte_supports_ats
2648 * 0 for success, -errno for errors.
2654 r = amdgpu_bo_reserve(vm->root.bo, true);
2659 vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
2662 vm->use_cpu_for_update ? "CPU" : "SDMA");
2663 WARN_ONCE((vm->use_cpu_for_update &&
2664 !amdgpu_gmc_vram_full_visible(&adev->gmc)),
2667 if (vm->use_cpu_for_update) {
2669 r = amdgpu_bo_sync_wait(vm->root.bo,
2674 vm->update_funcs = &amdgpu_vm_cpu_funcs;
2680 vm->update_funcs = &amdgpu_vm_sdma_funcs;
2683 dma_fence_put(vm->last_update);
2684 vm->last_update = dma_fence_get_stub();
2685 vm->is_compute_context = true;
2688 amdgpu_bo_unreserve(vm->root.bo);
2695 if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
2696 vm->stats[i].evicted == 0))
2703 * amdgpu_vm_fini - tear down a vm instance
2714 bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
2721 root = amdgpu_bo_ref(vm->root.bo);
2724 dma_fence_wait(vm->last_unlocked, false);
2725 dma_fence_put(vm->last_unlocked);
2726 dma_fence_wait(vm->last_tlb_flush, false);
2728 spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
2729 spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
2730 dma_fence_put(vm->last_tlb_flush);
2732 list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
2733 if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
2738 list_del(&mapping->list);
2745 WARN_ON(vm->root.bo);
2749 if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
2750 dev_err(adev->dev, "still active bo inside vm\n");
2753 &vm->va.rb_root, rb) {
2757 list_del(&mapping->list);
2761 dma_fence_put(vm->last_update);
2764 if (vm->reserved_vmid[i]) {
2766 vm->reserved_vmid[i] = false;
2770 ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
2773 struct amdgpu_task_info *ti = vm->task_info;
2775 dev_warn(adev->dev,
2776 "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
2777 ti->process_name, ti->pid, ti->task_name, ti->tgid);
2780 amdgpu_vm_put_task_info(vm->task_info);
2784 * amdgpu_vm_manager_init - init the VM manager
2797 adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
2798 adev->asic_type == CHIP_NAVI10 ||
2799 adev->asic_type == CHIP_NAVI14);
2802 adev->vm_manager.fence_context =
2805 adev->vm_manager.seqno[i] = 0;
2807 spin_lock_init(&adev->vm_manager.prt_lock);
2808 atomic_set(&adev->vm_manager.num_prt_users, 0);
2811 * Compute VM tables will be updated by CPU
2814 if (amdgpu_vm_update_mode == -1) {
2818 if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
2820 adev->vm_manager.vm_update_mode =
2823 adev->vm_manager.vm_update_mode = 0;
2825 adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
2827 adev->vm_manager.vm_update_mode = 0;
2830 xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
2834 * amdgpu_vm_manager_fini - cleanup VM manager
2842 WARN_ON(!xa_empty(&adev->vm_manager.pasids));
2843 xa_destroy(&adev->vm_manager.pasids);
2849 * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
2856 * 0 for success, -errno for errors.
2862 struct amdgpu_fpriv *fpriv = filp->driver_priv;
2865 if (args->in.flags)
2866 return -EINVAL;
2868 switch (args->in.op) {
2871 if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2873 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
2878 if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
2880 fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
2884 return -EINVAL;
2891 * amdgpu_vm_handle_fault - graceful handling of VM faults.
2915 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2916 vm = xa_load(&adev->vm_manager.pasids, pasid);
2918 root = amdgpu_bo_ref(vm->root.bo);
2919 is_compute_context = vm->is_compute_context;
2923 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2941 xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
2942 vm = xa_load(&adev->vm_manager.pasids, pasid);
2943 if (vm && vm->root.bo != root)
2945 xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
2954 * combination to force a no-retry-fault
2960 value = adev->dummy_page_addr;
2969 r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
2995 * amdgpu_debugfs_vm_bo_info - print BO info for the VM
3019 spin_lock(&vm->status_lock);
3021 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
3022 if (!bo_va->base.bo)
3024 total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3030 list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
3031 if (!bo_va->base.bo)
3033 total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3039 list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
3040 if (!bo_va->base.bo)
3042 total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3048 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
3049 if (!bo_va->base.bo)
3051 total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3057 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
3058 if (!bo_va->base.bo)
3060 total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3066 list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
3067 if (!bo_va->base.bo)
3069 total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
3071 spin_unlock(&vm->status_lock);
3090 * amdgpu_vm_update_fault_cache - update cached fault info.
3108 xa_lock_irqsave(&adev->vm_manager.pasids, flags);
3110 vm = xa_load(&adev->vm_manager.pasids, pasid);
3114 * only update if status is non-zero.
3117 vm->fault_info.addr = addr;
3118 vm->fault_info.status = status;
3123 adev->vm_manager.fault_info.addr = addr;
3124 adev->vm_manager.fault_info.vmhub = vmhub;
3125 adev->vm_manager.fault_info.status = status;
3128 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
3129 vm->fault_info.vmhub |=
3130 (vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
3132 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
3133 vm->fault_info.vmhub |=
3134 (vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
3136 vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
3137 vm->fault_info.vmhub |=
3138 (vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
3143 xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
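
The fault cache encodes which hub faulted as a type tag (GFX, MM0, MM1) plus the hub's index within its range, shifted by AMDGPU_VMHUB_IDX_SHIFT. A model of that encoding with illustrative constant values (not the driver's):

#include <stdio.h>

#define VMHUB_IDX_SHIFT 2
#define VMHUB_TYPE_GFX  0
#define VMHUB_TYPE_MM0  1
#define VMHUB_TYPE_MM1  2

#define GFXHUB_START  0
#define MMHUB0_START  8
#define MMHUB1_START 16

static unsigned int encode_vmhub(unsigned int vmhub)
{
	if (vmhub < MMHUB0_START)
		return VMHUB_TYPE_GFX | ((vmhub - GFXHUB_START) << VMHUB_IDX_SHIFT);
	if (vmhub < MMHUB1_START)
		return VMHUB_TYPE_MM0 | ((vmhub - MMHUB0_START) << VMHUB_IDX_SHIFT);
	return VMHUB_TYPE_MM1 | ((vmhub - MMHUB1_START) << VMHUB_IDX_SHIFT);
}

int main(void)
{
	/* MMHUB0 instance 1 -> type MM0, index 1 */
	printf("0x%x\n", encode_vmhub(9)); /* (1 << 2) | 1 = 0x5 */
	return 0;
}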
3147 * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
3157 return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;