Lines matching full:vm in drivers/gpu/drm/xe/xe_exec.c (identifier search; each hit shows the source line number and, for code, the enclosing function)
45 * We do not allow a user to trigger a bind at exec time; rather, we have a VM
47 * sense, a VM bind is basically the same operation as an exec from the user
48 * perspective. E.g., if an exec depends on a VM bind, use the in / out fence
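
To make that contract concrete: a minimal userspace sketch (not from this file; the helper name is hypothetical and error handling is elided) that orders an exec after an async VM bind by reusing one drm_syncobj as the bind's out-fence and the exec's in-fence. Struct, flag, and ioctl names follow the xe_drm.h uAPI.

    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/xe_drm.h>

    /* Hypothetical helper, for illustration only. */
    static void exec_after_bind(int fd, uint32_t exec_queue_id,
                                uint64_t batch_addr)
    {
        uint32_t syncobj;
        struct drm_xe_sync sync = { .type = DRM_XE_SYNC_TYPE_SYNCOBJ };
        struct drm_xe_exec exec = {};

        drmSyncobjCreate(fd, 0, &syncobj);
        sync.handle = syncobj;

        /* Out-fence on the bind: the kernel signals the syncobj once the
         * async bind completes (the drm_xe_vm_bind setup is elided here;
         * pass &sync with num_syncs = 1 to DRM_IOCTL_XE_VM_BIND). */
        sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;

        /* In-fence on the exec: same syncobj, SIGNAL flag cleared, so the
         * batch waits for the bind to finish before it runs. */
        sync.flags = 0;
        exec.exec_queue_id = exec_queue_id;
        exec.address = batch_addr;
        exec.num_batch_buffer = 1;
        exec.num_syncs = 1;
        exec.syncs = (uintptr_t)&sync;
        drmIoctl(fd, DRM_IOCTL_XE_EXEC, &exec);
    }
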
53 * the VM that have been invalidated since the last exec; likewise, we also have
55 * behind any pending kernel operations on any external BOs in the VM or any BOs
56 * private to the VM. This is accomplished by the rebinds waiting on the BOs'
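
The mechanism in that paragraph can be sketched with the generic dma-resv and DRM scheduler helpers (a hedged illustration, not this driver's code; the function itself is made up):

    #include <linux/dma-resv.h>
    #include <drm/gpu_scheduler.h>

    /* Illustrative only: make a rebind job run behind pending kernel ops
     * (evictions, moves) on the VM's reservation object, then publish the
     * rebind's fence so later exec jobs depend on it. Caller holds the
     * dma-resv lock. */
    static int queue_rebind_behind_kernel_ops(struct drm_sched_job *rebind,
                                              struct dma_resv *vm_resv,
                                              struct dma_fence *fence)
    {
        int err;

        /* Wait on everything in the DMA_RESV_USAGE_KERNEL slot. */
        err = drm_sched_job_add_resv_dependencies(rebind, vm_resv,
                                                  DMA_RESV_USAGE_KERNEL);
        if (err)
            return err;

        /* Publish the rebind so the exec picks it up as a dependency. */
        err = dma_resv_reserve_fences(vm_resv, 1);
        if (err)
            return err;
        dma_resv_add_fence(vm_resv, fence, DMA_RESV_USAGE_BOOKKEEP);
        return 0;
    }
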
77 * Wait for any async VM bind passed as in-fences to start
79 * Lock global VM lock in read mode |
81 * Lock exec (VM dma-resv lock, external BOs dma-resv locks) |
86 * Add job VM dma-resv bookkeeping slot (non-compute mode) |
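
Only the diagram lines containing "vm" match above. For context, the full state / flow diagram in that DOC comment reads approximately as follows (reconstructed from the same comment; treat it as a paraphrase):

    Parse input arguments and wait for any async VM bind passed as
    in-fences to start
    <--------------------------------------------------------------------|
    Lock global VM lock in read mode                                     |
    Pin userptrs (also finds userptrs invalidated since last exec)       |
    Lock exec (VM dma-resv lock, external BOs dma-resv locks)            |
    Validate BOs that have been evicted                                  |
    Create job                                                           |
    Rebind invalidated userptrs + evicted BOs (non-compute-mode)         |
    Add rebind fence dependency to job                                   |
    Add job VM dma-resv bookkeeping slot (non-compute mode)              |
    Check if any userptrs invalidated since pin ------ Drop locks -------|
    Install in / out fences for job
    Submit job
    Unlock all
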
100 struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm); in xe_exec_fn() local
104 xe_vm_set_validation_exec(vm, &vm_exec->exec); in xe_exec_fn()
105 ret = xe_vm_validate_rebind(vm, &vm_exec->exec, 1); in xe_exec_fn()
106 xe_vm_set_validation_exec(vm, NULL); in xe_exec_fn()
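
The three xe_exec_fn() hits are essentially the whole body of the drm_gpuvm_exec validation callback. Stitched together (a reconstruction around the matched lines; the comment is paraphrased):

    static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
    {
        struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);
        int ret;

        /* Let validation paths reuse this drm_exec context, revalidate
         * evicted BOs and rebind (reserving one fence slot for the exec
         * job), then clear the context again. */
        xe_vm_set_validation_exec(vm, &vm_exec->exec);
        ret = xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
        xe_vm_set_validation_exec(vm, NULL);

        return ret;
    }
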
125 struct xe_vm *vm; in xe_exec_ioctl() local
164 vm = q->vm; in xe_exec_ioctl()
169 (xe_vm_in_lr_mode(vm) ? in xe_exec_ioctl()
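
The line-169 fragment is the tail of the in / out sync parsing loop: long-running (LR) VMs get an extra parse flag. Reconstructed around the match (a sketch; surrounding variable names assumed from context):

    for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
        err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
                                  &syncs_user[num_syncs],
                                  SYNC_PARSE_FLAG_EXEC |
                                  (xe_vm_in_lr_mode(vm) ?
                                   SYNC_PARSE_FLAG_LR_MODE : 0));
        if (err)
            goto err_syncs;
    }
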
202 if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) { in xe_exec_ioctl()
203 err = down_write_killable(&vm->lock); in xe_exec_ioctl()
206 /* We don't allow execs while the VM is in error state */ in xe_exec_ioctl()
207 err = down_read_interruptible(&vm->lock); in xe_exec_ioctl()
214 err = xe_vm_userptr_pin(vm); in xe_exec_ioctl()
215 downgrade_write(&vm->lock); in xe_exec_ioctl()
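
Lines 202-215 are the retry-labelled locking dance: take vm->lock for writing only when userptrs need a repin, pin them, then downgrade so the rest of the path runs under the read lock. Reconstructed (labels and the write_locked flag assumed):

    retry:
        if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) {
            err = down_write_killable(&vm->lock);
            write_locked = true;
        } else {
            /* We don't allow execs while the VM is in error state */
            err = down_read_interruptible(&vm->lock);
            write_locked = false;
        }
        if (err)
            goto err_syncs;

        if (write_locked) {
            err = xe_vm_userptr_pin(vm);
            downgrade_write(&vm->lock);
            write_locked = false;
            if (err)
                goto err_unlock_list;
        }
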
222 err = xe_vm_lock(vm, true); in xe_exec_ioctl()
226 if (!xe_vm_in_lr_mode(vm)) { in xe_exec_ioctl()
229 fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm); in xe_exec_ioctl()
232 xe_vm_unlock(vm); in xe_exec_ioctl()
237 xe_exec_queue_last_fence_set(q, vm, fence); in xe_exec_ioctl()
241 xe_vm_unlock(vm); in xe_exec_ioctl()
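
Lines 222-241 handle the degenerate exec with no batch buffers: nothing is submitted; the parsed syncs are simply signalled with the queue's last fence so ordering is preserved. Approximately (reconstructed; exact error unwinding may differ):

    if (!args->num_batch_buffer) {
        err = xe_vm_lock(vm, true);
        if (err)
            goto err_unlock_list;

        if (!xe_vm_in_lr_mode(vm)) {
            struct dma_fence *fence;

            fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
            if (IS_ERR(fence)) {
                err = PTR_ERR(fence);
                xe_vm_unlock(vm);
                goto err_unlock_list;
            }
            for (i = 0; i < num_syncs; i++)
                xe_sync_entry_signal(&syncs[i], fence);
            xe_exec_queue_last_fence_set(q, vm, fence);
            dma_fence_put(fence);
        }

        xe_vm_unlock(vm);
        goto err_unlock_list;
    }
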
246 * It's OK to block interruptibly here with the vm lock held, since in xe_exec_ioctl()
254 if (!xe_vm_in_lr_mode(vm)) { in xe_exec_ioctl()
255 vm_exec.vm = &vm->gpuvm; in xe_exec_ioctl()
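
Lines 254-255 hand the VM to the drm_gpuvm_exec helper, which locks the VM dma-resv plus every external BO dma-resv and calls back into xe_exec_fn() above to validate and rebind. Roughly (reconstructed; retry-on-contention handling elided):

    struct drm_gpuvm_exec vm_exec = { .extra.fn = xe_exec_fn };

    if (!xe_vm_in_lr_mode(vm)) {
        vm_exec.vm = &vm->gpuvm;
        vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
        /* Locks the VM resv and all external BO resvs, then runs
         * xe_exec_fn() under those locks. */
        err = drm_gpuvm_exec_lock(&vm_exec);
        if (err)
            goto err_unlock_list;
    }
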
262 if (xe_vm_is_closed_or_banned(q->vm)) { in xe_exec_ioctl()
263 drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n"); in xe_exec_ioctl()
275 err = xe_vm_validate_protected(q->vm); in xe_exec_ioctl()
288 if (!xe_vm_in_lr_mode(vm)) { in xe_exec_ioctl()
290 xe_vm_resv(vm), in xe_exec_ioctl()
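
Line 290 is the middle argument of the call that makes the job wait on the VM reservation object's kernel slot, i.e. on the rebinds scheduled earlier. Reconstructed (the wrapper name is my best reading of xe_sched_job.h and may differ):

    if (!xe_vm_in_lr_mode(vm)) {
        err = xe_sched_job_add_deps(job,
                                    xe_vm_resv(vm),
                                    DMA_RESV_USAGE_KERNEL);
        if (err)
            goto err_put_job;
    }
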
301 if (!xe_vm_in_lr_mode(vm)) { in xe_exec_ioctl()
302 err = xe_sched_job_last_fence_add_dep(job, vm); in xe_exec_ioctl()
306 err = xe_svm_notifier_lock_interruptible(vm); in xe_exec_ioctl()
310 err = __xe_vm_userptr_needs_repin(vm); in xe_exec_ioctl()
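
Lines 306-310 are the last-chance race check: with the SVM notifier lock held, ask whether any userptr was invalidated after the pin; if so, the path unwinds and jumps back to the retry label. Approximately:

    err = xe_svm_notifier_lock_interruptible(vm);
    if (err)
        goto err_put_job;

    /* Invalidated since the pin above: drop everything and retry. */
    err = __xe_vm_userptr_needs_repin(vm);
    if (err)
        goto err_repin;
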
320 if (!xe_vm_in_lr_mode(vm)) in xe_exec_ioctl()
321 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished, in xe_exec_ioctl()
332 if (!xe_vm_in_lr_mode(vm)) in xe_exec_ioctl()
333 xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished); in xe_exec_ioctl()
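
Lines 320-333 publish the armed job's finished fence three ways: into the VM dma-resv (and every external BO's) bookkeeping slot, to the user's out-syncs, and as the queue's last fence. Reconstructed around the matches:

    xe_sched_job_arm(job);
    if (!xe_vm_in_lr_mode(vm))
        drm_gpuvm_resv_add_fence(&vm->gpuvm, exec,
                                 &job->drm.s_fence->finished,
                                 DMA_RESV_USAGE_BOOKKEEP,
                                 DMA_RESV_USAGE_BOOKKEEP);

    for (i = 0; i < num_syncs; i++)
        xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);

    xe_sched_job_push(job);
    if (!xe_vm_in_lr_mode(vm))
        xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
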
335 xe_vm_reactivate_rebind(vm); in xe_exec_ioctl()
337 if (!err && !xe_vm_in_lr_mode(vm)) { in xe_exec_ioctl()
339 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in xe_exec_ioctl()
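
Line 339 moves all of the VM's BOs to the tail of the TTM LRU in one bulk operation, marking them most-recently-used so eviction prefers other clients' buffers first. In context (reconstructed; the lock is TTM's LRU spinlock):

    if (!err && !xe_vm_in_lr_mode(vm)) {
        spin_lock(&xe->ttm.lru_lock);
        ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
        spin_unlock(&xe->ttm.lru_lock);
    }
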
347 if (!xe_vm_in_lr_mode(vm)) in xe_exec_ioctl()
348 xe_svm_notifier_unlock(vm); in xe_exec_ioctl()
353 if (!xe_vm_in_lr_mode(vm)) in xe_exec_ioctl()
356 up_read(&vm->lock); in xe_exec_ioctl()