Lines matching "vm" (full-word match) in xe_exec.c
45 * We do not allow a user to trigger a bind at exec time; rather, we have a VM
47 * sense, a VM bind is basically the same operation as an exec from the user
48 * perspective. E.g., if an exec depends on a VM bind, use the in / out fence
53 * the VM that have been invalidated since the last exec; likewise, we also have
55 * behind any pending kernel operations on any external BOs in the VM or any BOs
56 * private to the VM. This is accomplished by the rebinds waiting on BOs
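The comment excerpts above describe the exec-side contract: binds are never triggered from exec; userspace orders an exec after a bind with the common in / out fence interface. Below is a minimal userspace sketch of that chaining, assuming the drm_xe uapi from xe_drm.h and libdrm's syncobj helpers; the VM id, exec queue, bind op, and batch address are caller-provided placeholders, and all error handling is elided.

#include <stdint.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

/* Chain: VM bind signals a syncobj (out-fence), exec waits on it (in-fence). */
static void exec_after_bind(int fd, struct drm_xe_vm_bind *bind,
			    struct drm_xe_exec *exec)
{
	struct drm_xe_sync sync = { .type = DRM_XE_SYNC_TYPE_SYNCOBJ };
	uint32_t syncobj;

	drmSyncobjCreate(fd, 0, &syncobj);
	sync.handle = syncobj;

	/* Out-fence: the bind signals the syncobj when it completes. */
	sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;
	bind->num_syncs = 1;
	bind->syncs = (uintptr_t)&sync;
	drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, bind);

	/* In-fence: the exec must not start until the syncobj signals. */
	sync.flags = 0;
	exec->num_syncs = 1;
	exec->syncs = (uintptr_t)&sync;
	drmIoctl(fd, DRM_IOCTL_XE_EXEC, exec);
}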
77 * Wait for any async VM bind passed as in-fences to start
79 * Lock global VM lock in read mode |
81 * Lock exec (VM dma-resv lock, external BOs dma-resv locks) |
86 * Add job VM dma-resv bookkeeping slot (non-compute mode) |
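The "Lock exec" step in the flow above is the DRM GPUVM exec-lock pattern (line 256 below feeds &vm->gpuvm into such a struct). A sketch assuming the drm_gpuvm_exec helpers from drm_gpuvm.h; the flags, fence count, and callback are illustrative stand-ins, with the callback slot being where xe_exec_fn() is wired in.

#include <drm/drm_gpuvm.h>

static int lock_vm_and_extobjs(struct drm_gpuvm *gpuvm,
			       int (*validate)(struct drm_gpuvm_exec *))
{
	struct drm_gpuvm_exec vm_exec = {
		.vm = gpuvm,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
		.num_fences = 1,	/* reserve a slot for the job fence */
		.extra.fn = validate,	/* runs with all dma-resv locks held */
	};
	int err;

	/* Locks the VM's dma-resv plus every external BO's dma-resv. */
	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		return err;

	/* ... create the job, add dependencies, publish fences ... */

	drm_gpuvm_exec_unlock(&vm_exec);
	return 0;
}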
100 struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm); in xe_exec_fn() local
104 xe_vm_set_validation_exec(vm, &vm_exec->exec); in xe_exec_fn()
105 ret = xe_vm_validate_rebind(vm, &vm_exec->exec, 1); in xe_exec_fn()
106 xe_vm_set_validation_exec(vm, NULL); in xe_exec_fn()
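xe_exec_fn() brackets a validate-and-rebind pass with the validation-exec pointer. Reduced to the generic GPUVM helper, the validation part looks roughly like the callback below; the rebinding of invalidated userptrs and evicted BOs that xe_vm_validate_rebind() also performs is Xe-specific and elided here.

/* Revalidate every BO on the VM's evicted list while locks are held. */
static int validate_cb(struct drm_gpuvm_exec *vm_exec)
{
	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}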
125 struct xe_vm *vm; in xe_exec_ioctl() local
164 vm = q->vm; in xe_exec_ioctl()
170 (xe_vm_in_lr_mode(vm) ? in xe_exec_ioctl()
203 if (!xe_vm_in_lr_mode(vm) && xe_vm_userptr_check_repin(vm)) { in xe_exec_ioctl()
204 err = down_write_killable(&vm->lock); in xe_exec_ioctl()
207 /* We don't allow execs while the VM is in an error state */ in xe_exec_ioctl()
208 err = down_read_interruptible(&vm->lock); in xe_exec_ioctl()
215 err = xe_vm_userptr_pin(vm); in xe_exec_ioctl()
216 downgrade_write(&vm->lock); in xe_exec_ioctl()
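The down_write_killable() / downgrade_write() fragments above are a lock dance on vm->lock: the rwsem is taken in write mode only when userptrs must be re-pinned, then downgraded so the remainder of the exec path, and concurrent execs, run under the read lock. A self-contained sketch of the pattern, with a hypothetical pin callback standing in for xe_vm_userptr_pin():

#include <linux/rwsem.h>

static int lock_vm_for_exec(struct rw_semaphore *lock, bool needs_repin,
			    int (*pin_userptrs)(void))
{
	int err;

	if (!needs_repin)
		return down_read_interruptible(lock);

	err = down_write_killable(lock);
	if (err)
		return err;

	err = pin_userptrs();
	/* Keep the lock, but let other readers (execs) in again. */
	downgrade_write(lock);
	if (err)
		up_read(lock);
	return err;	/* on success the caller holds the read lock */
}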
223 err = xe_vm_lock(vm, true); in xe_exec_ioctl()
227 if (!xe_vm_in_lr_mode(vm)) { in xe_exec_ioctl()
230 fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm); in xe_exec_ioctl()
233 xe_vm_unlock(vm); in xe_exec_ioctl()
238 xe_exec_queue_last_fence_set(q, vm, fence); in xe_exec_ioctl()
242 xe_vm_unlock(vm); in xe_exec_ioctl()
247 * It's OK to block interruptibly here with the vm lock held, since in xe_exec_ioctl()
255 if (!xe_vm_in_lr_mode(vm)) { in xe_exec_ioctl()
256 vm_exec.vm = &vm->gpuvm; in xe_exec_ioctl()
263 if (xe_vm_is_closed_or_banned(q->vm)) { in xe_exec_ioctl()
264 drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n"); in xe_exec_ioctl()
276 err = xe_vm_validate_protected(q->vm); in xe_exec_ioctl()
289 if (!xe_vm_in_lr_mode(vm)) { in xe_exec_ioctl()
291 xe_vm_resv(vm), in xe_exec_ioctl()
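The xe_vm_resv(vm) fragment above continues a call that makes the job depend on fences in the VM's dma-resv, so the exec is scheduled behind pending rebinds and other kernel operations (the DMA_RESV_USAGE_KERNEL ordering described in the header comment). A sketch with the generic scheduler helper; the Xe wrapper differs in name, and the usage class shown is an assumption.

#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>

/* Make the job wait for kernel-usage fences (rebinds, kernel ops). */
static int job_wait_for_vm_kernel_ops(struct drm_sched_job *job,
				      struct dma_resv *vm_resv)
{
	return drm_sched_job_add_resv_dependencies(job, vm_resv,
						   DMA_RESV_USAGE_KERNEL);
}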
302 if (!xe_vm_in_lr_mode(vm)) { in xe_exec_ioctl()
303 err = xe_sched_job_last_fence_add_dep(job, vm); in xe_exec_ioctl()
307 err = xe_svm_notifier_lock_interruptible(vm); in xe_exec_ioctl()
311 err = __xe_vm_userptr_needs_repin(vm); in xe_exec_ioctl()
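__xe_vm_userptr_needs_repin() runs under the notifier lock because userptr pages were pinned earlier without it: before any fences are installed, the exec must recheck that no invalidation raced in, and back off and retry if one did. Sketched below with the generic mmu_interval_notifier API; the names are illustrative, not the Xe implementation.

#include <linux/mmu_notifier.h>

/* Call under the same lock the invalidate callback takes. Returns false
 * if an invalidation ran since @seq_at_pin; the exec must then retry. */
static bool userptr_still_valid(struct mmu_interval_notifier *notifier,
				unsigned long seq_at_pin)
{
	return !mmu_interval_read_retry(notifier, seq_at_pin);
}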
321 if (!xe_vm_in_lr_mode(vm)) in xe_exec_ioctl()
322 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished, in xe_exec_ioctl()
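drm_gpuvm_resv_add_fence() above publishes the job's scheduler "finished" fence into the VM's dma-resv and into every external BO's dma-resv, so later submissions and evictions order against this exec. A sketch of the call shape; the usage classes are illustrative, chosen by the driver's ordering rules.

static void publish_job_fence(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
			      struct dma_fence *finished)
{
	drm_gpuvm_resv_add_fence(gpuvm, exec, finished,
				 DMA_RESV_USAGE_BOOKKEEP,	/* VM-private BOs */
				 DMA_RESV_USAGE_BOOKKEEP);	/* external BOs */
}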
333 if (!xe_vm_in_lr_mode(vm)) in xe_exec_ioctl()
334 xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished); in xe_exec_ioctl()
336 xe_vm_reactivate_rebind(vm); in xe_exec_ioctl()
338 if (!err && !xe_vm_in_lr_mode(vm)) { in xe_exec_ioctl()
340 ttm_lru_bulk_move_tail(&vm->lru_bulk_move); in xe_exec_ioctl()
348 if (!xe_vm_in_lr_mode(vm)) in xe_exec_ioctl()
349 xe_svm_notifier_unlock(vm); in xe_exec_ioctl()
354 if (!xe_vm_in_lr_mode(vm)) in xe_exec_ioctl()
357 up_read(&vm->lock); in xe_exec_ioctl()