Lines Matching +full:0 +full:-job +full:-ring in drivers/gpu/drm/xe/xe_exec.c
// SPDX-License-Identifier: MIT

 * Execs have historically been rather complicated in DRM drivers (at least in
 * the i915) because of a few things:
 *
 * - Passing in a list of BOs which are read / written to, creating implicit syncs
 * - Binding at exec time
 * - Flow controlling the ring at exec time
 *
 * In XE we avoid all of this complication by not allowing a BO list to be
 * passed into an exec, using the dma-buf implicit sync uAPI, having binds as
 * separate operations, and using the DRM scheduler to flow control the ring
 * (an illustrative userspace sketch of this uAPI follows the flow outline below).
 *
 * Rebinds / dma-resv usage applies to non-compute mode VMs only, as for compute
 * mode VMs we use preempt fences and a rebind worker (TODO: add link).
 *
 * There is no need to flow control the ring in the exec as we write the ring at
 * submission time and set the DRM scheduler max job limit to SIZE_OF_RING /
 * MAX_JOB_SIZE_BYTES. The DRM scheduler will then hold all jobs until space in
 * the ring is available (see the arithmetic sketch after the flow outline below).
 *
 * .. code-block::
 *
 *	Parse input arguments
 *	Wait for any async VM bind passed as in-fences to start
 *	<----------------------------------------------------------------------|
 *	Retry exec loop (non-compute-mode)                                     |
 *	Pin userptrs (also finds userptr invalidated since last exec)          |
 *	Lock exec (VM dma-resv lock, external BOs dma-resv locks)              |
 *	Validate BOs that have been evicted                                    |
 *	Create job                                                             |
 *	Rebind invalidated userptrs + evicted BOs (non-compute-mode)           |
 *	Add rebind fence dependency to job                                     |
 *	Add job VM dma-resv bookkeeping slot (non-compute mode)                |
 *	Add job to external BOs dma-resv write slots (non-compute mode)        |
 *	Check if any userptrs invalidated since pin ------ Drop locks ---------|
 *	Install in / out fences for job
 *	Submit job
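
The scheduler-based flow control mentioned above comes down to one division:
cap the number of in-flight jobs at SIZE_OF_RING / MAX_JOB_SIZE_BYTES so that
ring space is guaranteed whenever a job is written at submission time. The
stand-alone snippet below only illustrates that arithmetic; the constants are
assumed example values, not the driver's actual ring or job sizes.

	#include <stdio.h>

	#define SIZE_OF_RING		(16u * 1024u)	/* assumed ring size in bytes */
	#define MAX_JOB_SIZE_BYTES	1024u		/* assumed worst-case job footprint */

	int main(void)
	{
		/* Upper bound on jobs the DRM scheduler may keep in flight. */
		unsigned int max_jobs = SIZE_OF_RING / MAX_JOB_SIZE_BYTES;

		printf("max in-flight jobs: %u\n", max_jobs);
		return 0;
	}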
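
The flow above is driven from userspace through DRM_IOCTL_XE_EXEC. The sketch
below is illustrative only: it assumes the struct drm_xe_exec and struct
drm_xe_sync layouts from include/uapi/drm/xe_drm.h, the helper name and its
parameters are made up, and creating the exec queue, binding the batch buffer
into the VM, and creating the syncobj are out of scope here.

	#include <stdint.h>
	#include <xf86drm.h>
	#include <drm/xe_drm.h>

	/* Hypothetical helper: submit one batch, signal a syncobj on completion. */
	static int submit_batch(int fd, uint32_t exec_queue_id, uint64_t batch_addr,
				uint32_t out_syncobj)
	{
		struct drm_xe_sync sync = {
			.type = DRM_XE_SYNC_TYPE_SYNCOBJ,
			.flags = DRM_XE_SYNC_FLAG_SIGNAL,	/* out-fence for this exec */
			.handle = out_syncobj,
		};
		struct drm_xe_exec exec = {
			.exec_queue_id = exec_queue_id,	/* from DRM_IOCTL_XE_EXEC_QUEUE_CREATE */
			.num_syncs = 1,
			.syncs = (uintptr_t)&sync,
			.address = batch_addr,		/* GPU VA of the already-bound batch */
			.num_batch_buffer = 1,
		};

		/* No BO list is passed; residency and ordering are the driver's job. */
		return drmIoctl(fd, DRM_IOCTL_XE_EXEC, &exec);
	}

A real client would then wait on out_syncobj (for example via drmSyncobjWait) or
pass it back in as an in-fence on a later exec or VM bind.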

/* From xe_exec_fn(): */

	struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);

	/* The fence slot added here is intended for the exec sched job. */
	xe_vm_set_validation_exec(vm, &vm_exec->exec);
	ret = xe_vm_validate_rebind(vm, &vm_exec->exec, 1);

/* From xe_exec_ioctl(): */

	struct drm_xe_sync __user *syncs_user = u64_to_user_ptr(args->syncs);
	u64 __user *addresses_user = u64_to_user_ptr(args->address);
	u32 i, num_syncs, num_ufence = 0;
	struct xe_sched_job *job;
	int err = 0;

	if (XE_IOCTL_DBG(xe, args->extensions) ||
	    XE_IOCTL_DBG(xe, args->pad[0] || args->pad[1] || args->pad[2]) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM)) {
		err = -EINVAL;

	if (XE_IOCTL_DBG(xe, args->num_batch_buffer &&
			 q->width != args->num_batch_buffer)) {
		err = -EINVAL;

	if (XE_IOCTL_DBG(xe, q->ops->reset_status(q))) {
		err = -ECANCELED;
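
	/* Copy in and parse any user-supplied in / out sync objects. */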
	if (args->num_syncs) {
		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
		if (!syncs) {
			err = -ENOMEM;

	vm = q->vm;

	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
					   SYNC_PARSE_FLAG_LR_MODE : 0));

	err = -EINVAL;

			     q->width);
	err = -EFAULT;

	group = q->hwe->hw_engine_group;

	err = down_write_killable(&vm->lock);
	err = down_read_interruptible(&vm->lock);
	downgrade_write(&vm->lock);

	if (!args->num_batch_buffer) {
		for (i = 0; i < num_syncs; i++)

	 * return -ERESTARTSYS and the IOCTL will be rerun.
	err = wait_for_completion_interruptible(&xe->pm_block);

	vm_exec.vm = &vm->gpuvm;
	err = xe_validation_exec_lock(&ctx, &vm_exec, &xe->val);

	if (xe_vm_is_closed_or_banned(q->vm)) {
		drm_warn(&xe->drm, "Trying to schedule after vm is closed or banned\n");
		err = -ECANCELED;

	err = -EWOULDBLOCK;	/* Aliased to -EAGAIN */

	err = xe_vm_validate_protected(q->vm);
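
	/*
	 * Create the sched job; a parallel exec queue passes an array of batch
	 * addresses (one per engine instance), others the single IOCTL address.
	 */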
	job = xe_sched_job_create(q, xe_exec_queue_is_parallel(q) ?
				  addresses : &args->address);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);

	err = xe_sched_job_add_deps(job,

	for (i = 0; i < num_syncs && !err; i++)
		err = xe_sync_entry_add_deps(&syncs[i], job);

	err = xe_sched_job_last_fence_add_dep(job, vm);

	/*
	 * Point of no return: if we error after this point, just set an error on
	 * the job and let the DRM scheduler / backend clean up the job.
	 */
	xe_sched_job_arm(job);

	drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,

	for (i = 0; i < num_syncs; i++) {
		xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
		xe_sched_job_init_user_fence(job, &syncs[i]);

	q->ring_ops->emit_job(job);

	xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
	xe_sched_job_push(job);
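
	/* Keep the VM's BOs grouped together at the tail of the TTM LRU. */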
	spin_lock(&xe->ttm.lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&xe->ttm.lru_lock);

	xe_sched_job_put(job);

	up_read(&vm->lock);
	if (err == -EAGAIN && !skip_retry)

	while (num_syncs--)