Lines Matching defs:job

68 static void job_free(struct xe_sched_job *job)
70 struct xe_exec_queue *q = job->q;
73 kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ?
74 xe_sched_job_parallel_slab : xe_sched_job_slab, job);
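job_alloc() itself is not among the matched lines; a minimal sketch of what the counterpart to this free path presumably looks like, given the two slab caches named above:

static struct xe_sched_job *job_alloc(bool parallel)
{
        /* Parallel and migration jobs carry extra per-LRC state, hence
         * the larger slab; zeroing keeps unused fence pointers NULL. */
        return kmem_cache_zalloc(parallel ? xe_sched_job_parallel_slab :
                                 xe_sched_job_slab, GFP_KERNEL);
}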
77 static struct xe_device *job_to_xe(struct xe_sched_job *job)
79 return gt_to_xe(job->q->gt);
83 static void xe_sched_job_free_fences(struct xe_sched_job *job)
87 for (i = 0; i < job->q->width; ++i) {
88 struct xe_job_ptrs *ptrs = &job->ptrs[i];
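The loop body falls outside the match; it presumably drops whichever pre-allocated fences xe_sched_job_arm() never consumed. A sketch, assuming an xe_lrc_free_seqno_fence() helper alongside the stock dma_fence_chain_free():

                if (ptrs->lrc_fence)
                        xe_lrc_free_seqno_fence(ptrs->lrc_fence);
                if (ptrs->chain_fence)
                        dma_fence_chain_free(ptrs->chain_fence);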
100 struct xe_sched_job *job;
105 /* only a kernel context can submit a vm-less job */
108 job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration);
109 if (!job)
112 job->q = q;
113 kref_init(&job->refcount);
114 xe_exec_queue_get(job->q);
116 err = drm_sched_job_init(&job->drm, q->entity, 1, NULL,
129 job->ptrs[i].lrc_fence = fence;
139 job->ptrs[i].chain_fence = chain;
147 job->ptrs[i].batch_addr = batch_addr[i];
149 xe_pm_runtime_get_noresume(job_to_xe(job));
150 trace_xe_sched_job_create(job);
151 return job;
154 xe_sched_job_free_fences(job);
155 drm_sched_job_cleanup(&job->drm);
158 job_free(job);
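For context, a caller drives this constructor roughly as follows (a hedged sketch; q and batch_addr are assumed to be set up elsewhere, with one batch address per LRC of the queue):

        struct xe_sched_job *job;

        job = xe_sched_job_create(q, batch_addr);
        if (IS_ERR(job))
                return PTR_ERR(job);    /* e.g. -ENOMEM from job_alloc() */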
163 * xe_sched_job_destroy - Destroy XE schedule job
164 * @ref: reference to XE schedule job
166 Called when ref == 0: drops the reference to the job's xe_exec_queue and fence, cleans up the
167 base DRM schedule job, and frees the memory for the XE schedule job.
171 struct xe_sched_job *job =
173 struct xe_device *xe = job_to_xe(job);
174 struct xe_exec_queue *q = job->q;
176 xe_sched_job_free_fences(job);
177 dma_fence_put(job->fence);
178 drm_sched_job_cleanup(&job->drm);
179 job_free(job);
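xe_sched_job_destroy() is the kref release callback; the get/put wrappers presumably reduce to the usual pattern:

static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
{
        kref_get(&job->refcount);       /* refcount was kref_init()ed at create */
        return job;
}

static inline void xe_sched_job_put(struct xe_sched_job *job)
{
        /* The final put invokes xe_sched_job_destroy() */
        kref_put(&job->refcount, xe_sched_job_destroy);
}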
199 void xe_sched_job_set_error(struct xe_sched_job *job, int error)
201 if (xe_fence_set_error(job->fence, error))
204 if (dma_fence_is_chain(job->fence)) {
207 dma_fence_chain_for_each(iter, job->fence)
212 trace_xe_sched_job_set_error(job);
214 dma_fence_enable_sw_signaling(job->fence);
215 xe_hw_fence_irq_run(job->q->fence_irq);
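The xe_fence_set_error() helper tested at the top is not matched here. Presumably it stamps the error only on a not-yet-signaled fence and reports whether the fence had already signaled, something like:

static bool xe_fence_set_error(struct dma_fence *fence, int error)
{
        unsigned long irq_flags;
        bool signaled;

        spin_lock_irqsave(fence->lock, irq_flags);
        signaled = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
        if (!signaled)
                dma_fence_set_error(fence, error);      /* must precede signaling */
        spin_unlock_irqrestore(fence->lock, irq_flags);

        return signaled;
}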
218 bool xe_sched_job_started(struct xe_sched_job *job)
220 struct dma_fence *fence = dma_fence_chain_contained(job->fence);
221 struct xe_lrc *lrc = job->q->lrc[0];
224 xe_sched_job_lrc_seqno(job),
228 bool xe_sched_job_completed(struct xe_sched_job *job)
230 struct dma_fence *fence = dma_fence_chain_contained(job->fence);
231 struct xe_lrc *lrc = job->q->lrc[0];
239 xe_sched_job_lrc_seqno(job),
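A hedged example of how these two predicates are typically consumed, e.g. by a scheduler timeout handler (the return value and ban_exec_queue() are illustrative, not from the source):

        /* Never reached the ring: safe to resubmit rather than punish. */
        if (!xe_sched_job_started(job))
                return DRM_GPU_SCHED_STAT_NOMINAL;

        /* Started but never finished: treat the queue as hung. */
        if (!xe_sched_job_completed(job))
                ban_exec_queue(job->q);         /* hypothetical helper */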
243 void xe_sched_job_arm(struct xe_sched_job *job)
245 struct xe_exec_queue *q = job->q;
263 job->ring_ops_flush_tlb = true;
270 fence = job->ptrs[i].lrc_fence;
272 job->ptrs[i].lrc_fence = NULL;
274 job->lrc_seqno = fence->seqno;
277 xe_assert(gt_to_xe(q->gt), job->lrc_seqno == fence->seqno);
280 chain = job->ptrs[i - 1].chain_fence;
282 job->ptrs[i - 1].chain_fence = NULL;
286 job->fence = dma_fence_get(fence); /* Pairs with put in scheduler */
287 drm_sched_job_arm(&job->drm);
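For a parallel queue (width > 1), the elided lines around chain_fence presumably link the per-LRC fences with dma_fence_chain_init() so that job->fence only signals once every LRC has advanced; roughly:

                /* prev covers LRCs 0..i-1, fence is LRC i's seqno fence */
                dma_fence_chain_init(chain, prev, fence, 0);
                fence = &chain->base;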
290 void xe_sched_job_push(struct xe_sched_job *job)
292 xe_sched_job_get(job);
293 trace_xe_sched_job_exec(job);
294 drm_sched_entity_push_job(&job->drm);
295 xe_sched_job_put(job);
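The local get/put pair brackets the handoff: once drm_sched_entity_push_job() returns, the scheduler may run and release the job at any time, so the extra reference keeps it valid through the call. Caller side, the arm/push tail of submission looks roughly like:

        xe_sched_job_arm(job);
        fence = dma_fence_get(job->fence);      /* caller's out-fence reference */
        xe_sched_job_push(job);                 /* job may retire immediately after */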
299 * xe_sched_job_last_fence_add_dep - Add last fence dependency to job
300 @job: job to add the last fence dependency to
301 * @vm: virtual memory job belongs to
306 int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
310 fence = xe_exec_queue_last_fence_get(job->q, vm);
312 return drm_sched_job_add_dependency(&job->drm, fence);
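A hedged usage sketch from a submission path (the error label is assumed from the caller):

        /* Serialize behind whatever last ran on this queue for the VM. */
        err = xe_sched_job_last_fence_add_dep(job, vm);
        if (err)
                goto err_put_job;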
316 * xe_sched_job_init_user_fence - Initialize user_fence for the job
317 * @job: job whose user_fence needs an init
320 void xe_sched_job_init_user_fence(struct xe_sched_job *job,
326 job->user_fence.used = true;
327 job->user_fence.addr = sync->addr;
328 job->user_fence.value = sync->timeline_value;
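A sketch of a submit path attaching a user fence from its sync array; xe_sync_is_ufence() and the surrounding loop are assumptions here:

        for (i = 0; i < num_syncs; i++)
                if (xe_sync_is_ufence(&syncs[i]))
                        xe_sched_job_init_user_fence(job, &syncs[i]);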
332 xe_sched_job_snapshot_capture(struct xe_sched_job *job)
334 struct xe_exec_queue *q = job->q;
347 xe_device_uncanonicalize_addr(xe, job->ptrs[i].batch_addr);
370 int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
373 return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
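A hedged example of pulling implicit dependencies off a buffer's reservation object (the BO and the usage level are chosen for illustration):

        /* Wait for all prior kernel-level work on the BO. */
        err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
                                    DMA_RESV_USAGE_KERNEL);
        if (err)
                return err;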