/linux/drivers/gpu/drm/scheduler/
sched_main.c
    399  if (job && dma_fence_is_signaled(&job->s_fence->finished))   in drm_sched_run_free_queue()
    412  struct drm_sched_fence *s_fence = s_job->s_fence;   in drm_sched_job_done() local
    413  struct drm_gpu_scheduler *sched = s_fence->sched;   in drm_sched_job_done()
    418  trace_drm_sched_process_job(s_fence);   in drm_sched_job_done()
    420  dma_fence_get(&s_fence->finished);   in drm_sched_job_done()
    421  drm_sched_fence_finished(s_fence, result);   in drm_sched_job_done()
    422  dma_fence_put(&s_fence->finished);   in drm_sched_job_done()
    633  if (s_job->s_fence->parent &&   in drm_sched_stop()
    634  dma_fence_remove_callback(s_job->s_fence->parent,   in drm_sched_stop()
    636  dma_fence_put(s_job->s_fence->parent);   in drm_sched_stop()
    [all …]
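The drm_sched_job_done() lines above are the heart of the completion path: the scheduler pins the finished fence across the signal so a fence callback that frees the job cannot drop the last reference mid-signal. A minimal sketch of just that pattern, using the names from the excerpts (the real function also updates the scheduler's credit count and wakes the submission path):

    #include <drm/gpu_scheduler.h>

    /* Sketch: signal the finished fence while holding an extra reference,
     * mirroring lines 420-422 of sched_main.c above. */
    static void sketch_job_done(struct drm_sched_job *s_job, int result)
    {
            struct drm_sched_fence *s_fence = s_job->s_fence;

            dma_fence_get(&s_fence->finished);         /* pin across signaling */
            drm_sched_fence_finished(s_fence, result); /* stores error, signals */
            dma_fence_put(&s_fence->finished);         /* callbacks may free now */
    }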
sched_fence.c
     49  static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,   in drm_sched_fence_set_parent() argument
     57  smp_store_release(&s_fence->parent, dma_fence_get(fence));   in drm_sched_fence_set_parent()
     59  &s_fence->finished.flags))   in drm_sched_fence_set_parent()
     60  dma_fence_set_deadline(fence, s_fence->deadline);   in drm_sched_fence_set_parent()
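The set_parent() excerpt also shows deadline re-propagation: a deadline a waiter set on the finished fence before the job reached the hardware is forwarded to the parent (hardware) fence once that fence exists. A hedged sketch of the waiter side, assuming a caller that wants the result within roughly one 60 Hz frame:

    #include <linux/dma-fence.h>
    #include <linux/ktime.h>

    /* Sketch: hint urgency to the fence producer. For a scheduler fence
     * the deadline is recorded and, per line 60 above, replayed onto the
     * hardware fence from drm_sched_fence_set_parent(). */
    static void sketch_hint_deadline(struct dma_fence *fence)
    {
            ktime_t deadline = ktime_add_ms(ktime_get(), 16);

            dma_fence_set_deadline(fence, deadline);
    }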
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c
    117  amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {   in amdgpu_job_timedout()
    135  dma_fence_set_error(&s_job->s_fence->finished, -ETIME);   in amdgpu_job_timedout()
    261  if (job->base.s_fence && job->base.s_fence->finished.ops)   in amdgpu_job_free_resources()
    262  f = &job->base.s_fence->finished;   in amdgpu_job_free_resources()
    290  struct dma_fence *fence = &leader->base.s_fence->scheduled;   in amdgpu_job_set_gang_leader()
    310  if (job->gang_submit != &job->base.s_fence->scheduled)   in amdgpu_job_free()
    324  f = dma_fence_get(&job->base.s_fence->finished);   in amdgpu_job_submit()
    373  dma_fence_set_error(&job->base.s_fence->finished, r);   in amdgpu_job_prepare_job()
    386  finished = &job->base.s_fence->finished;   in amdgpu_job_run()
    429  struct drm_sched_fence *s_fence = s_job->s_fence;   in amdgpu_job_stop_all_jobs_on_sched() local
    [all …]
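Lines 135 and 373 use the same idiom from opposite ends of a job's life: whether the job times out on the hardware or fails in prepare, the error is stored on the finished fence before it signals, so waiters observe -ETIME (or the prepare error) instead of success. A minimal sketch:

    #include <drm/gpu_scheduler.h>

    /* Sketch: mark a job as failed. dma_fence_set_error() must run before
     * the fence signals; the stored error is then what waiters get back. */
    static void sketch_fail_job(struct drm_sched_job *s_job, int error)
    {
            dma_fence_set_error(&s_job->s_fence->finished, error); /* e.g. -ETIME */
    }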
amdgpu_sync.c
     68  struct drm_sched_fence *s_fence = to_drm_sched_fence(f);   in amdgpu_sync_same_dev() local
     70  if (s_fence) {   in amdgpu_sync_same_dev()
     73  ring = container_of(s_fence->sched, struct amdgpu_ring, sched);   in amdgpu_sync_same_dev()
     89  struct drm_sched_fence *s_fence;   in amdgpu_sync_get_owner() local
     95  s_fence = to_drm_sched_fence(f);   in amdgpu_sync_get_owner()
     96  if (s_fence)   in amdgpu_sync_get_owner()
     97  return s_fence->owner;   in amdgpu_sync_get_owner()
    319  struct drm_sched_fence *s_fence = to_drm_sched_fence(f);   in amdgpu_sync_peek_fence() local
    325  if (ring && s_fence) {   in amdgpu_sync_peek_fence()
    329  if (s_fence->sched == &ring->sched) {   in amdgpu_sync_peek_fence()
    [all …]
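All three helpers rely on the same trick: to_drm_sched_fence() returns NULL for fences that are not scheduler fences, so it doubles as a type check, after which driver-side data (owner, ring) can be recovered. A sketch of the ring lookup, assuming amdgpu's layout where the drm_gpu_scheduler is embedded in struct amdgpu_ring:

    #include <drm/gpu_scheduler.h>

    /* Sketch: map a fence back to the ring it was submitted on, or NULL
     * if it is a foreign (non-scheduler) fence. struct amdgpu_ring is the
     * driver's ring type from amdgpu_ring.h. */
    static struct amdgpu_ring *sketch_fence_to_ring(struct dma_fence *f)
    {
            struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

            if (!s_fence)
                    return NULL;

            return container_of(s_fence->sched, struct amdgpu_ring, sched);
    }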
amdgpu_trace.h
     36  job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
    182  __entry->context = job->base.s_fence->finished.context;
    183  __entry->seqno = job->base.s_fence->finished.seqno;
    207  __entry->context = job->base.s_fence->finished.context;
    208  __entry->seqno = job->base.s_fence->finished.seqno;
amdgpu_ctx.c
    172  struct drm_sched_fence *s_fence;   in amdgpu_ctx_fence_time() local
    178  s_fence = to_drm_sched_fence(fence);   in amdgpu_ctx_fence_time()
    179  if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))   in amdgpu_ctx_fence_time()
    183  if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))   in amdgpu_ctx_fence_time()
    184  return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);   in amdgpu_ctx_fence_time()
    186  return ktime_sub(s_fence->finished.timestamp,   in amdgpu_ctx_fence_time()
    187  s_fence->scheduled.timestamp);   in amdgpu_ctx_fence_time()
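Assembled, the fragments compute how long a job has been (or was) on the hardware: both scheduler fences record a timestamp when they signal, guarded by DMA_FENCE_FLAG_TIMESTAMP_BIT. A sketch of the whole function as the excerpts imply it (the zero return for never-scheduled jobs is filled in as an assumption):

    #include <drm/gpu_scheduler.h>
    #include <linux/ktime.h>

    /* Sketch: 0 if the job never started, now - scheduled while it runs,
     * finished - scheduled once it is done. */
    static ktime_t sketch_fence_time(struct drm_sched_fence *s_fence)
    {
            if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
                    return ns_to_ktime(0);

            if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
                    return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);

            return ktime_sub(s_fence->finished.timestamp,
                             s_fence->scheduled.timestamp);
    }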
amdgpu_cs.c
    423  struct drm_sched_fence *s_fence;   in amdgpu_cs_p2_dependencies() local
    426  s_fence = to_drm_sched_fence(fence);   in amdgpu_cs_p2_dependencies()
    427  fence = dma_fence_get(&s_fence->scheduled);   in amdgpu_cs_p2_dependencies()
   1238  struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);   in amdgpu_cs_sync_rings() local
   1246  if (!s_fence || s_fence->sched != sched) {   in amdgpu_cs_sync_rings()
   1297  fence = &p->jobs[i]->base.s_fence->scheduled;   in amdgpu_cs_submit()
   1332  p->fence = dma_fence_get(&leader->base.s_fence->finished);   in amdgpu_cs_submit()
   1343  &p->jobs[i]->base.s_fence->finished,   in amdgpu_cs_submit()
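Line 427 is the notable one: for an in-driver dependency, amdgpu downgrades a wait on the finished fence to a wait on the scheduled fence, because once the earlier job is on the hardware, ring-level ordering makes waiting for its completion unnecessary. A hedged sketch of that substitution (the real code applies extra ownership checks before relaxing the wait):

    #include <drm/gpu_scheduler.h>

    /* Sketch: trade "job completed" for "job reached the hardware".
     * Only valid when the caller knows ordering is otherwise guaranteed. */
    static struct dma_fence *sketch_relax_dep(struct dma_fence *fence)
    {
            struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);

            if (!s_fence)
                    return dma_fence_get(fence); /* foreign fence: full wait */

            return dma_fence_get(&s_fence->scheduled);
    }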
amdgpu_ids.c
    329  r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);   in amdgpu_vmid_grab_reserved()
    390  &job->base.s_fence->finished);   in amdgpu_vmid_grab_used()
    442  &job->base.s_fence->finished);   in amdgpu_vmid_grab()
amdgpu_ib.c
    153  fence_ctx = job->base.s_fence ?   in amdgpu_ib_schedule()
    154  job->base.s_fence->scheduled.context : 0;   in amdgpu_ib_schedule()
amdgpu_fence.c
    722  if (!job->base.s_fence && !dma_fence_is_signaled(old))   in amdgpu_fence_driver_clear_job_fences()
/linux/drivers/gpu/drm/etnaviv/
etnaviv_sched.c
     25  if (likely(!sched_job->s_fence->finished.error))   in etnaviv_sched_run_job()
    112  submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);   in etnaviv_sched_push_job()
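etnaviv's two lines bracket the contract that recurs in most entries below (xe, lima, panfrost, v3d, nouveau, msm): grab the finished-fence reference after drm_sched_job_arm() but before drm_sched_entity_push_job(), because the scheduler owns the job once it is pushed; and in run_job(), skip the actual hardware submission if the fence already carries an error. A sketch of the push side under those assumptions:

    #include <drm/gpu_scheduler.h>

    /* Sketch: the only safe window to take the out-fence reference is
     * between arm (which initializes s_fence) and push (after which the
     * scheduler may free the job at any time). */
    static struct dma_fence *sketch_push_job(struct drm_sched_job *job)
    {
            struct dma_fence *out_fence;

            drm_sched_job_arm(job);
            out_fence = dma_fence_get(&job->s_fence->finished);
            drm_sched_entity_push_job(job);

            return out_fence;
    }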
/linux/drivers/gpu/drm/imagination/
pvr_queue.c
    477  if (f == &job->base.s_fence->scheduled)   in pvr_queue_get_paired_frag_job_dep()
    625  &job->paired_job->base.s_fence->scheduled == fence)   in pvr_queue_submit_job_to_cccb()
    775  WARN_ON(job->base.s_fence->parent);   in pvr_queue_start()
    776  job->base.s_fence->parent = dma_fence_get(job->done_fence);   in pvr_queue_start()
    827  job->base.s_fence->parent = dma_fence_get(job->done_fence);   in pvr_queue_timedout_job()
   1120  return &job->base.s_fence->finished;   in pvr_queue_job_arm()
   1135  if (job->base.s_fence)   in pvr_queue_job_cleanup()
   1155  queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled);   in pvr_queue_job_push()
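Lines 775-776 and 827 (and panthor_sched.c line 2547 below) show drivers attaching the hardware completion fence to s_fence->parent themselves when (re)starting a queue, rather than returning it from run_job(). A sketch of that hookup; done_fence stands for the driver's own completion fence:

    #include <drm/gpu_scheduler.h>

    /* Sketch: adopt a driver-side completion fence as the scheduler's
     * notion of "hardware done" for this job. */
    static void sketch_adopt_done_fence(struct drm_sched_job *job,
                                        struct dma_fence *done_fence)
    {
            WARN_ON(job->s_fence->parent); /* must not be tracked already */
            job->s_fence->parent = dma_fence_get(done_fence);
    }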
pvr_sync.c
    231  struct drm_sched_fence *s_fence = to_drm_sched_fence(uf);   in pvr_sync_add_dep_to_job() local
    237  dma_fence_get(&s_fence->scheduled));   in pvr_sync_add_dep_to_job()
pvr_job.c
    593  dma_resv_add_fence(obj->resv, &job->base.s_fence->finished, usage);   in update_job_resvs()
    621  if (&geom_job->base.s_fence->scheduled == fence)   in can_combine_jobs()
    640  return dma_fence_get(&job->base.s_fence->scheduled);   in get_last_queued_job_scheduled_fence()
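update_job_resvs() (line 593) shows how the finished fence becomes visible to other users of the same buffers: it is added to each object's reservation object, so later work touching those buffers implicitly waits on the job. A sketch, assuming the resv lock is held and a fence slot was reserved beforehand, as dma_resv_add_fence() requires:

    #include <linux/dma-resv.h>
    #include <drm/drm_gem.h>
    #include <drm/gpu_scheduler.h>

    /* Sketch: make later users of `obj` wait on this job. `usage` would be
     * DMA_RESV_USAGE_WRITE for buffers the job writes, DMA_RESV_USAGE_READ
     * for buffers it only reads. */
    static void sketch_publish_finished_fence(struct drm_gem_object *obj,
                                              struct drm_sched_job *job,
                                              enum dma_resv_usage usage)
    {
            dma_resv_add_fence(obj->resv, &job->s_fence->finished, usage);
    }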
/linux/drivers/gpu/drm/xe/
xe_gsc_submit.c
    208  fence = dma_fence_get(&job->drm.s_fence->finished);   in xe_gsc_pkt_submit_kernel()
xe_gt.c
    182  fence = dma_fence_get(&job->drm.s_fence->finished);   in emit_nop_job()
    268  fence = dma_fence_get(&job->drm.s_fence->finished);   in emit_wa_job()
xe_migrate.c
    909  fence = dma_fence_get(&job->drm.s_fence->finished);   in xe_migrate_copy()
   1161  fence = dma_fence_get(&job->drm.s_fence->finished);   in xe_migrate_clear()
   1477  fence = dma_fence_get(&job->drm.s_fence->finished);   in __xe_migrate_update_pgtables()
xe_gsc.c
     93  fence = dma_fence_get(&job->drm.s_fence->finished);   in emit_gsc_upload()
/linux/drivers/gpu/drm/lima/
lima_sched.c
    173  struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);   in lima_sched_context_queue_task()
    211  if (job->s_fence->finished.error < 0)   in lima_sched_run_job()
/linux/drivers/gpu/drm/panfrost/
panfrost_job.c
    306  job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);   in panfrost_job_push()
    381  if (unlikely(job->base.s_fence->finished.error))   in panfrost_job_run()
/linux/drivers/gpu/drm/xe/tests/
xe_migrate.c
     54  fence = dma_fence_get(&job->drm.s_fence->finished);   in run_sanity_job()
    467  fence = dma_fence_get(&job->drm.s_fence->finished);   in blt_copy()
/linux/drivers/gpu/drm/v3d/
v3d_sched.c
    185  if (unlikely(job->base.base.s_fence->finished.error))   in v3d_bin_job_run()
    240  if (unlikely(job->base.base.s_fence->finished.error))   in v3d_render_job_run()
/linux/drivers/gpu/drm/nouveau/
nouveau_sched.c
    313  job->done_fence = dma_fence_get(&job->base.s_fence->finished);   in nouveau_job_submit()
/linux/drivers/gpu/drm/msm/
msm_gem_submit.c
    836  submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);   in msm_ioctl_gem_submit()
/linux/drivers/gpu/drm/panthor/
panthor_sched.c
   2547  job->base.s_fence->parent = dma_fence_get(job->done_fence);   in queue_start()
   3337  if (job->base.s_fence)   in job_release()
   3458  panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished,   in panthor_job_update_resvs()