Lines Matching full:job

32 * backend operations to the scheduler, such as submitting a job to the hardware run queue,
33 * returning the dependencies of a job, etc.
46 * Note that once a job has been taken from the entity's queue and pushed to the
61 * Once a job is executed (but not yet finished), the job's credits contribute
62 * to the scheduler's credit count until the job is finished. If executing
63 * one more job would make the scheduler's credit count exceed the scheduler's
64 * credit limit, the job won't be executed. Instead, the scheduler will wait
112 * Return true if we can push at least one more job from @entity, false
124 /* If a job exceeds the credit limit, truncate it to the credit limit in drm_sched_can_queue()
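As a rough illustration of the credit accounting described above, the following sketch mirrors the check these lines refer to. It assumes the scheduler's credit_limit/credit_count fields and the job's credits field; the authoritative logic (including the optional update_job_credits() callback) is drm_sched_can_queue() in this file.

#include <drm/gpu_scheduler.h>

/* Sketch only, not the implementation in drm_sched_can_queue(). */
static bool example_can_queue(struct drm_gpu_scheduler *sched,
                              struct drm_sched_job *job)
{
        u32 credits = job->credits;

        /* A job exceeding the credit limit is truncated to the limit. */
        if (credits > sched->credit_limit)
                credits = sched->credit_limit;

        /* Run the job only if it still fits under the credit limit. */
        return atomic_read(&sched->credit_count) + credits <=
               sched->credit_limit;
}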
244 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
253 * its job; return NULL if no ready entity was found.
301 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
310 * its job; return NULL if no ready entity was found.
342 * drm_sched_run_job_queue - enqueue run-job work
352 * drm_sched_run_free_queue - enqueue free-job work
362 * drm_sched_job_done - complete a job
363 * @s_job: pointer to the job which is done
365 * Finish the job's fence and resubmit the work items.
384 * drm_sched_job_done_cb - the callback for a done job
419 * drm_sched_tdr_queue_imm - immediately start job timeout handler
449 * drm_sched_suspend_timeout - Suspend scheduler job timeout
479 * drm_sched_resume_timeout - Resume scheduler job timeout
511 * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout
513 * @job: job to be reinserted on the pending list
516 * hung and is making progress, the scheduler must reinsert the job back into
517 * @sched->pending_list. Otherwise, the job and its resources won't be freed
523 struct drm_sched_job *job) in drm_sched_job_reinsert_on_false_timeout() argument
526 list_add(&job->list, &sched->pending_list); in drm_sched_job_reinsert_on_false_timeout()
528 /* After reinserting the job, the scheduler enqueues the free-job work in drm_sched_job_reinsert_on_false_timeout()
529 * again if ready. Otherwise, a signaled job could be added to the in drm_sched_job_reinsert_on_false_timeout()
539 struct drm_sched_job *job; in drm_sched_job_timedout() local
546 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_job_timedout()
549 if (job) { in drm_sched_job_timedout()
551 * Remove the bad job so it cannot be freed by a concurrent in drm_sched_job_timedout()
556 list_del_init(&job->list); in drm_sched_job_timedout()
559 status = job->sched->ops->timedout_job(job); in drm_sched_job_timedout()
562 * The guilty job did complete and hence needs to be manually removed in drm_sched_job_timedout()
566 job->sched->ops->free_job(job); in drm_sched_job_timedout()
571 drm_sched_job_reinsert_on_false_timeout(sched, job); in drm_sched_job_timedout()
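To connect this timeout path to the driver side, here is a hypothetical &drm_sched_backend_ops.timedout_job handler. Everything prefixed my_ is made up for illustration, and the sketch assumes the DRM_GPU_SCHED_STAT_NO_HANG / DRM_GPU_SCHED_STAT_RESET status codes paired with the false-timeout reinsertion seen in drm_sched_job_reinsert_on_false_timeout() above.

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

/* Illustrative driver job wrapper; it does not exist in any driver. */
struct my_job {
        struct drm_sched_job base;
        struct dma_fence *hw_fence;     /* fence returned from run_job() */
};

bool my_hw_is_hung(struct my_job *job);         /* placeholder helpers */
void my_hw_reset(struct my_job *job);

static enum drm_gpu_sched_stat
my_timedout_job(struct drm_sched_job *sched_job)
{
        struct my_job *job = container_of(sched_job, struct my_job, base);

        /*
         * False timeout: the hardware is still making progress, so let
         * the scheduler reinsert the job into the pending list.
         */
        if (!my_hw_is_hung(job))
                return DRM_GPU_SCHED_STAT_NO_HANG;

        /* Real hang: recover the hardware, then report the reset. */
        my_hw_reset(job);
        return DRM_GPU_SCHED_STAT_RESET;
}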
584 * @bad: job which caused the time out
587 * Note: the bad job will not be freed, as it might be used later, and so it's
606 * Reinsert the bad job here - now it's safe as in drm_sched_stop()
608 * bad job at this point - we parked (waited for) any in progress in drm_sched_stop()
615 * job extracted. in drm_sched_stop()
620 * Iterate the job list from the latest to the earliest entry and either deactivate in drm_sched_stop()
636 * remove job from pending_list. in drm_sched_stop()
644 * Wait for job's HW fence callback to finish using s_job in drm_sched_stop()
647 * The job is still alive, so the fence refcount is at least 1 in drm_sched_stop()
652 * We must keep the bad job alive for later use during in drm_sched_stop()
654 * that the guilty job must be released. in drm_sched_stop()
723 * recovery after a job timeout.
771 * drm_sched_job_init - init a scheduler job
772 * @job: scheduler job to init
774 * @credits: the number of credits this job contributes to the scheduler's
776 * @owner: job owner for debugging
784 * successfully, even when @job is aborted before drm_sched_job_arm() is called.
797 int drm_sched_job_init(struct drm_sched_job *job, in drm_sched_job_init() argument
807 dev_err(job->sched->dev, "%s: entity has no rq!\n", __func__); in drm_sched_job_init()
822 memset(job, 0, sizeof(*job)); in drm_sched_job_init()
824 job->entity = entity; in drm_sched_job_init()
825 job->credits = credits; in drm_sched_job_init()
826 job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id); in drm_sched_job_init()
827 if (!job->s_fence) in drm_sched_job_init()
830 INIT_LIST_HEAD(&job->list); in drm_sched_job_init()
832 xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC); in drm_sched_job_init()
839 * drm_sched_job_arm - arm a scheduler job for execution
840 * @job: scheduler job to arm
842 * This arms a scheduler job for execution. Specifically it initializes the
843 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
844 * or other places that need to track the completion of this job. It also
850 * Once this function has been called, you *must* submit @job with
855 void drm_sched_job_arm(struct drm_sched_job *job) in drm_sched_job_arm() argument
858 struct drm_sched_entity *entity = job->entity; in drm_sched_job_arm()
864 job->sched = sched; in drm_sched_job_arm()
865 job->s_priority = entity->priority; in drm_sched_job_arm()
867 drm_sched_fence_init(job->s_fence, job->entity); in drm_sched_job_arm()
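Taken together with drm_sched_job_init() above, the usual driver-side submission sequence looks roughly like the following. my_prepare() is a placeholder for driver work such as adding dependencies, and the one-credit value is just an example; the drm_sched_job_cleanup() requirement on the abort path is the one documented further down.

#include <drm/gpu_scheduler.h>

int my_prepare(struct drm_sched_job *job);      /* placeholder driver step */

static int my_submit(struct drm_sched_job *job,
                     struct drm_sched_entity *entity,
                     void *owner, u64 drm_client_id)
{
        int ret;

        ret = drm_sched_job_init(job, entity, 1, owner, drm_client_id);
        if (ret)
                return ret;

        /* e.g. add dependencies and map buffers; may fail. */
        ret = my_prepare(job);
        if (ret) {
                /* Aborted after init but before arming: cleanup is mandatory. */
                drm_sched_job_cleanup(job);
                return ret;
        }

        /*
         * Point of no return: arm the fences and push the job. Keep both
         * calls together (under a common lock if several threads submit
         * to the same entity) so fence seqnos match the queue order.
         */
        drm_sched_job_arm(job);
        drm_sched_entity_push_job(job);

        return 0;
}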
872 * drm_sched_job_add_dependency - adds the fence as a job dependency
873 * @job: scheduler job to add the dependencies to
881 int drm_sched_job_add_dependency(struct drm_sched_job *job, in drm_sched_job_add_dependency() argument
896 xa_for_each(&job->dependencies, index, entry) { in drm_sched_job_add_dependency()
902 xa_store(&job->dependencies, index, fence, GFP_KERNEL); in drm_sched_job_add_dependency()
909 ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL); in drm_sched_job_add_dependency()
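As a usage note: this helper consumes one reference to the fence it is given (the xa_store()/xa_alloc() calls above take that reference over), so a caller that wants to keep using the fence grabs an extra one first, as in this hypothetical snippet.

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

static int my_wait_for_prev(struct drm_sched_job *job,
                            struct dma_fence *prev_fence)
{
        /* Take an extra reference because the helper consumes one. */
        return drm_sched_job_add_dependency(job, dma_fence_get(prev_fence));
}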
918 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
919 * @job: scheduler job to add the dependencies to
924 * This adds the fence matching the given syncobj to @job.
929 int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job, in drm_sched_job_add_syncobj_dependency() argument
941 return drm_sched_job_add_dependency(job, fence); in drm_sched_job_add_syncobj_dependency()
946 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
947 * @job: scheduler job to add the dependencies to
951 * This adds all fences matching the given usage from @resv to @job.
957 int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job, in drm_sched_job_add_resv_dependencies() argument
973 ret = drm_sched_job_add_dependency(job, dma_fence_get(fence)); in drm_sched_job_add_resv_dependencies()
982 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
984 * @job: scheduler job to add the dependencies to
986 * @write: whether the job might write the object (so we need to depend on
990 * GEM objects used in the job but before updating the reservations with your
996 int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, in drm_sched_job_add_implicit_dependencies() argument
1000 return drm_sched_job_add_resv_dependencies(job, obj->resv, in drm_sched_job_add_implicit_dependencies()
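Sketch of how these dependency helpers are typically combined in a driver's submit path. The GEM object array, the write flag and the syncobj handle are assumed to come from the driver's UAPI and are placeholders here.

#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>

static int my_add_deps(struct drm_sched_job *job, struct drm_file *file,
                       struct drm_gem_object **objs, unsigned int num_objs,
                       bool write, u32 in_syncobj_handle)
{
        unsigned int i;
        int ret = 0;

        /* Implicit sync: wait for the dma_resv fences of each BO used. */
        for (i = 0; i < num_objs; i++) {
                ret = drm_sched_job_add_implicit_dependencies(job, objs[i],
                                                              write);
                if (ret)
                        return ret;
        }

        /* Explicit sync: optionally wait for a userspace-provided syncobj. */
        if (in_syncobj_handle)
                ret = drm_sched_job_add_syncobj_dependency(job, file,
                                                           in_syncobj_handle,
                                                           0);

        return ret;
}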
1006 * drm_sched_job_has_dependency - check whether fence is the job's dependency
1007 * @job: scheduler job to check
1011 * True if @fence is found within the job's dependencies, false otherwise.
1013 bool drm_sched_job_has_dependency(struct drm_sched_job *job, in drm_sched_job_has_dependency() argument
1019 xa_for_each(&job->dependencies, index, f) { in drm_sched_job_has_dependency()
1029 * drm_sched_job_cleanup - clean up scheduler job resources
1030 * @job: scheduler job to clean up
1034 * Drivers should call this from their error unwind code if @job is aborted
1044 void drm_sched_job_cleanup(struct drm_sched_job *job) in drm_sched_job_cleanup() argument
1049 if (kref_read(&job->s_fence->finished.refcount)) { in drm_sched_job_cleanup()
1050 /* The job has been processed by the scheduler, i.e., in drm_sched_job_cleanup()
1054 dma_fence_put(&job->s_fence->finished); in drm_sched_job_cleanup()
1056 /* The job was aborted before being committed to run; in drm_sched_job_cleanup()
1059 drm_sched_fence_free(job->s_fence); in drm_sched_job_cleanup()
1062 job->s_fence = NULL; in drm_sched_job_cleanup()
1064 xa_for_each(&job->dependencies, index, fence) { in drm_sched_job_cleanup()
1067 xa_destroy(&job->dependencies); in drm_sched_job_cleanup()
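A job that made it past drm_sched_job_arm() is instead cleaned up from the driver's &drm_sched_backend_ops.free_job callback. A minimal hypothetical version, reusing the struct my_job wrapper from the timeout sketch above and assuming the driver holds its own hw_fence reference and kfree()-based ownership:

#include <linux/slab.h>
#include <drm/gpu_scheduler.h>

static void my_free_job(struct drm_sched_job *sched_job)
{
        struct my_job *job = container_of(sched_job, struct my_job, base);

        /* Drop the scheduler fence and the dependency bookkeeping. */
        drm_sched_job_cleanup(sched_job);

        /* Release the driver's own references and the job itself. */
        dma_fence_put(job->hw_fence);
        kfree(job);
}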
1114 * drm_sched_get_finished_job - fetch the next finished job to be destroyed
1122 * Returns the next finished job from the pending list (if there is one)
1128 struct drm_sched_job *job, *next; in drm_sched_get_finished_job() local
1132 job = list_first_entry_or_null(&sched->pending_list, in drm_sched_get_finished_job()
1134 if (job && dma_fence_is_signaled(&job->s_fence->finished)) { in drm_sched_get_finished_job()
1135 /* remove job from pending_list */ in drm_sched_get_finished_job()
1136 list_del_init(&job->list); in drm_sched_get_finished_job()
1138 /* cancel this job's timeout timer */ in drm_sched_get_finished_job()
1149 dma_fence_timestamp(&job->s_fence->finished); in drm_sched_get_finished_job()
1153 /* start the timeout timer for the next job */ in drm_sched_get_finished_job()
1157 job = NULL; in drm_sched_get_finished_job()
1162 return job; in drm_sched_get_finished_job()
1204 * @w: free job work
1210 struct drm_sched_job *job; in drm_sched_free_job_work() local
1213 job = drm_sched_get_finished_job(sched, &have_more); in drm_sched_free_job_work()
1214 if (job) { in drm_sched_free_job_work()
1215 sched->ops->free_job(job); in drm_sched_free_job_work()
1226 * @w: run job work
1238 /* Find entity with a ready job */ in drm_sched_run_job_work()
1242 * Either no more work to do, or the next ready job needs more in drm_sched_run_job_work()
1394 struct drm_sched_job *job, *tmp; in drm_sched_cancel_remaining_jobs() local
1397 list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) { in drm_sched_cancel_remaining_jobs()
1398 sched->ops->cancel_job(job); in drm_sched_cancel_remaining_jobs()
1399 list_del(&job->list); in drm_sched_cancel_remaining_jobs()
1400 sched->ops->free_job(job); in drm_sched_cancel_remaining_jobs()
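Since cancel_job() runs right before free_job() here, the driver's callback is expected to make sure the job's hardware fence gets signaled with an error so teardown can proceed. A hypothetical version, again using the my_job wrapper and its assumed hw_fence field:

#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <drm/gpu_scheduler.h>

static void my_cancel_job(struct drm_sched_job *sched_job)
{
        struct my_job *job = container_of(sched_job, struct my_job, base);

        /*
         * The hardware will never complete this job, so signal its
         * fence with an error; free_job() is called right afterwards.
         */
        dma_fence_set_error(job->hw_fence, -ECANCELED);
        dma_fence_signal(job->hw_fence);
}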
1486 * @bad: The job guilty of the timeout
1488 * Increment on every hang caused by the 'bad' job. If this exceeds the hang