Lines Matching +full:re +full:- +full:attached
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
40 sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0])); in get_xfer_ctx_state_size()
70 sizeof(((struct rogue_fwif_frag_ctx_state *)0)->frag_reg_isp_store[0])); in get_frag_ctx_state_size()
87 return -EINVAL; in get_ctx_state_size()
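
The two sizing helpers above are selected per job type; a minimal sketch of how get_ctx_state_size() plausibly dispatches to them (the geometry and compute helper names are assumptions, only the fragment and transfer ones appear in this listing):

static int get_ctx_state_size(struct pvr_device *pvr_dev, enum drm_pvr_job_type type)
{
	switch (type) {
	case DRM_PVR_JOB_TYPE_GEOMETRY:
		return get_geom_ctx_state_size(pvr_dev);	/* assumed helper name */
	case DRM_PVR_JOB_TYPE_FRAGMENT:
		return get_frag_ctx_state_size(pvr_dev);	/* shown above */
	case DRM_PVR_JOB_TYPE_COMPUTE:
		return get_compute_ctx_state_size(pvr_dev);	/* assumed helper name */
	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
		return get_xfer_ctx_state_size(pvr_dev);	/* shown above */
	}

	WARN(1, "Invalid queue type");
	return -EINVAL;
}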
116 pvr_context_put(fence->queue->ctx); in pvr_queue_fence_release()
125 switch (fence->queue->type) { in pvr_queue_job_fence_get_timeline_name()
148 switch (fence->queue->type) { in pvr_queue_cccb_fence_get_timeline_name()
150 return "geometry-cccb"; in pvr_queue_cccb_fence_get_timeline_name()
153 return "fragment-cccb"; in pvr_queue_cccb_fence_get_timeline_name()
156 return "compute-cccb"; in pvr_queue_cccb_fence_get_timeline_name()
159 return "transfer-cccb"; in pvr_queue_cccb_fence_get_timeline_name()
173 * to_pvr_queue_job_fence() - Return a pvr_queue_fence object if the fence is
178 * * A non-NULL pvr_queue_fence object if the dma_fence is backed by a UFO, or
187 f = sched_fence->parent; in to_pvr_queue_job_fence()
189 if (f && f->ops == &pvr_queue_job_fence_ops) in to_pvr_queue_job_fence()
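
Putting the fragments above together, to_pvr_queue_job_fence() plausibly unwraps a drm_sched_fence to its hardware parent before checking the fence ops; a sketch under that assumption:

static struct pvr_queue_fence *
to_pvr_queue_job_fence(struct dma_fence *f)
{
	struct drm_sched_fence *sched_fence = to_drm_sched_fence(f);

	/* If this is a scheduler fence, look at the hardware fence backing it. */
	if (sched_fence)
		f = sched_fence->parent;

	if (f && f->ops == &pvr_queue_job_fence_ops)
		return container_of(f, struct pvr_queue_fence, base);

	return NULL;
}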
202 * pvr_queue_fence_put() - Put wrapper for pvr_queue_fence objects.
215 if (WARN_ON(f->ops && in pvr_queue_fence_put()
216 f->ops != &pvr_queue_cccb_fence_ops && in pvr_queue_fence_put()
217 f->ops != &pvr_queue_job_fence_ops)) in pvr_queue_fence_put()
221 if (f->ops) in pvr_queue_fence_put()
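
The put wrapper has to cope with fences that were allocated but never initialized (ops still NULL); a sketch of the likely flow, with the kfree() path being an assumption:

static void pvr_queue_fence_put(struct dma_fence *f)
{
	if (!f)
		return;

	if (WARN_ON(f->ops &&
		    f->ops != &pvr_queue_cccb_fence_ops &&
		    f->ops != &pvr_queue_job_fence_ops))
		return;

	/* Initialized fences go through the normal dma_fence refcount;
	 * uninitialized ones were only allocated and can be freed directly.
	 */
	if (f->ops)
		dma_fence_put(f);
	else
		kfree(container_of(f, struct pvr_queue_fence, base));
}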
228 * pvr_queue_fence_alloc() - Allocate a pvr_queue_fence fence object
248 return &fence->base; in pvr_queue_fence_alloc()
252 * pvr_queue_fence_init() - Initializes a pvr_queue_fence object.
269 pvr_context_get(queue->ctx); in pvr_queue_fence_init()
270 fence->queue = queue; in pvr_queue_fence_init()
271 dma_fence_init(&fence->base, fence_ops, in pvr_queue_fence_init()
272 &fence_ctx->lock, fence_ctx->id, in pvr_queue_fence_init()
273 atomic_inc_return(&fence_ctx->seqno)); in pvr_queue_fence_init()
277 * pvr_queue_cccb_fence_init() - Initializes a CCCB fence object.
290 &queue->cccb_fence_ctx.base); in pvr_queue_cccb_fence_init()
294 * pvr_queue_job_fence_init() - Initializes a job done fence object.
308 &queue->job_fence_ctx); in pvr_queue_job_fence_init()
312 * pvr_queue_fence_ctx_init() - Queue fence context initialization.
318 spin_lock_init(&fence_ctx->lock); in pvr_queue_fence_ctx_init()
319 fence_ctx->id = dma_fence_context_alloc(1); in pvr_queue_fence_ctx_init()
320 atomic_set(&fence_ctx->seqno, 0); in pvr_queue_fence_ctx_init()
325 /* We can pass at most ROGUE_FWIF_CCB_CMD_MAX_UFOS per UFO-related command. */ in ufo_cmds_size()
346 pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len); in job_cmds_size()
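
The sizing helpers referenced above split UFO operands into chunks of ROGUE_FWIF_CCB_CMD_MAX_UFOS, charging one CCCB command (with header) per chunk, and then add the job command itself; a sketch, with the exact arithmetic being an assumption:

static u32 ufo_cmds_size(u32 elem_count)
{
	/* We can pass at most ROGUE_FWIF_CCB_CMD_MAX_UFOS per UFO-related command. */
	u32 full_cmd_count = elem_count / ROGUE_FWIF_CCB_CMD_MAX_UFOS;
	u32 remaining_elems = elem_count % ROGUE_FWIF_CCB_CMD_MAX_UFOS;
	u32 size = full_cmd_count *
		   pvr_cccb_get_size_of_cmd_with_hdr(ROGUE_FWIF_CCB_CMD_MAX_UFOS *
						     sizeof(struct rogue_fwif_ufo));

	if (remaining_elems)
		size += pvr_cccb_get_size_of_cmd_with_hdr(remaining_elems *
							  sizeof(struct rogue_fwif_ufo));

	return size;
}

static u32 job_cmds_size(struct pvr_job *job, u32 ufo_wait_count)
{
	/* One UFO command to signal the job fence, UFO commands for the waits,
	 * and the job command itself.
	 */
	return ufo_cmds_size(1) + ufo_cmds_size(ufo_wait_count) +
	       pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len);
}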
350 * job_count_remaining_native_deps() - Count the number of non-signaled native dependencies.
353 * Returns: Number of non-signaled native deps remaining.
361 xa_for_each(&job->base.dependencies, index, fence) { in job_count_remaining_native_deps()
368 if (!dma_fence_is_signaled(&jfence->base)) in job_count_remaining_native_deps()
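
The counting loop walks the drm_sched job's dependency xarray and counts only fences that are UFO-backed and still pending; a sketch assembled from the fragments above:

static unsigned long job_count_remaining_native_deps(struct pvr_job *job)
{
	unsigned long remaining_count = 0;
	struct dma_fence *fence = NULL;
	unsigned long index;

	xa_for_each(&job->base.dependencies, index, fence) {
		struct pvr_queue_fence *jfence = to_pvr_queue_job_fence(fence);

		/* Non-native (non-UFO) dependencies are left to the scheduler. */
		if (!jfence)
			continue;

		if (!dma_fence_is_signaled(&jfence->base))
			remaining_count++;
	}

	return remaining_count;
}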
376 * pvr_queue_get_job_cccb_fence() - Get the CCCB fence attached to a job.
396 if (!job->cccb_fence) in pvr_queue_get_job_cccb_fence()
399 mutex_lock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_get_job_cccb_fence()
403 if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_get_job_cccb_fence()
404 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_get_job_cccb_fence()
405 job->cccb_fence = NULL; in pvr_queue_get_job_cccb_fence()
409 /* There should be no job attached to the CCCB fence context: in pvr_queue_get_job_cccb_fence()
412 if (WARN_ON(queue->cccb_fence_ctx.job)) in pvr_queue_get_job_cccb_fence()
413 pvr_job_put(queue->cccb_fence_ctx.job); in pvr_queue_get_job_cccb_fence()
415 queue->cccb_fence_ctx.job = pvr_job_get(job); in pvr_queue_get_job_cccb_fence()
418 cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base); in pvr_queue_get_job_cccb_fence()
419 if (!WARN_ON(cccb_fence->queue)) in pvr_queue_get_job_cccb_fence()
420 pvr_queue_cccb_fence_init(job->cccb_fence, queue); in pvr_queue_get_job_cccb_fence()
423 mutex_unlock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_get_job_cccb_fence()
425 return dma_fence_get(job->cccb_fence); in pvr_queue_get_job_cccb_fence()
429 * pvr_queue_get_job_kccb_fence() - Get the KCCB fence attached to a job.
443 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_get_job_kccb_fence()
449 if (!job->kccb_fence) in pvr_queue_get_job_kccb_fence()
452 if (!WARN_ON(job->kccb_fence->ops)) { in pvr_queue_get_job_kccb_fence()
453 kccb_fence = pvr_kccb_reserve_slot(pvr_dev, job->kccb_fence); in pvr_queue_get_job_kccb_fence()
454 job->kccb_fence = NULL; in pvr_queue_get_job_kccb_fence()
463 struct pvr_job *frag_job = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ? in pvr_queue_get_paired_frag_job_dep()
464 job->paired_job : NULL; in pvr_queue_get_paired_frag_job_dep()
471 xa_for_each(&frag_job->base.dependencies, index, f) { in pvr_queue_get_paired_frag_job_dep()
477 if (f == &job->base.s_fence->scheduled) in pvr_queue_get_paired_frag_job_dep()
483 return frag_job->base.sched->ops->prepare_job(&frag_job->base, &queue->entity); in pvr_queue_get_paired_frag_job_dep()
487 * pvr_queue_prepare_job() - Return the next internal dependencies expressed as a dma_fence.
507 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) { in pvr_queue_prepare_job()
514 if (job->paired_job->has_pm_ref) in pvr_queue_prepare_job()
522 pvr_queue_job_fence_init(job->done_fence, in pvr_queue_prepare_job()
523 job->ctx->queues.fragment); in pvr_queue_prepare_job()
525 pvr_queue_job_fence_init(job->done_fence, queue); in pvr_queue_prepare_job()
554 * pvr_queue_update_active_state_locked() - Update the queue active state.
562 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state_locked()
564 lockdep_assert_held(&pvr_dev->queues.lock); in pvr_queue_update_active_state_locked()
568 * to re-insert it behind our back. in pvr_queue_update_active_state_locked()
570 if (list_empty(&queue->node)) in pvr_queue_update_active_state_locked()
573 if (!atomic_read(&queue->in_flight_job_count)) in pvr_queue_update_active_state_locked()
574 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_update_active_state_locked()
576 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_update_active_state_locked()
580 * pvr_queue_update_active_state() - Update the queue active state.
594 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state()
596 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_update_active_state()
598 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_update_active_state()
603 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_submit_job_to_cccb()
605 struct pvr_cccb *cccb = &queue->cccb; in pvr_queue_submit_job_to_cccb()
615 atomic_inc(&queue->in_flight_job_count); in pvr_queue_submit_job_to_cccb()
618 xa_for_each(&job->base.dependencies, index, fence) { in pvr_queue_submit_job_to_cccb()
624 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job && in pvr_queue_submit_job_to_cccb()
625 &job->paired_job->base.s_fence->scheduled == fence) in pvr_queue_submit_job_to_cccb()
628 if (dma_fence_is_signaled(&jfence->base)) in pvr_queue_submit_job_to_cccb()
631 pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
633 ufos[ufo_count++].value = jfence->base.seqno; in pvr_queue_submit_job_to_cccb()
643 if (job->type == DRM_PVR_JOB_TYPE_FRAGMENT && job->paired_job) { in pvr_queue_submit_job_to_cccb()
644 jfence = to_pvr_queue_job_fence(job->paired_job->done_fence); in pvr_queue_submit_job_to_cccb()
646 pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
648 ufos[ufo_count++].value = job->paired_job->done_fence->seqno; in pvr_queue_submit_job_to_cccb()
657 if (job->type == DRM_PVR_JOB_TYPE_GEOMETRY && job->paired_job) { in pvr_queue_submit_job_to_cccb()
658 struct rogue_fwif_cmd_geom *cmd = job->cmd; in pvr_queue_submit_job_to_cccb()
663 pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
664 &cmd->partial_render_geom_frag_fence.addr); in pvr_queue_submit_job_to_cccb()
665 cmd->partial_render_geom_frag_fence.value = job->done_fence->seqno - 1; in pvr_queue_submit_job_to_cccb()
669 pvr_cccb_write_command_with_header(cccb, job->fw_ccb_cmd_type, job->cmd_len, job->cmd, in pvr_queue_submit_job_to_cccb()
670 job->id, job->id); in pvr_queue_submit_job_to_cccb()
673 pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, &ufos[0].addr); in pvr_queue_submit_job_to_cccb()
674 ufos[0].value = job->done_fence->seqno; in pvr_queue_submit_job_to_cccb()
680 * pvr_queue_run_job() - Submit a job to the FW.
683 * This function is called when all non-native dependencies have been met and
689 struct pvr_device *pvr_dev = job->pvr_dev; in pvr_queue_run_job()
696 if (job->paired_job && job->type == DRM_PVR_JOB_TYPE_FRAGMENT && in pvr_queue_run_job()
697 job->done_fence->ops) { in pvr_queue_run_job()
698 return dma_fence_get(job->done_fence); in pvr_queue_run_job()
707 if (WARN_ON(job->paired_job && in pvr_queue_run_job()
708 (job->type != DRM_PVR_JOB_TYPE_GEOMETRY || in pvr_queue_run_job()
709 job->paired_job->type != DRM_PVR_JOB_TYPE_FRAGMENT || in pvr_queue_run_job()
710 job->hwrt != job->paired_job->hwrt || in pvr_queue_run_job()
711 job->ctx != job->paired_job->ctx))) in pvr_queue_run_job()
712 return ERR_PTR(-EINVAL); in pvr_queue_run_job()
718 if (job->paired_job) { in pvr_queue_run_job()
719 err = pvr_job_get_pm_ref(job->paired_job); in pvr_queue_run_job()
727 if (job->paired_job) { in pvr_queue_run_job()
729 struct pvr_job *frag_job = job->paired_job; in pvr_queue_run_job()
730 struct pvr_queue *geom_queue = job->ctx->queues.geometry; in pvr_queue_run_job()
731 struct pvr_queue *frag_queue = job->ctx->queues.fragment; in pvr_queue_run_job()
736 &geom_queue->cccb, &frag_queue->cccb, in pvr_queue_run_job()
737 pvr_context_get_fw_addr(geom_job->ctx) + in pvr_queue_run_job()
738 geom_queue->ctx_offset, in pvr_queue_run_job()
739 pvr_context_get_fw_addr(frag_job->ctx) + in pvr_queue_run_job()
740 frag_queue->ctx_offset, in pvr_queue_run_job()
741 job->hwrt, in pvr_queue_run_job()
742 frag_job->fw_ccb_cmd_type == in pvr_queue_run_job()
745 struct pvr_queue *queue = container_of(job->base.sched, in pvr_queue_run_job()
748 pvr_cccb_send_kccb_kick(pvr_dev, &queue->cccb, in pvr_queue_run_job()
749 pvr_context_get_fw_addr(job->ctx) + queue->ctx_offset, in pvr_queue_run_job()
750 job->hwrt); in pvr_queue_run_job()
753 return dma_fence_get(job->done_fence); in pvr_queue_run_job()
758 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); in pvr_queue_stop()
765 /* Make sure we CPU-signal the UFO object, so other queues don't get in pvr_queue_start()
768 *queue->timeline_ufo.value = atomic_read(&queue->job_fence_ctx.seqno); in pvr_queue_start()
770 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) { in pvr_queue_start()
771 if (dma_fence_is_signaled(job->done_fence)) { in pvr_queue_start()
773 * In that case, re-assign the parent field to the done_fence. in pvr_queue_start()
775 WARN_ON(job->base.s_fence->parent); in pvr_queue_start()
776 job->base.s_fence->parent = dma_fence_get(job->done_fence); in pvr_queue_start()
781 atomic_set(&queue->ctx->faulty, 1); in pvr_queue_start()
785 drm_sched_start(&queue->scheduler); in pvr_queue_start()
789 * pvr_queue_timedout_job() - Handle a job timeout event.
793 * the scheduler, and re-assign parent fences in the middle.
801 struct drm_gpu_scheduler *sched = s_job->sched; in pvr_queue_timedout_job()
803 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_timedout_job()
807 dev_err(sched->dev, "Job timeout\n"); in pvr_queue_timedout_job()
811 * until the scheduler is really stopped doesn't end up re-inserting the in pvr_queue_timedout_job()
819 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
820 list_del_init(&queue->node); in pvr_queue_timedout_job()
821 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
825 /* Re-assign job parent fences. */ in pvr_queue_timedout_job()
826 list_for_each_entry(job, &sched->pending_list, base.list) { in pvr_queue_timedout_job()
827 job->base.s_fence->parent = dma_fence_get(job->done_fence); in pvr_queue_timedout_job()
830 WARN_ON(atomic_read(&queue->in_flight_job_count) != job_count); in pvr_queue_timedout_job()
832 /* Re-insert the queue in the proper list, and kick a queue processing in pvr_queue_timedout_job()
835 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
837 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_timedout_job()
839 atomic_set(&queue->in_flight_job_count, job_count); in pvr_queue_timedout_job()
840 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_timedout_job()
843 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
851 * pvr_queue_free_job() - Release the reference the scheduler had on a job object.
859 job->paired_job = NULL; in pvr_queue_free_job()
871 * pvr_queue_fence_is_ufo_backed() - Check if a dma_fence is backed by a UFO object
874 * A UFO-backed fence is a fence that can be signaled or waited upon FW-side.
875 * pvr_job::done_fence objects are backed by the timeline UFO attached to the queue
877 * world, so we also need to check if the fence we're being passed is a
885 sched_fence->sched->ops == &pvr_queue_sched_ops) in pvr_queue_fence_is_ufo_backed()
888 if (f && f->ops == &pvr_queue_job_fence_ops) in pvr_queue_fence_is_ufo_backed()
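
Combining the two checks visible in the fragments, the helper presumably treats both a pvr_queue job fence and a drm_sched finished fence whose scheduler belongs to a pvr_queue as UFO-backed; a sketch:

static bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f)
{
	struct drm_sched_fence *sched_fence = f ? to_drm_sched_fence(f) : NULL;

	/* Finished fence of a job scheduled on one of our queues. */
	if (sched_fence &&
	    sched_fence->sched->ops == &pvr_queue_sched_ops)
		return true;

	/* Raw job done fence, backed by the queue's timeline UFO. */
	if (f && f->ops == &pvr_queue_job_fence_ops)
		return true;

	return false;
}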
895 * pvr_queue_signal_done_fences() - Signal done fences.
899 * the UFO object attached to the queue.
907 spin_lock(&queue->scheduler.job_list_lock); in pvr_queue_signal_done_fences()
908 cur_seqno = *queue->timeline_ufo.value; in pvr_queue_signal_done_fences()
909 list_for_each_entry_safe(job, tmp_job, &queue->scheduler.pending_list, base.list) { in pvr_queue_signal_done_fences()
910 if ((int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0) in pvr_queue_signal_done_fences()
913 if (!dma_fence_is_signaled(job->done_fence)) { in pvr_queue_signal_done_fences()
914 dma_fence_signal(job->done_fence); in pvr_queue_signal_done_fences()
916 atomic_dec(&queue->in_flight_job_count); in pvr_queue_signal_done_fences()
919 spin_unlock(&queue->scheduler.job_list_lock); in pvr_queue_signal_done_fences()
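
The comparison in pvr_queue_signal_done_fences() relies on unsigned subtraction cast to a signed int so the 32-bit timeline UFO value can wrap without breaking ordering; a small illustration with made-up values:

/* With cur_seqno = 3 and a job seqno of 0xfffffffe (queued just before the
 * UFO wrapped), (int)(3 - 0xfffffffe) == 5 > 0, so the job is correctly
 * treated as completed. With a job seqno of 7, (int)(3 - 7) == -4 < 0, so
 * the loop stops before signaling a fence the FW has not reached yet.
 */
if ((int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0)
	break;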
923 * pvr_queue_check_job_waiting_for_cccb_space() - Check if the job waiting for CCCB space
938 mutex_lock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_check_job_waiting_for_cccb_space()
939 job = queue->cccb_fence_ctx.job; in pvr_queue_check_job_waiting_for_cccb_space()
943 /* If we have a job attached to the CCCB fence context, its CCCB fence in pvr_queue_check_job_waiting_for_cccb_space()
946 if (WARN_ON(!job->cccb_fence)) { in pvr_queue_check_job_waiting_for_cccb_space()
952 cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base); in pvr_queue_check_job_waiting_for_cccb_space()
953 if (WARN_ON(!cccb_fence->queue)) { in pvr_queue_check_job_waiting_for_cccb_space()
963 if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_check_job_waiting_for_cccb_space()
968 dma_fence_signal(job->cccb_fence); in pvr_queue_check_job_waiting_for_cccb_space()
969 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_check_job_waiting_for_cccb_space()
970 job->cccb_fence = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
971 queue->cccb_fence_ctx.job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
974 mutex_unlock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_check_job_waiting_for_cccb_space()
980 * pvr_queue_process() - Process events that happened on a queue.
987 lockdep_assert_held(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_process()
996 switch (queue->type) { in get_dm_type()
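
get_dm_type() maps the queue's UAPI job type onto the firmware data-master enum; a sketch, with the exact PVR_FWIF_DM_* constant names being assumptions:

static u32 get_dm_type(struct pvr_queue *queue)
{
	switch (queue->type) {
	case DRM_PVR_JOB_TYPE_GEOMETRY:
		return PVR_FWIF_DM_GEOM;
	case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
	case DRM_PVR_JOB_TYPE_FRAGMENT:
		return PVR_FWIF_DM_FRAG;
	case DRM_PVR_JOB_TYPE_COMPUTE:
		return PVR_FWIF_DM_CDM;
	}

	return ~0;
}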
1010 * init_fw_context() - Initializes the queue part of a FW context.
1014  * FW contexts contain various states, one of them being a per-queue state in init_fw_context()
1020 struct pvr_context *ctx = queue->ctx; in init_fw_context()
1021 struct pvr_fw_object *fw_mem_ctx_obj = pvr_vm_get_fw_mem_context(ctx->vm_ctx); in init_fw_context()
1023 struct pvr_cccb *cccb = &queue->cccb; in init_fw_context()
1025 cctx_fw = fw_ctx_map + queue->ctx_offset; in init_fw_context()
1026 cctx_fw->ccbctl_fw_addr = cccb->ctrl_fw_addr; in init_fw_context()
1027 cctx_fw->ccb_fw_addr = cccb->cccb_fw_addr; in init_fw_context()
1029 cctx_fw->dm = get_dm_type(queue); in init_fw_context()
1030 cctx_fw->priority = ctx->priority; in init_fw_context()
1031 cctx_fw->priority_seq_num = 0; in init_fw_context()
1032 cctx_fw->max_deadline_ms = MAX_DEADLINE_MS; in init_fw_context()
1033 cctx_fw->pid = task_tgid_nr(current); in init_fw_context()
1034 cctx_fw->server_common_context_id = ctx->ctx_id; in init_fw_context()
1036 pvr_fw_object_get_fw_addr(fw_mem_ctx_obj, &cctx_fw->fw_mem_context_fw_addr); in init_fw_context()
1038 pvr_fw_object_get_fw_addr(queue->reg_state_obj, &cctx_fw->context_state_addr); in init_fw_context()
1042 * pvr_queue_cleanup_fw_context() - Wait for the FW context to be idle and clean it up.
1051 if (!queue->ctx->fw_obj) in pvr_queue_cleanup_fw_context()
1054 return pvr_fw_structure_cleanup(queue->ctx->pvr_dev, in pvr_queue_cleanup_fw_context()
1056 queue->ctx->fw_obj, queue->ctx_offset); in pvr_queue_cleanup_fw_context()
1060 * pvr_queue_job_init() - Initialize queue related fields in a pvr_job object.
1074 u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0; in pvr_queue_job_init()
1078 if (atomic_read(&job->ctx->faulty)) in pvr_queue_job_init()
1079 return -EIO; in pvr_queue_job_init()
1081 queue = pvr_context_get_queue_for_job(job->ctx, job->type); in pvr_queue_job_init()
1083 return -EINVAL; in pvr_queue_job_init()
1085 if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count))) in pvr_queue_job_init()
1086 return -E2BIG; in pvr_queue_job_init()
1088 err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE); in pvr_queue_job_init()
1092 job->cccb_fence = pvr_queue_fence_alloc(); in pvr_queue_job_init()
1093 job->kccb_fence = pvr_kccb_fence_alloc(); in pvr_queue_job_init()
1094 job->done_fence = pvr_queue_fence_alloc(); in pvr_queue_job_init()
1095 if (!job->cccb_fence || !job->kccb_fence || !job->done_fence) in pvr_queue_job_init()
1096 return -ENOMEM; in pvr_queue_job_init()
1102 * pvr_queue_job_arm() - Arm a job object.
1109 * we do multi-job submission, and something failed when creating/initializing
1118 drm_sched_job_arm(&job->base); in pvr_queue_job_arm()
1120 return &job->base.s_fence->finished; in pvr_queue_job_arm()
1124 * pvr_queue_job_cleanup() - Cleanup fence/scheduler related fields in the job object.
1131 pvr_queue_fence_put(job->done_fence); in pvr_queue_job_cleanup()
1132 pvr_queue_fence_put(job->cccb_fence); in pvr_queue_job_cleanup()
1133 pvr_kccb_fence_put(job->kccb_fence); in pvr_queue_job_cleanup()
1135 if (job->base.s_fence) in pvr_queue_job_cleanup()
1136 drm_sched_job_cleanup(&job->base); in pvr_queue_job_cleanup()
1140 * pvr_queue_job_push() - Push a job to its queue.
1145 * the drm_sched_entity attached to the queue. We grab a reference on
1151 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_job_push()
1154 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_job_push()
1155 queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled); in pvr_queue_job_push()
1158 drm_sched_entity_push_job(&job->base); in pvr_queue_job_push()
1165 if (queue->type == DRM_PVR_JOB_TYPE_GEOMETRY) { in reg_state_init()
1168 geom_ctx_state_fw->geom_core[0].geom_reg_vdm_call_stack_pointer_init = in reg_state_init()
1169 queue->callstack_addr; in reg_state_init()
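
reg_state_init() is the init callback passed to the register-state FW object creation; for geometry queues it seeds the VDM call-stack pointer from the queue's callstack address, as the fragments show. A sketch, with the (void *cpu_ptr, void *priv) callback signature being an assumption:

static void reg_state_init(void *cpu_ptr, void *priv)
{
	struct pvr_queue *queue = priv;

	if (queue->type == DRM_PVR_JOB_TYPE_GEOMETRY) {
		struct rogue_fwif_geom_ctx_state *geom_ctx_state_fw = cpu_ptr;

		geom_ctx_state_fw->geom_core[0].geom_reg_vdm_call_stack_pointer_init =
			queue->callstack_addr;
	}
}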
1174 * pvr_queue_create() - Create a queue object.
1175 * @ctx: The context this queue will be attached to.
1212 struct pvr_device *pvr_dev = ctx->pvr_dev; in pvr_queue_create()
1219 return ERR_PTR(-EINVAL); in pvr_queue_create()
1221 switch (ctx->type) { in pvr_queue_create()
1225 return ERR_PTR(-EINVAL); in pvr_queue_create()
1229 return ERR_PTR(-EINVAL); in pvr_queue_create()
1233 return ERR_PTR(-EINVAL); in pvr_queue_create()
1236 return ERR_PTR(-EINVAL); in pvr_queue_create()
1245 return ERR_PTR(-ENOMEM); in pvr_queue_create()
1247 queue->type = type; in pvr_queue_create()
1248 queue->ctx_offset = get_ctx_offset(type); in pvr_queue_create()
1249 queue->ctx = ctx; in pvr_queue_create()
1250 queue->callstack_addr = args->callstack_addr; in pvr_queue_create()
1251 sched = &queue->scheduler; in pvr_queue_create()
1252 INIT_LIST_HEAD(&queue->node); in pvr_queue_create()
1253 mutex_init(&queue->cccb_fence_ctx.job_lock); in pvr_queue_create()
1254 pvr_queue_fence_ctx_init(&queue->cccb_fence_ctx.base); in pvr_queue_create()
1255 pvr_queue_fence_ctx_init(&queue->job_fence_ctx); in pvr_queue_create()
1257 err = pvr_cccb_init(pvr_dev, &queue->cccb, props[type].cccb_size, props[type].name); in pvr_queue_create()
1263 reg_state_init, queue, &queue->reg_state_obj); in pvr_queue_create()
1270 args->callstack_addr) { in pvr_queue_create()
1271 err = -EINVAL; in pvr_queue_create()
1275 cpu_map = pvr_fw_object_create_and_map(pvr_dev, sizeof(*queue->timeline_ufo.value), in pvr_queue_create()
1277 NULL, NULL, &queue->timeline_ufo.fw_obj); in pvr_queue_create()
1283 queue->timeline_ufo.value = cpu_map; in pvr_queue_create()
1285 err = drm_sched_init(&queue->scheduler, in pvr_queue_create()
1287 pvr_dev->sched_wq, 1, 64 * 1024, 1, in pvr_queue_create()
1289 pvr_dev->sched_wq, NULL, "pvr-queue", in pvr_queue_create()
1290 pvr_dev->base.dev); in pvr_queue_create()
1294 err = drm_sched_entity_init(&queue->entity, in pvr_queue_create()
1296 &sched, 1, &ctx->faulty); in pvr_queue_create()
1300 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_create()
1301 list_add_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_create()
1302 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_create()
1307 drm_sched_fini(&queue->scheduler); in pvr_queue_create()
1310 pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); in pvr_queue_create()
1313 pvr_fw_object_destroy(queue->reg_state_obj); in pvr_queue_create()
1316 pvr_cccb_fini(&queue->cccb); in pvr_queue_create()
1319 mutex_destroy(&queue->cccb_fence_ctx.job_lock); in pvr_queue_create()
1329 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_device_pre_reset()
1330 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_pre_reset()
1332 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_pre_reset()
1334 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_device_pre_reset()
1341 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_device_post_reset()
1342 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_post_reset()
1344 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_post_reset()
1346 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_device_post_reset()
1350 * pvr_queue_kill() - Kill a queue.
1360 drm_sched_entity_destroy(&queue->entity); in pvr_queue_kill()
1361 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_kill()
1362 queue->last_queued_job_scheduled_fence = NULL; in pvr_queue_kill()
1366 * pvr_queue_destroy() - Destroy a queue.
1369 * Cleanup the queue and free the resources attached to it. Should be
1377 mutex_lock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1378 list_del_init(&queue->node); in pvr_queue_destroy()
1379 mutex_unlock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1381 drm_sched_fini(&queue->scheduler); in pvr_queue_destroy()
1382 drm_sched_entity_fini(&queue->entity); in pvr_queue_destroy()
1384 if (WARN_ON(queue->last_queued_job_scheduled_fence)) in pvr_queue_destroy()
1385 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_destroy()
1389 pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); in pvr_queue_destroy()
1390 pvr_fw_object_destroy(queue->reg_state_obj); in pvr_queue_destroy()
1391 pvr_cccb_fini(&queue->cccb); in pvr_queue_destroy()
1392 mutex_destroy(&queue->cccb_fence_ctx.job_lock); in pvr_queue_destroy()
1397 * pvr_queue_device_init() - Device-level initialization of queue related fields.
1410 INIT_LIST_HEAD(&pvr_dev->queues.active); in pvr_queue_device_init()
1411 INIT_LIST_HEAD(&pvr_dev->queues.idle); in pvr_queue_device_init()
1412 err = drmm_mutex_init(from_pvr_device(pvr_dev), &pvr_dev->queues.lock); in pvr_queue_device_init()
1416 pvr_dev->sched_wq = alloc_workqueue("powervr-sched", WQ_UNBOUND, 0); in pvr_queue_device_init()
1417 if (!pvr_dev->sched_wq) in pvr_queue_device_init()
1418 return -ENOMEM; in pvr_queue_device_init()
1424 * pvr_queue_device_fini() - Device-level cleanup of queue related fields.
1427 * Cleanup/free all queue-related resources attached to a pvr_device object.
1431 destroy_workqueue(pvr_dev->sched_wq); in pvr_queue_device_fini()