Lines Matching full:queue

86 WARN(1, "Invalid queue type"); in get_ctx_state_size()
116 pvr_context_put(fence->queue->ctx); in pvr_queue_fence_release_work()
123 struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev; in pvr_queue_fence_release()
133 switch (fence->queue->type) { in pvr_queue_job_fence_get_timeline_name()
147 WARN(1, "Invalid queue type"); in pvr_queue_job_fence_get_timeline_name()
156 switch (fence->queue->type) { in pvr_queue_cccb_fence_get_timeline_name()
170 WARN(1, "Invalid queue type"); in pvr_queue_cccb_fence_get_timeline_name()
262 * @queue: The queue this fence belongs to.
267 * pvr_queue_fence::queue field too.
271 struct pvr_queue *queue, in pvr_queue_fence_init() argument
277 pvr_context_get(queue->ctx); in pvr_queue_fence_init()
278 fence->queue = queue; in pvr_queue_fence_init()
288 * @queue: The queue this fence belongs to.
296 pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue) in pvr_queue_cccb_fence_init() argument
298 pvr_queue_fence_init(fence, queue, &pvr_queue_cccb_fence_ops, in pvr_queue_cccb_fence_init()
299 &queue->cccb_fence_ctx.base); in pvr_queue_cccb_fence_init()
305 * @queue: The queue this fence belongs to.
314 pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue) in pvr_queue_job_fence_init() argument
317 pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops, in pvr_queue_job_fence_init()
318 &queue->job_fence_ctx); in pvr_queue_job_fence_init()
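The two wrappers above differ only in the fence ops and the fence context they hand to the shared initializer. Pieced together from the fragments at lines 271-278, that helper plausibly looks like the sketch below; the container_of() cast and the dma_fence_init() arguments (a pvr_queue_fence_ctx with lock/id/seqno fields) are assumptions, not confirmed by the listing.

    static void
    pvr_queue_fence_init(struct dma_fence *f, struct pvr_queue *queue,
                         const struct dma_fence_ops *fence_ops,
                         struct pvr_queue_fence_ctx *fence_ctx)
    {
        struct pvr_queue_fence *fence =
            container_of(f, struct pvr_queue_fence, base);

        /* The fence pins the context so the queue outlives the fence;
         * the reference is dropped in pvr_queue_fence_release_work()
         * (line 116). */
        pvr_context_get(queue->ctx);
        fence->queue = queue;
        dma_fence_init(&fence->base, fence_ops, &fence_ctx->lock,
                       fence_ctx->id, atomic_inc_return(&fence_ctx->seqno));
    }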
322 * pvr_queue_fence_ctx_init() - Queue fence context initialization.
387 * @queue: The queue this job will be submitted to.
398 pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_job_cccb_fence() argument
409 mutex_lock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_get_job_cccb_fence()
413 if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_get_job_cccb_fence()
422 if (WARN_ON(queue->cccb_fence_ctx.job)) in pvr_queue_get_job_cccb_fence()
423 pvr_job_put(queue->cccb_fence_ctx.job); in pvr_queue_get_job_cccb_fence()
425 queue->cccb_fence_ctx.job = pvr_job_get(job); in pvr_queue_get_job_cccb_fence()
429 if (!WARN_ON(cccb_fence->queue)) in pvr_queue_get_job_cccb_fence()
430 pvr_queue_cccb_fence_init(job->cccb_fence, queue); in pvr_queue_get_job_cccb_fence()
433 mutex_unlock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_get_job_cccb_fence()
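The fragments above show the CCCB back-pressure scheme: under cccb_fence_ctx.job_lock, a job whose command sequence fits returns no internal dependency, while an oversized one is parked as the single waiting job behind a freshly initialized CCCB fence. A condensed sketch, in which the job_count_remaining_native_deps() helper and the dma_fence_get() return convention are assumptions:

    static struct dma_fence *
    pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job)
    {
        struct pvr_queue_fence *cccb_fence;
        u32 native_deps_remaining = job_count_remaining_native_deps(job);

        mutex_lock(&queue->cccb_fence_ctx.job_lock);

        /* Enough CCCB space left: no internal dependency needed. */
        if (pvr_cccb_cmdseq_fits(&queue->cccb,
                                 job_cmds_size(job, native_deps_remaining))) {
            dma_fence_put(job->cccb_fence);
            job->cccb_fence = NULL;
            goto out_unlock;
        }

        /* Park the job; pvr_queue_check_job_waiting_for_cccb_space()
         * signals the fence once enough space has been freed. */
        if (WARN_ON(queue->cccb_fence_ctx.job))
            pvr_job_put(queue->cccb_fence_ctx.job);
        queue->cccb_fence_ctx.job = pvr_job_get(job);

        cccb_fence = container_of(job->cccb_fence, struct pvr_queue_fence, base);
        if (!WARN_ON(cccb_fence->queue))
            pvr_queue_cccb_fence_init(job->cccb_fence, queue);

    out_unlock:
        mutex_unlock(&queue->cccb_fence_ctx.job_lock);
        return dma_fence_get(job->cccb_fence);
    }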
440 * @queue: The queue this job will be submitted to.
451 pvr_queue_get_job_kccb_fence(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_job_kccb_fence() argument
453 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_get_job_kccb_fence()
471 pvr_queue_get_paired_frag_job_dep(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_paired_frag_job_dep() argument
493 return frag_job->base.sched->ops->prepare_job(&frag_job->base, &queue->entity); in pvr_queue_get_paired_frag_job_dep()
499 * @s_entity: The entity this job is queued on.
509 struct pvr_queue *queue = container_of(s_entity, struct pvr_queue, entity); in pvr_queue_prepare_job() local
535 pvr_queue_job_fence_init(job->done_fence, queue); in pvr_queue_prepare_job()
541 internal_dep = pvr_queue_get_job_cccb_fence(queue, job); in pvr_queue_prepare_job()
543 /* KCCB fence is used to make sure we have a KCCB slot to queue our in pvr_queue_prepare_job()
547 internal_dep = pvr_queue_get_job_kccb_fence(queue, job); in pvr_queue_prepare_job()
553 * internal_dep = pvr_queue_get_job_xxxx_fence(queue, job); in pvr_queue_prepare_job()
558 internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job); in pvr_queue_prepare_job()
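Lines 509-558 fix the order in which pvr_queue_prepare_job() resolves internal dependencies: arm the done fence on the queue's job timeline, then return the first pending one of the CCCB fence, the KCCB fence, and the paired-fragment-job dependency. The early-out control flow in this skeleton is an assumption; only the call order comes from the fragments.

    static struct dma_fence *
    pvr_queue_prepare_job(struct drm_sched_job *sched_job,
                          struct drm_sched_entity *s_entity)
    {
        struct pvr_job *job = container_of(sched_job, struct pvr_job, base);
        struct pvr_queue *queue =
            container_of(s_entity, struct pvr_queue, entity);
        struct dma_fence *internal_dep;

        /* Bind the done fence to this queue's job timeline. */
        pvr_queue_job_fence_init(job->done_fence, queue);

        /* CCCB fence: guarantees client CCCB space for the command stream. */
        internal_dep = pvr_queue_get_job_cccb_fence(queue, job);

        /* KCCB fence: guarantees a KCCB slot to queue the kick command. */
        if (!internal_dep)
            internal_dep = pvr_queue_get_job_kccb_fence(queue, job);

        /* Order a fragment job after its paired geometry job, if any. */
        if (!internal_dep)
            internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job);

        return internal_dep;
    }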
564 * pvr_queue_update_active_state_locked() - Update the queue active state.
565 * @queue: Queue to update the state on.
568 * pvr_device::queues::lock held.
570 static void pvr_queue_update_active_state_locked(struct pvr_queue *queue) in pvr_queue_update_active_state_locked() argument
572 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state_locked()
576 /* The queue is temporarily out of any list when it's being reset, in pvr_queue_update_active_state_locked()
580 if (list_empty(&queue->node)) in pvr_queue_update_active_state_locked()
583 if (!atomic_read(&queue->in_flight_job_count)) in pvr_queue_update_active_state_locked()
584 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_update_active_state_locked()
586 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_update_active_state_locked()
590 * pvr_queue_update_active_state() - Update the queue active state.
591 * @queue: Queue to update the state on.
595 * Updating the active state implies moving the queue in or out of the
596 * active queue list, which also defines whether the queue is checked
602 static void pvr_queue_update_active_state(struct pvr_queue *queue) in pvr_queue_update_active_state() argument
604 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state()
607 pvr_queue_update_active_state_locked(queue); in pvr_queue_update_active_state()
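The _locked/wrapper split at lines 570-607 follows the usual kernel convention: the wrapper only takes pvr_device::queues::lock and delegates. Both halves assemble almost verbatim from the fragments; the lockdep assertion and the else branch are the only assumed additions.

    static void pvr_queue_update_active_state_locked(struct pvr_queue *queue)
    {
        struct pvr_device *pvr_dev = queue->ctx->pvr_dev;

        lockdep_assert_held(&pvr_dev->queues.lock);

        /* The queue is temporarily out of any list while it's being
         * reset; don't re-insert it behind the reset logic's back. */
        if (list_empty(&queue->node))
            return;

        if (!atomic_read(&queue->in_flight_job_count))
            list_move_tail(&queue->node, &pvr_dev->queues.idle);
        else
            list_move_tail(&queue->node, &pvr_dev->queues.active);
    }

    static void pvr_queue_update_active_state(struct pvr_queue *queue)
    {
        struct pvr_device *pvr_dev = queue->ctx->pvr_dev;

        mutex_lock(&pvr_dev->queues.lock);
        pvr_queue_update_active_state_locked(queue);
        mutex_unlock(&pvr_dev->queues.lock);
    }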
613 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_submit_job_to_cccb() local
615 struct pvr_cccb *cccb = &queue->cccb; in pvr_queue_submit_job_to_cccb()
621 /* We need to add the queue to the active list before updating the CCCB, in pvr_queue_submit_job_to_cccb()
623 * happened on this queue. in pvr_queue_submit_job_to_cccb()
625 atomic_inc(&queue->in_flight_job_count); in pvr_queue_submit_job_to_cccb()
626 pvr_queue_update_active_state(queue); in pvr_queue_submit_job_to_cccb()
641 pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
656 pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
670 /* Reference value for the partial render test is the current queue fence in pvr_queue_submit_job_to_cccb()
673 pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
683 pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, &ufos[0].addr); in pvr_queue_submit_job_to_cccb()
755 struct pvr_queue *queue = container_of(job->base.sched, in pvr_queue_run_job() local
758 pvr_cccb_send_kccb_kick(pvr_dev, &queue->cccb, in pvr_queue_run_job()
759 pvr_context_get_fw_addr(job->ctx) + queue->ctx_offset, in pvr_queue_run_job()
766 static void pvr_queue_stop(struct pvr_queue *queue, struct pvr_job *bad_job) in pvr_queue_stop() argument
768 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); in pvr_queue_stop()
771 static void pvr_queue_start(struct pvr_queue *queue) in pvr_queue_start() argument
778 *queue->timeline_ufo.value = atomic_read(&queue->job_fence_ctx.seqno); in pvr_queue_start()
780 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) { in pvr_queue_start()
791 atomic_set(&queue->ctx->faulty, 1); in pvr_queue_start()
795 drm_sched_start(&queue->scheduler, 0); in pvr_queue_start()
812 struct pvr_queue *queue = container_of(sched, struct pvr_queue, scheduler); in pvr_queue_timedout_job() local
813 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_timedout_job()
819 /* Before we stop the scheduler, make sure the queue is out of any list, so in pvr_queue_timedout_job()
822 * queue in the active list. This would cause in pvr_queue_timedout_job()
830 list_del_init(&queue->node); in pvr_queue_timedout_job()
840 WARN_ON(atomic_read(&queue->in_flight_job_count) != job_count); in pvr_queue_timedout_job()
842 /* Re-insert the queue in the proper list, and kick a queue processing in pvr_queue_timedout_job()
847 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_timedout_job()
849 atomic_set(&queue->in_flight_job_count, job_count); in pvr_queue_timedout_job()
850 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_timedout_job()
851 pvr_queue_process(queue); in pvr_queue_timedout_job()
885 * pvr_job::done_fence objects are backed by the timeline UFO attached to the queue
906 * @queue: Queue to check.
909 * the UFO object attached to the queue.
912 pvr_queue_signal_done_fences(struct pvr_queue *queue) in pvr_queue_signal_done_fences() argument
917 spin_lock(&queue->scheduler.job_list_lock); in pvr_queue_signal_done_fences()
918 cur_seqno = *queue->timeline_ufo.value; in pvr_queue_signal_done_fences()
919 list_for_each_entry_safe(job, tmp_job, &queue->scheduler.pending_list, base.list) { in pvr_queue_signal_done_fences()
926 atomic_dec(&queue->in_flight_job_count); in pvr_queue_signal_done_fences()
929 spin_unlock(&queue->scheduler.job_list_lock); in pvr_queue_signal_done_fences()
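Lines 912-929 contain most of the UFO-driven retirement walk: the firmware bumps the timeline UFO value as jobs complete, and every done fence whose sequence number has been reached is signaled in submission order. In the sketch below, the wrap-safe seqno comparison and the pvr_job_release_pm_ref() call are assumptions filled in around the fragments.

    static void pvr_queue_signal_done_fences(struct pvr_queue *queue)
    {
        struct pvr_job *job, *tmp_job;
        u32 cur_seqno;

        spin_lock(&queue->scheduler.job_list_lock);
        cur_seqno = *queue->timeline_ufo.value;
        list_for_each_entry_safe(job, tmp_job,
                                 &queue->scheduler.pending_list, base.list) {
            /* Fences signal in order: stop at the first seqno the
             * firmware has not reached yet. */
            if ((int)(cur_seqno - lower_32_bits(job->done_fence->seqno)) < 0)
                break;

            if (!dma_fence_is_signaled(job->done_fence)) {
                dma_fence_signal(job->done_fence);
                pvr_job_release_pm_ref(job);
                atomic_dec(&queue->in_flight_job_count);
            }
        }
        spin_unlock(&queue->scheduler.job_list_lock);
    }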
936 * @queue: Queue to check
942 pvr_queue_check_job_waiting_for_cccb_space(struct pvr_queue *queue) in pvr_queue_check_job_waiting_for_cccb_space() argument
948 mutex_lock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_check_job_waiting_for_cccb_space()
949 job = queue->cccb_fence_ctx.job; in pvr_queue_check_job_waiting_for_cccb_space()
963 if (WARN_ON(!cccb_fence->queue)) { in pvr_queue_check_job_waiting_for_cccb_space()
973 if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_check_job_waiting_for_cccb_space()
981 queue->cccb_fence_ctx.job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
984 mutex_unlock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_check_job_waiting_for_cccb_space()
990 * pvr_queue_process() - Process events that happened on a queue.
991 * @queue: Queue to check
995 void pvr_queue_process(struct pvr_queue *queue) in pvr_queue_process() argument
997 lockdep_assert_held(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_process()
999 pvr_queue_check_job_waiting_for_cccb_space(queue); in pvr_queue_process()
1000 pvr_queue_signal_done_fences(queue); in pvr_queue_process()
1001 pvr_queue_update_active_state_locked(queue); in pvr_queue_process()
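The three fragments above are, in fact, the entire function body; assembled, the event-processing entry point reads:

    void pvr_queue_process(struct pvr_queue *queue)
    {
        lockdep_assert_held(&queue->ctx->pvr_dev->queues.lock);

        /* Unblock a job waiting on CCCB space, retire finished jobs,
         * then re-evaluate the queue's idle/active placement. */
        pvr_queue_check_job_waiting_for_cccb_space(queue);
        pvr_queue_signal_done_fences(queue);
        pvr_queue_update_active_state_locked(queue);
    }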
1004 static u32 get_dm_type(struct pvr_queue *queue) in get_dm_type() argument
1006 switch (queue->type) { in get_dm_type()
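The switch at line 1006 maps the UAPI job type onto a firmware data master for init_fw_context() below. A sketch; the PVR_FWIF_DM_* values and the case grouping are assumptions.

    static u32 get_dm_type(struct pvr_queue *queue)
    {
        switch (queue->type) {
        case DRM_PVR_JOB_TYPE_GEOMETRY:
            return PVR_FWIF_DM_GEOM;
        case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
        case DRM_PVR_JOB_TYPE_FRAGMENT:
            return PVR_FWIF_DM_FRAG;
        case DRM_PVR_JOB_TYPE_COMPUTE:
            return PVR_FWIF_DM_CDM;
        }

        return ~0;
    }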
1020 * init_fw_context() - Initializes the queue part of a FW context.
1021 * @queue: Queue object to initialize the FW context for.
1024 * FW contexts contain various states, one of them being a per-queue state
1025 * that needs to be initialized for each queue being exposed by a context. This
1028 static void init_fw_context(struct pvr_queue *queue, void *fw_ctx_map) in init_fw_context() argument
1030 struct pvr_context *ctx = queue->ctx; in init_fw_context()
1033 struct pvr_cccb *cccb = &queue->cccb; in init_fw_context()
1035 cctx_fw = fw_ctx_map + queue->ctx_offset; in init_fw_context()
1039 cctx_fw->dm = get_dm_type(queue); in init_fw_context()
1048 pvr_fw_object_get_fw_addr(queue->reg_state_obj, &cctx_fw->context_state_addr); in init_fw_context()
1053 * @queue: Queue on FW context to clean up.
1059 static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue) in pvr_queue_cleanup_fw_context() argument
1061 if (!queue->ctx->fw_obj) in pvr_queue_cleanup_fw_context()
1064 return pvr_fw_structure_cleanup(queue->ctx->pvr_dev, in pvr_queue_cleanup_fw_context()
1066 queue->ctx->fw_obj, queue->ctx_offset); in pvr_queue_cleanup_fw_context()
1070 * pvr_queue_job_init() - Initialize queue related fields in a pvr_job object.
1073 * Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm()
1085 struct pvr_queue *queue; in pvr_queue_job_init() local
1091 queue = pvr_context_get_queue_for_job(job->ctx, job->type); in pvr_queue_job_init()
1092 if (!queue) in pvr_queue_job_init()
1095 if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count))) in pvr_queue_job_init()
1098 err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE); in pvr_queue_job_init()
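Lines 1085-1098 give the shape of pvr_queue_job_init(): look up the queue serving this job type, reject a command stream that could never fit the CCCB even with all native deps signaled, then bind the job to the queue's scheduler entity. Error codes, the min_native_dep_count rule, and the omitted fence allocations are assumptions.

    int pvr_queue_job_init(struct pvr_job *job)
    {
        /* Assumed rule: a fragment job carries at least one native
         * dependency, its paired geometry job. */
        u32 min_native_dep_count =
            job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0;
        struct pvr_queue *queue;
        int err;

        queue = pvr_context_get_queue_for_job(job->ctx, job->type);
        if (!queue)
            return -EINVAL;

        /* An oversized command sequence can never be submitted. */
        if (!pvr_cccb_cmdseq_can_fit(&queue->cccb,
                                     job_cmds_size(job, min_native_dep_count)))
            return -E2BIG;

        err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE);
        if (err)
            return err;

        /* Allocation of the cccb/kccb/done fences is omitted here. */
        return 0;
    }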
1150 * pvr_queue_job_push() - Push a job to its queue.
1154 * have been added to the job. This will effectively queue the job to
1155 * the drm_sched_entity attached to the queue. We grab a reference on
1161 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_job_push() local
1164 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_job_push()
1165 queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled); in pvr_queue_job_push()
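The push path at lines 1161-1165, completed here with the job reference grab that the kerneldoc at line 1155 alludes to and a final drm_sched_entity_push_job() call; both completions are assumptions.

    void pvr_queue_job_push(struct pvr_job *job)
    {
        struct pvr_queue *queue =
            container_of(job->base.sched, struct pvr_queue, scheduler);

        /* Remember the last queued job's scheduled fence so a paired
         * fragment job can be ordered after it (see
         * pvr_queue_get_paired_frag_job_dep()). */
        dma_fence_put(queue->last_queued_job_scheduled_fence);
        queue->last_queued_job_scheduled_fence =
            dma_fence_get(&job->base.s_fence->scheduled);

        pvr_job_get(job);
        drm_sched_entity_push_job(&job->base);
    }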
1173 struct pvr_queue *queue = priv; in reg_state_init() local
1175 if (queue->type == DRM_PVR_JOB_TYPE_GEOMETRY) { in reg_state_init()
1179 queue->callstack_addr; in reg_state_init()
1184 * pvr_queue_create() - Create a queue object.
1185 * @ctx: The context this queue will be attached to.
1186 * @type: The type of jobs being pushed to this queue.
1190 * Create a queue object that will be used to queue and track jobs.
1224 struct pvr_queue *queue; in pvr_queue_create() local
1253 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in pvr_queue_create()
1254 if (!queue) in pvr_queue_create()
1257 queue->type = type; in pvr_queue_create()
1258 queue->ctx_offset = get_ctx_offset(type); in pvr_queue_create()
1259 queue->ctx = ctx; in pvr_queue_create()
1260 queue->callstack_addr = args->callstack_addr; in pvr_queue_create()
1261 sched = &queue->scheduler; in pvr_queue_create()
1262 INIT_LIST_HEAD(&queue->node); in pvr_queue_create()
1263 mutex_init(&queue->cccb_fence_ctx.job_lock); in pvr_queue_create()
1264 pvr_queue_fence_ctx_init(&queue->cccb_fence_ctx.base); in pvr_queue_create()
1265 pvr_queue_fence_ctx_init(&queue->job_fence_ctx); in pvr_queue_create()
1267 err = pvr_cccb_init(pvr_dev, &queue->cccb, props[type].cccb_size, props[type].name); in pvr_queue_create()
1273 reg_state_init, queue, &queue->reg_state_obj); in pvr_queue_create()
1277 init_fw_context(queue, fw_ctx_map); in pvr_queue_create()
1285 cpu_map = pvr_fw_object_create_and_map(pvr_dev, sizeof(*queue->timeline_ufo.value), in pvr_queue_create()
1287 NULL, NULL, &queue->timeline_ufo.fw_obj); in pvr_queue_create()
1293 queue->timeline_ufo.value = cpu_map; in pvr_queue_create()
1295 err = drm_sched_init(&queue->scheduler, in pvr_queue_create()
1299 pvr_dev->sched_wq, NULL, "pvr-queue", in pvr_queue_create()
1304 err = drm_sched_entity_init(&queue->entity, in pvr_queue_create()
1311 list_add_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_create()
1314 return queue; in pvr_queue_create()
1317 drm_sched_fini(&queue->scheduler); in pvr_queue_create()
1320 pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); in pvr_queue_create()
1323 pvr_fw_object_destroy(queue->reg_state_obj); in pvr_queue_create()
1326 pvr_cccb_fini(&queue->cccb); in pvr_queue_create()
1329 mutex_destroy(&queue->cccb_fence_ctx.job_lock); in pvr_queue_create()
1330 kfree(queue); in pvr_queue_create()
1337 struct pvr_queue *queue; in pvr_queue_device_pre_reset() local
1340 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_pre_reset()
1341 pvr_queue_stop(queue, NULL); in pvr_queue_device_pre_reset()
1342 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_pre_reset()
1343 pvr_queue_stop(queue, NULL); in pvr_queue_device_pre_reset()
1349 struct pvr_queue *queue; in pvr_queue_device_post_reset() local
1352 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_post_reset()
1353 pvr_queue_start(queue); in pvr_queue_device_post_reset()
1354 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_post_reset()
1355 pvr_queue_start(queue); in pvr_queue_device_post_reset()
1360 * pvr_queue_kill() - Kill a queue.
1361 * @queue: The queue to kill.
1363 * Kill the queue so no new jobs can be pushed. Should be called when the
1364 * context handle is destroyed. The queue object might last longer if jobs
1365 * are still in flight and holding a reference to the context this queue
1368 void pvr_queue_kill(struct pvr_queue *queue) in pvr_queue_kill() argument
1370 drm_sched_entity_destroy(&queue->entity); in pvr_queue_kill()
1371 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_kill()
1372 queue->last_queued_job_scheduled_fence = NULL; in pvr_queue_kill()
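All three statements of the kill path appear in the fragments above; together they make the complete function:

    void pvr_queue_kill(struct pvr_queue *queue)
    {
        /* Destroying the entity flushes pending jobs and rejects new
         * ones; then drop the cached scheduled fence. */
        drm_sched_entity_destroy(&queue->entity);
        dma_fence_put(queue->last_queued_job_scheduled_fence);
        queue->last_queued_job_scheduled_fence = NULL;
    }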
1376 * pvr_queue_destroy() - Destroy a queue.
1377 * @queue: The queue to destroy.
1379 * Cleanup the queue and free the resources attached to it. Should be
1382 void pvr_queue_destroy(struct pvr_queue *queue) in pvr_queue_destroy() argument
1384 if (!queue) in pvr_queue_destroy()
1387 mutex_lock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1388 list_del_init(&queue->node); in pvr_queue_destroy()
1389 mutex_unlock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1391 drm_sched_fini(&queue->scheduler); in pvr_queue_destroy()
1392 drm_sched_entity_fini(&queue->entity); in pvr_queue_destroy()
1394 if (WARN_ON(queue->last_queued_job_scheduled_fence)) in pvr_queue_destroy()
1395 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_destroy()
1397 pvr_queue_cleanup_fw_context(queue); in pvr_queue_destroy()
1399 pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); in pvr_queue_destroy()
1400 pvr_fw_object_destroy(queue->reg_state_obj); in pvr_queue_destroy()
1401 pvr_cccb_fini(&queue->cccb); in pvr_queue_destroy()
1402 mutex_destroy(&queue->cccb_fence_ctx.job_lock); in pvr_queue_destroy()
1403 kfree(queue); in pvr_queue_destroy()
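The destroy fragments at lines 1384-1403 line up into a complete teardown that mirrors pvr_queue_create() in reverse: unlink from the device queue lists, shut down the scheduler and entity, then release the FW context, FW objects, CCCB, and the queue itself. Assembled below; only the early return and the blank-line grouping are added.

    void pvr_queue_destroy(struct pvr_queue *queue)
    {
        if (!queue)
            return;

        mutex_lock(&queue->ctx->pvr_dev->queues.lock);
        list_del_init(&queue->node);
        mutex_unlock(&queue->ctx->pvr_dev->queues.lock);

        drm_sched_fini(&queue->scheduler);
        drm_sched_entity_fini(&queue->entity);

        if (WARN_ON(queue->last_queued_job_scheduled_fence))
            dma_fence_put(queue->last_queued_job_scheduled_fence);

        pvr_queue_cleanup_fw_context(queue);

        pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj);
        pvr_fw_object_destroy(queue->reg_state_obj);
        pvr_cccb_fini(&queue->cccb);
        mutex_destroy(&queue->cccb_fence_ctx.job_lock);
        kfree(queue);
    }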
1407 * pvr_queue_device_init() - Device-level initialization of queue related fields.
1410 * Initializes all fields related to queue management in pvr_device.
1434 * pvr_queue_device_fini() - Device-level cleanup of queue related fields.
1437 * Cleanup/free all queue-related resources attached to a pvr_device object.