Lines matching "q" (full identifier match) in the xe exec-queue code

(file-scope forward declaration)
  63  static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,

  66  static void __xe_exec_queue_free(struct xe_exec_queue *q)
  71  	if (q->tlb_inval[i].dep_scheduler)
  72  		xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler);
  74  	if (xe_exec_queue_uses_pxp(q))
  75  		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
  76  	if (q->vm)
  77  		xe_vm_put(q->vm);
  79  	if (q->xef)
  80  		xe_file_put(q->xef);
  82  	kfree(q);
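The free path above is the tail of the queue's reference counting: the last reference drop funnels through xe_exec_queue_destroy() (line 388 below), whose struct kref * signature matches the kref release callback. A minimal sketch of the assumed put side, following the usual kref idiom (the real helper likely lives in a header and may differ in detail):

	static inline void xe_exec_queue_put(struct xe_exec_queue *q)
	{
		/* Assumed idiom: the final put invokes the release callback,
		 * xe_exec_queue_destroy(), which ends in __xe_exec_queue_free(). */
		kref_put(&q->refcount, xe_exec_queue_destroy);
	}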
  85  static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q)
  87  	struct xe_tile *tile = gt_to_tile(q->gt);
 106  		dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name,
 111  		q->tlb_inval[i].dep_scheduler = dep_scheduler;
in __xe_exec_queue_alloc():
 124  	struct xe_exec_queue *q;
 131  	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);
 132  	if (!q)
 135  	kref_init(&q->refcount);
 136  	q->flags = flags;
 137  	q->hwe = hwe;
 138  	q->gt = gt;
 139  	q->class = hwe->class;
 140  	q->width = width;
 141  	q->msix_vec = XE_IRQ_DEFAULT_MSIX;
 142  	q->logical_mask = logical_mask;
 143  	q->fence_irq = &gt->fence_irq[hwe->class];
 144  	q->ring_ops = gt->ring_ops[hwe->class];
 145  	q->ops = gt->exec_queue_ops;
 146  	INIT_LIST_HEAD(&q->lr.link);
 147  	INIT_LIST_HEAD(&q->multi_gt_link);
 148  	INIT_LIST_HEAD(&q->hw_engine_group_link);
 149  	INIT_LIST_HEAD(&q->pxp.link);
 151  	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
 152  	q->sched_props.preempt_timeout_us =
 154  	q->sched_props.job_timeout_ms =
 156  	if (q->flags & EXEC_QUEUE_FLAG_KERNEL &&
 157  	    q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY)
 158  		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL;
 160  		q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
 162  	if (q->flags & (EXEC_QUEUE_FLAG_MIGRATE | EXEC_QUEUE_FLAG_VM)) {
 163  		err = alloc_dep_schedulers(xe, q);
 165  			__xe_exec_queue_free(q);
 171  		q->vm = xe_vm_get(vm);
 175  	 * may set q->usm, must come before xe_lrc_create(),
 176  	 * may overwrite q->sched_props, must come before q->ops->init()
 178  	err = exec_queue_user_extensions(xe, q, extensions, 0);
 180  		__xe_exec_queue_free(q);
 185  	return q;
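The allocation at line 131 is the flexible-array idiom: struct_size(q, lrc, width) computes sizeof(*q) plus width trailing lrc slots, with integer-overflow checking. A reduced sketch, with the struct trimmed to the members visible in this listing (types assumed):

	struct xe_exec_queue {
		struct kref refcount;		/* see kref_init() at line 135 */
		u16 width;			/* number of parallel submissions */
		/* ...many other members elided... */
		struct xe_lrc *lrc[];		/* flexible array: one LRC per width */
	};

	/* One kzalloc() covers the header plus all width LRC pointer slots. */
	q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL);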
 188  static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
 199  	if (xe_exec_queue_uses_pxp(q) &&
 200  	    (q->class == XE_ENGINE_CLASS_RENDER || q->class == XE_ENGINE_CLASS_COMPUTE)) {
 201  		if (GRAPHICS_VER(gt_to_xe(q->gt)) >= 20)
 210  	err = q->ops->init(q);
 215  	 * This must occur after q->ops->init to avoid race conditions during VF
 224  	for (i = 0; i < q->width; ++i) {
 227  		xe_gt_sriov_vf_wait_valid_ggtt(q->gt);
 228  		lrc = xe_lrc_create(q->hwe, q->vm, xe_lrc_ring_size(),
 229  				    q->msix_vec, flags);
 236  		WRITE_ONCE(q->lrc[i], lrc);
 243  		xe_lrc_put(q->lrc[i]);

 247  static void __xe_exec_queue_fini(struct xe_exec_queue *q)
 251  	q->ops->fini(q);
 253  	for (i = 0; i < q->width; ++i)
 254  		xe_lrc_put(q->lrc[i]);
in xe_exec_queue_create():
 262  	struct xe_exec_queue *q;
 268  	q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags,
 270  	if (IS_ERR(q))
 271  		return q;
 273  	err = __xe_exec_queue_init(q, flags);
 284  	if (xe_exec_queue_uses_pxp(q)) {
 285  		err = xe_pxp_exec_queue_add(xe->pxp, q);
 290  	return q;
 293  	__xe_exec_queue_fini(q);
 295  	__xe_exec_queue_free(q);
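Taken together, alloc + init + the error unwind above give the usual create/teardown pairing. A hypothetical caller sketch (the create and put calls are from this listing; the surrounding flow is illustrative only):

	struct xe_exec_queue *q;

	q = xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags,
				 extensions);
	if (IS_ERR(q))
		return PTR_ERR(q);

	/* ...submit jobs against q... */

	xe_exec_queue_put(q);	/* the last put ends in __xe_exec_queue_free() */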
in xe_exec_queue_create_bind():
 347  	struct xe_exec_queue *q;
 362  		q = xe_exec_queue_create(xe, migrate_vm,
 366  		q = xe_exec_queue_create_class(xe, gt, migrate_vm,
 372  	if (!IS_ERR(q)) {
 373  		int err = drm_syncobj_create(&q->ufence_syncobj,
 377  			xe_exec_queue_put(q);
 382  	return q;

in xe_exec_queue_destroy():
 388  	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
 392  	xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);
 394  	if (q->ufence_syncobj)
 395  		drm_syncobj_put(q->ufence_syncobj);
 397  	if (xe_exec_queue_uses_pxp(q))
 398  		xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
 400  	xe_exec_queue_last_fence_put_unlocked(q);
 402  		xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, i);
 404  	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
 405  		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
 410  	q->ops->destroy(q);

 413  void xe_exec_queue_fini(struct xe_exec_queue *q)
 419  	xe_exec_queue_update_run_ticks(q);
 420  	if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
 421  		wake_up_var(&q->xef->exec_queue.pending_removal);
 423  	__xe_exec_queue_fini(q);
 424  	__xe_exec_queue_free(q);
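Lines 420-421 are the wake half of a wait_var_event() pairing: fini drops the per-file pending_removal count and wakes any waiter once it reaches zero. A sketch of the assumed waiter side (the actual wait site is outside this listing):

	/* Assumed waiter: block until every queue pending removal for this
	 * file has passed through xe_exec_queue_fini(). */
	wait_var_event(&xef->exec_queue.pending_removal,
		       !atomic_read(&xef->exec_queue.pending_removal));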
 427  void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance)
 429  	switch (q->class) {
 431  		snprintf(q->name, sizeof(q->name), "rcs%d", instance);
 434  		snprintf(q->name, sizeof(q->name), "vcs%d", instance);
 437  		snprintf(q->name, sizeof(q->name), "vecs%d", instance);
 440  		snprintf(q->name, sizeof(q->name), "bcs%d", instance);
 443  		snprintf(q->name, sizeof(q->name), "ccs%d", instance);
 446  		snprintf(q->name, sizeof(q->name), "gsccs%d", instance);
 449  		XE_WARN_ON(q->class);

in xe_exec_queue_lookup():
 455  	struct xe_exec_queue *q;
 458  	q = xa_load(&xef->exec_queue.xa, id);
 459  	if (q)
 460  		xe_exec_queue_get(q);
 463  	return q;
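xe_exec_queue_lookup() hands back a new reference (note the xe_exec_queue_get() at line 460), so every successful lookup must be balanced by a put. The get-property ioctl below (lines 846-859) follows exactly this shape; a minimal sketch (errno value assumed):

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	/* ...read or modify q... */

	xe_exec_queue_put(q);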
 473  static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
 482  	q->sched_props.priority = value;

 538  static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
 543  	xe_exec_queue_get_prop_minmax(q->hwe->eclass,
 550  	q->sched_props.timeslice_us = value;

 555  exec_queue_set_pxp_type(struct xe_device *xe, struct xe_exec_queue *q, u64 value)
 567  	return xe_pxp_exec_queue_set_type(xe->pxp, q, DRM_XE_PXP_TYPE_HWDRM);

(file-scope declaration, continuation)
 571  			       struct xe_exec_queue *q,

in exec_queue_user_ext_set_property():
 581  					    struct xe_exec_queue *q,
 605  	return exec_queue_set_property_funcs[idx](xe, q, ext.value);

(file-scope declaration, continuation)
 609  			     struct xe_exec_queue *q,

 617  static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
 639  	err = exec_queue_user_extension_funcs[idx](xe, q, extensions);
 644  	return exec_queue_user_extensions(xe, q, ext.next_extension,

in xe_exec_queue_create_ioctl():
 713  	struct xe_exec_queue *q = NULL;
 756  		if (q)
 761  			q = new;
 764  				      &q->multi_gt_link);
 793  	q = xe_exec_queue_create(xe, vm, logical_mask,
 798  	if (IS_ERR(q))
 799  		return PTR_ERR(q);
 802  	q->lr.context = dma_fence_context_alloc(1);
 804  		err = xe_vm_add_compute_exec_queue(vm, q);
 809  	if (q->vm && q->hwe->hw_engine_group) {
 810  		err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q);
 816  	q->xef = xe_file_get(xef);
 819  	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
 828  	xe_exec_queue_kill(q);
 830  	xe_exec_queue_put(q);
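For context, the ioctl above is reached from userspace via the xe uAPI. A hedged userspace sketch, assuming the upstream xe_drm.h layout (field and constant names recalled from the uAPI header, so verify against your kernel):

	struct drm_xe_engine_class_instance inst = {
		.engine_class = DRM_XE_ENGINE_CLASS_RENDER,
		.engine_instance = 0,
		.gt_id = 0,
	};
	struct drm_xe_exec_queue_create create = {
		.width = 1,			/* one LRC; cf. struct_size() above */
		.num_placements = 1,
		.vm_id = vm_id,
		.instances = (uintptr_t)&inst,	/* user pointer to placements */
	};

	if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create))
		return -errno;
	/* create.exec_queue_id now names the queue in subsequent ioctls */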
in xe_exec_queue_get_property_ioctl():
 840  	struct xe_exec_queue *q;
 846  	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
 847  	if (XE_IOCTL_DBG(xe, !q))
 852  		args->value = q->ops->reset_status(q);
 859  	xe_exec_queue_put(q);

 866   * @q: The exec_queue.
 874  struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q)
 876  	return q->lrc[0];

 881   * @q: The exec_queue
 885  bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
 887  	return q->vm && xe_vm_in_lr_mode(q->vm) &&
 888  	       !(q->flags & EXEC_QUEUE_FLAG_VM);

 893   * @q: The exec_queue
 905  bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
 907  	if (xe_exec_queue_is_parallel(q)) {
 910  		for (i = 0; i < q->width; ++i) {
 911  			if (xe_lrc_seqno(q->lrc[i]) !=
 912  			    q->lrc[i]->fence_ctx.next_seqno - 1)
 919  	return xe_lrc_seqno(q->lrc[0]) ==
 920  	       q->lrc[0]->fence_ctx.next_seqno - 1;
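The idle test reads straight off the seqno bookkeeping: fence_ctx.next_seqno is what the next job will use, so the most recently emitted job carries next_seqno - 1, and an LRC is idle once the hardware has written that seqno back. Reduced to one helper (a restatement of lines 911-920, not new logic):

	static bool lrc_is_idle(struct xe_lrc *lrc)
	{
		/* Last emitted seqno == last signaled seqno: nothing in flight. */
		return xe_lrc_seqno(lrc) == lrc->fence_ctx.next_seqno - 1;
	}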
 926   * @q: The exec queue
 931  void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q)
 933  	struct xe_device *xe = gt_to_xe(q->gt);
 942  	if (!q->xef)
 956  	lrc = q->lrc[0];
 958  	q->xef->run_ticks[q->class] += (new_ts - old_ts) * q->width;

 965   * @q: The exec queue
 972  void xe_exec_queue_kill(struct xe_exec_queue *q)
 974  	struct xe_exec_queue *eq = q, *next;
 978  		q->ops->kill(eq);
 979  		xe_vm_remove_compute_exec_queue(q->vm, eq);
 982  	q->ops->kill(q);
 983  	xe_vm_remove_compute_exec_queue(q->vm, q);

in xe_exec_queue_destroy_ioctl():
 992  	struct xe_exec_queue *q;
 999  	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
1000  	if (q)
1004  	if (XE_IOCTL_DBG(xe, !q))
1007  	if (q->vm && q->hwe->hw_engine_group)
1008  		xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
1010  	xe_exec_queue_kill(q);
1012  	trace_xe_exec_queue_close(q);
1013  	xe_exec_queue_put(q);

1018  static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
1021  	if (q->flags & EXEC_QUEUE_FLAG_MIGRATE) {
1022  		xe_migrate_job_lock_assert(q);
1023  	} else if (q->flags & EXEC_QUEUE_FLAG_VM) {
1027  		lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem);

1033   * @q: The exec queue
1036  void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm)
1038  	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1040  	xe_exec_queue_last_fence_put_unlocked(q);

1045   * @q: The exec queue
1049  void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q)
1051  	if (q->last_fence) {
1052  		dma_fence_put(q->last_fence);
1053  		q->last_fence = NULL;

1059   * @q: The exec queue
1066  struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q,
1071  	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1073  	if (q->last_fence &&
1074  	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
1075  		xe_exec_queue_last_fence_put(q, vm);
1077  	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();
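Together the last-fence helpers keep a one-slot ordering chain per queue: get takes a reference (dropping an already-signaled fence and falling back to the stub fence), the caller waits, then records the fence of the job it submits. A hypothetical user, under the locking that xe_exec_queue_last_fence_lockdep_assert() checks (flow illustrative; the helpers are from this listing):

	struct dma_fence *fence;

	fence = xe_exec_queue_last_fence_get(q, vm);	/* returns a reference */
	dma_fence_wait(fence, false);			/* order behind prior work */
	dma_fence_put(fence);

	/* ...submit a job that produces new_fence... */
	xe_exec_queue_last_fence_set(q, vm, new_fence);	/* takes its own ref */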
1084   * @q: The exec queue
1093  struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q,
1098  	lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem);
1100  	if (q->last_fence &&
1101  	    test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags))
1102  		xe_exec_queue_last_fence_put_unlocked(q);
1104  	fence = q->last_fence ? q->last_fence : dma_fence_get_stub();

1111   * @q: The exec queue
1118  void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
1121  	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1124  	xe_exec_queue_last_fence_put(q, vm);
1125  	q->last_fence = dma_fence_get(fence);

1130   * @q: The exec queue
1134  void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
1138  	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1142  		xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, type);

1148   * @q: The exec queue
1153  void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
1156  	xe_assert(q->vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
1159  	dma_fence_put(q->tlb_inval[type].last_fence);
1160  	q->tlb_inval[type].last_fence = NULL;

1165   * @q: The exec queue
1173  struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
1179  	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1182  	xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
1185  	if (q->tlb_inval[type].last_fence &&
1187  	    &q->tlb_inval[type].last_fence->flags))
1188  		xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
1190  	fence = q->tlb_inval[type].last_fence ?: dma_fence_get_stub();

1197   * @q: The exec queue
1206  void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
1211  	xe_exec_queue_last_fence_lockdep_assert(q, vm);
1214  	xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
1218  	xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
1219  	q->tlb_inval[type].last_fence = dma_fence_get(fence);

1225   * @q: the &xe_exec_queue struct instance containing target LRCs
1230  int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
1235  	for (i = 0; i < q->width; ++i) {
1239  		lrc = READ_ONCE(q->lrc[i]);
1243  		xe_lrc_update_memirq_regs_with_address(lrc, q->hwe, scratch);
1245  		err = xe_lrc_setup_wa_bb_with_scratch(lrc, q->hwe, scratch);