Lines Matching full:q (references to the exec-queue pointer q in the Xe GuC submission backend)

51 exec_queue_to_guc(struct xe_exec_queue *q)  in exec_queue_to_guc()  argument
53 return &q->gt->uc.guc; in exec_queue_to_guc()
76 static bool exec_queue_registered(struct xe_exec_queue *q) in exec_queue_registered() argument
78 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED; in exec_queue_registered()
81 static void set_exec_queue_registered(struct xe_exec_queue *q) in set_exec_queue_registered() argument
83 atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in set_exec_queue_registered()
86 static void clear_exec_queue_registered(struct xe_exec_queue *q) in clear_exec_queue_registered() argument
88 atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state); in clear_exec_queue_registered()
91 static bool exec_queue_enabled(struct xe_exec_queue *q) in exec_queue_enabled() argument
93 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED; in exec_queue_enabled()
96 static void set_exec_queue_enabled(struct xe_exec_queue *q) in set_exec_queue_enabled() argument
98 atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in set_exec_queue_enabled()
101 static void clear_exec_queue_enabled(struct xe_exec_queue *q) in clear_exec_queue_enabled() argument
103 atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state); in clear_exec_queue_enabled()
106 static bool exec_queue_pending_enable(struct xe_exec_queue *q) in exec_queue_pending_enable() argument
108 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE; in exec_queue_pending_enable()
111 static void set_exec_queue_pending_enable(struct xe_exec_queue *q) in set_exec_queue_pending_enable() argument
113 atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in set_exec_queue_pending_enable()
116 static void clear_exec_queue_pending_enable(struct xe_exec_queue *q) in clear_exec_queue_pending_enable() argument
118 atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state); in clear_exec_queue_pending_enable()
121 static bool exec_queue_pending_disable(struct xe_exec_queue *q) in exec_queue_pending_disable() argument
123 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE; in exec_queue_pending_disable()
126 static void set_exec_queue_pending_disable(struct xe_exec_queue *q) in set_exec_queue_pending_disable() argument
128 atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in set_exec_queue_pending_disable()
131 static void clear_exec_queue_pending_disable(struct xe_exec_queue *q) in clear_exec_queue_pending_disable() argument
133 atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state); in clear_exec_queue_pending_disable()
136 static bool exec_queue_destroyed(struct xe_exec_queue *q) in exec_queue_destroyed() argument
138 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED; in exec_queue_destroyed()
141 static void set_exec_queue_destroyed(struct xe_exec_queue *q) in set_exec_queue_destroyed() argument
143 atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state); in set_exec_queue_destroyed()
146 static void clear_exec_queue_destroyed(struct xe_exec_queue *q) in clear_exec_queue_destroyed() argument
148 atomic_and(~EXEC_QUEUE_STATE_DESTROYED, &q->guc->state); in clear_exec_queue_destroyed()
151 static bool exec_queue_banned(struct xe_exec_queue *q) in exec_queue_banned() argument
153 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED; in exec_queue_banned()
156 static void set_exec_queue_banned(struct xe_exec_queue *q) in set_exec_queue_banned() argument
158 atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state); in set_exec_queue_banned()
161 static bool exec_queue_suspended(struct xe_exec_queue *q) in exec_queue_suspended() argument
163 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED; in exec_queue_suspended()
166 static void set_exec_queue_suspended(struct xe_exec_queue *q) in set_exec_queue_suspended() argument
168 atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state); in set_exec_queue_suspended()
171 static void clear_exec_queue_suspended(struct xe_exec_queue *q) in clear_exec_queue_suspended() argument
173 atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state); in clear_exec_queue_suspended()
176 static bool exec_queue_reset(struct xe_exec_queue *q) in exec_queue_reset() argument
178 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET; in exec_queue_reset()
181 static void set_exec_queue_reset(struct xe_exec_queue *q) in set_exec_queue_reset() argument
183 atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state); in set_exec_queue_reset()
186 static bool exec_queue_killed(struct xe_exec_queue *q) in exec_queue_killed() argument
188 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED; in exec_queue_killed()
191 static void set_exec_queue_killed(struct xe_exec_queue *q) in set_exec_queue_killed() argument
193 atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state); in set_exec_queue_killed()
196 static bool exec_queue_wedged(struct xe_exec_queue *q) in exec_queue_wedged() argument
198 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED; in exec_queue_wedged()
201 static void set_exec_queue_wedged(struct xe_exec_queue *q) in set_exec_queue_wedged() argument
203 atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state); in set_exec_queue_wedged()
206 static bool exec_queue_check_timeout(struct xe_exec_queue *q) in exec_queue_check_timeout() argument
208 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_CHECK_TIMEOUT; in exec_queue_check_timeout()
211 static void set_exec_queue_check_timeout(struct xe_exec_queue *q) in set_exec_queue_check_timeout() argument
213 atomic_or(EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state); in set_exec_queue_check_timeout()
216 static void clear_exec_queue_check_timeout(struct xe_exec_queue *q) in clear_exec_queue_check_timeout() argument
218 atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state); in clear_exec_queue_check_timeout()
221 static bool exec_queue_extra_ref(struct xe_exec_queue *q) in exec_queue_extra_ref() argument
223 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXTRA_REF; in exec_queue_extra_ref()
226 static void set_exec_queue_extra_ref(struct xe_exec_queue *q) in set_exec_queue_extra_ref() argument
228 atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state); in set_exec_queue_extra_ref()
231 static void clear_exec_queue_extra_ref(struct xe_exec_queue *q) in clear_exec_queue_extra_ref() argument
233 atomic_and(~EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state); in clear_exec_queue_extra_ref()
236 static bool exec_queue_pending_resume(struct xe_exec_queue *q) in exec_queue_pending_resume() argument
238 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_RESUME; in exec_queue_pending_resume()
241 static void set_exec_queue_pending_resume(struct xe_exec_queue *q) in set_exec_queue_pending_resume() argument
243 atomic_or(EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state); in set_exec_queue_pending_resume()
246 static void clear_exec_queue_pending_resume(struct xe_exec_queue *q) in clear_exec_queue_pending_resume() argument
248 atomic_and(~EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state); in clear_exec_queue_pending_resume()
251 static bool exec_queue_pending_tdr_exit(struct xe_exec_queue *q) in exec_queue_pending_tdr_exit() argument
253 return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_TDR_EXIT; in exec_queue_pending_tdr_exit()
256 static void set_exec_queue_pending_tdr_exit(struct xe_exec_queue *q) in set_exec_queue_pending_tdr_exit() argument
258 atomic_or(EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state); in set_exec_queue_pending_tdr_exit()
261 static void clear_exec_queue_pending_tdr_exit(struct xe_exec_queue *q) in clear_exec_queue_pending_tdr_exit() argument
263 atomic_and(~EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state); in clear_exec_queue_pending_tdr_exit()
266 static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q) in exec_queue_killed_or_banned_or_wedged() argument
268 return (atomic_read(&q->guc->state) & in exec_queue_killed_or_banned_or_wedged()
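All of the helpers above follow one pattern: a queue's submission state lives in the single atomic bitmask q->guc->state, read with atomic_read() plus a flag mask, set with atomic_or() and cleared with atomic_and() of the complement. A minimal stand-alone sketch of the same pattern using C11 atomics; the flag values and struct layout here are illustrative, not the driver's:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative flag bits; the driver defines its own EXEC_QUEUE_STATE_* set. */
	#define STATE_REGISTERED	(1 << 0)
	#define STATE_ENABLED		(1 << 1)
	#define STATE_BANNED		(1 << 2)

	struct queue_state {
		atomic_int state;	/* stands in for q->guc->state */
	};

	static bool queue_registered(struct queue_state *q)
	{
		return atomic_load(&q->state) & STATE_REGISTERED;
	}

	static void set_queue_registered(struct queue_state *q)
	{
		atomic_fetch_or(&q->state, STATE_REGISTERED);
	}

	static void clear_queue_registered(struct queue_state *q)
	{
		atomic_fetch_and(&q->state, ~STATE_REGISTERED);
	}

	int main(void)
	{
		struct queue_state q;

		atomic_init(&q.state, 0);
		set_queue_registered(&q);
		printf("registered: %d\n", queue_registered(&q));	/* 1 */
		clear_queue_registered(&q);
		printf("registered: %d\n", queue_registered(&q));	/* 0 */
		return 0;
	}

Composite checks such as exec_queue_killed_or_banned_or_wedged() simply OR several of these masks into one atomic_read().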
294 struct xe_exec_queue *q; in guc_submit_wedged_fini() local
298 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in guc_submit_wedged_fini()
299 if (exec_queue_wedged(q)) { in guc_submit_wedged_fini()
301 xe_exec_queue_put(q); in guc_submit_wedged_fini()
428 static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count) in __release_guc_id() argument
435 xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i); in __release_guc_id()
438 q->guc->id, q->width); in __release_guc_id()
444 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in alloc_guc_id() argument
459 q->width); in alloc_guc_id()
463 q->guc->id = ret; in alloc_guc_id()
465 for (i = 0; i < q->width; ++i) { in alloc_guc_id()
467 q->guc->id + i, q, GFP_NOWAIT)); in alloc_guc_id()
475 __release_guc_id(guc, q, i); in alloc_guc_id()
480 static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q) in release_guc_id() argument
483 __release_guc_id(guc, q, q->width); in release_guc_id()
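alloc_guc_id() and __release_guc_id() show the ID scheme: a queue of width N reserves N consecutive GuC context IDs and stores the same xe_exec_queue pointer at every one of those indices in guc->submission_state.exec_queue_lookup, so any ID from the range later seen in a G2H message (see g2h_exec_queue_lookup() further down) resolves back to the owning queue. A self-contained sketch of that idea, with a plain array standing in for the xarray and hypothetical helper names; the linear scan is only a stand-in for the driver's allocator:

	#include <stdio.h>

	#define MAX_GUC_IDS 64

	struct exec_queue {
		int base_id;	/* stands in for q->guc->id */
		int width;	/* number of consecutive ids reserved */
	};

	/* Stand-in for guc->submission_state.exec_queue_lookup. */
	static struct exec_queue *lookup[MAX_GUC_IDS];

	/* Reserve 'width' consecutive free ids and map each one back to q. */
	static int alloc_id_range(struct exec_queue *q, int width)
	{
		for (int base = 0; base + width <= MAX_GUC_IDS; base++) {
			int i;

			for (i = 0; i < width && !lookup[base + i]; i++)
				;
			if (i != width)
				continue;	/* range not fully free, try the next base */

			for (i = 0; i < width; i++)
				lookup[base + i] = q;
			q->base_id = base;
			q->width = width;
			return 0;
		}
		return -1;	/* no contiguous range available */
	}

	static void release_id_range(struct exec_queue *q)
	{
		for (int i = 0; i < q->width; i++)
			lookup[q->base_id + i] = NULL;
	}

	int main(void)
	{
		struct exec_queue q = { 0 };

		alloc_id_range(&q, 4);
		/* Any id inside the range maps to the same queue. */
		printf("id %d -> queue with base %d\n", q.base_id + 2,
		       lookup[q.base_id + 2]->base_id);
		release_id_range(&q);
		return 0;
	}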
536 static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q) in init_policies() argument
539 enum xe_exec_queue_priority prio = q->sched_props.priority; in init_policies()
540 u32 timeslice_us = q->sched_props.timeslice_us; in init_policies()
542 u32 preempt_timeout_us = q->sched_props.preempt_timeout_us; in init_policies()
544 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in init_policies()
546 if (q->flags & EXEC_QUEUE_FLAG_LOW_LATENCY) in init_policies()
549 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in init_policies()
560 static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q) in set_min_preemption_timeout() argument
564 __guc_exec_queue_policy_start_klv(&policy, q->guc->id); in set_min_preemption_timeout()
579 struct xe_exec_queue *q, in __register_mlrc_exec_queue() argument
587 xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_parallel(q)); in __register_mlrc_exec_queue()
599 action[len++] = q->width; in __register_mlrc_exec_queue()
603 for (i = 1; i < q->width; ++i) { in __register_mlrc_exec_queue()
604 struct xe_lrc *lrc = q->lrc[i]; in __register_mlrc_exec_queue()
615 xe_gt_assert(guc_to_gt(guc), q->width == in __register_mlrc_exec_queue()
654 static void register_exec_queue(struct xe_exec_queue *q, int ctx_type) in register_exec_queue() argument
656 struct xe_guc *guc = exec_queue_to_guc(q); in register_exec_queue()
658 struct xe_lrc *lrc = q->lrc[0]; in register_exec_queue()
661 xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q)); in register_exec_queue()
665 info.context_idx = q->guc->id; in register_exec_queue()
666 info.engine_class = xe_engine_class_to_guc_class(q->class); in register_exec_queue()
667 info.engine_submit_mask = q->logical_mask; in register_exec_queue()
673 if (xe_exec_queue_is_parallel(q)) { in register_exec_queue()
687 q->guc->wqi_head = 0; in register_exec_queue()
688 q->guc->wqi_tail = 0; in register_exec_queue()
698 if (xe_exec_queue_is_lr(q)) in register_exec_queue()
699 xe_exec_queue_get(q); in register_exec_queue()
701 set_exec_queue_registered(q); in register_exec_queue()
702 trace_xe_exec_queue_register(q); in register_exec_queue()
703 if (xe_exec_queue_is_parallel(q)) in register_exec_queue()
704 __register_mlrc_exec_queue(guc, q, &info); in register_exec_queue()
707 init_policies(guc, q); in register_exec_queue()
710 static u32 wq_space_until_wrap(struct xe_exec_queue *q) in wq_space_until_wrap() argument
712 return (WQ_SIZE - q->guc->wqi_tail); in wq_space_until_wrap()
720 static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size) in wq_wait_for_space() argument
722 struct xe_guc *guc = exec_queue_to_guc(q); in wq_wait_for_space()
724 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_wait_for_space()
728 CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE) in wq_wait_for_space()
731 q->guc->wqi_head = parallel_read(xe, map, wq_desc.head); in wq_wait_for_space()
734 xe_gt_reset_async(q->gt); in wq_wait_for_space()
748 static int wq_noop_append(struct xe_exec_queue *q) in wq_noop_append() argument
750 struct xe_guc *guc = exec_queue_to_guc(q); in wq_noop_append()
752 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_noop_append()
753 u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1; in wq_noop_append()
755 if (wq_wait_for_space(q, wq_space_until_wrap(q))) in wq_noop_append()
760 parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)], in wq_noop_append()
763 q->guc->wqi_tail = 0; in wq_noop_append()
768 static void wq_item_append(struct xe_exec_queue *q) in wq_item_append() argument
770 struct xe_guc *guc = exec_queue_to_guc(q); in wq_item_append()
772 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in wq_item_append()
775 u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32); in wq_item_append()
779 if (wqi_size > wq_space_until_wrap(q)) { in wq_item_append()
780 if (wq_noop_append(q)) in wq_item_append()
783 if (wq_wait_for_space(q, wqi_size)) in wq_item_append()
788 wqi[i++] = xe_lrc_descriptor(q->lrc[0]); in wq_item_append()
789 wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) | in wq_item_append()
790 FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64)); in wq_item_append()
792 for (j = 1; j < q->width; ++j) { in wq_item_append()
793 struct xe_lrc *lrc = q->lrc[j]; in wq_item_append()
801 wq[q->guc->wqi_tail / sizeof(u32)])); in wq_item_append()
803 q->guc->wqi_tail += wqi_size; in wq_item_append()
804 xe_gt_assert(guc_to_gt(guc), q->guc->wqi_tail <= WQ_SIZE); in wq_item_append()
808 map = xe_lrc_parallel_map(q->lrc[0]); in wq_item_append()
809 parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail); in wq_item_append()
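wq_space_until_wrap(), wq_wait_for_space(), wq_noop_append() and wq_item_append() implement the parallel-submission work queue: wqi_head and wqi_tail are byte offsets into a fixed WQ_SIZE ring shared with the GuC, free space is computed CIRC_SPACE-style, and an item that would cross the end of the ring is preceded by a NOOP entry so it always starts at offset 0 after the wrap. A stand-alone sketch of just those head/tail/pad mechanics; sizes, the NOOP encoding and the real multi-word item layout are not modelled:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Illustrative sizes; the driver's WQ_SIZE and work-item layout differ. */
	#define WQ_SIZE		64		/* bytes, power of two */
	#define WQ_NOOP		0xdeadbeefu	/* stand-in for a NOOP work-item header */

	struct wq {
		uint32_t buf[WQ_SIZE / sizeof(uint32_t)];
		uint32_t head;	/* consumer offset in bytes, advanced by the GuC */
		uint32_t tail;	/* producer offset in bytes (wqi_tail in the driver) */
	};

	/* Free space, CIRC_SPACE-style: one byte stays free so full never looks empty. */
	static uint32_t wq_space(const struct wq *wq)
	{
		return (wq->head - wq->tail - 1) & (WQ_SIZE - 1);
	}

	static uint32_t wq_space_until_wrap(const struct wq *wq)
	{
		return WQ_SIZE - wq->tail;
	}

	/* Pad to the end of the ring so the next item starts at offset 0; the driver
	 * first waits for the consumer if the padding itself does not fit. */
	static int wq_noop_append(struct wq *wq)
	{
		if (wq_space(wq) < wq_space_until_wrap(wq))
			return -1;	/* caller would wait for the GuC here */
		wq->buf[wq->tail / sizeof(uint32_t)] = WQ_NOOP;
		wq->tail = 0;
		return 0;
	}

	static int wq_item_append(struct wq *wq, const uint32_t *item, uint32_t size)
	{
		if (size > wq_space_until_wrap(wq) && wq_noop_append(wq))
			return -1;
		if (size > wq_space(wq))
			return -1;	/* caller would wait for the GuC here */

		memcpy(&wq->buf[wq->tail / sizeof(uint32_t)], item, size);
		wq->tail += size;
		return 0;
	}

	int main(void)
	{
		struct wq wq = { .head = 0, .tail = 0 };
		uint32_t item[6] = { 0 };	/* one 24-byte work item */

		wq_item_append(&wq, item, sizeof(item));	/* tail = 24 */
		wq_item_append(&wq, item, sizeof(item));	/* tail = 48 */
		wq.head = 48;	/* pretend the GuC consumed both items */
		wq_item_append(&wq, item, sizeof(item));	/* pads, wraps, tail = 24 */
		printf("tail=%u head=%u\n", wq.tail, wq.head);
		return 0;
	}

The real wq_item_append() writes a multi-word item (LRC descriptor, guc_id, ring tail, one tail per LRC of a parallel queue) and then publishes the new tail to the shared descriptor through parallel_write().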
813 static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job) in submit_exec_queue() argument
815 struct xe_guc *guc = exec_queue_to_guc(q); in submit_exec_queue()
816 struct xe_lrc *lrc = q->lrc[0]; in submit_exec_queue()
823 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in submit_exec_queue()
826 if (xe_exec_queue_is_parallel(q)) in submit_exec_queue()
827 wq_item_append(q); in submit_exec_queue()
833 if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q)) in submit_exec_queue()
836 if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) { in submit_exec_queue()
838 action[len++] = q->guc->id; in submit_exec_queue()
842 if (xe_exec_queue_is_parallel(q)) in submit_exec_queue()
845 q->guc->resume_time = RESUME_PENDING; in submit_exec_queue()
846 set_exec_queue_pending_enable(q); in submit_exec_queue()
847 set_exec_queue_enabled(q); in submit_exec_queue()
848 trace_xe_exec_queue_scheduling_enable(q); in submit_exec_queue()
851 action[len++] = q->guc->id; in submit_exec_queue()
852 trace_xe_exec_queue_submit(q); in submit_exec_queue()
860 action[len++] = q->guc->id; in submit_exec_queue()
861 trace_xe_exec_queue_submit(q); in submit_exec_queue()
871 struct xe_exec_queue *q = job->q; in guc_exec_queue_run_job() local
872 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_run_job()
873 bool lr = xe_exec_queue_is_lr(q), killed_or_banned_or_wedged = in guc_exec_queue_run_job()
874 exec_queue_killed_or_banned_or_wedged(q); in guc_exec_queue_run_job()
876 xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) || in guc_exec_queue_run_job()
877 exec_queue_banned(q) || exec_queue_suspended(q)); in guc_exec_queue_run_job()
882 if (!exec_queue_registered(q)) in guc_exec_queue_run_job()
883 register_exec_queue(q, GUC_CONTEXT_NORMAL); in guc_exec_queue_run_job()
885 q->ring_ops->emit_job(job); in guc_exec_queue_run_job()
886 submit_exec_queue(q, job); in guc_exec_queue_run_job()
915 #define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable) \ argument
918 q->guc->id, \
923 struct xe_exec_queue *q) in disable_scheduling_deregister() argument
925 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); in disable_scheduling_deregister()
928 set_min_preemption_timeout(guc, q); in disable_scheduling_deregister()
931 (!exec_queue_pending_enable(q) && in disable_scheduling_deregister()
932 !exec_queue_pending_disable(q)) || in disable_scheduling_deregister()
937 struct xe_gpu_scheduler *sched = &q->guc->sched; in disable_scheduling_deregister()
939 xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n"); in disable_scheduling_deregister()
941 xe_gt_reset_async(q->gt); in disable_scheduling_deregister()
942 if (!xe_exec_queue_is_lr(q)) in disable_scheduling_deregister()
947 clear_exec_queue_enabled(q); in disable_scheduling_deregister()
948 set_exec_queue_pending_disable(q); in disable_scheduling_deregister()
949 set_exec_queue_destroyed(q); in disable_scheduling_deregister()
950 trace_xe_exec_queue_scheduling_disable(q); in disable_scheduling_deregister()
961 static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q) in xe_guc_exec_queue_trigger_cleanup() argument
963 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_trigger_cleanup()
969 if (xe_exec_queue_is_lr(q)) in xe_guc_exec_queue_trigger_cleanup()
970 queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr); in xe_guc_exec_queue_trigger_cleanup()
972 xe_sched_tdr_queue_imm(&q->guc->sched); in xe_guc_exec_queue_trigger_cleanup()
985 struct xe_exec_queue *q; in xe_guc_submit_wedge() local
1007 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_wedge()
1008 if (xe_exec_queue_get_unless_zero(q)) in xe_guc_submit_wedge()
1009 set_exec_queue_wedged(q); in xe_guc_submit_wedge()
1032 struct xe_exec_queue *q = ge->q; in xe_guc_exec_queue_lr_cleanup() local
1033 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup()
1038 xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q)); in xe_guc_exec_queue_lr_cleanup()
1043 trace_xe_exec_queue_lr_cleanup(q); in xe_guc_exec_queue_lr_cleanup()
1045 if (!exec_queue_killed(q)) in xe_guc_exec_queue_lr_cleanup()
1046 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); in xe_guc_exec_queue_lr_cleanup()
1062 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) { in xe_guc_exec_queue_lr_cleanup()
1063 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_exec_queue_lr_cleanup()
1066 set_exec_queue_banned(q); in xe_guc_exec_queue_lr_cleanup()
1067 disable_scheduling_deregister(guc, q); in xe_guc_exec_queue_lr_cleanup()
1074 !exec_queue_pending_disable(q) || in xe_guc_exec_queue_lr_cleanup()
1081 xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n", in xe_guc_exec_queue_lr_cleanup()
1082 q->guc->id); in xe_guc_exec_queue_lr_cleanup()
1083 xe_devcoredump(q, NULL, "Schedule disable failed to respond, guc_id=%d\n", in xe_guc_exec_queue_lr_cleanup()
1084 q->guc->id); in xe_guc_exec_queue_lr_cleanup()
1086 xe_gt_reset_async(q->gt); in xe_guc_exec_queue_lr_cleanup()
1091 if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0])) in xe_guc_exec_queue_lr_cleanup()
1092 xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id); in xe_guc_exec_queue_lr_cleanup()
1094 xe_hw_fence_irq_stop(q->fence_irq); in xe_guc_exec_queue_lr_cleanup()
1103 xe_hw_fence_irq_start(q->fence_irq); in xe_guc_exec_queue_lr_cleanup()
1108 static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job) in check_timeout() argument
1110 struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q)); in check_timeout()
1112 u32 timeout_ms = q->sched_props.job_timeout_ms; in check_timeout()
1119 q->guc->id); in check_timeout()
1124 ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0])); in check_timeout()
1125 ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]); in check_timeout()
1144 q->guc->id, running_time_ms, timeout_ms, diff); in check_timeout()
1149 static void enable_scheduling(struct xe_exec_queue *q) in enable_scheduling() argument
1151 MAKE_SCHED_CONTEXT_ACTION(q, ENABLE); in enable_scheduling()
1152 struct xe_guc *guc = exec_queue_to_guc(q); in enable_scheduling()
1155 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in enable_scheduling()
1156 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in enable_scheduling()
1157 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in enable_scheduling()
1158 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in enable_scheduling()
1160 set_exec_queue_pending_enable(q); in enable_scheduling()
1161 set_exec_queue_enabled(q); in enable_scheduling()
1162 trace_xe_exec_queue_scheduling_enable(q); in enable_scheduling()
1168 !exec_queue_pending_enable(q) || in enable_scheduling()
1173 set_exec_queue_banned(q); in enable_scheduling()
1174 xe_gt_reset_async(q->gt); in enable_scheduling()
1175 if (!xe_exec_queue_is_lr(q)) in enable_scheduling()
1176 xe_sched_tdr_queue_imm(&q->guc->sched); in enable_scheduling()
1180 static void disable_scheduling(struct xe_exec_queue *q, bool immediate) in disable_scheduling() argument
1182 MAKE_SCHED_CONTEXT_ACTION(q, DISABLE); in disable_scheduling()
1183 struct xe_guc *guc = exec_queue_to_guc(q); in disable_scheduling()
1185 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in disable_scheduling()
1186 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in disable_scheduling()
1187 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in disable_scheduling()
1190 set_min_preemption_timeout(guc, q); in disable_scheduling()
1191 clear_exec_queue_enabled(q); in disable_scheduling()
1192 set_exec_queue_pending_disable(q); in disable_scheduling()
1193 trace_xe_exec_queue_scheduling_disable(q); in disable_scheduling()
1199 static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) in __deregister_exec_queue() argument
1203 q->guc->id, in __deregister_exec_queue()
1206 xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q)); in __deregister_exec_queue()
1207 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in __deregister_exec_queue()
1208 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in __deregister_exec_queue()
1209 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in __deregister_exec_queue()
1211 set_exec_queue_destroyed(q); in __deregister_exec_queue()
1212 trace_xe_exec_queue_deregister(q); in __deregister_exec_queue()
1223 struct xe_exec_queue *q = job->q; in guc_exec_queue_timedout_job() local
1224 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_timedout_job()
1225 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_timedout_job()
1234 xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_lr(q)); in guc_exec_queue_timedout_job()
1250 skip_timeout_check = exec_queue_reset(q) || in guc_exec_queue_timedout_job()
1251 exec_queue_killed_or_banned_or_wedged(q) || in guc_exec_queue_timedout_job()
1252 exec_queue_destroyed(q); in guc_exec_queue_timedout_job()
1258 if (!exec_queue_killed(q) && !xe->devcoredump.captured && in guc_exec_queue_timedout_job()
1259 !xe_guc_capture_get_matching_and_lock(q)) { in guc_exec_queue_timedout_job()
1261 fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); in guc_exec_queue_timedout_job()
1263 xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n"); in guc_exec_queue_timedout_job()
1265 xe_engine_snapshot_capture_for_queue(q); in guc_exec_queue_timedout_job()
1267 xe_force_wake_put(gt_to_fw(q->gt), fw_ref); in guc_exec_queue_timedout_job()
1277 if (!exec_queue_killed(q)) in guc_exec_queue_timedout_job()
1278 wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); in guc_exec_queue_timedout_job()
1281 if (!wedged && exec_queue_registered(q)) { in guc_exec_queue_timedout_job()
1284 if (exec_queue_reset(q)) in guc_exec_queue_timedout_job()
1287 if (!exec_queue_destroyed(q)) { in guc_exec_queue_timedout_job()
1293 (!exec_queue_pending_enable(q) && in guc_exec_queue_timedout_job()
1294 !exec_queue_pending_disable(q)) || in guc_exec_queue_timedout_job()
1308 set_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1309 disable_scheduling(q, skip_timeout_check); in guc_exec_queue_timedout_job()
1322 !exec_queue_pending_disable(q) || in guc_exec_queue_timedout_job()
1332 q->guc->id); in guc_exec_queue_timedout_job()
1333 xe_devcoredump(q, job, in guc_exec_queue_timedout_job()
1335 q->guc->id, ret, xe_guc_read_stopped(guc)); in guc_exec_queue_timedout_job()
1336 set_exec_queue_extra_ref(q); in guc_exec_queue_timedout_job()
1337 xe_exec_queue_get(q); /* GT reset owns this */ in guc_exec_queue_timedout_job()
1338 set_exec_queue_banned(q); in guc_exec_queue_timedout_job()
1339 xe_gt_reset_async(q->gt); in guc_exec_queue_timedout_job()
1348 if (!wedged && !skip_timeout_check && !check_timeout(q, job) && in guc_exec_queue_timedout_job()
1349 !exec_queue_reset(q) && exec_queue_registered(q)) { in guc_exec_queue_timedout_job()
1350 clear_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1354 if (q->vm && q->vm->xef) { in guc_exec_queue_timedout_job()
1355 process_name = q->vm->xef->process_name; in guc_exec_queue_timedout_job()
1356 pid = q->vm->xef->pid; in guc_exec_queue_timedout_job()
1359 if (!exec_queue_killed(q)) in guc_exec_queue_timedout_job()
1363 q->guc->id, q->flags, process_name, pid); in guc_exec_queue_timedout_job()
1367 if (!exec_queue_killed(q)) in guc_exec_queue_timedout_job()
1368 xe_devcoredump(q, job, in guc_exec_queue_timedout_job()
1371 q->guc->id, q->flags); in guc_exec_queue_timedout_job()
1377 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL, in guc_exec_queue_timedout_job()
1379 xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q), in guc_exec_queue_timedout_job()
1381 if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL || in guc_exec_queue_timedout_job()
1382 (q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) { in guc_exec_queue_timedout_job()
1384 clear_exec_queue_check_timeout(q); in guc_exec_queue_timedout_job()
1385 xe_gt_reset_async(q->gt); in guc_exec_queue_timedout_job()
1391 set_exec_queue_banned(q); in guc_exec_queue_timedout_job()
1392 if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) { in guc_exec_queue_timedout_job()
1393 set_exec_queue_extra_ref(q); in guc_exec_queue_timedout_job()
1394 xe_exec_queue_get(q); in guc_exec_queue_timedout_job()
1395 __deregister_exec_queue(guc, q); in guc_exec_queue_timedout_job()
1399 xe_hw_fence_irq_stop(q->fence_irq); in guc_exec_queue_timedout_job()
1408 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_timedout_job()
1417 xe_hw_fence_irq_start(q->fence_irq); in guc_exec_queue_timedout_job()
1422 set_exec_queue_pending_tdr_exit(q); in guc_exec_queue_timedout_job()
1423 enable_scheduling(q); in guc_exec_queue_timedout_job()
1435 static void guc_exec_queue_fini(struct xe_exec_queue *q) in guc_exec_queue_fini() argument
1437 struct xe_guc_exec_queue *ge = q->guc; in guc_exec_queue_fini()
1438 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_fini()
1440 release_guc_id(guc, q); in guc_exec_queue_fini()
1455 struct xe_exec_queue *q = ge->q; in __guc_exec_queue_destroy_async() local
1456 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_destroy_async()
1459 trace_xe_exec_queue_destroy(q); in __guc_exec_queue_destroy_async()
1461 if (xe_exec_queue_is_lr(q)) in __guc_exec_queue_destroy_async()
1466 xe_exec_queue_fini(q); in __guc_exec_queue_destroy_async()
1471 static void guc_exec_queue_destroy_async(struct xe_exec_queue *q) in guc_exec_queue_destroy_async() argument
1473 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_destroy_async()
1476 INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async); in guc_exec_queue_destroy_async()
1479 if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q)) in guc_exec_queue_destroy_async()
1480 __guc_exec_queue_destroy_async(&q->guc->destroy_async); in guc_exec_queue_destroy_async()
1482 queue_work(xe->destroy_wq, &q->guc->destroy_async); in guc_exec_queue_destroy_async()
1485 static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q) in __guc_exec_queue_destroy() argument
1494 guc_exec_queue_destroy_async(q); in __guc_exec_queue_destroy()
1499 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_cleanup() local
1500 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_cleanup()
1502 xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT)); in __guc_exec_queue_process_msg_cleanup()
1503 trace_xe_exec_queue_cleanup_entity(q); in __guc_exec_queue_process_msg_cleanup()
1515 if (exec_queue_registered(q) && xe_uc_fw_is_running(&guc->fw)) in __guc_exec_queue_process_msg_cleanup()
1516 disable_scheduling_deregister(guc, q); in __guc_exec_queue_process_msg_cleanup()
1518 __guc_exec_queue_destroy(guc, q); in __guc_exec_queue_process_msg_cleanup()
1521 static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q) in guc_exec_queue_allowed_to_change_state() argument
1523 return !exec_queue_killed_or_banned_or_wedged(q) && exec_queue_registered(q); in guc_exec_queue_allowed_to_change_state()
1528 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_set_sched_props() local
1529 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_set_sched_props()
1531 if (guc_exec_queue_allowed_to_change_state(q)) in __guc_exec_queue_process_msg_set_sched_props()
1532 init_policies(guc, q); in __guc_exec_queue_process_msg_set_sched_props()
1536 static void __suspend_fence_signal(struct xe_exec_queue *q) in __suspend_fence_signal() argument
1538 struct xe_guc *guc = exec_queue_to_guc(q); in __suspend_fence_signal()
1541 if (!q->guc->suspend_pending) in __suspend_fence_signal()
1544 WRITE_ONCE(q->guc->suspend_pending, false); in __suspend_fence_signal()
1555 wake_up(&q->guc->suspend_wait); in __suspend_fence_signal()
1558 static void suspend_fence_signal(struct xe_exec_queue *q) in suspend_fence_signal() argument
1560 struct xe_guc *guc = exec_queue_to_guc(q); in suspend_fence_signal()
1562 xe_gt_assert(guc_to_gt(guc), exec_queue_suspended(q) || exec_queue_killed(q) || in suspend_fence_signal()
1564 xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending); in suspend_fence_signal()
1566 __suspend_fence_signal(q); in suspend_fence_signal()
1571 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_suspend() local
1572 struct xe_guc *guc = exec_queue_to_guc(q); in __guc_exec_queue_process_msg_suspend()
1574 if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) && in __guc_exec_queue_process_msg_suspend()
1575 exec_queue_enabled(q)) { in __guc_exec_queue_process_msg_suspend()
1577 ((q->guc->resume_time != RESUME_PENDING || in __guc_exec_queue_process_msg_suspend()
1578 xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q))); in __guc_exec_queue_process_msg_suspend()
1583 q->guc->resume_time); in __guc_exec_queue_process_msg_suspend()
1584 s64 wait_ms = q->vm->preempt.min_run_period_ms - in __guc_exec_queue_process_msg_suspend()
1587 if (wait_ms > 0 && q->guc->resume_time) in __guc_exec_queue_process_msg_suspend()
1590 set_exec_queue_suspended(q); in __guc_exec_queue_process_msg_suspend()
1591 disable_scheduling(q, false); in __guc_exec_queue_process_msg_suspend()
1593 } else if (q->guc->suspend_pending) { in __guc_exec_queue_process_msg_suspend()
1594 set_exec_queue_suspended(q); in __guc_exec_queue_process_msg_suspend()
1595 suspend_fence_signal(q); in __guc_exec_queue_process_msg_suspend()
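The suspend-message handler above also shows the minimum-run-period guarantee: if the queue resumed only recently (q->guc->resume_time), the scheduling disable is delayed so the VM's preempt.min_run_period_ms elapses before the queue is suspended. A small stand-alone sketch of just that arithmetic, with illustrative millisecond values:

	#include <stdint.h>
	#include <stdio.h>

	/* How long to delay the scheduling disable so the queue has run for at
	 * least min_run_period_ms since it last resumed; 0 means disable now. */
	static int64_t suspend_delay_ms(int64_t now_ms, int64_t resume_time_ms,
					int64_t min_run_period_ms)
	{
		int64_t wait_ms = min_run_period_ms - (now_ms - resume_time_ms);

		return (resume_time_ms && wait_ms > 0) ? wait_ms : 0;
	}

	int main(void)
	{
		/* Resumed 100 ms ago with a 500 ms minimum run period: wait 400 ms. */
		printf("%lld\n", (long long)suspend_delay_ms(1100, 1000, 500));
		/* Resumed 1000 ms ago: no extra wait. */
		printf("%lld\n", (long long)suspend_delay_ms(2000, 1000, 500));
		return 0;
	}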
1601 struct xe_exec_queue *q = msg->private_data; in __guc_exec_queue_process_msg_resume() local
1603 if (guc_exec_queue_allowed_to_change_state(q)) { in __guc_exec_queue_process_msg_resume()
1604 clear_exec_queue_suspended(q); in __guc_exec_queue_process_msg_resume()
1605 if (!exec_queue_enabled(q)) { in __guc_exec_queue_process_msg_resume()
1606 q->guc->resume_time = RESUME_PENDING; in __guc_exec_queue_process_msg_resume()
1607 set_exec_queue_pending_resume(q); in __guc_exec_queue_process_msg_resume()
1608 enable_scheduling(q); in __guc_exec_queue_process_msg_resume()
1611 clear_exec_queue_suspended(q); in __guc_exec_queue_process_msg_resume()
1659 static int guc_exec_queue_init(struct xe_exec_queue *q) in guc_exec_queue_init() argument
1662 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_init()
1673 q->guc = ge; in guc_exec_queue_init()
1674 ge->q = q; in guc_exec_queue_init()
1681 timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT : in guc_exec_queue_init()
1682 msecs_to_jiffies(q->sched_props.job_timeout_ms); in guc_exec_queue_init()
1686 q->name, gt_to_xe(q->gt)->drm.dev); in guc_exec_queue_init()
1695 if (xe_exec_queue_is_lr(q)) in guc_exec_queue_init()
1696 INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup); in guc_exec_queue_init()
1700 err = alloc_guc_id(guc, q); in guc_exec_queue_init()
1704 q->entity = &ge->entity; in guc_exec_queue_init()
1711 xe_exec_queue_assign_name(q, q->guc->id); in guc_exec_queue_init()
1713 trace_xe_exec_queue_create(q); in guc_exec_queue_init()
1728 static void guc_exec_queue_kill(struct xe_exec_queue *q) in guc_exec_queue_kill() argument
1730 trace_xe_exec_queue_kill(q); in guc_exec_queue_kill()
1731 set_exec_queue_killed(q); in guc_exec_queue_kill()
1732 __suspend_fence_signal(q); in guc_exec_queue_kill()
1733 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_kill()
1736 static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg, in guc_exec_queue_add_msg() argument
1739 xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q))); in guc_exec_queue_add_msg()
1743 msg->private_data = q; in guc_exec_queue_add_msg()
1747 xe_sched_add_msg_head(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1749 xe_sched_add_msg_locked(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1751 xe_sched_add_msg(&q->guc->sched, msg); in guc_exec_queue_add_msg()
1754 static void guc_exec_queue_try_add_msg_head(struct xe_exec_queue *q, in guc_exec_queue_try_add_msg_head() argument
1761 guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED | MSG_HEAD); in guc_exec_queue_try_add_msg_head()
1764 static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q, in guc_exec_queue_try_add_msg() argument
1771 guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED); in guc_exec_queue_try_add_msg()
1779 static void guc_exec_queue_destroy(struct xe_exec_queue *q) in guc_exec_queue_destroy() argument
1781 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP; in guc_exec_queue_destroy()
1783 if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q)) in guc_exec_queue_destroy()
1784 guc_exec_queue_add_msg(q, msg, CLEANUP); in guc_exec_queue_destroy()
1786 __guc_exec_queue_destroy(exec_queue_to_guc(q), q); in guc_exec_queue_destroy()
1789 static int guc_exec_queue_set_priority(struct xe_exec_queue *q, in guc_exec_queue_set_priority() argument
1794 if (q->sched_props.priority == priority || in guc_exec_queue_set_priority()
1795 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_priority()
1802 q->sched_props.priority = priority; in guc_exec_queue_set_priority()
1803 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_priority()
1808 static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us) in guc_exec_queue_set_timeslice() argument
1812 if (q->sched_props.timeslice_us == timeslice_us || in guc_exec_queue_set_timeslice()
1813 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_timeslice()
1820 q->sched_props.timeslice_us = timeslice_us; in guc_exec_queue_set_timeslice()
1821 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_timeslice()
1826 static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q, in guc_exec_queue_set_preempt_timeout() argument
1831 if (q->sched_props.preempt_timeout_us == preempt_timeout_us || in guc_exec_queue_set_preempt_timeout()
1832 exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_set_preempt_timeout()
1839 q->sched_props.preempt_timeout_us = preempt_timeout_us; in guc_exec_queue_set_preempt_timeout()
1840 guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS); in guc_exec_queue_set_preempt_timeout()
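guc_exec_queue_set_priority(), guc_exec_queue_set_timeslice() and guc_exec_queue_set_preempt_timeout() share one pattern: bail out if the value is unchanged or the queue is already killed/banned/wedged, cache the new value in q->sched_props, and queue a single SET_SCHED_PROPS message whose handler pushes the policies to the GuC via init_policies(). A compact stand-alone sketch of that pattern; the names and the printf stand-in for the queued message are hypothetical:

	#include <stdbool.h>
	#include <stdio.h>

	struct queue {
		unsigned int timeslice_us;
		unsigned int preempt_timeout_us;
		bool dead;	/* killed/banned/wedged in the driver */
	};

	/* Stand-in for queuing a SET_SCHED_PROPS message; the real handler
	 * (init_policies()) sends the cached properties to the GuC in one go. */
	static void queue_set_sched_props(struct queue *q)
	{
		printf("SET_SCHED_PROPS: timeslice=%uus preempt_timeout=%uus\n",
		       q->timeslice_us, q->preempt_timeout_us);
	}

	static int set_timeslice(struct queue *q, unsigned int timeslice_us)
	{
		if (q->timeslice_us == timeslice_us || q->dead)
			return 0;	/* nothing to do, or the queue is going away */

		q->timeslice_us = timeslice_us;
		queue_set_sched_props(q);
		return 0;
	}

	int main(void)
	{
		struct queue q = { .timeslice_us = 100, .preempt_timeout_us = 640000 };

		set_timeslice(&q, 100);	/* unchanged: no message queued */
		set_timeslice(&q, 500);	/* queues one SET_SCHED_PROPS */
		return 0;
	}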
1845 static int guc_exec_queue_suspend(struct xe_exec_queue *q) in guc_exec_queue_suspend() argument
1847 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_suspend()
1848 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND; in guc_exec_queue_suspend()
1850 if (exec_queue_killed_or_banned_or_wedged(q)) in guc_exec_queue_suspend()
1854 if (guc_exec_queue_try_add_msg(q, msg, SUSPEND)) in guc_exec_queue_suspend()
1855 q->guc->suspend_pending = true; in guc_exec_queue_suspend()
1861 static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q) in guc_exec_queue_suspend_wait() argument
1863 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_suspend_wait()
1873 (!READ_ONCE(q->guc->suspend_pending) || exec_queue_killed(q) || \ in guc_exec_queue_suspend_wait()
1882 ret = wait_event_interruptible_timeout(q->guc->suspend_wait, in guc_exec_queue_suspend_wait()
1891 q->guc->id); in guc_exec_queue_suspend_wait()
1904 static void guc_exec_queue_resume(struct xe_exec_queue *q) in guc_exec_queue_resume() argument
1906 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_resume()
1907 struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME; in guc_exec_queue_resume()
1908 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_resume()
1910 xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending); in guc_exec_queue_resume()
1913 guc_exec_queue_try_add_msg(q, msg, RESUME); in guc_exec_queue_resume()
1917 static bool guc_exec_queue_reset_status(struct xe_exec_queue *q) in guc_exec_queue_reset_status() argument
1919 return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q); in guc_exec_queue_reset_status()
1942 static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q) in guc_exec_queue_stop() argument
1944 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_stop()
1950 if (exec_queue_registered(q)) { in guc_exec_queue_stop()
1951 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) in guc_exec_queue_stop()
1952 xe_exec_queue_put(q); in guc_exec_queue_stop()
1953 else if (exec_queue_destroyed(q)) in guc_exec_queue_stop()
1954 __guc_exec_queue_destroy(guc, q); in guc_exec_queue_stop()
1956 if (q->guc->suspend_pending) { in guc_exec_queue_stop()
1957 set_exec_queue_suspended(q); in guc_exec_queue_stop()
1958 suspend_fence_signal(q); in guc_exec_queue_stop()
1963 &q->guc->state); in guc_exec_queue_stop()
1964 q->guc->resume_time = 0; in guc_exec_queue_stop()
1965 trace_xe_exec_queue_stop(q); in guc_exec_queue_stop()
1972 if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) { in guc_exec_queue_stop()
1983 } else if (xe_exec_queue_is_lr(q) && in guc_exec_queue_stop()
1984 !xe_lrc_ring_is_idle(q->lrc[0])) { in guc_exec_queue_stop()
1989 set_exec_queue_banned(q); in guc_exec_queue_stop()
1990 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_stop()
2027 struct xe_exec_queue *q; in xe_guc_submit_stop() local
2034 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_stop()
2036 if (q->guc->id != index) in xe_guc_submit_stop()
2039 guc_exec_queue_stop(guc, q); in xe_guc_submit_stop()
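xe_guc_submit_stop() is the first of several walkers (xe_guc_submit_pause(), _start(), _unpause_prepare(), _unpause(), _pause_abort() and xe_guc_contexts_hwsp_rebase()) that iterate guc->submission_state.exec_queue_lookup with xa_for_each(); the q->guc->id != index check they all carry skips the extra entries of a multi-width queue, so each queue is processed exactly once even though every ID in its reserved range maps to it. A tiny stand-alone illustration of that skip, with a plain array in place of the xarray and hypothetical names:

	#include <stdio.h>

	#define MAX_GUC_IDS 16

	struct exec_queue { int base_id; int width; const char *name; };

	int main(void)
	{
		/* Hypothetical table: ids 0-3 belong to one parallel queue, id 4 to another. */
		struct exec_queue wide = { .base_id = 0, .width = 4, .name = "parallel" };
		struct exec_queue narrow = { .base_id = 4, .width = 1, .name = "single" };
		struct exec_queue *lookup[MAX_GUC_IDS] = {
			[0] = &wide, [1] = &wide, [2] = &wide, [3] = &wide,
			[4] = &narrow,
		};

		for (int index = 0; index < MAX_GUC_IDS; index++) {
			struct exec_queue *q = lookup[index];

			if (!q)
				continue;
			/* Skip the extra entries of a wide queue, as the driver's
			 * "if (q->guc->id != index) continue;" does. */
			if (q->base_id != index)
				continue;

			printf("visit %s once (base id %d, width %d)\n",
			       q->name, q->base_id, q->width);
		}
		return 0;
	}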
2052 struct xe_exec_queue *q) in guc_exec_queue_revert_pending_state_change() argument
2056 pending_enable = exec_queue_pending_enable(q); in guc_exec_queue_revert_pending_state_change()
2057 pending_resume = exec_queue_pending_resume(q); in guc_exec_queue_revert_pending_state_change()
2060 q->guc->needs_resume = true; in guc_exec_queue_revert_pending_state_change()
2062 q->guc->id); in guc_exec_queue_revert_pending_state_change()
2066 !exec_queue_pending_tdr_exit(q)) { in guc_exec_queue_revert_pending_state_change()
2067 clear_exec_queue_registered(q); in guc_exec_queue_revert_pending_state_change()
2068 if (xe_exec_queue_is_lr(q)) in guc_exec_queue_revert_pending_state_change()
2069 xe_exec_queue_put(q); in guc_exec_queue_revert_pending_state_change()
2071 q->guc->id); in guc_exec_queue_revert_pending_state_change()
2075 clear_exec_queue_enabled(q); in guc_exec_queue_revert_pending_state_change()
2076 clear_exec_queue_pending_resume(q); in guc_exec_queue_revert_pending_state_change()
2077 clear_exec_queue_pending_tdr_exit(q); in guc_exec_queue_revert_pending_state_change()
2078 clear_exec_queue_pending_enable(q); in guc_exec_queue_revert_pending_state_change()
2080 q->guc->id); in guc_exec_queue_revert_pending_state_change()
2083 if (exec_queue_destroyed(q) && exec_queue_registered(q)) { in guc_exec_queue_revert_pending_state_change()
2084 clear_exec_queue_destroyed(q); in guc_exec_queue_revert_pending_state_change()
2085 if (exec_queue_extra_ref(q)) in guc_exec_queue_revert_pending_state_change()
2086 xe_exec_queue_put(q); in guc_exec_queue_revert_pending_state_change()
2088 q->guc->needs_cleanup = true; in guc_exec_queue_revert_pending_state_change()
2089 clear_exec_queue_extra_ref(q); in guc_exec_queue_revert_pending_state_change()
2091 q->guc->id); in guc_exec_queue_revert_pending_state_change()
2094 pending_disable = exec_queue_pending_disable(q); in guc_exec_queue_revert_pending_state_change()
2096 if (pending_disable && exec_queue_suspended(q)) { in guc_exec_queue_revert_pending_state_change()
2097 clear_exec_queue_suspended(q); in guc_exec_queue_revert_pending_state_change()
2098 q->guc->needs_suspend = true; in guc_exec_queue_revert_pending_state_change()
2100 q->guc->id); in guc_exec_queue_revert_pending_state_change()
2105 set_exec_queue_enabled(q); in guc_exec_queue_revert_pending_state_change()
2106 clear_exec_queue_pending_disable(q); in guc_exec_queue_revert_pending_state_change()
2107 clear_exec_queue_check_timeout(q); in guc_exec_queue_revert_pending_state_change()
2109 q->guc->id); in guc_exec_queue_revert_pending_state_change()
2112 q->guc->resume_time = 0; in guc_exec_queue_revert_pending_state_change()
2132 static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q) in guc_exec_queue_pause() argument
2134 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_pause()
2142 if (xe_exec_queue_is_lr(q)) in guc_exec_queue_pause()
2143 cancel_work_sync(&q->guc->lr_tdr); in guc_exec_queue_pause()
2147 guc_exec_queue_revert_pending_state_change(guc, q); in guc_exec_queue_pause()
2149 if (xe_exec_queue_is_parallel(q)) { in guc_exec_queue_pause()
2151 struct xe_lrc *lrc = READ_ONCE(q->lrc[0]); in guc_exec_queue_pause()
2171 for (i = 0; i < q->width; ++i) in guc_exec_queue_pause()
2172 q->lrc[i]->ring.tail = job->ptrs[i].head; in guc_exec_queue_pause()
2182 struct xe_exec_queue *q; in xe_guc_submit_pause() local
2188 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_pause()
2190 if (q->guc->id != index) in xe_guc_submit_pause()
2193 guc_exec_queue_pause(guc, q); in xe_guc_submit_pause()
2198 static void guc_exec_queue_start(struct xe_exec_queue *q) in guc_exec_queue_start() argument
2200 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_start()
2202 if (!exec_queue_killed_or_banned_or_wedged(q)) { in guc_exec_queue_start()
2206 trace_xe_exec_queue_resubmit(q); in guc_exec_queue_start()
2208 for (i = 0; i < q->width; ++i) { in guc_exec_queue_start()
2217 q->lrc[i]->ring.tail = job->ptrs[i].head; in guc_exec_queue_start()
2218 xe_lrc_set_ring_tail(q->lrc[i], in guc_exec_queue_start()
2219 xe_lrc_ring_head(q->lrc[i])); in guc_exec_queue_start()
2231 struct xe_exec_queue *q; in xe_guc_submit_start() local
2238 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_start()
2240 if (q->guc->id != index) in xe_guc_submit_start()
2243 guc_exec_queue_start(q); in xe_guc_submit_start()
2253 struct xe_exec_queue *q) in guc_exec_queue_unpause_prepare() argument
2255 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_unpause_prepare()
2263 q->guc->id, xe_sched_job_seqno(job)); in guc_exec_queue_unpause_prepare()
2265 q->ring_ops->emit_job(job); in guc_exec_queue_unpause_prepare()
2280 struct xe_exec_queue *q; in xe_guc_submit_unpause_prepare() local
2286 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_unpause_prepare()
2288 if (q->guc->id != index) in xe_guc_submit_unpause_prepare()
2291 guc_exec_queue_unpause_prepare(guc, q); in xe_guc_submit_unpause_prepare()
2296 static void guc_exec_queue_replay_pending_state_change(struct xe_exec_queue *q) in guc_exec_queue_replay_pending_state_change() argument
2298 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_replay_pending_state_change()
2301 if (q->guc->needs_cleanup) { in guc_exec_queue_replay_pending_state_change()
2302 msg = q->guc->static_msgs + STATIC_MSG_CLEANUP; in guc_exec_queue_replay_pending_state_change()
2304 guc_exec_queue_add_msg(q, msg, CLEANUP); in guc_exec_queue_replay_pending_state_change()
2305 q->guc->needs_cleanup = false; in guc_exec_queue_replay_pending_state_change()
2308 if (q->guc->needs_suspend) { in guc_exec_queue_replay_pending_state_change()
2309 msg = q->guc->static_msgs + STATIC_MSG_SUSPEND; in guc_exec_queue_replay_pending_state_change()
2312 guc_exec_queue_try_add_msg_head(q, msg, SUSPEND); in guc_exec_queue_replay_pending_state_change()
2315 q->guc->needs_suspend = false; in guc_exec_queue_replay_pending_state_change()
2323 if (q->guc->needs_resume) { in guc_exec_queue_replay_pending_state_change()
2324 msg = q->guc->static_msgs + STATIC_MSG_RESUME; in guc_exec_queue_replay_pending_state_change()
2327 guc_exec_queue_try_add_msg_head(q, msg, RESUME); in guc_exec_queue_replay_pending_state_change()
2330 q->guc->needs_resume = false; in guc_exec_queue_replay_pending_state_change()
2334 static void guc_exec_queue_unpause(struct xe_guc *guc, struct xe_exec_queue *q) in guc_exec_queue_unpause() argument
2336 struct xe_gpu_scheduler *sched = &q->guc->sched; in guc_exec_queue_unpause()
2337 bool needs_tdr = exec_queue_killed_or_banned_or_wedged(q); in guc_exec_queue_unpause()
2342 guc_exec_queue_replay_pending_state_change(q); in guc_exec_queue_unpause()
2345 xe_guc_exec_queue_trigger_cleanup(q); in guc_exec_queue_unpause()
2355 struct xe_exec_queue *q; in xe_guc_submit_unpause() local
2359 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_unpause()
2364 if (q->guc->id != index || in xe_guc_submit_unpause()
2365 !READ_ONCE(q->guc->sched.base.pause_submit)) in xe_guc_submit_unpause()
2368 guc_exec_queue_unpause(guc, q); in xe_guc_submit_unpause()
2379 struct xe_exec_queue *q; in xe_guc_submit_pause_abort() local
2383 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_submit_pause_abort()
2384 struct xe_gpu_scheduler *sched = &q->guc->sched; in xe_guc_submit_pause_abort()
2387 if (q->guc->id != index) in xe_guc_submit_pause_abort()
2391 if (exec_queue_killed_or_banned_or_wedged(q)) in xe_guc_submit_pause_abort()
2392 xe_guc_exec_queue_trigger_cleanup(q); in xe_guc_submit_pause_abort()
2401 struct xe_exec_queue *q; in g2h_exec_queue_lookup() local
2408 q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id); in g2h_exec_queue_lookup()
2409 if (unlikely(!q)) { in g2h_exec_queue_lookup()
2414 xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id); in g2h_exec_queue_lookup()
2415 xe_gt_assert(guc_to_gt(guc), guc_id < (q->guc->id + q->width)); in g2h_exec_queue_lookup()
2417 return q; in g2h_exec_queue_lookup()
2420 static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q) in deregister_exec_queue() argument
2424 q->guc->id, in deregister_exec_queue()
2427 xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q)); in deregister_exec_queue()
2428 xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q)); in deregister_exec_queue()
2429 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q)); in deregister_exec_queue()
2430 xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q)); in deregister_exec_queue()
2432 trace_xe_exec_queue_deregister(q); in deregister_exec_queue()
2437 static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q, in handle_sched_done() argument
2440 trace_xe_exec_queue_scheduling_done(q); in handle_sched_done()
2443 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q)); in handle_sched_done()
2445 q->guc->resume_time = ktime_get(); in handle_sched_done()
2446 clear_exec_queue_pending_resume(q); in handle_sched_done()
2447 clear_exec_queue_pending_tdr_exit(q); in handle_sched_done()
2448 clear_exec_queue_pending_enable(q); in handle_sched_done()
2452 bool check_timeout = exec_queue_check_timeout(q); in handle_sched_done()
2455 xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q)); in handle_sched_done()
2457 if (q->guc->suspend_pending) { in handle_sched_done()
2458 suspend_fence_signal(q); in handle_sched_done()
2459 clear_exec_queue_pending_disable(q); in handle_sched_done()
2461 if (exec_queue_banned(q) || check_timeout) { in handle_sched_done()
2465 if (!check_timeout && exec_queue_destroyed(q)) { in handle_sched_done()
2475 clear_exec_queue_pending_disable(q); in handle_sched_done()
2476 deregister_exec_queue(guc, q); in handle_sched_done()
2478 clear_exec_queue_pending_disable(q); in handle_sched_done()
2486 struct xe_exec_queue *q; in xe_guc_sched_done_handler() local
2495 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_sched_done_handler()
2496 if (unlikely(!q)) in xe_guc_sched_done_handler()
2499 if (unlikely(!exec_queue_pending_enable(q) && in xe_guc_sched_done_handler()
2500 !exec_queue_pending_disable(q))) { in xe_guc_sched_done_handler()
2503 atomic_read(&q->guc->state), q->guc->id, in xe_guc_sched_done_handler()
2508 handle_sched_done(guc, q, runnable_state); in xe_guc_sched_done_handler()
2513 static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q) in handle_deregister_done() argument
2515 trace_xe_exec_queue_deregister_done(q); in handle_deregister_done()
2517 clear_exec_queue_registered(q); in handle_deregister_done()
2519 if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q)) in handle_deregister_done()
2520 xe_exec_queue_put(q); in handle_deregister_done()
2522 __guc_exec_queue_destroy(guc, q); in handle_deregister_done()
2527 struct xe_exec_queue *q; in xe_guc_deregister_done_handler() local
2535 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_deregister_done_handler()
2536 if (unlikely(!q)) in xe_guc_deregister_done_handler()
2539 if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) || in xe_guc_deregister_done_handler()
2540 exec_queue_pending_enable(q) || exec_queue_enabled(q)) { in xe_guc_deregister_done_handler()
2543 atomic_read(&q->guc->state), q->guc->id); in xe_guc_deregister_done_handler()
2547 handle_deregister_done(guc, q); in xe_guc_deregister_done_handler()
2555 struct xe_exec_queue *q; in xe_guc_exec_queue_reset_handler() local
2563 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_reset_handler()
2564 if (unlikely(!q)) in xe_guc_exec_queue_reset_handler()
2568 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); in xe_guc_exec_queue_reset_handler()
2570 trace_xe_exec_queue_reset(q); in xe_guc_exec_queue_reset_handler()
2578 set_exec_queue_reset(q); in xe_guc_exec_queue_reset_handler()
2579 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q)) in xe_guc_exec_queue_reset_handler()
2580 xe_guc_exec_queue_trigger_cleanup(q); in xe_guc_exec_queue_reset_handler()
2617 struct xe_exec_queue *q; in xe_guc_exec_queue_memory_cat_error_handler() local
2638 q = g2h_exec_queue_lookup(guc, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
2639 if (unlikely(!q)) in xe_guc_exec_queue_memory_cat_error_handler()
2650 type, xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
2654 xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); in xe_guc_exec_queue_memory_cat_error_handler()
2656 trace_xe_exec_queue_memory_cat_error(q); in xe_guc_exec_queue_memory_cat_error_handler()
2659 set_exec_queue_reset(q); in xe_guc_exec_queue_memory_cat_error_handler()
2660 if (!exec_queue_banned(q) && !exec_queue_check_timeout(q)) in xe_guc_exec_queue_memory_cat_error_handler()
2661 xe_guc_exec_queue_trigger_cleanup(q); in xe_guc_exec_queue_memory_cat_error_handler()
2689 guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q, in guc_exec_queue_wq_snapshot_capture() argument
2692 struct xe_guc *guc = exec_queue_to_guc(q); in guc_exec_queue_wq_snapshot_capture()
2694 struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]); in guc_exec_queue_wq_snapshot_capture()
2697 snapshot->guc.wqi_head = q->guc->wqi_head; in guc_exec_queue_wq_snapshot_capture()
2698 snapshot->guc.wqi_tail = q->guc->wqi_tail; in guc_exec_queue_wq_snapshot_capture()
2738 * @q: faulty exec queue
2747 xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q) in xe_guc_exec_queue_snapshot_capture() argument
2749 struct xe_gpu_scheduler *sched = &q->guc->sched; in xe_guc_exec_queue_snapshot_capture()
2758 snapshot->guc.id = q->guc->id; in xe_guc_exec_queue_snapshot_capture()
2759 memcpy(&snapshot->name, &q->name, sizeof(snapshot->name)); in xe_guc_exec_queue_snapshot_capture()
2760 snapshot->class = q->class; in xe_guc_exec_queue_snapshot_capture()
2761 snapshot->logical_mask = q->logical_mask; in xe_guc_exec_queue_snapshot_capture()
2762 snapshot->width = q->width; in xe_guc_exec_queue_snapshot_capture()
2763 snapshot->refcount = kref_read(&q->refcount); in xe_guc_exec_queue_snapshot_capture()
2765 snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us; in xe_guc_exec_queue_snapshot_capture()
2767 q->sched_props.preempt_timeout_us; in xe_guc_exec_queue_snapshot_capture()
2769 snapshot->lrc = kmalloc_array(q->width, sizeof(struct xe_lrc_snapshot *), in xe_guc_exec_queue_snapshot_capture()
2773 for (i = 0; i < q->width; ++i) { in xe_guc_exec_queue_snapshot_capture()
2774 struct xe_lrc *lrc = q->lrc[i]; in xe_guc_exec_queue_snapshot_capture()
2780 snapshot->schedule_state = atomic_read(&q->guc->state); in xe_guc_exec_queue_snapshot_capture()
2781 snapshot->exec_queue_flags = q->flags; in xe_guc_exec_queue_snapshot_capture()
2783 snapshot->parallel_execution = xe_exec_queue_is_parallel(q); in xe_guc_exec_queue_snapshot_capture()
2785 guc_exec_queue_wq_snapshot_capture(q, snapshot); in xe_guc_exec_queue_snapshot_capture()
2901 static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p) in guc_exec_queue_print() argument
2905 snapshot = xe_guc_exec_queue_snapshot_capture(q); in guc_exec_queue_print()
2912 * @q: Execution queue
2922 void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type) in xe_guc_register_vf_exec_queue() argument
2924 struct xe_guc *guc = exec_queue_to_guc(q); in xe_guc_register_vf_exec_queue()
2934 register_exec_queue(q, ctx_type); in xe_guc_register_vf_exec_queue()
2935 enable_scheduling(q); in xe_guc_register_vf_exec_queue()
2947 struct xe_exec_queue *q; in xe_guc_submit_print() local
2954 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) in xe_guc_submit_print()
2955 guc_exec_queue_print(q, p); in xe_guc_submit_print()
2969 struct xe_exec_queue *q; in xe_guc_contexts_hwsp_rebase() local
2974 xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) { in xe_guc_contexts_hwsp_rebase()
2976 if (q->guc->id != index) in xe_guc_contexts_hwsp_rebase()
2979 err = xe_exec_queue_contexts_hwsp_rebase(q, scratch); in xe_guc_contexts_hwsp_rebase()