xref: /linux/drivers/gpu/drm/xe/xe_guc_submit.c (revision ed98261b41687323ffa02ca20fef1e60b38fd1aa)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_guc_submit.h"
7 
8 #include <linux/bitfield.h>
9 #include <linux/bitmap.h>
10 #include <linux/circ_buf.h>
11 #include <linux/delay.h>
12 #include <linux/dma-fence-array.h>
13 #include <linux/math64.h>
14 
15 #include <drm/drm_managed.h>
16 
17 #include "abi/guc_actions_abi.h"
18 #include "abi/guc_actions_slpc_abi.h"
19 #include "abi/guc_klvs_abi.h"
20 #include "regs/xe_lrc_layout.h"
21 #include "xe_assert.h"
22 #include "xe_devcoredump.h"
23 #include "xe_device.h"
24 #include "xe_exec_queue.h"
25 #include "xe_force_wake.h"
26 #include "xe_gpu_scheduler.h"
27 #include "xe_gt.h"
28 #include "xe_gt_clock.h"
29 #include "xe_gt_printk.h"
30 #include "xe_guc.h"
31 #include "xe_guc_capture.h"
32 #include "xe_guc_ct.h"
33 #include "xe_guc_exec_queue_types.h"
34 #include "xe_guc_id_mgr.h"
35 #include "xe_guc_submit_types.h"
36 #include "xe_hw_engine.h"
37 #include "xe_hw_fence.h"
38 #include "xe_lrc.h"
39 #include "xe_macros.h"
40 #include "xe_map.h"
41 #include "xe_mocs.h"
42 #include "xe_pm.h"
43 #include "xe_ring_ops_types.h"
44 #include "xe_sched_job.h"
45 #include "xe_trace.h"
46 #include "xe_vm.h"
47 
48 static struct xe_guc *
49 exec_queue_to_guc(struct xe_exec_queue *q)
50 {
51 	return &q->gt->uc.guc;
52 }
53 
54 /*
55  * Helpers for engine state, using an atomic as some of the bits can transition
56  * at the same time (e.g. a suspend can be happening at the same time as a
57  * schedule engine done being processed).
58  */
59 #define EXEC_QUEUE_STATE_REGISTERED		(1 << 0)
60 #define EXEC_QUEUE_STATE_ENABLED		(1 << 1)
61 #define EXEC_QUEUE_STATE_PENDING_ENABLE		(1 << 2)
62 #define EXEC_QUEUE_STATE_PENDING_DISABLE	(1 << 3)
63 #define EXEC_QUEUE_STATE_DESTROYED		(1 << 4)
64 #define EXEC_QUEUE_STATE_SUSPENDED		(1 << 5)
65 #define EXEC_QUEUE_STATE_RESET			(1 << 6)
66 #define EXEC_QUEUE_STATE_KILLED			(1 << 7)
67 #define EXEC_QUEUE_STATE_WEDGED			(1 << 8)
68 #define EXEC_QUEUE_STATE_BANNED			(1 << 9)
69 #define EXEC_QUEUE_STATE_CHECK_TIMEOUT		(1 << 10)
70 #define EXEC_QUEUE_STATE_EXTRA_REF		(1 << 11)
71 
72 static bool exec_queue_registered(struct xe_exec_queue *q)
73 {
74 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_REGISTERED;
75 }
76 
77 static void set_exec_queue_registered(struct xe_exec_queue *q)
78 {
79 	atomic_or(EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
80 }
81 
82 static void clear_exec_queue_registered(struct xe_exec_queue *q)
83 {
84 	atomic_and(~EXEC_QUEUE_STATE_REGISTERED, &q->guc->state);
85 }
86 
87 static bool exec_queue_enabled(struct xe_exec_queue *q)
88 {
89 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_ENABLED;
90 }
91 
92 static void set_exec_queue_enabled(struct xe_exec_queue *q)
93 {
94 	atomic_or(EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
95 }
96 
97 static void clear_exec_queue_enabled(struct xe_exec_queue *q)
98 {
99 	atomic_and(~EXEC_QUEUE_STATE_ENABLED, &q->guc->state);
100 }
101 
102 static bool exec_queue_pending_enable(struct xe_exec_queue *q)
103 {
104 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_ENABLE;
105 }
106 
107 static void set_exec_queue_pending_enable(struct xe_exec_queue *q)
108 {
109 	atomic_or(EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
110 }
111 
112 static void clear_exec_queue_pending_enable(struct xe_exec_queue *q)
113 {
114 	atomic_and(~EXEC_QUEUE_STATE_PENDING_ENABLE, &q->guc->state);
115 }
116 
117 static bool exec_queue_pending_disable(struct xe_exec_queue *q)
118 {
119 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_DISABLE;
120 }
121 
122 static void set_exec_queue_pending_disable(struct xe_exec_queue *q)
123 {
124 	atomic_or(EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
125 }
126 
127 static void clear_exec_queue_pending_disable(struct xe_exec_queue *q)
128 {
129 	atomic_and(~EXEC_QUEUE_STATE_PENDING_DISABLE, &q->guc->state);
130 }
131 
132 static bool exec_queue_destroyed(struct xe_exec_queue *q)
133 {
134 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_DESTROYED;
135 }
136 
137 static void set_exec_queue_destroyed(struct xe_exec_queue *q)
138 {
139 	atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
140 }
141 
142 static bool exec_queue_banned(struct xe_exec_queue *q)
143 {
144 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED;
145 }
146 
147 static void set_exec_queue_banned(struct xe_exec_queue *q)
148 {
149 	atomic_or(EXEC_QUEUE_STATE_BANNED, &q->guc->state);
150 }
151 
152 static bool exec_queue_suspended(struct xe_exec_queue *q)
153 {
154 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_SUSPENDED;
155 }
156 
157 static void set_exec_queue_suspended(struct xe_exec_queue *q)
158 {
159 	atomic_or(EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
160 }
161 
162 static void clear_exec_queue_suspended(struct xe_exec_queue *q)
163 {
164 	atomic_and(~EXEC_QUEUE_STATE_SUSPENDED, &q->guc->state);
165 }
166 
167 static bool exec_queue_reset(struct xe_exec_queue *q)
168 {
169 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_RESET;
170 }
171 
172 static void set_exec_queue_reset(struct xe_exec_queue *q)
173 {
174 	atomic_or(EXEC_QUEUE_STATE_RESET, &q->guc->state);
175 }
176 
177 static bool exec_queue_killed(struct xe_exec_queue *q)
178 {
179 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_KILLED;
180 }
181 
182 static void set_exec_queue_killed(struct xe_exec_queue *q)
183 {
184 	atomic_or(EXEC_QUEUE_STATE_KILLED, &q->guc->state);
185 }
186 
187 static bool exec_queue_wedged(struct xe_exec_queue *q)
188 {
189 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_WEDGED;
190 }
191 
192 static void set_exec_queue_wedged(struct xe_exec_queue *q)
193 {
194 	atomic_or(EXEC_QUEUE_STATE_WEDGED, &q->guc->state);
195 }
196 
197 static bool exec_queue_check_timeout(struct xe_exec_queue *q)
198 {
199 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_CHECK_TIMEOUT;
200 }
201 
202 static void set_exec_queue_check_timeout(struct xe_exec_queue *q)
203 {
204 	atomic_or(EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
205 }
206 
207 static void clear_exec_queue_check_timeout(struct xe_exec_queue *q)
208 {
209 	atomic_and(~EXEC_QUEUE_STATE_CHECK_TIMEOUT, &q->guc->state);
210 }
211 
212 static bool exec_queue_extra_ref(struct xe_exec_queue *q)
213 {
214 	return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_EXTRA_REF;
215 }
216 
217 static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
218 {
219 	atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
220 }
221 
222 static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
223 {
224 	return (atomic_read(&q->guc->state) &
225 		(EXEC_QUEUE_STATE_WEDGED | EXEC_QUEUE_STATE_KILLED |
226 		 EXEC_QUEUE_STATE_BANNED));
227 }
228 
229 static void guc_submit_fini(struct drm_device *drm, void *arg)
230 {
231 	struct xe_guc *guc = arg;
232 	struct xe_device *xe = guc_to_xe(guc);
233 	struct xe_gt *gt = guc_to_gt(guc);
234 	int ret;
235 
236 	ret = wait_event_timeout(guc->submission_state.fini_wq,
237 				 xa_empty(&guc->submission_state.exec_queue_lookup),
238 				 HZ * 5);
239 
240 	drain_workqueue(xe->destroy_wq);
241 
242 	xe_gt_assert(gt, ret);
243 
244 	xa_destroy(&guc->submission_state.exec_queue_lookup);
245 }
246 
247 static void guc_submit_wedged_fini(void *arg)
248 {
249 	struct xe_guc *guc = arg;
250 	struct xe_exec_queue *q;
251 	unsigned long index;
252 
253 	mutex_lock(&guc->submission_state.lock);
254 	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
255 		if (exec_queue_wedged(q)) {
256 			mutex_unlock(&guc->submission_state.lock);
257 			xe_exec_queue_put(q);
258 			mutex_lock(&guc->submission_state.lock);
259 		}
260 	}
261 	mutex_unlock(&guc->submission_state.lock);
262 }
263 
264 static const struct xe_exec_queue_ops guc_exec_queue_ops;
265 
266 static void primelockdep(struct xe_guc *guc)
267 {
268 	if (!IS_ENABLED(CONFIG_LOCKDEP))
269 		return;
270 
271 	fs_reclaim_acquire(GFP_KERNEL);
272 
273 	mutex_lock(&guc->submission_state.lock);
274 	mutex_unlock(&guc->submission_state.lock);
275 
276 	fs_reclaim_release(GFP_KERNEL);
277 }
278 
279 /**
280  * xe_guc_submit_init() - Initialize GuC submission.
281  * @guc: the &xe_guc to initialize
282  * @num_ids: number of GuC context IDs to use
283  *
284  * The bare-metal or PF driver can pass ~0 as @num_ids to indicate that all
285  * GuC context IDs supported by the GuC firmware should be used for submission.
286  *
287  * Only VF drivers have to provide an explicit number of GuC context IDs
288  * that they can use for submission.
289  *
290  * Return: 0 on success or a negative error code on failure.
291  */
292 int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
293 {
294 	struct xe_device *xe = guc_to_xe(guc);
295 	struct xe_gt *gt = guc_to_gt(guc);
296 	int err;
297 
298 	err = drmm_mutex_init(&xe->drm, &guc->submission_state.lock);
299 	if (err)
300 		return err;
301 
302 	err = xe_guc_id_mgr_init(&guc->submission_state.idm, num_ids);
303 	if (err)
304 		return err;
305 
306 	gt->exec_queue_ops = &guc_exec_queue_ops;
307 
308 	xa_init(&guc->submission_state.exec_queue_lookup);
309 
310 	init_waitqueue_head(&guc->submission_state.fini_wq);
311 
312 	primelockdep(guc);
313 
314 	guc->submission_state.initialized = true;
315 
316 	return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
317 }
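
/*
 * Illustrative usage sketch, not code from this file (the helper name and the
 * vf_quota parameter are hypothetical): a bare-metal/PF caller passes ~0 so
 * the whole GuC context ID space is used, while a VF passes only the number
 * of IDs it has been provisioned with.
 *
 *	static int example_submit_init(struct xe_guc *guc, bool is_vf,
 *				       unsigned int vf_quota)
 *	{
 *		return xe_guc_submit_init(guc, is_vf ? vf_quota : ~0);
 *	}
 */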
318 
319 static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
320 {
321 	int i;
322 
323 	lockdep_assert_held(&guc->submission_state.lock);
324 
325 	for (i = 0; i < xa_count; ++i)
326 		xa_erase(&guc->submission_state.exec_queue_lookup, q->guc->id + i);
327 
328 	xe_guc_id_mgr_release_locked(&guc->submission_state.idm,
329 				     q->guc->id, q->width);
330 
331 	if (xa_empty(&guc->submission_state.exec_queue_lookup))
332 		wake_up(&guc->submission_state.fini_wq);
333 }
334 
335 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
336 {
337 	int ret;
338 	int i;
339 
340 	/*
341 	 * Must use GFP_NOWAIT as this lock is in the dma fence signalling path,
342 	 * worse case user gets -ENOMEM on engine create and has to try again.
343 	 * worst case the user gets -ENOMEM on engine create and has to try again.
344 	 *
345 	 * FIXME: Have caller pre-alloc or post-alloc w/ GFP_KERNEL to prevent
346 	 */
347 	lockdep_assert_held(&guc->submission_state.lock);
348 
349 	ret = xe_guc_id_mgr_reserve_locked(&guc->submission_state.idm,
350 					   q->width);
351 	if (ret < 0)
352 		return ret;
353 
354 	q->guc->id = ret;
355 
356 	for (i = 0; i < q->width; ++i) {
357 		ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup,
358 				      q->guc->id + i, q, GFP_NOWAIT));
359 		if (ret)
360 			goto err_release;
361 	}
362 
363 	return 0;
364 
365 err_release:
366 	__release_guc_id(guc, q, i);
367 
368 	return ret;
369 }
370 
371 static void release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
372 {
373 	mutex_lock(&guc->submission_state.lock);
374 	__release_guc_id(guc, q, q->width);
375 	mutex_unlock(&guc->submission_state.lock);
376 }
377 
378 struct exec_queue_policy {
379 	u32 count;
380 	struct guc_update_exec_queue_policy h2g;
381 };
382 
383 static u32 __guc_exec_queue_policy_action_size(struct exec_queue_policy *policy)
384 {
385 	size_t bytes = sizeof(policy->h2g.header) +
386 		       (sizeof(policy->h2g.klv[0]) * policy->count);
387 
388 	return bytes / sizeof(u32);
389 }
390 
391 static void __guc_exec_queue_policy_start_klv(struct exec_queue_policy *policy,
392 					      u16 guc_id)
393 {
394 	policy->h2g.header.action =
395 		XE_GUC_ACTION_HOST2GUC_UPDATE_CONTEXT_POLICIES;
396 	policy->h2g.header.guc_id = guc_id;
397 	policy->count = 0;
398 }
399 
400 #define MAKE_EXEC_QUEUE_POLICY_ADD(func, id) \
401 static void __guc_exec_queue_policy_add_##func(struct exec_queue_policy *policy, \
402 					   u32 data) \
403 { \
404 	XE_WARN_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS); \
405 \
406 	policy->h2g.klv[policy->count].kl = \
407 		FIELD_PREP(GUC_KLV_0_KEY, \
408 			   GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
409 		FIELD_PREP(GUC_KLV_0_LEN, 1); \
410 	policy->h2g.klv[policy->count].value = data; \
411 	policy->count++; \
412 }
413 
414 MAKE_EXEC_QUEUE_POLICY_ADD(execution_quantum, EXECUTION_QUANTUM)
415 MAKE_EXEC_QUEUE_POLICY_ADD(preemption_timeout, PREEMPTION_TIMEOUT)
416 MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
417 MAKE_EXEC_QUEUE_POLICY_ADD(slpc_exec_queue_freq_req, SLPM_GT_FREQUENCY)
418 #undef MAKE_EXEC_QUEUE_POLICY_ADD
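
/*
 * For reference, MAKE_EXEC_QUEUE_POLICY_ADD(priority, SCHEDULING_PRIORITY)
 * above expands to roughly the following helper; it is shown here only to
 * make the KLV encoding explicit:
 *
 *	static void __guc_exec_queue_policy_add_priority(struct exec_queue_policy *policy,
 *						   u32 data)
 *	{
 *		XE_WARN_ON(policy->count >= GUC_CONTEXT_POLICIES_KLV_NUM_IDS);
 *
 *		policy->h2g.klv[policy->count].kl =
 *			FIELD_PREP(GUC_KLV_0_KEY,
 *				   GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY) |
 *			FIELD_PREP(GUC_KLV_0_LEN, 1);
 *		policy->h2g.klv[policy->count].value = data;
 *		policy->count++;
 *	}
 */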
419 
420 static const int xe_exec_queue_prio_to_guc[] = {
421 	[XE_EXEC_QUEUE_PRIORITY_LOW] = GUC_CLIENT_PRIORITY_NORMAL,
422 	[XE_EXEC_QUEUE_PRIORITY_NORMAL] = GUC_CLIENT_PRIORITY_KMD_NORMAL,
423 	[XE_EXEC_QUEUE_PRIORITY_HIGH] = GUC_CLIENT_PRIORITY_HIGH,
424 	[XE_EXEC_QUEUE_PRIORITY_KERNEL] = GUC_CLIENT_PRIORITY_KMD_HIGH,
425 };
426 
427 static void init_policies(struct xe_guc *guc, struct xe_exec_queue *q)
428 {
429 	struct exec_queue_policy policy;
430 	enum xe_exec_queue_priority prio = q->sched_props.priority;
431 	u32 timeslice_us = q->sched_props.timeslice_us;
432 	u32 slpc_exec_queue_freq_req = 0;
433 	u32 preempt_timeout_us = q->sched_props.preempt_timeout_us;
434 
435 	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
436 
437 	if (q->flags & EXEC_QUEUE_FLAG_LOW_LATENCY)
438 		slpc_exec_queue_freq_req |= SLPC_CTX_FREQ_REQ_IS_COMPUTE;
439 
440 	__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
441 	__guc_exec_queue_policy_add_priority(&policy, xe_exec_queue_prio_to_guc[prio]);
442 	__guc_exec_queue_policy_add_execution_quantum(&policy, timeslice_us);
443 	__guc_exec_queue_policy_add_preemption_timeout(&policy, preempt_timeout_us);
444 	__guc_exec_queue_policy_add_slpc_exec_queue_freq_req(&policy,
445 							     slpc_exec_queue_freq_req);
446 
447 	xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
448 		       __guc_exec_queue_policy_action_size(&policy), 0, 0);
449 }
450 
451 static void set_min_preemption_timeout(struct xe_guc *guc, struct xe_exec_queue *q)
452 {
453 	struct exec_queue_policy policy;
454 
455 	__guc_exec_queue_policy_start_klv(&policy, q->guc->id);
456 	__guc_exec_queue_policy_add_preemption_timeout(&policy, 1);
457 
458 	xe_guc_ct_send(&guc->ct, (u32 *)&policy.h2g,
459 		       __guc_exec_queue_policy_action_size(&policy), 0, 0);
460 }
461 
462 #define parallel_read(xe_, map_, field_) \
463 	xe_map_rd_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
464 			field_)
465 #define parallel_write(xe_, map_, field_, val_) \
466 	xe_map_wr_field(xe_, &map_, 0, struct guc_submit_parallel_scratch, \
467 			field_, val_)
468 
469 static void __register_mlrc_exec_queue(struct xe_guc *guc,
470 				       struct xe_exec_queue *q,
471 				       struct guc_ctxt_registration_info *info)
472 {
473 #define MAX_MLRC_REG_SIZE      (13 + XE_HW_ENGINE_MAX_INSTANCE * 2)
474 	u32 action[MAX_MLRC_REG_SIZE];
475 	int len = 0;
476 	int i;
477 
478 	xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_parallel(q));
479 
480 	action[len++] = XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
481 	action[len++] = info->flags;
482 	action[len++] = info->context_idx;
483 	action[len++] = info->engine_class;
484 	action[len++] = info->engine_submit_mask;
485 	action[len++] = info->wq_desc_lo;
486 	action[len++] = info->wq_desc_hi;
487 	action[len++] = info->wq_base_lo;
488 	action[len++] = info->wq_base_hi;
489 	action[len++] = info->wq_size;
490 	action[len++] = q->width;
491 	action[len++] = info->hwlrca_lo;
492 	action[len++] = info->hwlrca_hi;
493 
494 	for (i = 1; i < q->width; ++i) {
495 		struct xe_lrc *lrc = q->lrc[i];
496 
497 		action[len++] = lower_32_bits(xe_lrc_descriptor(lrc));
498 		action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
499 	}
500 
501 	/* explicitly check some fields that we might fix up later */
502 	xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo ==
503 		     action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER]);
504 	xe_gt_assert(guc_to_gt(guc), info->wq_base_lo ==
505 		     action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER]);
506 	xe_gt_assert(guc_to_gt(guc), q->width ==
507 		     action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS]);
508 	xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo ==
509 		     action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR]);
510 	xe_gt_assert(guc_to_gt(guc), len <= MAX_MLRC_REG_SIZE);
511 #undef MAX_MLRC_REG_SIZE
512 
513 	xe_guc_ct_send(&guc->ct, action, len, 0, 0);
514 }
515 
516 static void __register_exec_queue(struct xe_guc *guc,
517 				  struct guc_ctxt_registration_info *info)
518 {
519 	u32 action[] = {
520 		XE_GUC_ACTION_REGISTER_CONTEXT,
521 		info->flags,
522 		info->context_idx,
523 		info->engine_class,
524 		info->engine_submit_mask,
525 		info->wq_desc_lo,
526 		info->wq_desc_hi,
527 		info->wq_base_lo,
528 		info->wq_base_hi,
529 		info->wq_size,
530 		info->hwlrca_lo,
531 		info->hwlrca_hi,
532 	};
533 
534 	/* explicitly check some fields that we might fix up later */
535 	xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo ==
536 		     action[XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER]);
537 	xe_gt_assert(guc_to_gt(guc), info->wq_base_lo ==
538 		     action[XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER]);
539 	xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo ==
540 		     action[XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR]);
541 
542 	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
543 }
544 
545 static void register_exec_queue(struct xe_exec_queue *q)
546 {
547 	struct xe_guc *guc = exec_queue_to_guc(q);
548 	struct xe_device *xe = guc_to_xe(guc);
549 	struct xe_lrc *lrc = q->lrc[0];
550 	struct guc_ctxt_registration_info info;
551 
552 	xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q));
553 
554 	memset(&info, 0, sizeof(info));
555 	info.context_idx = q->guc->id;
556 	info.engine_class = xe_engine_class_to_guc_class(q->class);
557 	info.engine_submit_mask = q->logical_mask;
558 	info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
559 	info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
560 	info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
561 
562 	if (xe_exec_queue_is_parallel(q)) {
563 		u64 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
564 		struct iosys_map map = xe_lrc_parallel_map(lrc);
565 
566 		info.wq_desc_lo = lower_32_bits(ggtt_addr +
567 			offsetof(struct guc_submit_parallel_scratch, wq_desc));
568 		info.wq_desc_hi = upper_32_bits(ggtt_addr +
569 			offsetof(struct guc_submit_parallel_scratch, wq_desc));
570 		info.wq_base_lo = lower_32_bits(ggtt_addr +
571 			offsetof(struct guc_submit_parallel_scratch, wq[0]));
572 		info.wq_base_hi = upper_32_bits(ggtt_addr +
573 			offsetof(struct guc_submit_parallel_scratch, wq[0]));
574 		info.wq_size = WQ_SIZE;
575 
576 		q->guc->wqi_head = 0;
577 		q->guc->wqi_tail = 0;
578 		xe_map_memset(xe, &map, 0, 0, PARALLEL_SCRATCH_SIZE - WQ_SIZE);
579 		parallel_write(xe, map, wq_desc.wq_status, WQ_STATUS_ACTIVE);
580 	}
581 
582 	/*
583 	 * We must keep a reference for LR engines if the engine is registered with
584 	 * the GuC, as jobs signal immediately and we can't destroy an engine while
585 	 * the GuC has a reference to it.
586 	 */
587 	if (xe_exec_queue_is_lr(q))
588 		xe_exec_queue_get(q);
589 
590 	set_exec_queue_registered(q);
591 	trace_xe_exec_queue_register(q);
592 	if (xe_exec_queue_is_parallel(q))
593 		__register_mlrc_exec_queue(guc, q, &info);
594 	else
595 		__register_exec_queue(guc, &info);
596 	init_policies(guc, q);
597 }
598 
599 static u32 wq_space_until_wrap(struct xe_exec_queue *q)
600 {
601 	return (WQ_SIZE - q->guc->wqi_tail);
602 }
603 
604 static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
605 {
606 	struct xe_guc *guc = exec_queue_to_guc(q);
607 	struct xe_device *xe = guc_to_xe(guc);
608 	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
609 	unsigned int sleep_period_ms = 1;
610 
611 #define AVAILABLE_SPACE \
612 	CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
613 	if (wqi_size > AVAILABLE_SPACE) {
614 try_again:
615 		q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
616 		if (wqi_size > AVAILABLE_SPACE) {
617 			if (sleep_period_ms == 1024) {
618 				xe_gt_reset_async(q->gt);
619 				return -ENODEV;
620 			}
621 
622 			msleep(sleep_period_ms);
623 			sleep_period_ms <<= 1;
624 			goto try_again;
625 		}
626 	}
627 #undef AVAILABLE_SPACE
628 
629 	return 0;
630 }
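
/*
 * Worked example of the space check above, with WQ_SIZE assumed to be 0x1000
 * purely for illustration: if the software producer offset is
 * wqi_tail = 0xf00 and the GuC consumer offset read back from wq_desc.head is
 * wqi_head = 0x100, then CIRC_SPACE(0xf00, 0x100, 0x1000) =
 * (0x100 - 0xf00 - 1) & 0xfff = 0x1ff bytes are free, so e.g. a 0x200 byte WQ
 * item has to wait for the GuC to advance the head (or, after roughly a
 * second of exponential backoff, trigger a GT reset).
 */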
631 
632 static int wq_noop_append(struct xe_exec_queue *q)
633 {
634 	struct xe_guc *guc = exec_queue_to_guc(q);
635 	struct xe_device *xe = guc_to_xe(guc);
636 	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
637 	u32 len_dw = wq_space_until_wrap(q) / sizeof(u32) - 1;
638 
639 	if (wq_wait_for_space(q, wq_space_until_wrap(q)))
640 		return -ENODEV;
641 
642 	xe_gt_assert(guc_to_gt(guc), FIELD_FIT(WQ_LEN_MASK, len_dw));
643 
644 	parallel_write(xe, map, wq[q->guc->wqi_tail / sizeof(u32)],
645 		       FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
646 		       FIELD_PREP(WQ_LEN_MASK, len_dw));
647 	q->guc->wqi_tail = 0;
648 
649 	return 0;
650 }
651 
652 static void wq_item_append(struct xe_exec_queue *q)
653 {
654 	struct xe_guc *guc = exec_queue_to_guc(q);
655 	struct xe_device *xe = guc_to_xe(guc);
656 	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
657 #define WQ_HEADER_SIZE	4	/* Includes 1 LRC address too */
658 	u32 wqi[XE_HW_ENGINE_MAX_INSTANCE + (WQ_HEADER_SIZE - 1)];
659 	u32 wqi_size = (q->width + (WQ_HEADER_SIZE - 1)) * sizeof(u32);
660 	u32 len_dw = (wqi_size / sizeof(u32)) - 1;
661 	int i = 0, j;
662 
663 	if (wqi_size > wq_space_until_wrap(q)) {
664 		if (wq_noop_append(q))
665 			return;
666 	}
667 	if (wq_wait_for_space(q, wqi_size))
668 		return;
669 
670 	wqi[i++] = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
671 		FIELD_PREP(WQ_LEN_MASK, len_dw);
672 	wqi[i++] = xe_lrc_descriptor(q->lrc[0]);
673 	wqi[i++] = FIELD_PREP(WQ_GUC_ID_MASK, q->guc->id) |
674 		FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc[0]->ring.tail / sizeof(u64));
675 	wqi[i++] = 0;
676 	for (j = 1; j < q->width; ++j) {
677 		struct xe_lrc *lrc = q->lrc[j];
678 
679 		wqi[i++] = lrc->ring.tail / sizeof(u64);
680 	}
681 
682 	xe_gt_assert(guc_to_gt(guc), i == wqi_size / sizeof(u32));
683 
684 	iosys_map_incr(&map, offsetof(struct guc_submit_parallel_scratch,
685 				      wq[q->guc->wqi_tail / sizeof(u32)]));
686 	xe_map_memcpy_to(xe, &map, 0, wqi, wqi_size);
687 	q->guc->wqi_tail += wqi_size;
688 	xe_gt_assert(guc_to_gt(guc), q->guc->wqi_tail <= WQ_SIZE);
689 
690 	xe_device_wmb(xe);
691 
692 	map = xe_lrc_parallel_map(q->lrc[0]);
693 	parallel_write(xe, map, wq_desc.tail, q->guc->wqi_tail);
694 }
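
/*
 * Resulting WQ item layout as built above (WQ_HEADER_SIZE is 4 dwords,
 * including the first LRC address):
 *
 *	dw0:  WQ_TYPE_MULTI_LRC | len_dw
 *	dw1:  LRC descriptor of q->lrc[0]
 *	dw2:  guc_id | ring tail of q->lrc[0] (in u64 units)
 *	dw3:  0
 *	dw4+: ring tails of q->lrc[1] .. q->lrc[width - 1] (in u64 units)
 */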
695 
696 #define RESUME_PENDING	~0x0ull
697 static void submit_exec_queue(struct xe_exec_queue *q)
698 {
699 	struct xe_guc *guc = exec_queue_to_guc(q);
700 	struct xe_lrc *lrc = q->lrc[0];
701 	u32 action[3];
702 	u32 g2h_len = 0;
703 	u32 num_g2h = 0;
704 	int len = 0;
705 	bool extra_submit = false;
706 
707 	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
708 
709 	if (xe_exec_queue_is_parallel(q))
710 		wq_item_append(q);
711 	else
712 		xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
713 
714 	if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
715 		return;
716 
717 	if (!exec_queue_enabled(q) && !exec_queue_suspended(q)) {
718 		action[len++] = XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
719 		action[len++] = q->guc->id;
720 		action[len++] = GUC_CONTEXT_ENABLE;
721 		g2h_len = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
722 		num_g2h = 1;
723 		if (xe_exec_queue_is_parallel(q))
724 			extra_submit = true;
725 
726 		q->guc->resume_time = RESUME_PENDING;
727 		set_exec_queue_pending_enable(q);
728 		set_exec_queue_enabled(q);
729 		trace_xe_exec_queue_scheduling_enable(q);
730 	} else {
731 		action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
732 		action[len++] = q->guc->id;
733 		trace_xe_exec_queue_submit(q);
734 	}
735 
736 	xe_guc_ct_send(&guc->ct, action, len, g2h_len, num_g2h);
737 
738 	if (extra_submit) {
739 		len = 0;
740 		action[len++] = XE_GUC_ACTION_SCHED_CONTEXT;
741 		action[len++] = q->guc->id;
742 		trace_xe_exec_queue_submit(q);
743 
744 		xe_guc_ct_send(&guc->ct, action, len, 0, 0);
745 	}
746 }
747 
748 static struct dma_fence *
749 guc_exec_queue_run_job(struct drm_sched_job *drm_job)
750 {
751 	struct xe_sched_job *job = to_xe_sched_job(drm_job);
752 	struct xe_exec_queue *q = job->q;
753 	struct xe_guc *guc = exec_queue_to_guc(q);
754 	struct dma_fence *fence = NULL;
755 	bool lr = xe_exec_queue_is_lr(q);
756 
757 	xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
758 		     exec_queue_banned(q) || exec_queue_suspended(q));
759 
760 	trace_xe_sched_job_run(job);
761 
762 	if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) {
763 		if (!exec_queue_registered(q))
764 			register_exec_queue(q);
765 		if (!lr)	/* LR jobs are emitted in the exec IOCTL */
766 			q->ring_ops->emit_job(job);
767 		submit_exec_queue(q);
768 	}
769 
770 	if (lr) {
771 		xe_sched_job_set_error(job, -EOPNOTSUPP);
772 		dma_fence_put(job->fence);	/* Drop ref from xe_sched_job_arm */
773 	} else {
774 		fence = job->fence;
775 	}
776 
777 	return fence;
778 }
779 
780 static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
781 {
782 	struct xe_sched_job *job = to_xe_sched_job(drm_job);
783 
784 	trace_xe_sched_job_free(job);
785 	xe_sched_job_put(job);
786 }
787 
788 int xe_guc_read_stopped(struct xe_guc *guc)
789 {
790 	return atomic_read(&guc->submission_state.stopped);
791 }
792 
793 #define MAKE_SCHED_CONTEXT_ACTION(q, enable_disable)			\
794 	u32 action[] = {						\
795 		XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET,			\
796 		q->guc->id,						\
797 		GUC_CONTEXT_##enable_disable,				\
798 	}
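
/*
 * For reference, MAKE_SCHED_CONTEXT_ACTION(q, DISABLE) below expands to the
 * local declaration:
 *
 *	u32 action[] = {
 *		XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
 *		q->guc->id,
 *		GUC_CONTEXT_DISABLE,
 *	}
 */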
799 
800 static void disable_scheduling_deregister(struct xe_guc *guc,
801 					  struct xe_exec_queue *q)
802 {
803 	MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
804 	int ret;
805 
806 	set_min_preemption_timeout(guc, q);
807 	smp_rmb();
808 	ret = wait_event_timeout(guc->ct.wq,
809 				 (!exec_queue_pending_enable(q) &&
810 				  !exec_queue_pending_disable(q)) ||
811 					 xe_guc_read_stopped(guc),
812 				 HZ * 5);
813 	if (!ret) {
814 		struct xe_gpu_scheduler *sched = &q->guc->sched;
815 
816 		xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n");
817 		xe_sched_submission_start(sched);
818 		xe_gt_reset_async(q->gt);
819 		xe_sched_tdr_queue_imm(sched);
820 		return;
821 	}
822 
823 	clear_exec_queue_enabled(q);
824 	set_exec_queue_pending_disable(q);
825 	set_exec_queue_destroyed(q);
826 	trace_xe_exec_queue_scheduling_disable(q);
827 
828 	/*
829 	 * Reserve space for both G2H here as the 2nd G2H is sent from a G2H
830 	 * handler and we are not allowed to reserve G2H space in handlers.
831 	 */
832 	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
833 		       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET +
834 		       G2H_LEN_DW_DEREGISTER_CONTEXT, 2);
835 }
836 
837 static void xe_guc_exec_queue_trigger_cleanup(struct xe_exec_queue *q)
838 {
839 	struct xe_guc *guc = exec_queue_to_guc(q);
840 	struct xe_device *xe = guc_to_xe(guc);
841 
842 	/* Wake up the xe_wait_user_fence ioctl if the exec queue is reset */
843 	wake_up_all(&xe->ufence_wq);
844 
845 	if (xe_exec_queue_is_lr(q))
846 		queue_work(guc_to_gt(guc)->ordered_wq, &q->guc->lr_tdr);
847 	else
848 		xe_sched_tdr_queue_imm(&q->guc->sched);
849 }
850 
851 /**
852  * xe_guc_submit_wedge() - Wedge GuC submission
853  * @guc: the GuC object
854  *
855  * Save the state of exec queues registered with the GuC by taking a ref to each queue.
856  * Register a DRMM handler to drop refs upon driver unload.
857  */
858 void xe_guc_submit_wedge(struct xe_guc *guc)
859 {
860 	struct xe_gt *gt = guc_to_gt(guc);
861 	struct xe_exec_queue *q;
862 	unsigned long index;
863 	int err;
864 
865 	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
866 
867 	/*
868 	 * If device is being wedged even before submission_state is
869 	 * initialized, there's nothing to do here.
870 	 */
871 	if (!guc->submission_state.initialized)
872 		return;
873 
874 	err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
875 				       guc_submit_wedged_fini, guc);
876 	if (err) {
877 		xe_gt_err(gt, "Failed to register clean-up on wedged.mode=2; "
878 			  "although device is wedged.\n");
879 		return;
880 	}
881 
882 	mutex_lock(&guc->submission_state.lock);
883 	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
884 		if (xe_exec_queue_get_unless_zero(q))
885 			set_exec_queue_wedged(q);
886 	mutex_unlock(&guc->submission_state.lock);
887 }
888 
889 static bool guc_submit_hint_wedged(struct xe_guc *guc)
890 {
891 	struct xe_device *xe = guc_to_xe(guc);
892 
893 	if (xe->wedged.mode != 2)
894 		return false;
895 
896 	if (xe_device_wedged(xe))
897 		return true;
898 
899 	xe_device_declare_wedged(xe);
900 
901 	return true;
902 }
903 
904 static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
905 {
906 	struct xe_guc_exec_queue *ge =
907 		container_of(w, struct xe_guc_exec_queue, lr_tdr);
908 	struct xe_exec_queue *q = ge->q;
909 	struct xe_guc *guc = exec_queue_to_guc(q);
910 	struct xe_gpu_scheduler *sched = &ge->sched;
911 	bool wedged;
912 
913 	xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
914 	trace_xe_exec_queue_lr_cleanup(q);
915 
916 	wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
917 
918 	/* Kill the run_job / process_msg entry points */
919 	xe_sched_submission_stop(sched);
920 
921 	/*
922 	 * Engine state now mostly stable, disable scheduling / deregister if
923 	 * needed. This cleanup routine might be called multiple times, where
924 	 * the actual async engine deregister drops the final engine ref.
925 	 * Calling disable_scheduling_deregister will mark the engine as
926 	 * destroyed and fire off the CT requests to disable scheduling /
927 	 * deregister, which we only want to do once. We also don't want to mark
928 	 * the engine as pending_disable again as this may race with the
929 	 * xe_guc_deregister_done_handler() which treats it as an unexpected
930 	 * state.
931 	 */
932 	if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
933 		struct xe_guc *guc = exec_queue_to_guc(q);
934 		int ret;
935 
936 		set_exec_queue_banned(q);
937 		disable_scheduling_deregister(guc, q);
938 
939 		/*
940 		 * Must wait for scheduling to be disabled before signalling
941 		 * any fences; if the GT is broken the GT reset code should signal us.
942 		 */
943 		ret = wait_event_timeout(guc->ct.wq,
944 					 !exec_queue_pending_disable(q) ||
945 					 xe_guc_read_stopped(guc), HZ * 5);
946 		if (!ret) {
947 			xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n",
948 				   q->guc->id);
949 			xe_devcoredump(q, NULL, "Schedule disable failed to respond, guc_id=%d\n",
950 				       q->guc->id);
951 			xe_sched_submission_start(sched);
952 			xe_gt_reset_async(q->gt);
953 			return;
954 		}
955 	}
956 
957 	if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
958 		xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
959 
960 	xe_sched_submission_start(sched);
961 }
962 
963 #define ADJUST_FIVE_PERCENT(__t)	mul_u64_u32_div(__t, 105, 100)
964 
965 static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
966 {
967 	struct xe_gt *gt = guc_to_gt(exec_queue_to_guc(q));
968 	u32 ctx_timestamp, ctx_job_timestamp;
969 	u32 timeout_ms = q->sched_props.job_timeout_ms;
970 	u32 diff;
971 	u64 running_time_ms;
972 
973 	if (!xe_sched_job_started(job)) {
974 		xe_gt_warn(gt, "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, not started",
975 			   xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
976 			   q->guc->id);
977 
978 		return xe_sched_invalidate_job(job, 2);
979 	}
980 
981 	ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(q->lrc[0]));
982 	ctx_job_timestamp = xe_lrc_ctx_job_timestamp(q->lrc[0]);
983 
984 	/*
985 	 * Counter wraps at ~223s at the usual 19.2MHz; be paranoid and catch
986 	 * possible overflows with a high timeout.
987 	 */
988 	xe_gt_assert(gt, timeout_ms < 100 * MSEC_PER_SEC);
989 
990 	diff = ctx_timestamp - ctx_job_timestamp;
991 
992 	/*
993 	 * Ensure timeout is within 5% to account for GuC scheduling latency
994 	 */
995 	running_time_ms =
996 		ADJUST_FIVE_PERCENT(xe_gt_clock_interval_to_ms(gt, diff));
997 
998 	xe_gt_dbg(gt,
999 		  "Check job timeout: seqno=%u, lrc_seqno=%u, guc_id=%d, running_time_ms=%llu, timeout_ms=%u, diff=0x%08x",
1000 		  xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
1001 		  q->guc->id, running_time_ms, timeout_ms, diff);
1002 
1003 	return running_time_ms >= timeout_ms;
1004 }
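
/*
 * Worked example of the check above, assuming the 19.2 MHz reference clock
 * mentioned in the wrap comment: a context/job timestamp delta of
 * diff = 96,000,000 ticks converts to 5000 ms, which ADJUST_FIVE_PERCENT()
 * pads to 5250 ms; with job_timeout_ms = 5000 the job is treated as timed
 * out. The assert above caps job_timeout_ms below 100 s, comfortably under
 * the ~223 s point (2^32 / 19.2e6) where the 32-bit timestamp wraps.
 */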
1005 
1006 static void enable_scheduling(struct xe_exec_queue *q)
1007 {
1008 	MAKE_SCHED_CONTEXT_ACTION(q, ENABLE);
1009 	struct xe_guc *guc = exec_queue_to_guc(q);
1010 	int ret;
1011 
1012 	xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
1013 	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1014 	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1015 	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
1016 
1017 	set_exec_queue_pending_enable(q);
1018 	set_exec_queue_enabled(q);
1019 	trace_xe_exec_queue_scheduling_enable(q);
1020 
1021 	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
1022 		       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
1023 
1024 	ret = wait_event_timeout(guc->ct.wq,
1025 				 !exec_queue_pending_enable(q) ||
1026 				 xe_guc_read_stopped(guc), HZ * 5);
1027 	if (!ret || xe_guc_read_stopped(guc)) {
1028 		xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
1029 		set_exec_queue_banned(q);
1030 		xe_gt_reset_async(q->gt);
1031 		xe_sched_tdr_queue_imm(&q->guc->sched);
1032 	}
1033 }
1034 
1035 static void disable_scheduling(struct xe_exec_queue *q, bool immediate)
1036 {
1037 	MAKE_SCHED_CONTEXT_ACTION(q, DISABLE);
1038 	struct xe_guc *guc = exec_queue_to_guc(q);
1039 
1040 	xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
1041 	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1042 	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1043 
1044 	if (immediate)
1045 		set_min_preemption_timeout(guc, q);
1046 	clear_exec_queue_enabled(q);
1047 	set_exec_queue_pending_disable(q);
1048 	trace_xe_exec_queue_scheduling_disable(q);
1049 
1050 	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
1051 		       G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, 1);
1052 }
1053 
1054 static void __deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
1055 {
1056 	u32 action[] = {
1057 		XE_GUC_ACTION_DEREGISTER_CONTEXT,
1058 		q->guc->id,
1059 	};
1060 
1061 	xe_gt_assert(guc_to_gt(guc), !exec_queue_destroyed(q));
1062 	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1063 	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
1064 	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1065 
1066 	set_exec_queue_destroyed(q);
1067 	trace_xe_exec_queue_deregister(q);
1068 
1069 	xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action),
1070 		       G2H_LEN_DW_DEREGISTER_CONTEXT, 1);
1071 }
1072 
1073 static enum drm_gpu_sched_stat
1074 guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
1075 {
1076 	struct xe_sched_job *job = to_xe_sched_job(drm_job);
1077 	struct xe_sched_job *tmp_job;
1078 	struct xe_exec_queue *q = job->q;
1079 	struct xe_gpu_scheduler *sched = &q->guc->sched;
1080 	struct xe_guc *guc = exec_queue_to_guc(q);
1081 	const char *process_name = "no process";
1082 	struct xe_device *xe = guc_to_xe(guc);
1083 	unsigned int fw_ref;
1084 	int err = -ETIME;
1085 	pid_t pid = -1;
1086 	int i = 0;
1087 	bool wedged, skip_timeout_check;
1088 
1089 	/*
1090 	 * The TDR has fired before the free job worker. This is common if the exec
1091 	 * queue is closed immediately after the last fence signals. Add the job back
1092 	 * to the pending list so it can be freed and kick the scheduler to ensure the
1093 	 * free job is not lost.
1094 	 */
1095 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags))
1096 		return DRM_GPU_SCHED_STAT_NO_HANG;
1097 
1098 	/* Kill the run_job entry point */
1099 	xe_sched_submission_stop(sched);
1100 
1101 	/* Must check all state after stopping scheduler */
1102 	skip_timeout_check = exec_queue_reset(q) ||
1103 		exec_queue_killed_or_banned_or_wedged(q) ||
1104 		exec_queue_destroyed(q);
1105 
1106 	/*
1107 	 * If a devcoredump has not been captured and GuC capture for the job is not
1108 	 * ready, do a manual capture first and decide later if we need to use it
1109 	 */
1110 	if (!exec_queue_killed(q) && !xe->devcoredump.captured &&
1111 	    !xe_guc_capture_get_matching_and_lock(q)) {
1112 		/* take force wake before engine register manual capture */
1113 		fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
1114 		if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
1115 			xe_gt_info(q->gt, "failed to get forcewake for coredump capture\n");
1116 
1117 		xe_engine_snapshot_capture_for_queue(q);
1118 
1119 		xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
1120 	}
1121 
1122 	/*
1123 	 * XXX: Sampling timeout doesn't work in wedged mode as we have to
1124 	 * modify scheduling state to read timestamp. We could read the
1125 	 * timestamp from a register to accumulate current running time but this
1126 	 * doesn't work for SRIOV. For now assuming timeouts in wedged mode are
1127 	 * genuine timeouts.
1128 	 */
1129 	wedged = guc_submit_hint_wedged(exec_queue_to_guc(q));
1130 
1131 	/* Engine state now stable, disable scheduling to check timestamp */
1132 	if (!wedged && exec_queue_registered(q)) {
1133 		int ret;
1134 
1135 		if (exec_queue_reset(q))
1136 			err = -EIO;
1137 
1138 		if (!exec_queue_destroyed(q)) {
1139 			/*
1140 			 * Wait for any pending G2H to flush out before
1141 			 * modifying state
1142 			 */
1143 			ret = wait_event_timeout(guc->ct.wq,
1144 						 (!exec_queue_pending_enable(q) &&
1145 						  !exec_queue_pending_disable(q)) ||
1146 						 xe_guc_read_stopped(guc), HZ * 5);
1147 			if (!ret || xe_guc_read_stopped(guc))
1148 				goto trigger_reset;
1149 
1150 			/*
1151 			 * Flag communicates to G2H handler that schedule
1152 			 * disable originated from a timeout check. The G2H handler then
1153 			 * avoids triggering cleanup or deregistering the exec
1154 			 * queue.
1155 			 */
1156 			set_exec_queue_check_timeout(q);
1157 			disable_scheduling(q, skip_timeout_check);
1158 		}
1159 
1160 		/*
1161 		 * Must wait for scheduling to be disabled before signalling
1162 		 * any fences; if the GT is broken the GT reset code should signal us.
1163 		 *
1164 		 * FIXME: Tests can generate a ton of 0x6000 (IOMMU CAT fault
1165 		 * error) messages which can cause the schedule disable to get
1166 		 * lost. If this occurs, trigger a GT reset to recover.
1167 		 */
1168 		smp_rmb();
1169 		ret = wait_event_timeout(guc->ct.wq,
1170 					 !exec_queue_pending_disable(q) ||
1171 					 xe_guc_read_stopped(guc), HZ * 5);
1172 		if (!ret || xe_guc_read_stopped(guc)) {
1173 trigger_reset:
1174 			if (!ret)
1175 				xe_gt_warn(guc_to_gt(guc),
1176 					   "Schedule disable failed to respond, guc_id=%d",
1177 					   q->guc->id);
1178 			xe_devcoredump(q, job,
1179 				       "Schedule disable failed to respond, guc_id=%d, ret=%d, guc_read=%d",
1180 				       q->guc->id, ret, xe_guc_read_stopped(guc));
1181 			set_exec_queue_extra_ref(q);
1182 			xe_exec_queue_get(q);	/* GT reset owns this */
1183 			set_exec_queue_banned(q);
1184 			xe_gt_reset_async(q->gt);
1185 			xe_sched_tdr_queue_imm(sched);
1186 			goto rearm;
1187 		}
1188 	}
1189 
1190 	/*
1191 	 * Check if job is actually timed out, if so restart job execution and TDR
1192 	 */
1193 	if (!wedged && !skip_timeout_check && !check_timeout(q, job) &&
1194 	    !exec_queue_reset(q) && exec_queue_registered(q)) {
1195 		clear_exec_queue_check_timeout(q);
1196 		goto sched_enable;
1197 	}
1198 
1199 	if (q->vm && q->vm->xef) {
1200 		process_name = q->vm->xef->process_name;
1201 		pid = q->vm->xef->pid;
1202 	}
1203 
1204 	if (!exec_queue_killed(q))
1205 		xe_gt_notice(guc_to_gt(guc),
1206 			     "Timedout job: seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx in %s [%d]",
1207 			     xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
1208 			     q->guc->id, q->flags, process_name, pid);
1209 
1210 	trace_xe_sched_job_timedout(job);
1211 
1212 	if (!exec_queue_killed(q))
1213 		xe_devcoredump(q, job,
1214 			       "Timedout job - seqno=%u, lrc_seqno=%u, guc_id=%d, flags=0x%lx",
1215 			       xe_sched_job_seqno(job), xe_sched_job_lrc_seqno(job),
1216 			       q->guc->id, q->flags);
1217 
1218 	/*
1219 	 * Kernel jobs should never fail, nor should VM jobs; if they do,
1220 	 * something has gone wrong and the GT needs a reset
1221 	 */
1222 	xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_KERNEL,
1223 		   "Kernel-submitted job timed out\n");
1224 	xe_gt_WARN(q->gt, q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q),
1225 		   "VM job timed out on non-killed execqueue\n");
1226 	if (!wedged && (q->flags & EXEC_QUEUE_FLAG_KERNEL ||
1227 			(q->flags & EXEC_QUEUE_FLAG_VM && !exec_queue_killed(q)))) {
1228 		if (!xe_sched_invalidate_job(job, 2)) {
1229 			clear_exec_queue_check_timeout(q);
1230 			xe_gt_reset_async(q->gt);
1231 			goto rearm;
1232 		}
1233 	}
1234 
1235 	/* Finish cleaning up exec queue via deregister */
1236 	set_exec_queue_banned(q);
1237 	if (!wedged && exec_queue_registered(q) && !exec_queue_destroyed(q)) {
1238 		set_exec_queue_extra_ref(q);
1239 		xe_exec_queue_get(q);
1240 		__deregister_exec_queue(guc, q);
1241 	}
1242 
1243 	/* Stop fence signaling */
1244 	xe_hw_fence_irq_stop(q->fence_irq);
1245 
1246 	/*
1247 	 * Fence state now stable, stop / start scheduler which cleans up any
1248 	 * fences that are complete
1249 	 */
1250 	xe_sched_add_pending_job(sched, job);
1251 	xe_sched_submission_start(sched);
1252 
1253 	xe_guc_exec_queue_trigger_cleanup(q);
1254 
1255 	/* Mark all outstanding jobs as bad, thus completing them */
1256 	spin_lock(&sched->base.job_list_lock);
1257 	list_for_each_entry(tmp_job, &sched->base.pending_list, drm.list)
1258 		xe_sched_job_set_error(tmp_job, !i++ ? err : -ECANCELED);
1259 	spin_unlock(&sched->base.job_list_lock);
1260 
1261 	/* Start fence signaling */
1262 	xe_hw_fence_irq_start(q->fence_irq);
1263 
1264 	return DRM_GPU_SCHED_STAT_RESET;
1265 
1266 sched_enable:
1267 	enable_scheduling(q);
1268 rearm:
1269 	/*
1270 	 * XXX: Ideally want to adjust timeout based on current execution time
1271 	 * but there is not currently an easy way to do in DRM scheduler. With
1272 	 * some thought, do this in a follow up.
1273 	 */
1274 	xe_sched_submission_start(sched);
1275 	return DRM_GPU_SCHED_STAT_NO_HANG;
1276 }
1277 
1278 static void __guc_exec_queue_fini_async(struct work_struct *w)
1279 {
1280 	struct xe_guc_exec_queue *ge =
1281 		container_of(w, struct xe_guc_exec_queue, fini_async);
1282 	struct xe_exec_queue *q = ge->q;
1283 	struct xe_guc *guc = exec_queue_to_guc(q);
1284 
1285 	xe_pm_runtime_get(guc_to_xe(guc));
1286 	trace_xe_exec_queue_destroy(q);
1287 
1288 	release_guc_id(guc, q);
1289 	if (xe_exec_queue_is_lr(q))
1290 		cancel_work_sync(&ge->lr_tdr);
1291 	/* Confirm no work left behind accessing device structures */
1292 	cancel_delayed_work_sync(&ge->sched.base.work_tdr);
1293 	xe_sched_entity_fini(&ge->entity);
1294 	xe_sched_fini(&ge->sched);
1295 
1296 	/*
1297 	 * RCU free due to sched being exported via DRM scheduler fences
1298 	 * (timeline name).
1299 	 */
1300 	kfree_rcu(ge, rcu);
1301 	xe_exec_queue_fini(q);
1302 	xe_pm_runtime_put(guc_to_xe(guc));
1303 }
1304 
1305 static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
1306 {
1307 	struct xe_guc *guc = exec_queue_to_guc(q);
1308 	struct xe_device *xe = guc_to_xe(guc);
1309 
1310 	INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
1311 
1312 	/* We must block on kernel engines so slabs are empty on driver unload */
1313 	if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
1314 		__guc_exec_queue_fini_async(&q->guc->fini_async);
1315 	else
1316 		queue_work(xe->destroy_wq, &q->guc->fini_async);
1317 }
1318 
1319 static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
1320 {
1321 	/*
1322 	 * Might be done from within the GPU scheduler; need to do this async as we
1323 	 * fini the scheduler when the engine is fini'd and the scheduler can't
1324 	 * complete fini within itself (circular dependency). Async resolves this
1325 	 * and we don't really care when everything is fini'd, just that it
1326 	 * is.
1327 	 */
1328 	guc_exec_queue_fini_async(q);
1329 }
1330 
1331 static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
1332 {
1333 	struct xe_exec_queue *q = msg->private_data;
1334 	struct xe_guc *guc = exec_queue_to_guc(q);
1335 
1336 	xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
1337 	trace_xe_exec_queue_cleanup_entity(q);
1338 
1339 	if (exec_queue_registered(q))
1340 		disable_scheduling_deregister(guc, q);
1341 	else
1342 		__guc_exec_queue_fini(guc, q);
1343 }
1344 
1345 static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
1346 {
1347 	return !exec_queue_killed_or_banned_or_wedged(q) && exec_queue_registered(q);
1348 }
1349 
1350 static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *msg)
1351 {
1352 	struct xe_exec_queue *q = msg->private_data;
1353 	struct xe_guc *guc = exec_queue_to_guc(q);
1354 
1355 	if (guc_exec_queue_allowed_to_change_state(q))
1356 		init_policies(guc, q);
1357 	kfree(msg);
1358 }
1359 
1360 static void __suspend_fence_signal(struct xe_exec_queue *q)
1361 {
1362 	if (!q->guc->suspend_pending)
1363 		return;
1364 
1365 	WRITE_ONCE(q->guc->suspend_pending, false);
1366 	wake_up(&q->guc->suspend_wait);
1367 }
1368 
1369 static void suspend_fence_signal(struct xe_exec_queue *q)
1370 {
1371 	struct xe_guc *guc = exec_queue_to_guc(q);
1372 
1373 	xe_gt_assert(guc_to_gt(guc), exec_queue_suspended(q) || exec_queue_killed(q) ||
1374 		     xe_guc_read_stopped(guc));
1375 	xe_gt_assert(guc_to_gt(guc), q->guc->suspend_pending);
1376 
1377 	__suspend_fence_signal(q);
1378 }
1379 
1380 static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
1381 {
1382 	struct xe_exec_queue *q = msg->private_data;
1383 	struct xe_guc *guc = exec_queue_to_guc(q);
1384 
1385 	if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
1386 	    exec_queue_enabled(q)) {
1387 		wait_event(guc->ct.wq, (q->guc->resume_time != RESUME_PENDING ||
1388 			   xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q));
1389 
1390 		if (!xe_guc_read_stopped(guc)) {
1391 			s64 since_resume_ms =
1392 				ktime_ms_delta(ktime_get(),
1393 					       q->guc->resume_time);
1394 			s64 wait_ms = q->vm->preempt.min_run_period_ms -
1395 				since_resume_ms;
1396 
1397 			if (wait_ms > 0 && q->guc->resume_time)
1398 				msleep(wait_ms);
1399 
1400 			set_exec_queue_suspended(q);
1401 			disable_scheduling(q, false);
1402 		}
1403 	} else if (q->guc->suspend_pending) {
1404 		set_exec_queue_suspended(q);
1405 		suspend_fence_signal(q);
1406 	}
1407 }
1408 
1409 static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
1410 {
1411 	struct xe_exec_queue *q = msg->private_data;
1412 
1413 	if (guc_exec_queue_allowed_to_change_state(q)) {
1414 		clear_exec_queue_suspended(q);
1415 		if (!exec_queue_enabled(q)) {
1416 			q->guc->resume_time = RESUME_PENDING;
1417 			enable_scheduling(q);
1418 		}
1419 	} else {
1420 		clear_exec_queue_suspended(q);
1421 	}
1422 }
1423 
1424 #define CLEANUP		1	/* Non-zero values to catch uninitialized msg */
1425 #define SET_SCHED_PROPS	2
1426 #define SUSPEND		3
1427 #define RESUME		4
1428 #define OPCODE_MASK	0xf
1429 #define MSG_LOCKED	BIT(8)
1430 
1431 static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
1432 {
1433 	struct xe_device *xe = guc_to_xe(exec_queue_to_guc(msg->private_data));
1434 
1435 	trace_xe_sched_msg_recv(msg);
1436 
1437 	switch (msg->opcode) {
1438 	case CLEANUP:
1439 		__guc_exec_queue_process_msg_cleanup(msg);
1440 		break;
1441 	case SET_SCHED_PROPS:
1442 		__guc_exec_queue_process_msg_set_sched_props(msg);
1443 		break;
1444 	case SUSPEND:
1445 		__guc_exec_queue_process_msg_suspend(msg);
1446 		break;
1447 	case RESUME:
1448 		__guc_exec_queue_process_msg_resume(msg);
1449 		break;
1450 	default:
1451 		XE_WARN_ON("Unknown message type");
1452 	}
1453 
1454 	xe_pm_runtime_put(xe);
1455 }
1456 
1457 static const struct drm_sched_backend_ops drm_sched_ops = {
1458 	.run_job = guc_exec_queue_run_job,
1459 	.free_job = guc_exec_queue_free_job,
1460 	.timedout_job = guc_exec_queue_timedout_job,
1461 };
1462 
1463 static const struct xe_sched_backend_ops xe_sched_ops = {
1464 	.process_msg = guc_exec_queue_process_msg,
1465 };
1466 
1467 static int guc_exec_queue_init(struct xe_exec_queue *q)
1468 {
1469 	struct xe_gpu_scheduler *sched;
1470 	struct xe_guc *guc = exec_queue_to_guc(q);
1471 	struct xe_guc_exec_queue *ge;
1472 	long timeout;
1473 	int err, i;
1474 
1475 	xe_gt_assert(guc_to_gt(guc), xe_device_uc_enabled(guc_to_xe(guc)));
1476 
1477 	ge = kzalloc(sizeof(*ge), GFP_KERNEL);
1478 	if (!ge)
1479 		return -ENOMEM;
1480 
1481 	q->guc = ge;
1482 	ge->q = q;
1483 	init_rcu_head(&ge->rcu);
1484 	init_waitqueue_head(&ge->suspend_wait);
1485 
1486 	for (i = 0; i < MAX_STATIC_MSG_TYPE; ++i)
1487 		INIT_LIST_HEAD(&ge->static_msgs[i].link);
1488 
1489 	timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
1490 		  msecs_to_jiffies(q->sched_props.job_timeout_ms);
1491 	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
1492 			    NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
1493 			    timeout, guc_to_gt(guc)->ordered_wq, NULL,
1494 			    q->name, gt_to_xe(q->gt)->drm.dev);
1495 	if (err)
1496 		goto err_free;
1497 
1498 	sched = &ge->sched;
1499 	err = xe_sched_entity_init(&ge->entity, sched);
1500 	if (err)
1501 		goto err_sched;
1502 
1503 	if (xe_exec_queue_is_lr(q))
1504 		INIT_WORK(&q->guc->lr_tdr, xe_guc_exec_queue_lr_cleanup);
1505 
1506 	mutex_lock(&guc->submission_state.lock);
1507 
1508 	err = alloc_guc_id(guc, q);
1509 	if (err)
1510 		goto err_entity;
1511 
1512 	q->entity = &ge->entity;
1513 
1514 	if (xe_guc_read_stopped(guc))
1515 		xe_sched_stop(sched);
1516 
1517 	mutex_unlock(&guc->submission_state.lock);
1518 
1519 	xe_exec_queue_assign_name(q, q->guc->id);
1520 
1521 	trace_xe_exec_queue_create(q);
1522 
1523 	return 0;
1524 
1525 err_entity:
1526 	mutex_unlock(&guc->submission_state.lock);
1527 	xe_sched_entity_fini(&ge->entity);
1528 err_sched:
1529 	xe_sched_fini(&ge->sched);
1530 err_free:
1531 	kfree(ge);
1532 
1533 	return err;
1534 }
1535 
1536 static void guc_exec_queue_kill(struct xe_exec_queue *q)
1537 {
1538 	trace_xe_exec_queue_kill(q);
1539 	set_exec_queue_killed(q);
1540 	__suspend_fence_signal(q);
1541 	xe_guc_exec_queue_trigger_cleanup(q);
1542 }
1543 
1544 static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
1545 				   u32 opcode)
1546 {
1547 	xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));
1548 
1549 	INIT_LIST_HEAD(&msg->link);
1550 	msg->opcode = opcode & OPCODE_MASK;
1551 	msg->private_data = q;
1552 
1553 	trace_xe_sched_msg_add(msg);
1554 	if (opcode & MSG_LOCKED)
1555 		xe_sched_add_msg_locked(&q->guc->sched, msg);
1556 	else
1557 		xe_sched_add_msg(&q->guc->sched, msg);
1558 }
1559 
1560 static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
1561 				       struct xe_sched_msg *msg,
1562 				       u32 opcode)
1563 {
1564 	if (!list_empty(&msg->link))
1565 		return false;
1566 
1567 	guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED);
1568 
1569 	return true;
1570 }
1571 
1572 #define STATIC_MSG_CLEANUP	0
1573 #define STATIC_MSG_SUSPEND	1
1574 #define STATIC_MSG_RESUME	2
1575 static void guc_exec_queue_fini(struct xe_exec_queue *q)
1576 {
1577 	struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
1578 
1579 	if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
1580 		guc_exec_queue_add_msg(q, msg, CLEANUP);
1581 	else
1582 		__guc_exec_queue_fini(exec_queue_to_guc(q), q);
1583 }
1584 
1585 static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
1586 				       enum xe_exec_queue_priority priority)
1587 {
1588 	struct xe_sched_msg *msg;
1589 
1590 	if (q->sched_props.priority == priority ||
1591 	    exec_queue_killed_or_banned_or_wedged(q))
1592 		return 0;
1593 
1594 	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1595 	if (!msg)
1596 		return -ENOMEM;
1597 
1598 	q->sched_props.priority = priority;
1599 	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1600 
1601 	return 0;
1602 }
1603 
1604 static int guc_exec_queue_set_timeslice(struct xe_exec_queue *q, u32 timeslice_us)
1605 {
1606 	struct xe_sched_msg *msg;
1607 
1608 	if (q->sched_props.timeslice_us == timeslice_us ||
1609 	    exec_queue_killed_or_banned_or_wedged(q))
1610 		return 0;
1611 
1612 	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1613 	if (!msg)
1614 		return -ENOMEM;
1615 
1616 	q->sched_props.timeslice_us = timeslice_us;
1617 	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1618 
1619 	return 0;
1620 }
1621 
1622 static int guc_exec_queue_set_preempt_timeout(struct xe_exec_queue *q,
1623 					      u32 preempt_timeout_us)
1624 {
1625 	struct xe_sched_msg *msg;
1626 
1627 	if (q->sched_props.preempt_timeout_us == preempt_timeout_us ||
1628 	    exec_queue_killed_or_banned_or_wedged(q))
1629 		return 0;
1630 
1631 	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
1632 	if (!msg)
1633 		return -ENOMEM;
1634 
1635 	q->sched_props.preempt_timeout_us = preempt_timeout_us;
1636 	guc_exec_queue_add_msg(q, msg, SET_SCHED_PROPS);
1637 
1638 	return 0;
1639 }
1640 
1641 static int guc_exec_queue_suspend(struct xe_exec_queue *q)
1642 {
1643 	struct xe_gpu_scheduler *sched = &q->guc->sched;
1644 	struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
1645 
1646 	if (exec_queue_killed_or_banned_or_wedged(q))
1647 		return -EINVAL;
1648 
1649 	xe_sched_msg_lock(sched);
1650 	if (guc_exec_queue_try_add_msg(q, msg, SUSPEND))
1651 		q->guc->suspend_pending = true;
1652 	xe_sched_msg_unlock(sched);
1653 
1654 	return 0;
1655 }
1656 
1657 static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
1658 {
1659 	struct xe_guc *guc = exec_queue_to_guc(q);
1660 	int ret;
1661 
1662 	/*
1663 	 * Likely don't need to check exec_queue_killed() as we clear
1664 	 * suspend_pending upon kill, but to be paranoid about races in which
1665 	 * suspend_pending is set after kill, also check kill here.
1666 	 */
1667 	ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
1668 					       !READ_ONCE(q->guc->suspend_pending) ||
1669 					       exec_queue_killed(q) ||
1670 					       xe_guc_read_stopped(guc),
1671 					       HZ * 5);
1672 
1673 	if (!ret) {
1674 		xe_gt_warn(guc_to_gt(guc),
1675 			   "Suspend fence, guc_id=%d, failed to respond",
1676 			   q->guc->id);
1677 		/* XXX: Trigger GT reset? */
1678 		return -ETIME;
1679 	}
1680 
1681 	return ret < 0 ? ret : 0;
1682 }
1683 
1684 static void guc_exec_queue_resume(struct xe_exec_queue *q)
1685 {
1686 	struct xe_gpu_scheduler *sched = &q->guc->sched;
1687 	struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_RESUME;
1688 	struct xe_guc *guc = exec_queue_to_guc(q);
1689 
1690 	xe_gt_assert(guc_to_gt(guc), !q->guc->suspend_pending);
1691 
1692 	xe_sched_msg_lock(sched);
1693 	guc_exec_queue_try_add_msg(q, msg, RESUME);
1694 	xe_sched_msg_unlock(sched);
1695 }
1696 
1697 static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
1698 {
1699 	return exec_queue_reset(q) || exec_queue_killed_or_banned_or_wedged(q);
1700 }
1701 
1702 /*
1703  * All of these functions are an abstraction layer which other parts of XE can
1704  * use to trap into the GuC backend. All of these functions, aside from init,
1705  * really shouldn't do much other than trap into the DRM scheduler which
1706  * synchronizes these operations.
1707  */
1708 static const struct xe_exec_queue_ops guc_exec_queue_ops = {
1709 	.init = guc_exec_queue_init,
1710 	.kill = guc_exec_queue_kill,
1711 	.fini = guc_exec_queue_fini,
1712 	.set_priority = guc_exec_queue_set_priority,
1713 	.set_timeslice = guc_exec_queue_set_timeslice,
1714 	.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
1715 	.suspend = guc_exec_queue_suspend,
1716 	.suspend_wait = guc_exec_queue_suspend_wait,
1717 	.resume = guc_exec_queue_resume,
1718 	.reset_status = guc_exec_queue_reset_status,
1719 };
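
/*
 * Illustrative only, an assumption about the callers rather than code from
 * this file: the rest of Xe reaches this backend through the ops table that
 * xe_guc_submit_init() installs as gt->exec_queue_ops, e.g.:
 *
 *	struct xe_exec_queue *q = ...;
 *
 *	q->gt->exec_queue_ops->set_priority(q, XE_EXEC_QUEUE_PRIORITY_HIGH);
 *	q->gt->exec_queue_ops->suspend(q);
 *	q->gt->exec_queue_ops->suspend_wait(q);
 */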
1720 
1721 static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
1722 {
1723 	struct xe_gpu_scheduler *sched = &q->guc->sched;
1724 
1725 	/* Stop scheduling + flush any DRM scheduler operations */
1726 	xe_sched_submission_stop(sched);
1727 
1728 	/* Clean up lost G2H + reset engine state */
1729 	if (exec_queue_registered(q)) {
1730 		if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
1731 			xe_exec_queue_put(q);
1732 		else if (exec_queue_destroyed(q))
1733 			__guc_exec_queue_fini(guc, q);
1734 	}
1735 	if (q->guc->suspend_pending) {
1736 		set_exec_queue_suspended(q);
1737 		suspend_fence_signal(q);
1738 	}
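	/*
	 * Keep only the sticky wedged/banned/killed/destroyed/suspended bits;
	 * all transient scheduling state is cleared here.
	 */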
1739 	atomic_and(EXEC_QUEUE_STATE_WEDGED | EXEC_QUEUE_STATE_BANNED |
1740 		   EXEC_QUEUE_STATE_KILLED | EXEC_QUEUE_STATE_DESTROYED |
1741 		   EXEC_QUEUE_STATE_SUSPENDED,
1742 		   &q->guc->state);
1743 	q->guc->resume_time = 0;
1744 	trace_xe_exec_queue_stop(q);
1745 
1746 	/*
1747 	 * Ban any engine (aside from kernel and engines used for VM ops) with a
1748 	 * started but not completed job, or whose jobs have gone through a GT
1749 	 * reset more than twice.
1750 	 */
1751 	if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
1752 		struct xe_sched_job *job = xe_sched_first_pending_job(sched);
1753 		bool ban = false;
1754 
1755 		if (job) {
1756 			if ((xe_sched_job_started(job) &&
1757 			    !xe_sched_job_completed(job)) ||
1758 			    xe_sched_invalidate_job(job, 2)) {
1759 				trace_xe_sched_job_ban(job);
1760 				ban = true;
1761 			}
1762 		} else if (xe_exec_queue_is_lr(q) &&
1763 			   !xe_lrc_ring_is_idle(q->lrc[0])) {
1764 			ban = true;
1765 		}
1766 
1767 		if (ban) {
1768 			set_exec_queue_banned(q);
1769 			xe_guc_exec_queue_trigger_cleanup(q);
1770 		}
1771 	}
1772 }
1773 
1774 int xe_guc_submit_reset_prepare(struct xe_guc *guc)
1775 {
1776 	int ret;
1777 
1778 	if (!guc->submission_state.initialized)
1779 		return 0;
1780 
1781 	/*
1782 	 * Using an atomic here rather than submission_state.lock as this
1783 	 * function can be called while holding the CT lock (engine reset
1784 	 * failure). submission_state.lock needs the CT lock to resubmit jobs.
1785 	 * An atomic is not ideal, but it works to guard against concurrent resets
1786 	 * and to release any TDRs waiting on guc->submission_state.stopped.
1787 	 */
1788 	ret = atomic_fetch_or(1, &guc->submission_state.stopped);
1789 	smp_wmb();
1790 	wake_up_all(&guc->ct.wq);
1791 
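	/*
	 * ret holds the previous stopped value: non-zero means a stop was
	 * already in flight when this was called.
	 */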
1792 	return ret;
1793 }
1794 
1795 void xe_guc_submit_reset_wait(struct xe_guc *guc)
1796 {
1797 	wait_event(guc->ct.wq, xe_device_wedged(guc_to_xe(guc)) ||
1798 		   !xe_guc_read_stopped(guc));
1799 }
1800 
1801 void xe_guc_submit_stop(struct xe_guc *guc)
1802 {
1803 	struct xe_exec_queue *q;
1804 	unsigned long index;
1805 
1806 	xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1);
1807 
1808 	mutex_lock(&guc->submission_state.lock);
1809 
1810 	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
1811 		/* Prevent redundant attempts to stop parallel queues */
1812 		if (q->guc->id != index)
1813 			continue;
1814 
1815 		guc_exec_queue_stop(guc, q);
1816 	}
1817 
1818 	mutex_unlock(&guc->submission_state.lock);
1819 
1820 	/*
1821 	 * No one can enter the backend at this point, aside from new engine
1822 	 * creation which is protected by guc->submission_state.lock.
1823 	 */
1824 
1825 }
1826 
1827 static void guc_exec_queue_start(struct xe_exec_queue *q)
1828 {
1829 	struct xe_gpu_scheduler *sched = &q->guc->sched;
1830 
1831 	if (!exec_queue_killed_or_banned_or_wedged(q)) {
1832 		int i;
1833 
1834 		trace_xe_exec_queue_resubmit(q);
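		/*
		 * Make each ring appear empty (head == tail) so the
		 * resubmitted jobs are the only commands left pending.
		 */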
1835 		for (i = 0; i < q->width; ++i)
1836 			xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail);
1837 		xe_sched_resubmit_jobs(sched);
1838 	}
1839 
1840 	xe_sched_submission_start(sched);
1841 	xe_sched_submission_resume_tdr(sched);
1842 }
1843 
1844 int xe_guc_submit_start(struct xe_guc *guc)
1845 {
1846 	struct xe_exec_queue *q;
1847 	unsigned long index;
1848 
1849 	xe_gt_assert(guc_to_gt(guc), xe_guc_read_stopped(guc) == 1);
1850 
1851 	mutex_lock(&guc->submission_state.lock);
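	/* Undo the stop flagged in xe_guc_submit_reset_prepare() */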
1852 	atomic_dec(&guc->submission_state.stopped);
1853 	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
1854 		/* Prevent redundant attempts to start parallel queues */
1855 		if (q->guc->id != index)
1856 			continue;
1857 
1858 		guc_exec_queue_start(q);
1859 	}
1860 	mutex_unlock(&guc->submission_state.lock);
1861 
1862 	wake_up_all(&guc->ct.wq);
1863 
1864 	return 0;
1865 }
1866 
1867 static struct xe_exec_queue *
1868 g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
1869 {
1870 	struct xe_gt *gt = guc_to_gt(guc);
1871 	struct xe_exec_queue *q;
1872 
1873 	if (unlikely(guc_id >= GUC_ID_MAX)) {
1874 		xe_gt_err(gt, "Invalid guc_id %u\n", guc_id);
1875 		return NULL;
1876 	}
1877 
1878 	q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
1879 	if (unlikely(!q)) {
1880 		xe_gt_err(gt, "No engine present for guc_id %u\n", guc_id);
1881 		return NULL;
1882 	}
1883 
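	/*
	 * Parallel queues own a contiguous range of guc_ids, so any id within
	 * [q->guc->id, q->guc->id + q->width) maps back to q.
	 */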
1884 	xe_gt_assert(guc_to_gt(guc), guc_id >= q->guc->id);
1885 	xe_gt_assert(guc_to_gt(guc), guc_id < (q->guc->id + q->width));
1886 
1887 	return q;
1888 }
1889 
1890 static void deregister_exec_queue(struct xe_guc *guc, struct xe_exec_queue *q)
1891 {
1892 	u32 action[] = {
1893 		XE_GUC_ACTION_DEREGISTER_CONTEXT,
1894 		q->guc->id,
1895 	};
1896 
1897 	xe_gt_assert(guc_to_gt(guc), exec_queue_destroyed(q));
1898 	xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
1899 	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_disable(q));
1900 	xe_gt_assert(guc_to_gt(guc), !exec_queue_pending_enable(q));
1901 
1902 	trace_xe_exec_queue_deregister(q);
1903 
1904 	xe_guc_ct_send_g2h_handler(&guc->ct, action, ARRAY_SIZE(action));
1905 }
1906 
1907 static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
1908 			      u32 runnable_state)
1909 {
1910 	trace_xe_exec_queue_scheduling_done(q);
1911 
1912 	if (runnable_state == 1) {
1913 		xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q));
1914 
1915 		q->guc->resume_time = ktime_get();
1916 		clear_exec_queue_pending_enable(q);
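		/* Make the cleared pending_enable visible before waking waiters on the CT wq */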
1917 		smp_wmb();
1918 		wake_up_all(&guc->ct.wq);
1919 	} else {
1920 		bool check_timeout = exec_queue_check_timeout(q);
1921 
1922 		xe_gt_assert(guc_to_gt(guc), runnable_state == 0);
1923 		xe_gt_assert(guc_to_gt(guc), exec_queue_pending_disable(q));
1924 
1925 		if (q->guc->suspend_pending) {
1926 			suspend_fence_signal(q);
1927 			clear_exec_queue_pending_disable(q);
1928 		} else {
1929 			if (exec_queue_banned(q) || check_timeout) {
1930 				smp_wmb();
1931 				wake_up_all(&guc->ct.wq);
1932 			}
1933 			if (!check_timeout && exec_queue_destroyed(q)) {
1934 				/*
1935 				 * Make sure to clear the pending_disable only
1936 				 * after sampling the destroyed state. We want
1937 				 * to ensure we don't trigger the unregister too
1938 				 * early with something intending to only
1939 				 * disable scheduling. The caller doing the
1940 				 * destroy must wait for an ongoing
1941 				 * pending_disable before marking as destroyed.
1942 				 */
1943 				clear_exec_queue_pending_disable(q);
1944 				deregister_exec_queue(guc, q);
1945 			} else {
1946 				clear_exec_queue_pending_disable(q);
1947 			}
1948 		}
1949 	}
1950 }
1951 
1952 int xe_guc_sched_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
1953 {
1954 	struct xe_exec_queue *q;
1955 	u32 guc_id, runnable_state;
1956 
1957 	if (unlikely(len < 2))
1958 		return -EPROTO;
1959 
1960 	guc_id = msg[0];
1961 	runnable_state = msg[1];
1962 
1963 	q = g2h_exec_queue_lookup(guc, guc_id);
1964 	if (unlikely(!q))
1965 		return -EPROTO;
1966 
1967 	if (unlikely(!exec_queue_pending_enable(q) &&
1968 		     !exec_queue_pending_disable(q))) {
1969 		xe_gt_err(guc_to_gt(guc),
1970 			  "SCHED_DONE: Unexpected engine state 0x%04x, guc_id=%d, runnable_state=%u",
1971 			  atomic_read(&q->guc->state), q->guc->id,
1972 			  runnable_state);
1973 		return -EPROTO;
1974 	}
1975 
1976 	handle_sched_done(guc, q, runnable_state);
1977 
1978 	return 0;
1979 }
1980 
1981 static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
1982 {
1983 	trace_xe_exec_queue_deregister_done(q);
1984 
1985 	clear_exec_queue_registered(q);
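	/*
	 * LR and extra-ref queues took a reference that is dropped now that
	 * the GuC no longer knows about the context; otherwise finish the
	 * fini that was deferred until the GuC acknowledged the deregister.
	 */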
1986 
1987 	if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
1988 		xe_exec_queue_put(q);
1989 	else
1990 		__guc_exec_queue_fini(guc, q);
1991 }
1992 
1993 int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
1994 {
1995 	struct xe_exec_queue *q;
1996 	u32 guc_id;
1997 
1998 	if (unlikely(len < 1))
1999 		return -EPROTO;
2000 
2001 	guc_id = msg[0];
2002 
2003 	q = g2h_exec_queue_lookup(guc, guc_id);
2004 	if (unlikely(!q))
2005 		return -EPROTO;
2006 
2007 	if (!exec_queue_destroyed(q) || exec_queue_pending_disable(q) ||
2008 	    exec_queue_pending_enable(q) || exec_queue_enabled(q)) {
2009 		xe_gt_err(guc_to_gt(guc),
2010 			  "DEREGISTER_DONE: Unexpected engine state 0x%04x, guc_id=%d",
2011 			  atomic_read(&q->guc->state), q->guc->id);
2012 		return -EPROTO;
2013 	}
2014 
2015 	handle_deregister_done(guc, q);
2016 
2017 	return 0;
2018 }
2019 
2020 int xe_guc_exec_queue_reset_handler(struct xe_guc *guc, u32 *msg, u32 len)
2021 {
2022 	struct xe_gt *gt = guc_to_gt(guc);
2023 	struct xe_exec_queue *q;
2024 	u32 guc_id;
2025 
2026 	if (unlikely(len < 1))
2027 		return -EPROTO;
2028 
2029 	guc_id = msg[0];
2030 
2031 	q = g2h_exec_queue_lookup(guc, guc_id);
2032 	if (unlikely(!q))
2033 		return -EPROTO;
2034 
2035 	xe_gt_info(gt, "Engine reset: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
2036 		   xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
2037 
2038 	trace_xe_exec_queue_reset(q);
2039 
2040 	/*
2041 	 * A banned engine is a NOP at this point (it came from
2042 	 * guc_exec_queue_timedout_job). Otherwise, kick the DRM scheduler to
2043 	 * cancel the jobs by setting the job timeout to the minimum value, which
2044 	 * kicks guc_exec_queue_timedout_job.
2045 	 */
2046 	set_exec_queue_reset(q);
2047 	if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
2048 		xe_guc_exec_queue_trigger_cleanup(q);
2049 
2050 	return 0;
2051 }
2052 
2053 /**
2054  * xe_guc_error_capture_handler - Handler for the GuC error-capture message
2055  * @guc: The GuC object
2056  * @msg: Pointer to the message
2057  * @len: The message length
2058  *
2059  * When the GuC captured data is ready, the GuC sends an
2060  * XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION message to the host. This function
2061  * is called first to check the status before processing the accompanying data.
2062  *
2063  * Returns: 0 on success, negative error code otherwise.
2064  */
2065 int xe_guc_error_capture_handler(struct xe_guc *guc, u32 *msg, u32 len)
2066 {
2067 	u32 status;
2068 
2069 	if (unlikely(len != XE_GUC_ACTION_STATE_CAPTURE_NOTIFICATION_DATA_LEN))
2070 		return -EPROTO;
2071 
2072 	status = msg[0] & XE_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
2073 	if (status == XE_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
2074 		xe_gt_warn(guc_to_gt(guc), "G2H-Error capture no space");
2075 
2076 	xe_guc_capture_process(guc);
2077 
2078 	return 0;
2079 }
2080 
2081 int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg,
2082 					       u32 len)
2083 {
2084 	struct xe_gt *gt = guc_to_gt(guc);
2085 	struct xe_exec_queue *q;
2086 	u32 guc_id;
2087 
2088 	if (unlikely(len < 1))
2089 		return -EPROTO;
2090 
2091 	guc_id = msg[0];
2092 
2093 	if (guc_id == GUC_ID_UNKNOWN) {
2094 		/*
2095 		 * GuC uses GUC_ID_UNKNOWN if it cannot map the CAT fault to any PF/VF
2096 		 * context. In such a case, only the PF is notified about the fault.
2097 		 */
2098 		xe_gt_err_ratelimited(gt, "Memory CAT error reported by GuC!\n");
2099 		return 0;
2100 	}
2101 
2102 	q = g2h_exec_queue_lookup(guc, guc_id);
2103 	if (unlikely(!q))
2104 		return -EPROTO;
2105 
2106 	xe_gt_dbg(gt, "Engine memory cat error: engine_class=%s, logical_mask: 0x%x, guc_id=%d",
2107 		  xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id);
2108 
2109 	trace_xe_exec_queue_memory_cat_error(q);
2110 
2111 	/* Treat the same as engine reset */
2112 	set_exec_queue_reset(q);
2113 	if (!exec_queue_banned(q) && !exec_queue_check_timeout(q))
2114 		xe_guc_exec_queue_trigger_cleanup(q);
2115 
2116 	return 0;
2117 }
2118 
2119 int xe_guc_exec_queue_reset_failure_handler(struct xe_guc *guc, u32 *msg, u32 len)
2120 {
2121 	struct xe_gt *gt = guc_to_gt(guc);
2122 	u8 guc_class, instance;
2123 	u32 reason;
2124 
2125 	if (unlikely(len != 3))
2126 		return -EPROTO;
2127 
2128 	guc_class = msg[0];
2129 	instance = msg[1];
2130 	reason = msg[2];
2131 
2132 	/* Unexpected failure of a hardware feature, log an actual error */
2133 	xe_gt_err(gt, "GuC engine reset request failed on %d:%d because 0x%08X",
2134 		  guc_class, instance, reason);
2135 
2136 	xe_gt_reset_async(gt);
2137 
2138 	return 0;
2139 }
2140 
2141 static void
2142 guc_exec_queue_wq_snapshot_capture(struct xe_exec_queue *q,
2143 				   struct xe_guc_submit_exec_queue_snapshot *snapshot)
2144 {
2145 	struct xe_guc *guc = exec_queue_to_guc(q);
2146 	struct xe_device *xe = guc_to_xe(guc);
2147 	struct iosys_map map = xe_lrc_parallel_map(q->lrc[0]);
2148 	int i;
2149 
2150 	snapshot->guc.wqi_head = q->guc->wqi_head;
2151 	snapshot->guc.wqi_tail = q->guc->wqi_tail;
2152 	snapshot->parallel.wq_desc.head = parallel_read(xe, map, wq_desc.head);
2153 	snapshot->parallel.wq_desc.tail = parallel_read(xe, map, wq_desc.tail);
2154 	snapshot->parallel.wq_desc.status = parallel_read(xe, map,
2155 							  wq_desc.wq_status);
2156 
2157 	if (snapshot->parallel.wq_desc.head !=
2158 	    snapshot->parallel.wq_desc.tail) {
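		/*
		 * Copy the pending work queue items between head and tail,
		 * wrapping at WQ_SIZE (offsets are in bytes, entries are u32s).
		 */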
2159 		for (i = snapshot->parallel.wq_desc.head;
2160 		     i != snapshot->parallel.wq_desc.tail;
2161 		     i = (i + sizeof(u32)) % WQ_SIZE)
2162 			snapshot->parallel.wq[i / sizeof(u32)] =
2163 				parallel_read(xe, map, wq[i / sizeof(u32)]);
2164 	}
2165 }
2166 
2167 static void
2168 guc_exec_queue_wq_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
2169 				 struct drm_printer *p)
2170 {
2171 	int i;
2172 
2173 	drm_printf(p, "\tWQ head: %u (internal), %d (memory)\n",
2174 		   snapshot->guc.wqi_head, snapshot->parallel.wq_desc.head);
2175 	drm_printf(p, "\tWQ tail: %u (internal), %d (memory)\n",
2176 		   snapshot->guc.wqi_tail, snapshot->parallel.wq_desc.tail);
2177 	drm_printf(p, "\tWQ status: %u\n", snapshot->parallel.wq_desc.status);
2178 
2179 	if (snapshot->parallel.wq_desc.head !=
2180 	    snapshot->parallel.wq_desc.tail) {
2181 		for (i = snapshot->parallel.wq_desc.head;
2182 		     i != snapshot->parallel.wq_desc.tail;
2183 		     i = (i + sizeof(u32)) % WQ_SIZE)
2184 			drm_printf(p, "\tWQ[%zu]: 0x%08x\n", i / sizeof(u32),
2185 				   snapshot->parallel.wq[i / sizeof(u32)]);
2186 	}
2187 }
2188 
2189 /**
2190  * xe_guc_exec_queue_snapshot_capture - Take a quick snapshot of the GuC Engine.
2191  * @q: faulty exec queue
2192  *
2193  * This can be printed out at a later stage, e.g. during dev_coredump
2194  * analysis.
2195  *
2196  * Returns: a GuC Submit Engine snapshot object that must be freed by the
2197  * caller, using `xe_guc_exec_queue_snapshot_free`.
2198  */
2199 struct xe_guc_submit_exec_queue_snapshot *
2200 xe_guc_exec_queue_snapshot_capture(struct xe_exec_queue *q)
2201 {
2202 	struct xe_gpu_scheduler *sched = &q->guc->sched;
2203 	struct xe_guc_submit_exec_queue_snapshot *snapshot;
2204 	int i;
2205 
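	/*
	 * May be called from the dma-fence signalling path, so only atomic
	 * allocations are allowed here.
	 */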
2206 	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
2207 
2208 	if (!snapshot)
2209 		return NULL;
2210 
2211 	snapshot->guc.id = q->guc->id;
2212 	memcpy(&snapshot->name, &q->name, sizeof(snapshot->name));
2213 	snapshot->class = q->class;
2214 	snapshot->logical_mask = q->logical_mask;
2215 	snapshot->width = q->width;
2216 	snapshot->refcount = kref_read(&q->refcount);
2217 	snapshot->sched_timeout = sched->base.timeout;
2218 	snapshot->sched_props.timeslice_us = q->sched_props.timeslice_us;
2219 	snapshot->sched_props.preempt_timeout_us =
2220 		q->sched_props.preempt_timeout_us;
2221 
2222 	snapshot->lrc = kmalloc_array(q->width, sizeof(struct xe_lrc_snapshot *),
2223 				      GFP_ATOMIC);
2224 
2225 	if (snapshot->lrc) {
2226 		for (i = 0; i < q->width; ++i) {
2227 			struct xe_lrc *lrc = q->lrc[i];
2228 
2229 			snapshot->lrc[i] = xe_lrc_snapshot_capture(lrc);
2230 		}
2231 	}
2232 
2233 	snapshot->schedule_state = atomic_read(&q->guc->state);
2234 	snapshot->exec_queue_flags = q->flags;
2235 
2236 	snapshot->parallel_execution = xe_exec_queue_is_parallel(q);
2237 	if (snapshot->parallel_execution)
2238 		guc_exec_queue_wq_snapshot_capture(q, snapshot);
2239 
2240 	spin_lock(&sched->base.job_list_lock);
2241 	snapshot->pending_list_size = list_count_nodes(&sched->base.pending_list);
2242 	snapshot->pending_list = kmalloc_array(snapshot->pending_list_size,
2243 					       sizeof(struct pending_list_snapshot),
2244 					       GFP_ATOMIC);
2245 
2246 	if (snapshot->pending_list) {
2247 		struct xe_sched_job *job_iter;
2248 
2249 		i = 0;
2250 		list_for_each_entry(job_iter, &sched->base.pending_list, drm.list) {
2251 			snapshot->pending_list[i].seqno =
2252 				xe_sched_job_seqno(job_iter);
2253 			snapshot->pending_list[i].fence =
2254 				dma_fence_is_signaled(job_iter->fence) ? 1 : 0;
2255 			snapshot->pending_list[i].finished =
2256 				dma_fence_is_signaled(&job_iter->drm.s_fence->finished)
2257 				? 1 : 0;
2258 			i++;
2259 		}
2260 	}
2261 
2262 	spin_unlock(&sched->base.job_list_lock);
2263 
2264 	return snapshot;
2265 }
2266 
2267 /**
2268  * xe_guc_exec_queue_snapshot_capture_delayed - Take the delayed part of a snapshot of the GuC Engine.
2269  * @snapshot: Previously captured snapshot of job.
2270  *
2271  * This captures some data that requires taking locks, so it cannot be done in the signaling path.
2272  */
2273 void
2274 xe_guc_exec_queue_snapshot_capture_delayed(struct xe_guc_submit_exec_queue_snapshot *snapshot)
2275 {
2276 	int i;
2277 
2278 	if (!snapshot || !snapshot->lrc)
2279 		return;
2280 
2281 	for (i = 0; i < snapshot->width; ++i)
2282 		xe_lrc_snapshot_capture_delayed(snapshot->lrc[i]);
2283 }
2284 
2285 /**
2286  * xe_guc_exec_queue_snapshot_print - Print out a given GuC Engine snapshot.
2287  * @snapshot: GuC Submit Engine snapshot object.
2288  * @p: drm_printer where it will be printed out.
2289  *
2290  * This function prints out a given GuC Submit Engine snapshot object.
2291  */
2292 void
2293 xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snapshot,
2294 				 struct drm_printer *p)
2295 {
2296 	int i;
2297 
2298 	if (!snapshot)
2299 		return;
2300 
2301 	drm_printf(p, "GuC ID: %d\n", snapshot->guc.id);
2302 	drm_printf(p, "\tName: %s\n", snapshot->name);
2303 	drm_printf(p, "\tClass: %d\n", snapshot->class);
2304 	drm_printf(p, "\tLogical mask: 0x%x\n", snapshot->logical_mask);
2305 	drm_printf(p, "\tWidth: %d\n", snapshot->width);
2306 	drm_printf(p, "\tRef: %d\n", snapshot->refcount);
2307 	drm_printf(p, "\tTimeout: %ld (ms)\n", snapshot->sched_timeout);
2308 	drm_printf(p, "\tTimeslice: %u (us)\n",
2309 		   snapshot->sched_props.timeslice_us);
2310 	drm_printf(p, "\tPreempt timeout: %u (us)\n",
2311 		   snapshot->sched_props.preempt_timeout_us);
2312 
2313 	for (i = 0; snapshot->lrc && i < snapshot->width; ++i)
2314 		xe_lrc_snapshot_print(snapshot->lrc[i], p);
2315 
2316 	drm_printf(p, "\tSchedule State: 0x%x\n", snapshot->schedule_state);
2317 	drm_printf(p, "\tFlags: 0x%lx\n", snapshot->exec_queue_flags);
2318 
2319 	if (snapshot->parallel_execution)
2320 		guc_exec_queue_wq_snapshot_print(snapshot, p);
2321 
2322 	for (i = 0; snapshot->pending_list && i < snapshot->pending_list_size;
2323 	     i++)
2324 		drm_printf(p, "\tJob: seqno=%d, fence=%d, finished=%d\n",
2325 			   snapshot->pending_list[i].seqno,
2326 			   snapshot->pending_list[i].fence,
2327 			   snapshot->pending_list[i].finished);
2328 }
2329 
2330 /**
2331  * xe_guc_exec_queue_snapshot_free - Free all allocated objects for a given
2332  * snapshot.
2333  * @snapshot: GuC Submit Engine snapshot object.
2334  *
2335  * This function frees all the memory that was allocated at capture
2336  * time.
2337  */
2338 void xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot)
2339 {
2340 	int i;
2341 
2342 	if (!snapshot)
2343 		return;
2344 
2345 	if (snapshot->lrc) {
2346 		for (i = 0; i < snapshot->width; i++)
2347 			xe_lrc_snapshot_free(snapshot->lrc[i]);
2348 		kfree(snapshot->lrc);
2349 	}
2350 	kfree(snapshot->pending_list);
2351 	kfree(snapshot);
2352 }
2353 
2354 static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
2355 {
2356 	struct xe_guc_submit_exec_queue_snapshot *snapshot;
2357 
2358 	snapshot = xe_guc_exec_queue_snapshot_capture(q);
2359 	xe_guc_exec_queue_snapshot_print(snapshot, p);
2360 	xe_guc_exec_queue_snapshot_free(snapshot);
2361 }
2362 
2363 /**
2364  * xe_guc_submit_print - GuC Submit Print.
2365  * @guc: GuC.
2366  * @p: drm_printer where it will be printed out.
2367  *
2368  * This function captures and prints snapshots of **all** GuC Engines.
2369  */
2370 void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
2371 {
2372 	struct xe_exec_queue *q;
2373 	unsigned long index;
2374 
2375 	if (!xe_device_uc_enabled(guc_to_xe(guc)))
2376 		return;
2377 
2378 	mutex_lock(&guc->submission_state.lock);
2379 	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
2380 		guc_exec_queue_print(q, p);
2381 	mutex_unlock(&guc->submission_state.lock);
2382 }
2383