xref: /linux/drivers/gpu/drm/xe/xe_gt.c (revision fe833e4397fbdc3ae13a60202dfc7c335b032499)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <drm/xe_drm.h>

#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lmtt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_mocs.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

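/**
 * xe_gt_alloc() - Allocate a GT structure
 * @tile: the tile the GT belongs to
 *
 * Allocate the GT from the tile's DRM-managed memory and create its ordered
 * workqueue.
 *
 * Return: pointer to the new GT on success, ERR_PTR(-ENOMEM) on allocation
 * failure.
 */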
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_gt *gt;

	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);

	return gt;
}

void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, the driver will not
	 * reload on TGL
	 */
	gt->uc.guc.submission_state.enabled = false;
}

/**
 * xe_gt_remove() - Clean up the GT structures before driver removal
 * @gt: the GT object
 *
 * This function should only act on objects/structures that must be cleaned
 * before the driver removal callback is complete and therefore can't be
 * deferred to a drmm action.
 */
void xe_gt_remove(struct xe_gt *gt)
{
	xe_uc_remove(&gt->uc);
}

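/* drmm release action: tear down the GT's ordered workqueue and fence IRQs */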
static void gt_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;
	int i;

	destroy_workqueue(gt->ordered_wq);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
}

static void gt_reset_worker(struct work_struct *w);

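/* Submit an empty batch on the queue and wait up to a second for it to complete */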
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

/*
 * Convert back from the encoded value to the type-safe struct, only to be
 * used when reg.mcr is true
 */
static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
{
	return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
}

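/*
 * Emit the LRC save/restore register writes (as MI_LOAD_REGISTER_IMM) plus
 * any engine state instructions on the queue, then wait for the job to
 * complete.
 */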
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	unsigned long idx;
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;
	int count = 0;

	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
		/* Big enough to emit all of the context's 3DSTATE */
		bb = xe_bb_new(gt, xe_lrc_size(gt_to_xe(gt), q->hwe->class), false);
	else
		/* Just pick a large BB size */
		bb = xe_bb_new(gt, SZ_4K, false);

	if (IS_ERR(bb))
		return PTR_ERR(bb);

	xa_for_each(&sr->xa, idx, entry)
		++count;

	if (count) {
		xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);

		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);

		xa_for_each(&sr->xa, idx, entry) {
			struct xe_reg reg = entry->reg;
			struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
			u32 val;

			/*
			 * Skip reading the register if it's not really needed
			 */
			if (reg.masked)
				val = entry->clr_bits << 16;
			else if (entry->clr_bits + 1)
				val = (reg.mcr ?
				       xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
				       xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
			else
				val = 0;

			val |= entry->set_bits;

			bb->cs[bb->len++] = reg.addr;
			bb->cs[bb->len++] = val;
			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
		}
	}

	xe_lrc_emit_hwe_state_instructions(q, bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_exec_queue *q, *nop_q;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_lrc_size(xe, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(q)) {
			err = PTR_ERR(q);
			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
				  hwe->name, q);
			return err;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_exec_queue;
		}

		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(nop_q)) {
			err = PTR_ERR(nop_q);
			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
				  hwe->name, nop_q);
			goto put_exec_queue;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_q);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_q->guc->id);
			goto put_nop_q;
		}

		/* Reload golden LRC to record the effect of any indirect W/A */
		err = emit_nop_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_nop_q;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &q->lrc[0].bo->vmap,
				   xe_lrc_pphwsp_offset(&q->lrc[0]),
				   xe_lrc_size(xe, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
		xe_exec_queue_put(nop_q);
put_exec_queue:
		xe_exec_queue_put(q);
		if (err)
			break;
	}

	return err;
}

int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_PF(gt_to_xe(gt))) {
		err = xe_gt_sriov_pf_init_early(gt);
		if (err)
			return err;
	}

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return err;

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_init(gt);
	if (err)
		return err;

	xe_wa_process_gt(gt);
	xe_wa_process_oob(gt);
	xe_tuning_process_gt(gt);

	return 0;
}

static void dump_pat_on_error(struct xe_gt *gt)
{
	struct drm_printer p;
	char prefix[32];

	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);

	xe_pat_dump(gt, &p);
}

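/* Initialization steps that only require the GT forcewake domain */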
static int gt_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_hw_fence_irq;

	if (!xe_gt_is_media_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			goto err_force_wake;
		if (IS_SRIOV_PF(gt_to_xe(gt)))
			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
	}

	err = xe_gt_idle_sysfs_init(&gt->gtidle);
	if (err)
		goto err_force_wake;

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init now that we have the hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engine_class_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/* Initialize CCS mode sysfs after early initialization of HW engines */
	err = xe_gt_ccs_mode_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/*
	 * Stash hardware-reported version.  Since this register does not exist
	 * on pre-MTL platforms, reading it there will (correctly) return 0.
	 */
	gt->info.gmdid = xe_mmio_read32(gt, GMD_ID);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	XE_WARN_ON(err);

	return 0;

err_force_wake:
	dump_pat_on_error(gt);
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	return err;
}

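/* Initialization steps that require all forcewake domains to be awake */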
static int all_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_hw_fence_irq;

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		goto err_force_wake;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engines_init(gt);
	if (err)
		goto err_force_wake;

	if (!xe_gt_is_media_type(gt)) {
		/*
		 * USM has its own SA pool so that it does not block behind
		 * user operations
		 */
		if (gt_to_xe(gt)->info.has_usm) {
			struct xe_device *xe = gt_to_xe(gt);

			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
			if (IS_ERR(gt->usm.bb_pool)) {
				err = PTR_ERR(gt->usm.bb_pool);
				goto err_force_wake;
			}
		}
	}

	if (!xe_gt_is_media_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		tile->migrate = xe_migrate_init(tile);
		if (IS_ERR(tile->migrate)) {
			err = PTR_ERR(tile->migrate);
			goto err_force_wake;
		}
	}

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		goto err_force_wake;

	/* Configure default CCS mode of 1 engine with all resources */
	if (xe_gt_ccs_mode_enabled(gt)) {
		gt->ccs_mode = 1;
		xe_gt_apply_ccs_mode(gt);
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	return err;
}

/*
 * Initialize enough of the GT to be able to load the GuC in order to obtain
 * hwconfig and enable CTB communication.
 */
int xe_gt_init_hwconfig(struct xe_gt *gt)
{
	int err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto out;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);
	xe_pat_init(gt);

	err = xe_uc_init(&gt->uc);
	if (err)
		goto out_fw;

	err = xe_uc_init_hwconfig(&gt->uc);
	if (err)
		goto out_fw;

	/* XXX: Fake that we pull the engine mask from hwconfig blob */
	gt->info.engine_mask = gt->info.__engine_mask;

out_fw:
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
out:
	return err;
}

int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = xe_gt_tlb_invalidation_init(gt);
	if (err)
		return err;

	err = xe_gt_pagefault_init(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	err = xe_gt_sysfs_init(gt);
	if (err)
		return err;

	err = gt_fw_domain_init(gt);
	if (err)
		return err;

	err = xe_gt_freq_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = all_fw_domain_init(gt);
	if (err)
		return err;

	return drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
}

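/* Trigger a full GT reset via GDRST and wait for the hardware to complete it */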
static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	xe_gsc_wa_14015076503(gt, true);

	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	xe_gsc_wa_14015076503(gt, false);

	return err;
}

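/*
 * Re-program hardware state (PAT, WOPCM, engines, uC, MOCS, ...) after a
 * reset or resume.
 */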
static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	xe_pat_init(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	xe_mocs_init(gt);
	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id) {
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
		xe_reg_sr_apply_whitelist(hwe);
	}

	/* Get CCS mode in sync between sw/hw */
	xe_gt_apply_ccs_mode(gt);

	return 0;
}

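/* Full GT reset: stop the uC, reset the hardware, then restore its state */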
static int gt_reset(struct xe_gt *gt)
{
	int err;

	/* We only support GT resets with GuC submission */
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_info(gt, "reset started\n");

	if (xe_fault_inject_gt_reset()) {
		err = -ECANCELED;
		goto err_fail;
	}

	xe_pm_runtime_get(gt_to_xe(gt));
	xe_gt_sanitize(gt);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	xe_uc_gucrc_disable(&gt->uc);
	xe_uc_stop_prepare(&gt->uc);
	xe_gt_pagefault_reset(gt);

	err = xe_uc_stop(&gt->uc);
	if (err)
		goto err_out;

	xe_gt_tlb_invalidation_reset(gt);

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);
	xe_pm_runtime_put(gt_to_xe(gt));

	xe_gt_info(gt, "reset done\n");

	return 0;

err_out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	XE_WARN_ON(xe_uc_start(&gt->uc));
	xe_pm_runtime_put(gt_to_xe(gt));
err_fail:
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

	gt_to_xe(gt)->needs_flr_on_fini = true;

	return err;
}

static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);

	gt_reset(gt);
}

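/* Queue a GT reset on the GT's ordered workqueue, unless one is already pending */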
void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset\n");

	/* Don't do a reset while one is already in flight */
	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");
	queue_work(gt->ordered_wq, &gt->reset.worker);
}

void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	xe_uc_stop_prepare(&gt->uc);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
}

int xe_gt_suspend(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "suspending\n");
	xe_gt_sanitize(gt);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = xe_uc_suspend(&gt->uc);
	if (err)
		goto err_force_wake;

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_dbg(gt, "suspended\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));

	return err;
}

int xe_gt_resume(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "resuming\n");
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = do_gt_restart(gt);
	if (err)
		goto err_force_wake;

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_dbg(gt, "resumed\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));

	return err;
}

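/*
 * Look up a hardware engine on the GT by class and either its logical or
 * physical instance number, depending on the 'logical' flag.
 */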
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		    (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

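/*
 * Return any engine on the GT that shares a reset domain with the given
 * class; render and compute engines are treated as one reset domain.
 */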
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							 enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}
813