// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <drm/xe_drm.h>

#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lmtt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_mocs.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

static void gt_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;

	destroy_workqueue(gt->ordered_wq);
}

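/**
 * xe_gt_alloc() - Allocate a GT, managed by the DRM device
 * @tile: the &xe_tile the new GT belongs to
 *
 * Allocates the GT structure and its ordered workqueue, and registers a
 * drmm action so the workqueue is destroyed on driver unload.
 *
 * Return: pointer to the new GT, or an ERR_PTR on failure.
 */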
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_gt *gt;
	int err;

	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);
	if (!gt->ordered_wq)
		return ERR_PTR(-ENOMEM);

	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
	if (err)
		return ERR_PTR(err);

	return gt;
}

void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, the driver will not
	 * reload on TGL
	 */
	gt->uc.guc.submission_state.enabled = false;
}

/**
 * xe_gt_remove() - Clean up the GT structures before driver removal
 * @gt: the GT object
 *
 * This function should only act on objects/structures that must be cleaned
 * before the driver removal callback is complete and therefore can't be
 * deferred to a drmm action.
 */
void xe_gt_remove(struct xe_gt *gt)
{
	int i;

	xe_uc_remove(&gt->uc);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
}

static void gt_reset_worker(struct work_struct *w);

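/*
 * Submit an empty batch buffer on @q and wait up to a second for it to
 * complete; used below to force a context switch away from the golden LRC.
 */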
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

/*
 * Convert back from an encoded value to the type-safe MCR register; only to
 * be used when reg.mcr is true.
 */
static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
{
	return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
}

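/*
 * Re-emit the LRC save/restore list (workarounds and tunings) for @q's engine
 * as MI_LOAD_REGISTER_IMM commands so the values are captured in the context
 * image recorded below.
 */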
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	unsigned long idx;
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;
	int count = 0;

	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
		/* Big enough to emit all of the context's 3DSTATE */
		bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false);
	else
		/* Just pick a large BB size */
		bb = xe_bb_new(gt, SZ_4K, false);

	if (IS_ERR(bb))
		return PTR_ERR(bb);

	xa_for_each(&sr->xa, idx, entry)
		++count;

	if (count) {
		xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);

		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);

		xa_for_each(&sr->xa, idx, entry) {
			struct xe_reg reg = entry->reg;
			struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
			u32 val;

			/*
			 * Skip reading the register if it's not really needed
			 */
			if (reg.masked)
				val = entry->clr_bits << 16;
			else if (entry->clr_bits + 1)
				/* clr_bits != ~0: read-modify-write */
				val = (reg.mcr ?
				       xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
				       xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
			else
				/* the whole register is overwritten */
				val = 0;

			val |= entry->set_bits;

			bb->cs[bb->len++] = reg.addr;
			bb->cs[bb->len++] = val;
			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
		}
	}

	xe_lrc_emit_hwe_state_instructions(q, bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

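/**
 * xe_gt_record_default_lrcs() - Record a golden LRC image per engine class
 * @gt: the GT object
 *
 * For each engine class, run a batch that applies the LRC workarounds, kick
 * a second queue to force a context switch, re-run so any indirect
 * workarounds take effect, and copy out the resulting context image as the
 * default ("golden") LRC for that class.
 *
 * Return: 0 on success, negative error code otherwise.
 */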
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_exec_queue *q, *nop_q;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_gt_lrc_size(gt, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(q)) {
			err = PTR_ERR(q);
			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
				  hwe->name, q);
			return err;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_exec_queue;
		}

		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(nop_q)) {
			err = PTR_ERR(nop_q);
			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
				  hwe->name, nop_q);
			goto put_exec_queue;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_q);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_q->guc->id);
			goto put_nop_q;
		}

		/* Reload golden LRC to record the effect of any indirect W/A */
		err = emit_nop_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_nop_q;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &q->lrc[0]->bo->vmap,
				   xe_lrc_pphwsp_offset(q->lrc[0]),
				   xe_gt_lrc_size(gt, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
		xe_exec_queue_put(nop_q);
put_exec_queue:
		xe_exec_queue_put(q);
		if (err)
			break;
	}

	return err;
}

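/**
 * xe_gt_init_early() - Early GT initialization
 * @gt: the GT object
 *
 * Set up GT state that does not need full hardware access: SR-IOV PF data,
 * the GT save/restore list, the workaround and tuning tables, and the
 * force-wake and pcode bookkeeping.
 *
 * Return: 0 on success, negative error code otherwise.
 */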
int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_PF(gt_to_xe(gt))) {
		err = xe_gt_sriov_pf_init_early(gt);
		if (err)
			return err;
	}

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_init(gt);
	if (err)
		return err;

	xe_wa_process_gt(gt);
	xe_wa_process_oob(gt);
	xe_tuning_process_gt(gt);

	xe_force_wake_init_gt(gt, gt_to_fw(gt));
	xe_pcode_init(gt);

	return 0;
}

static void dump_pat_on_error(struct xe_gt *gt)
{
	struct drm_printer p;
	char prefix[32];

	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);

	xe_pat_dump(gt, &p);
}

static int gt_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_hw_fence_irq;

	if (!xe_gt_is_media_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			goto err_force_wake;
		if (IS_SRIOV_PF(gt_to_xe(gt)))
			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
	}

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engine_class_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/* Initialize CCS mode sysfs after early initialization of HW engines */
	err = xe_gt_ccs_mode_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/*
	 * Stash hardware-reported version.  Since this register does not exist
	 * on pre-MTL platforms, reading it there will (correctly) return 0.
	 */
	gt->info.gmdid = xe_mmio_read32(gt, GMD_ID);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	XE_WARN_ON(err);

	return 0;

err_force_wake:
	dump_pat_on_error(gt);
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	return err;
}

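/*
 * Initialization that must run while all force-wake domains are held:
 * clocks, MOCS, engines, the migrate context and bringing up the uC.
 */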
static int all_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_hw_fence_irq;

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		goto err_force_wake;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engines_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	if (!xe_gt_is_media_type(gt)) {
		/*
		 * USM has its own SA pool so that it doesn't block behind
		 * user operations
		 */
		if (gt_to_xe(gt)->info.has_usm) {
			struct xe_device *xe = gt_to_xe(gt);

			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
			if (IS_ERR(gt->usm.bb_pool)) {
				err = PTR_ERR(gt->usm.bb_pool);
				goto err_force_wake;
			}
		}
	}

	if (!xe_gt_is_media_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		tile->migrate = xe_migrate_init(tile);
		if (IS_ERR(tile->migrate)) {
			err = PTR_ERR(tile->migrate);
			goto err_force_wake;
		}
	}

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		goto err_force_wake;

	/* Configure default CCS mode of 1 engine with all resources */
	if (xe_gt_ccs_mode_enabled(gt)) {
		gt->ccs_mode = 1;
		xe_gt_apply_ccs_mode(gt);
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	return err;
}

/*
 * Initialize enough GT to be able to load GuC in order to obtain hwconfig and
 * enable CTB communication.
 */
int xe_gt_init_hwconfig(struct xe_gt *gt)
{
	int err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto out;

	xe_gt_mcr_init_early(gt);
	xe_pat_init(gt);

	err = xe_uc_init(&gt->uc);
	if (err)
		goto out_fw;

	err = xe_uc_init_hwconfig(&gt->uc);
	if (err)
		goto out_fw;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);

out_fw:
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
out:
	return err;
}

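/**
 * xe_gt_init() - Full GT initialization
 * @gt: the GT object
 *
 * Runs the remaining initialization steps in order: TLB invalidation and
 * pagefault handling, sysfs, the GT force-wake domain, idle and frequency
 * handling, then everything that needs all force-wake domains, and finally
 * records the engines exposed to userspace.
 *
 * Return: 0 on success, negative error code otherwise.
 */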
int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = xe_gt_tlb_invalidation_init(gt);
	if (err)
		return err;

	err = xe_gt_pagefault_init(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	err = xe_gt_sysfs_init(gt);
	if (err)
		return err;

	err = gt_fw_domain_init(gt);
	if (err)
		return err;

	err = xe_gt_idle_init(&gt->gtidle);
	if (err)
		return err;

	err = xe_gt_freq_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = all_fw_domain_init(gt);
	if (err)
		return err;

	xe_gt_record_user_engines(gt);

	return 0;
}

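/*
 * Build the mask and per-class instance counts of the engines exposed to
 * userspace, skipping engines reserved for kernel use.
 */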
void xe_gt_record_user_engines(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	gt->user_engines.mask = 0;
	memset(gt->user_engines.instances_per_class, 0,
	       sizeof(gt->user_engines.instances_per_class));

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		gt->user_engines.mask |= BIT_ULL(id);
		gt->user_engines.instances_per_class[hwe->class]++;
	}

	xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
		     == gt->info.engine_mask);
}

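/*
 * Trigger a full GT reset through GDRST and wait up to 5ms for the hardware
 * to clear the domain bit, with Wa_14015076503 applied around the reset.
 */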
static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	xe_gsc_wa_14015076503(gt, true);

	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	xe_gsc_wa_14015076503(gt, false);

	return err;
}

static int vf_gt_restart(struct xe_gt *gt)
{
	int err;

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	return 0;
}

static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return vf_gt_restart(gt);

	xe_pat_init(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	xe_mocs_init(gt);
	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id) {
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
		xe_reg_sr_apply_whitelist(hwe);
	}

	/* Get CCS mode in sync between sw/hw */
	xe_gt_apply_ccs_mode(gt);

	return 0;
}

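/*
 * Full GT reset flow: quiesce the uC and pagefault handling, reset the
 * hardware, then restart and reload firmware. Any failure wedges the device.
 */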
static int gt_reset(struct xe_gt *gt)
{
	int err;

	if (xe_device_wedged(gt_to_xe(gt)))
		return -ECANCELED;

	/* We only support GT resets with GuC submission */
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_info(gt, "reset started\n");

	if (xe_fault_inject_gt_reset()) {
		err = -ECANCELED;
		goto err_fail;
	}

	xe_pm_runtime_get(gt_to_xe(gt));
	xe_gt_sanitize(gt);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	xe_uc_gucrc_disable(&gt->uc);
	xe_uc_stop_prepare(&gt->uc);
	xe_gt_pagefault_reset(gt);

	xe_uc_stop(&gt->uc);

	xe_gt_tlb_invalidation_reset(gt);

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);
	xe_pm_runtime_put(gt_to_xe(gt));

	xe_gt_info(gt, "reset done\n");

	return 0;

err_out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	XE_WARN_ON(xe_uc_start(&gt->uc));
	xe_pm_runtime_put(gt_to_xe(gt));
err_fail:
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

	xe_device_declare_wedged(gt_to_xe(gt));

	return err;
}

static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);

	gt_reset(gt);
}

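/*
 * Queue a GT reset on the GT's ordered workqueue, unless a reset is already
 * in flight.
 */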
void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset\n");

	/* Don't do a reset while one is already in flight */
	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");
	queue_work(gt->ordered_wq, &gt->reset.worker);
}

void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	xe_uc_stop_prepare(&gt->uc);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
}

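/*
 * Suspend the GT: sanitize software state, suspend the uC and disable power
 * gating; xe_gt_resume() below restores the hardware state on resume.
 */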
int xe_gt_suspend(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "suspending\n");
	xe_gt_sanitize(gt);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = xe_uc_suspend(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_gt_idle_disable_pg(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_dbg(gt, "suspended\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));

	return err;
}

int xe_gt_resume(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "resuming\n");
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = do_gt_restart(gt);
	if (err)
		goto err_force_wake;

	xe_gt_idle_enable_pg(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_dbg(gt, "resumed\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));

	return err;
}

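/*
 * Look up a hardware engine by class and instance; @logical selects whether
 * @instance is interpreted as a logical or a physical instance number.
 */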
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		    (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

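/*
 * Return any engine sharing a reset domain with @class; render and compute
 * share a reset domain, so either satisfies a request for the other.
 */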
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							 enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}

struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		return hwe;

	return NULL;
}