xref: /linux/drivers/gpu/drm/xe/xe_gt.c (revision 8cdcef1c2f82d207aa8b2a02298fbc17191c6261)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_gt.h"
7 
8 #include <linux/minmax.h>
9 
10 #include <drm/drm_managed.h>
11 #include <drm/xe_drm.h>
12 
13 #include "instructions/xe_gfxpipe_commands.h"
14 #include "instructions/xe_mi_commands.h"
15 #include "regs/xe_gt_regs.h"
16 #include "xe_assert.h"
17 #include "xe_bb.h"
18 #include "xe_bo.h"
19 #include "xe_device.h"
20 #include "xe_exec_queue.h"
21 #include "xe_execlist.h"
22 #include "xe_force_wake.h"
23 #include "xe_ggtt.h"
24 #include "xe_gsc.h"
25 #include "xe_gt_clock.h"
26 #include "xe_gt_idle_sysfs.h"
27 #include "xe_gt_mcr.h"
28 #include "xe_gt_pagefault.h"
29 #include "xe_gt_printk.h"
30 #include "xe_gt_sysfs.h"
31 #include "xe_gt_tlb_invalidation.h"
32 #include "xe_gt_topology.h"
33 #include "xe_guc_exec_queue_types.h"
34 #include "xe_guc_pc.h"
35 #include "xe_hw_fence.h"
36 #include "xe_hw_engine_class_sysfs.h"
37 #include "xe_irq.h"
38 #include "xe_lrc.h"
39 #include "xe_map.h"
40 #include "xe_migrate.h"
41 #include "xe_mmio.h"
42 #include "xe_pat.h"
43 #include "xe_mocs.h"
44 #include "xe_reg_sr.h"
45 #include "xe_ring_ops.h"
46 #include "xe_sa.h"
47 #include "xe_sched_job.h"
48 #include "xe_tuning.h"
49 #include "xe_uc.h"
50 #include "xe_vm.h"
51 #include "xe_wa.h"
52 #include "xe_wopcm.h"
53 
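/**
 * xe_gt_alloc - allocate and minimally initialize a GT structure
 * @tile: the tile the GT belongs to
 *
 * The GT is allocated as drm-managed memory and gets its own ordered
 * workqueue, which is used to serialize GT reset work.
 *
 * Return: pointer to the new GT, or ERR_PTR(-ENOMEM) on allocation failure.
 */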
54 struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
55 {
56 	struct xe_gt *gt;
57 
58 	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
59 	if (!gt)
60 		return ERR_PTR(-ENOMEM);
61 
62 	gt->tile = tile;
63 	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);
64 
65 	return gt;
66 }
67 
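/**
 * xe_gt_sanitize - reset the GT's software submission state
 * @gt: the GT to sanitize
 *
 * Marks GuC submission as disabled; called before a GT reset or suspend,
 * when the hardware is about to lose its current state.
 */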
68 void xe_gt_sanitize(struct xe_gt *gt)
69 {
70 	/*
71 	 * FIXME: if xe_uc_sanitize() is called here, the driver will not
72 	 * reload on TGL
73 	 */
74 	gt->uc.guc.submission_state.enabled = false;
75 }
76 
77 static void gt_fini(struct drm_device *drm, void *arg)
78 {
79 	struct xe_gt *gt = arg;
80 	int i;
81 
82 	destroy_workqueue(gt->ordered_wq);
83 
84 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
85 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
86 }
87 
88 static void gt_reset_worker(struct work_struct *w);
89 
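/*
 * Submit a no-op batch on @q and wait up to a second for it to complete.
 * Used while recording default LRCs to force the hardware to switch contexts
 * and write back context state.
 */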
90 static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
91 {
92 	struct xe_sched_job *job;
93 	struct xe_bb *bb;
94 	struct dma_fence *fence;
95 	long timeout;
96 
97 	bb = xe_bb_new(gt, 4, false);
98 	if (IS_ERR(bb))
99 		return PTR_ERR(bb);
100 
101 	job = xe_bb_create_job(q, bb);
102 	if (IS_ERR(job)) {
103 		xe_bb_free(bb, NULL);
104 		return PTR_ERR(job);
105 	}
106 
107 	xe_sched_job_arm(job);
108 	fence = dma_fence_get(&job->drm.s_fence->finished);
109 	xe_sched_job_push(job);
110 
111 	timeout = dma_fence_wait_timeout(fence, false, HZ);
112 	dma_fence_put(fence);
113 	xe_bb_free(bb, NULL);
114 	if (timeout < 0)
115 		return timeout;
116 	else if (!timeout)
117 		return -ETIME;
118 
119 	return 0;
120 }
121 
122 /*
123  * Convert an encoded register back to the type-safe MCR representation; only
124  * to be used when reg.mcr is true
125  */
126 static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
127 {
128 	return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
129 }
130 
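/*
 * Build and submit a batch on @q that re-emits every register in the engine's
 * LRC save/restore list (workarounds and tunings) via MI_LOAD_REGISTER_IMM,
 * plus any additional engine state instructions, so the values get recorded
 * in the context image.
 */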
131 static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
132 {
133 	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
134 	struct xe_reg_sr_entry *entry;
135 	unsigned long idx;
136 	struct xe_sched_job *job;
137 	struct xe_bb *bb;
138 	struct dma_fence *fence;
139 	long timeout;
140 	int count = 0;
141 
142 	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
143 		/* Big enough to emit all of the context's 3DSTATE */
144 		bb = xe_bb_new(gt, xe_lrc_size(gt_to_xe(gt), q->hwe->class), false);
145 	else
146 		/* Just pick a large BB size */
147 		bb = xe_bb_new(gt, SZ_4K, false);
148 
149 	if (IS_ERR(bb))
150 		return PTR_ERR(bb);
151 
152 	xa_for_each(&sr->xa, idx, entry)
153 		++count;
154 
155 	if (count) {
156 		xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
157 
158 		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
159 
160 		xa_for_each(&sr->xa, idx, entry) {
161 			struct xe_reg reg = entry->reg;
162 			struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
163 			u32 val;
164 
165 			/*
166 			 * Skip the MMIO read when the old register value is irrelevant
167 			 */
168 			if (reg.masked)
169 				val = entry->clr_bits << 16;
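			/* Partial clear mask (clr_bits != ~0): read-modify-write */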
170 			else if (entry->clr_bits + 1)
171 				val = (reg.mcr ?
172 				       xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
173 				       xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
174 			else
175 				val = 0;
176 
177 			val |= entry->set_bits;
178 
179 			bb->cs[bb->len++] = reg.addr;
180 			bb->cs[bb->len++] = val;
181 			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
182 		}
183 	}
184 
185 	xe_lrc_emit_hwe_state_instructions(q, bb);
186 
187 	job = xe_bb_create_job(q, bb);
188 	if (IS_ERR(job)) {
189 		xe_bb_free(bb, NULL);
190 		return PTR_ERR(job);
191 	}
192 
193 	xe_sched_job_arm(job);
194 	fence = dma_fence_get(&job->drm.s_fence->finished);
195 	xe_sched_job_push(job);
196 
197 	timeout = dma_fence_wait_timeout(fence, false, HZ);
198 	dma_fence_put(fence);
199 	xe_bb_free(bb, NULL);
200 	if (timeout < 0)
201 		return timeout;
202 	else if (!timeout)
203 		return -ETIME;
204 
205 	return 0;
206 }
207 
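/**
 * xe_gt_record_default_lrcs - record a default ("golden") LRC per engine class
 * @gt: the GT
 *
 * For each engine class, submits a workaround batch followed by nop jobs so
 * the context state, including LRC workarounds, is written back to memory,
 * then copies the resulting context image into gt->default_lrc[].
 *
 * Return: 0 on success, negative error code otherwise.
 */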
208 int xe_gt_record_default_lrcs(struct xe_gt *gt)
209 {
210 	struct xe_device *xe = gt_to_xe(gt);
211 	struct xe_hw_engine *hwe;
212 	enum xe_hw_engine_id id;
213 	int err = 0;
214 
215 	for_each_hw_engine(hwe, gt, id) {
216 		struct xe_exec_queue *q, *nop_q;
217 		void *default_lrc;
218 
219 		if (gt->default_lrc[hwe->class])
220 			continue;
221 
222 		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
223 		xe_wa_process_lrc(hwe);
224 		xe_hw_engine_setup_default_lrc_state(hwe);
225 		xe_tuning_process_lrc(hwe);
226 
227 		default_lrc = drmm_kzalloc(&xe->drm,
228 					   xe_lrc_size(xe, hwe->class),
229 					   GFP_KERNEL);
230 		if (!default_lrc)
231 			return -ENOMEM;
232 
233 		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
234 					 hwe, EXEC_QUEUE_FLAG_KERNEL);
235 		if (IS_ERR(q)) {
236 			err = PTR_ERR(q);
237 			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
238 				  hwe->name, q);
239 			return err;
240 		}
241 
242 		/* Prime golden LRC with known good state */
243 		err = emit_wa_job(gt, q);
244 		if (err) {
245 			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
246 				  hwe->name, ERR_PTR(err), q->guc->id);
247 			goto put_exec_queue;
248 		}
249 
250 		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
251 					     1, hwe, EXEC_QUEUE_FLAG_KERNEL);
252 		if (IS_ERR(nop_q)) {
253 			err = PTR_ERR(nop_q);
254 			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
255 				  hwe->name, nop_q);
256 			goto put_exec_queue;
257 		}
258 
259 		/* Switch to different LRC */
260 		err = emit_nop_job(gt, nop_q);
261 		if (err) {
262 			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
263 				  hwe->name, ERR_PTR(err), nop_q->guc->id);
264 			goto put_nop_q;
265 		}
266 
267 		/* Reload golden LRC to record the effect of any indirect W/A */
268 		err = emit_nop_job(gt, q);
269 		if (err) {
270 			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
271 				  hwe->name, ERR_PTR(err), q->guc->id);
272 			goto put_nop_q;
273 		}
274 
275 		xe_map_memcpy_from(xe, default_lrc,
276 				   &q->lrc[0].bo->vmap,
277 				   xe_lrc_pphwsp_offset(&q->lrc[0]),
278 				   xe_lrc_size(xe, hwe->class));
279 
280 		gt->default_lrc[hwe->class] = default_lrc;
281 put_nop_q:
282 		xe_exec_queue_put(nop_q);
283 put_exec_queue:
284 		xe_exec_queue_put(q);
285 		if (err)
286 			break;
287 	}
288 
289 	return err;
290 }
291 
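/**
 * xe_gt_init_early - early GT initialization
 * @gt: the GT to initialize
 *
 * Sets up forcewake, reads the GT topology, initializes MCR steering and
 * processes the GT workaround and tuning lists.
 *
 * Return: 0 on success, negative error code otherwise.
 */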
292 int xe_gt_init_early(struct xe_gt *gt)
293 {
294 	int err;
295 
296 	xe_force_wake_init_gt(gt, gt_to_fw(gt));
297 
298 	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
299 	if (err)
300 		return err;
301 
302 	xe_gt_topology_init(gt);
303 	xe_gt_mcr_init(gt);
304 
305 	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
306 	if (err)
307 		return err;
308 
309 	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));
310 
311 	err = xe_wa_init(gt);
312 	if (err)
313 		return err;
314 
315 	xe_wa_process_gt(gt);
316 	xe_wa_process_oob(gt);
317 	xe_tuning_process_gt(gt);
318 
319 	return 0;
320 }
321 
322 static void dump_pat_on_error(struct xe_gt *gt)
323 {
324 	struct drm_printer p;
325 	char prefix[32];
326 
327 	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
328 	p = drm_debug_printer(prefix);
329 
330 	xe_pat_dump(gt, &p);
331 }
332 
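/*
 * Initialization that only needs the GT forcewake domain: PAT, GGTT (on the
 * primary GT), uC software state and hwconfig, GT idle sysfs, per-engine
 * interrupts and the early hw engine setup.
 */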
333 static int gt_fw_domain_init(struct xe_gt *gt)
334 {
335 	int err, i;
336 
337 	xe_device_mem_access_get(gt_to_xe(gt));
338 	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
339 	if (err)
340 		goto err_hw_fence_irq;
341 
342 	xe_pat_init(gt);
343 
344 	if (!xe_gt_is_media_type(gt)) {
345 		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
346 		if (err)
347 			goto err_force_wake;
348 	}
349 
350 	err = xe_uc_init(&gt->uc);
351 	if (err)
352 		goto err_force_wake;
353 
354 	/* Raise GT freq to speed up HuC/GuC load */
355 	xe_guc_pc_init_early(&gt->uc.guc.pc);
356 
357 	err = xe_uc_init_hwconfig(&gt->uc);
358 	if (err)
359 		goto err_force_wake;
360 
361 	xe_gt_idle_sysfs_init(&gt->gtidle);
362 
363 	/* XXX: Fake that we pull the engine mask from hwconfig blob */
364 	gt->info.engine_mask = gt->info.__engine_mask;
365 
366 	/* Enable per hw engine IRQs */
367 	xe_irq_enable_hwe(gt);
368 
369 	/* Rerun MCR init now that we have the hw engine list */
370 	xe_gt_mcr_init(gt);
371 
372 	err = xe_hw_engines_init_early(gt);
373 	if (err)
374 		goto err_force_wake;
375 
376 	err = xe_hw_engine_class_sysfs_init(gt);
377 	if (err)
378 		drm_warn(&gt_to_xe(gt)->drm,
379 			 "failed to register engines sysfs directory, err: %d\n",
380 			 err);
381 
382 	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
383 	XE_WARN_ON(err);
384 	xe_device_mem_access_put(gt_to_xe(gt));
385 
386 	return 0;
387 
388 err_force_wake:
389 	dump_pat_on_error(gt);
390 	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
391 err_hw_fence_irq:
392 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
393 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
394 	xe_device_mem_access_put(gt_to_xe(gt));
395 
396 	return err;
397 }
398 
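/*
 * Initialization that needs all forcewake domains held: MCR and register
 * save/restore defaults, GT clock, MOCS, execlists, hw engines, the USM
 * suballocator pool, the migrate context, and the uC firmware load.
 */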
399 static int all_fw_domain_init(struct xe_gt *gt)
400 {
401 	int err, i;
402 
403 	xe_device_mem_access_get(gt_to_xe(gt));
404 	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
405 	if (err)
406 		goto err_hw_fence_irq;
407 
408 	xe_gt_mcr_set_implicit_defaults(gt);
409 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
410 
411 	err = xe_gt_clock_init(gt);
412 	if (err)
413 		goto err_force_wake;
414 
415 	xe_mocs_init(gt);
416 	err = xe_execlist_init(gt);
417 	if (err)
418 		goto err_force_wake;
419 
420 	err = xe_hw_engines_init(gt);
421 	if (err)
422 		goto err_force_wake;
423 
424 	err = xe_uc_init_post_hwconfig(&gt->uc);
425 	if (err)
426 		goto err_force_wake;
427 
428 	if (!xe_gt_is_media_type(gt)) {
429 		/*
430 		 * USM has its own SA pool so it does not block behind user operations
431 		 */
432 		if (gt_to_xe(gt)->info.supports_usm) {
433 			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), SZ_1M, 16);
434 			if (IS_ERR(gt->usm.bb_pool)) {
435 				err = PTR_ERR(gt->usm.bb_pool);
436 				goto err_force_wake;
437 			}
438 		}
439 	}
440 
441 	if (!xe_gt_is_media_type(gt)) {
442 		struct xe_tile *tile = gt_to_tile(gt);
443 
444 		tile->migrate = xe_migrate_init(tile);
445 		if (IS_ERR(tile->migrate)) {
446 			err = PTR_ERR(tile->migrate);
447 			goto err_force_wake;
448 		}
449 	}
450 
451 	err = xe_uc_init_hw(&gt->uc);
452 	if (err)
453 		goto err_force_wake;
454 
455 	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
456 	XE_WARN_ON(err);
457 	xe_device_mem_access_put(gt_to_xe(gt));
458 
459 	return 0;
460 
461 err_force_wake:
462 	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
463 err_hw_fence_irq:
464 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
465 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
466 	xe_device_mem_access_put(gt_to_xe(gt));
467 
468 	return err;
469 }
470 
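/**
 * xe_gt_init - main GT initialization
 * @gt: the GT to initialize
 *
 * Sets up fence IRQs, TLB invalidation, pagefault handling and sysfs, then
 * runs the GT-forcewake and all-forcewake init stages and registers cleanup
 * with the drm managed-release machinery.
 *
 * Return: 0 on success, negative error code otherwise.
 */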
471 int xe_gt_init(struct xe_gt *gt)
472 {
473 	int err;
474 	int i;
475 
476 	INIT_WORK(&gt->reset.worker, gt_reset_worker);
477 
478 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
479 		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
480 		xe_hw_fence_irq_init(&gt->fence_irq[i]);
481 	}
482 
483 	err = xe_gt_tlb_invalidation_init(gt);
484 	if (err)
485 		return err;
486 
487 	err = xe_gt_pagefault_init(gt);
488 	if (err)
489 		return err;
490 
491 	xe_mocs_init_early(gt);
492 
493 	xe_gt_sysfs_init(gt);
494 
495 	err = gt_fw_domain_init(gt);
496 	if (err)
497 		return err;
498 
499 	xe_force_wake_init_engines(gt, gt_to_fw(gt));
500 
501 	err = all_fw_domain_init(gt);
502 	if (err)
503 		return err;
504 
505 	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
506 	if (err)
507 		return err;
508 
509 	return 0;
510 }
511 
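/*
 * Perform a full GT reset by writing GRDOM_FULL to GDRST and waiting for the
 * hardware to clear it, with the Wa_14015076503 GSC handling around it.
 */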
512 static int do_gt_reset(struct xe_gt *gt)
513 {
514 	int err;
515 
516 	xe_gsc_wa_14015076503(gt, true);
517 
518 	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
519 	err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
520 	if (err)
521 		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
522 			  ERR_PTR(err));
523 
524 	xe_gsc_wa_14015076503(gt, false);
525 
526 	return err;
527 }
528 
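/*
 * Re-apply the GT state that is lost across a reset: PAT, MCR defaults,
 * register save/restore lists, WOPCM, ring enables, MOCS and the uC firmware.
 */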
529 static int do_gt_restart(struct xe_gt *gt)
530 {
531 	struct xe_hw_engine *hwe;
532 	enum xe_hw_engine_id id;
533 	int err;
534 
535 	xe_pat_init(gt);
536 
537 	xe_gt_mcr_set_implicit_defaults(gt);
538 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
539 
540 	err = xe_wopcm_init(&gt->uc.wopcm);
541 	if (err)
542 		return err;
543 
544 	for_each_hw_engine(hwe, gt, id)
545 		xe_hw_engine_enable_ring(hwe);
546 
547 	err = xe_uc_init_hw(&gt->uc);
548 	if (err)
549 		return err;
550 
551 	xe_mocs_init(gt);
552 	err = xe_uc_start(&gt->uc);
553 	if (err)
554 		return err;
555 
556 	for_each_hw_engine(hwe, gt, id) {
557 		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
558 		xe_reg_sr_apply_whitelist(hwe);
559 	}
560 
561 	return 0;
562 }
563 
564 static void xe_uevent_gt_reset_failure(struct pci_dev *pdev, u8 tile_id, u8 gt_id)
565 {
566 	char *reset_event[4];
567 
568 	reset_event[0] = DRM_XE_RESET_FAILED_UEVENT "=NEEDS_RESET";
569 	reset_event[1] = kasprintf(GFP_KERNEL, "TILE_ID=%d", tile_id);
570 	reset_event[2] = kasprintf(GFP_KERNEL, "GT_ID=%d", gt_id);
571 	reset_event[3] = NULL;
572 	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, reset_event);
573 
574 	kfree(reset_event[1]);
575 	kfree(reset_event[2]);
576 }
577 
578 static int gt_reset(struct xe_gt *gt)
579 {
580 	int err;
581 
582 	/* We only support GT resets with GuC submission */
583 	if (!xe_device_uc_enabled(gt_to_xe(gt)))
584 		return -ENODEV;
585 
586 	xe_gt_info(gt, "reset started\n");
587 
588 	if (xe_fault_inject_gt_reset()) {
589 		err = -ECANCELED;
590 		goto err_fail;
591 	}
592 
593 	xe_gt_sanitize(gt);
594 
595 	xe_device_mem_access_get(gt_to_xe(gt));
596 	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
597 	if (err)
598 		goto err_msg;
599 
600 	xe_uc_gucrc_disable(&gt->uc);
601 	xe_uc_stop_prepare(&gt->uc);
602 	xe_gt_pagefault_reset(gt);
603 
604 	err = xe_uc_stop(&gt->uc);
605 	if (err)
606 		goto err_out;
607 
608 	err = do_gt_reset(gt);
609 	if (err)
610 		goto err_out;
611 
612 	xe_gt_tlb_invalidation_reset(gt);
613 
614 	err = do_gt_restart(gt);
615 	if (err)
616 		goto err_out;
617 
618 	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
619 	xe_device_mem_access_put(gt_to_xe(gt));
620 	XE_WARN_ON(err);
621 
622 	xe_gt_info(gt, "reset done\n");
623 
624 	return 0;
625 
626 err_out:
627 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
628 err_msg:
629 	XE_WARN_ON(xe_uc_start(&gt->uc));
630 	xe_device_mem_access_put(gt_to_xe(gt));
631 err_fail:
632 	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
633 
634 	/* Notify userspace about gt reset failure */
635 	xe_uevent_gt_reset_failure(to_pci_dev(gt_to_xe(gt)->drm.dev),
636 				   gt_to_tile(gt)->id, gt->info.id);
637 
638 	gt_to_xe(gt)->needs_flr_on_fini = true;
639 
640 	return err;
641 }
642 
643 static void gt_reset_worker(struct work_struct *w)
644 {
645 	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
646 
647 	gt_reset(gt);
648 }
649 
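/**
 * xe_gt_reset_async - queue a GT reset on the GT's ordered workqueue
 * @gt: the GT to reset
 *
 * Does nothing if a reset is already in flight.
 */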
650 void xe_gt_reset_async(struct xe_gt *gt)
651 {
652 	xe_gt_info(gt, "trying reset\n");
653 
654 	/* Don't do a reset while one is already in flight */
655 	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
656 		return;
657 
658 	xe_gt_info(gt, "reset queued\n");
659 	queue_work(gt->ordered_wq, &gt->reset.worker);
660 }
661 
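/**
 * xe_gt_suspend_prepare - prepare the GT's uC for an upcoming suspend
 * @gt: the GT
 *
 * Takes forcewake around xe_uc_stop_prepare().
 */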
662 void xe_gt_suspend_prepare(struct xe_gt *gt)
663 {
664 	xe_device_mem_access_get(gt_to_xe(gt));
665 	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));
666 
667 	xe_uc_stop_prepare(&gt->uc);
668 
669 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
670 	xe_device_mem_access_put(gt_to_xe(gt));
671 }
672 
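/**
 * xe_gt_suspend - suspend a GT
 * @gt: the GT to suspend
 *
 * Sanitizes the GT and suspends its uC firmware under forcewake.
 *
 * Return: 0 on success, negative error code otherwise.
 */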
673 int xe_gt_suspend(struct xe_gt *gt)
674 {
675 	int err;
676 
677 	xe_gt_sanitize(gt);
678 
679 	xe_device_mem_access_get(gt_to_xe(gt));
680 	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
681 	if (err)
682 		goto err_msg;
683 
686 	err = xe_uc_suspend(&gt->uc);
687 	if (err)
688 		goto err_force_wake;
689 
690 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
691 	xe_device_mem_access_put(gt_to_xe(gt));
692 	xe_gt_info(gt, "suspended\n");
693 
694 	return 0;
695 
696 err_force_wake:
697 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
698 err_msg:
699 	xe_device_mem_access_put(gt_to_xe(gt));
700 	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
701 
702 	return err;
703 }
704 
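/**
 * xe_gt_resume - resume a GT
 * @gt: the GT to resume
 *
 * Re-runs the GT restart sequence (do_gt_restart()) under forcewake.
 *
 * Return: 0 on success, negative error code otherwise.
 */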
705 int xe_gt_resume(struct xe_gt *gt)
706 {
707 	int err;
708 
709 	xe_device_mem_access_get(gt_to_xe(gt));
710 	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
711 	if (err)
712 		goto err_msg;
713 
714 	err = do_gt_restart(gt);
715 	if (err)
716 		goto err_force_wake;
717 
718 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
719 	xe_device_mem_access_put(gt_to_xe(gt));
720 	xe_gt_info(gt, "resumed\n");
721 
722 	return 0;
723 
724 err_force_wake:
725 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
726 err_msg:
727 	xe_device_mem_access_put(gt_to_xe(gt));
728 	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
729 
730 	return err;
731 }
732 
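/**
 * xe_gt_hw_engine - look up a hw engine on this GT
 * @gt: the GT
 * @class: engine class to match
 * @instance: engine instance to match
 * @logical: if true match @instance against the logical instance, otherwise
 *           against the physical instance
 *
 * Return: the matching hw engine, or NULL if none is found.
 */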
733 struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
734 				     enum xe_engine_class class,
735 				     u16 instance, bool logical)
736 {
737 	struct xe_hw_engine *hwe;
738 	enum xe_hw_engine_id id;
739 
740 	for_each_hw_engine(hwe, gt, id)
741 		if (hwe->class == class &&
742 		    ((!logical && hwe->instance == instance) ||
743 		    (logical && hwe->logical_instance == instance)))
744 			return hwe;
745 
746 	return NULL;
747 }
748 
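/**
 * xe_gt_any_hw_engine_by_reset_domain - find any engine sharing a reset domain
 * @gt: the GT
 * @class: engine class of interest
 *
 * Render and compute engines share a reset domain, so for those classes any
 * render or compute engine is returned; for other classes the first engine of
 * exactly that class is returned.
 *
 * Return: a matching hw engine, or NULL if none exists.
 */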
749 struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
750 							 enum xe_engine_class class)
751 {
752 	struct xe_hw_engine *hwe;
753 	enum xe_hw_engine_id id;
754 
755 	for_each_hw_engine(hwe, gt, id) {
756 		switch (class) {
757 		case XE_ENGINE_CLASS_RENDER:
758 		case XE_ENGINE_CLASS_COMPUTE:
759 			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
760 			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
761 				return hwe;
762 			break;
763 		default:
764 			if (hwe->class == class)
765 				return hwe;
766 		}
767 	}
768 
769 	return NULL;
770 }
771