xref: /linux/drivers/gpu/drm/xe/xe_gt.c (revision 8f88c072c2ba9201c1db27dec35f5015489776ec)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <drm/xe_drm.h>
#include <generated/xe_wa_oob.h>

#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_fence.h"
#include "xe_irq.h"
#include "xe_lmtt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_mocs.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

static void gt_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;

	destroy_workqueue(gt->ordered_wq);
}

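/**
 * xe_gt_alloc() - Allocate a GT for a tile
 * @tile: the tile the GT is associated with
 *
 * Allocates the GT via DRM-managed memory and creates its ordered workqueue,
 * registering a drmm action to destroy the workqueue on driver teardown.
 *
 * Return: pointer to the new GT, or an ERR_PTR() on failure.
 */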
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_gt *gt;
	int err;

	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);
	if (!gt->ordered_wq)
		return ERR_PTR(-ENOMEM);

	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
	if (err)
		return ERR_PTR(err);

	return gt;
}

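/**
 * xe_gt_sanitize() - Sanitize the GT software state
 * @gt: the GT object
 *
 * Marks GuC submission as disabled, in preparation for a GT reset, suspend
 * or resume flow.
 */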
void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, the driver will not
	 * reload on TGL
	 */
	gt->uc.guc.submission_state.enabled = false;
}

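/*
 * Part of WA 16023588340. Judging by the name and the register writes below,
 * this appears to enable host L2 caching of VRAM: the primary GT additionally
 * requests CCS cache flushes on L3 read/write accesses and disables
 * control-bus clock gating until xe_gt_disable_host_l2_vram() clears it.
 */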
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
{
	u32 reg;
	int err;

	if (!XE_WA(gt, 16023588340))
		return;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (WARN_ON(err))
		return;

	if (!xe_gt_is_media_type(gt)) {
		xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH);
		reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL);
		reg |= CG_DIS_CNTLBUS;
		xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg);
	}

	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}

static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
{
	u32 reg;
	int err;

	if (!XE_WA(gt, 16023588340))
		return;

	if (xe_gt_is_media_type(gt))
		return;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (WARN_ON(err))
		return;

	reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL);
	reg &= ~CG_DIS_CNTLBUS;
	xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg);

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}

/**
 * xe_gt_remove() - Clean up the GT structures before driver removal
 * @gt: the GT object
 *
 * This function should only act on objects/structures that must be cleaned
 * before the driver removal callback is complete and therefore can't be
 * deferred to a drmm action.
 */
void xe_gt_remove(struct xe_gt *gt)
{
	int i;

	xe_uc_remove(&gt->uc);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	xe_gt_disable_host_l2_vram(gt);
}

static void gt_reset_worker(struct work_struct *w);

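/*
 * Submit an empty batch on @q and wait up to one second for it to complete.
 * Used below to cycle a context through the hardware while recording the
 * default LRCs.
 */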
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

/*
 * Convert back from an encoded value to the type-safe MCR register; only to
 * be used when reg.mcr is true.
 */
static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
{
	return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
}

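/*
 * Build and submit a batch that re-emits the LRC save/restore register values
 * for @q's engine class via MI_LOAD_REGISTER_IMM, followed by the engine's
 * hardware state instructions, so the resulting context image can be recorded
 * as the golden LRC.
 */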
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	unsigned long idx;
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;
	int count = 0;

	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
		/* Big enough to emit all of the context's 3DSTATE */
		bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false);
	else
		/* Just pick a large BB size */
		bb = xe_bb_new(gt, SZ_4K, false);

	if (IS_ERR(bb))
		return PTR_ERR(bb);

	xa_for_each(&sr->xa, idx, entry)
		++count;

	if (count) {
		xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);

		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);

		xa_for_each(&sr->xa, idx, entry) {
			struct xe_reg reg = entry->reg;
			struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
			u32 val;

			/*
			 * Skip reading the register if it's not really
			 * needed: masked registers carry the clear mask in
			 * the upper 16 bits of the write, and clr_bits == ~0
			 * means the whole register gets overwritten, so only
			 * a partial clear requires a read-modify-write.
			 */
			if (reg.masked)
				val = entry->clr_bits << 16;
			else if (entry->clr_bits + 1)
				val = (reg.mcr ?
				       xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
				       xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
			else
				val = 0;

			val |= entry->set_bits;

			bb->cs[bb->len++] = reg.addr;
			bb->cs[bb->len++] = val;
			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
		}
	}

	xe_lrc_emit_hwe_state_instructions(q, bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

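/**
 * xe_gt_record_default_lrcs() - Record the default ("golden") LRC images
 * @gt: the GT object
 *
 * For each engine class, creates a kernel queue, runs the workaround batch to
 * prime the context with known good state, cycles to another context and back
 * so any indirect workarounds take effect, and then copies the resulting
 * context image into gt->default_lrc[].
 *
 * Return: 0 on success, negative error code on failure.
 */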
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_exec_queue *q, *nop_q;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_gt_lrc_size(gt, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(q)) {
			err = PTR_ERR(q);
			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
				  hwe->name, q);
			return err;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_exec_queue;
		}

		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(nop_q)) {
			err = PTR_ERR(nop_q);
			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
				  hwe->name, nop_q);
			goto put_exec_queue;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_q);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_q->guc->id);
			goto put_nop_q;
		}

		/* Reload golden LRC to record the effect of any indirect W/A */
		err = emit_nop_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_nop_q;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &q->lrc[0]->bo->vmap,
				   xe_lrc_pphwsp_offset(q->lrc[0]),
				   xe_gt_lrc_size(gt, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
		xe_exec_queue_put(nop_q);
put_exec_queue:
		xe_exec_queue_put(q);
		if (err)
			break;
	}

	return err;
}

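/**
 * xe_gt_init_early() - Early GT initialization
 * @gt: the GT object
 *
 * Performs the initialization that can be done before the GT hardware is
 * brought up: SR-IOV PF setup, workaround and tuning processing, force-wake
 * initialization and lock setup.
 *
 * Return: 0 on success, negative error code on failure.
 */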
int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_PF(gt_to_xe(gt))) {
		err = xe_gt_sriov_pf_init_early(gt);
		if (err)
			return err;
	}

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_init(gt);
	if (err)
		return err;

	xe_wa_process_gt(gt);
	xe_wa_process_oob(gt);
	xe_tuning_process_gt(gt);

	xe_force_wake_init_gt(gt, gt_to_fw(gt));
	spin_lock_init(&gt->global_invl_lock);

	return 0;
}

static void dump_pat_on_error(struct xe_gt *gt)
{
	struct drm_printer p;
	char prefix[32];

	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);

	xe_pat_dump(gt, &p);
}

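/*
 * Initialization that requires the GT force-wake domain to be awake: GGTT and
 * LMTT setup on the primary GT, per-engine IRQs, early hw engine init and the
 * associated sysfs entries, plus stashing the GMD_ID version.
 */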
static int gt_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto err_hw_fence_irq;

	if (!xe_gt_is_media_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			goto err_force_wake;
		if (IS_SRIOV_PF(gt_to_xe(gt)))
			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
	}

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engine_class_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/* Initialize CCS mode sysfs after early initialization of HW engines */
	err = xe_gt_ccs_mode_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/*
	 * Stash hardware-reported version.  Since this register does not exist
	 * on pre-MTL platforms, reading it there will (correctly) return 0.
	 */
	gt->info.gmdid = xe_mmio_read32(gt, GMD_ID);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
	XE_WARN_ON(err);

	return 0;

err_force_wake:
	dump_pat_on_error(gt);
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	return err;
}

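/*
 * Initialization that requires all force-wake domains to be awake: register
 * save/restore, clocks, MOCS, hw engines, the uC post-hwconfig and hw init
 * steps, and (on the primary GT) the USM suballocator and migration context.
 */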
static int all_fw_domain_init(struct xe_gt *gt)
{
	int err, i;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_hw_fence_irq;

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		goto err_force_wake;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engines_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	if (!xe_gt_is_media_type(gt)) {
		/*
		 * USM has its own SA pool so that it doesn't block behind
		 * user operations
		 */
		if (gt_to_xe(gt)->info.has_usm) {
			struct xe_device *xe = gt_to_xe(gt);

			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
			if (IS_ERR(gt->usm.bb_pool)) {
				err = PTR_ERR(gt->usm.bb_pool);
				goto err_force_wake;
			}
		}
	}

	if (!xe_gt_is_media_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		tile->migrate = xe_migrate_init(tile);
		if (IS_ERR(tile->migrate)) {
			err = PTR_ERR(tile->migrate);
			goto err_force_wake;
		}
	}

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		goto err_force_wake;

	/* Configure default CCS mode of 1 engine with all resources */
	if (xe_gt_ccs_mode_enabled(gt)) {
		gt->ccs_mode = 1;
		xe_gt_apply_ccs_mode(gt);
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
err_hw_fence_irq:
	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	return err;
}

/*
 * Initialize enough GT to be able to load GuC in order to obtain hwconfig and
 * enable CTB communication.
 */
int xe_gt_init_hwconfig(struct xe_gt *gt)
{
	int err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		goto out;

	xe_gt_mcr_init_early(gt);
	xe_pat_init(gt);
	xe_gt_enable_host_l2_vram(gt);

	err = xe_uc_init(&gt->uc);
	if (err)
		goto out_fw;

	err = xe_uc_init_hwconfig(&gt->uc);
	if (err)
		goto out_fw;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);

out_fw:
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
out:
	return err;
}

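/**
 * xe_gt_init() - Main GT initialization
 * @gt: the GT object
 *
 * Runs the remaining init steps in order: per-class ring ops and fence IRQs,
 * TLB invalidation and pagefault handling, sysfs, the force-wake-dependent
 * init stages, GT idle and frequency handling, and finally records the
 * engines exposed to userspace.
 *
 * Return: 0 on success, negative error code on failure.
 */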
int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = xe_gt_tlb_invalidation_init(gt);
	if (err)
		return err;

	err = xe_gt_pagefault_init(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	err = xe_gt_sysfs_init(gt);
	if (err)
		return err;

	err = gt_fw_domain_init(gt);
	if (err)
		return err;

	err = xe_gt_idle_init(&gt->gtidle);
	if (err)
		return err;

	err = xe_gt_freq_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = all_fw_domain_init(gt);
	if (err)
		return err;

	xe_gt_record_user_engines(gt);

	return 0;
}

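/**
 * xe_gt_record_user_engines() - Record the engines exposed to userspace
 * @gt: the GT object
 *
 * Rebuilds gt->user_engines from the current hw engine list, skipping
 * engines reserved for kernel use.
 */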
void xe_gt_record_user_engines(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	gt->user_engines.mask = 0;
	memset(gt->user_engines.instances_per_class, 0,
	       sizeof(gt->user_engines.instances_per_class));

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		gt->user_engines.mask |= BIT_ULL(id);
		gt->user_engines.instances_per_class[hwe->class]++;
	}

	xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
		     == gt->info.engine_mask);
}

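/*
 * Trigger a full GT reset through GDRST and wait (5000us per the literal
 * below) for the hardware to report completion, with WA 14015076503 applied
 * around the reset.
 */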
static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	xe_gsc_wa_14015076503(gt, true);

	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(gt, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	xe_gsc_wa_14015076503(gt, false);

	return err;
}

static int vf_gt_restart(struct xe_gt *gt)
{
	int err;

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	return 0;
}

static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return vf_gt_restart(gt);

	xe_pat_init(gt);

	xe_gt_enable_host_l2_vram(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	xe_mocs_init(gt);
	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id) {
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
		xe_reg_sr_apply_whitelist(hwe);
	}

	/* Get CCS mode in sync between sw/hw */
	xe_gt_apply_ccs_mode(gt);

	/* Restore GT freq to expected values */
	xe_gt_sanitize_freq(gt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_restart(gt);

	return 0;
}

static int gt_reset(struct xe_gt *gt)
{
	int err;

	if (xe_device_wedged(gt_to_xe(gt)))
		return -ECANCELED;

	/* We only support GT resets with GuC submission */
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_info(gt, "reset started\n");

	xe_pm_runtime_get(gt_to_xe(gt));

	if (xe_fault_inject_gt_reset()) {
		err = -ECANCELED;
		goto err_fail;
	}

	xe_gt_sanitize(gt);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	xe_uc_gucrc_disable(&gt->uc);
	xe_uc_stop_prepare(&gt->uc);
	xe_gt_pagefault_reset(gt);

	xe_uc_stop(&gt->uc);

	xe_gt_tlb_invalidation_reset(gt);

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);
	xe_pm_runtime_put(gt_to_xe(gt));

	xe_gt_info(gt, "reset done\n");

	return 0;

err_out:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	XE_WARN_ON(xe_uc_start(&gt->uc));
err_fail:
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

	xe_device_declare_wedged(gt_to_xe(gt));
	xe_pm_runtime_put(gt_to_xe(gt));

	return err;
}

static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);

	gt_reset(gt);
}

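/**
 * xe_gt_reset_async() - Queue a GT reset on the GT's ordered workqueue
 * @gt: the GT object
 *
 * Does nothing if a reset is already in flight; otherwise queues
 * gt_reset_worker() to perform the reset asynchronously.
 */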
void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset\n");

	/* Don't do a reset while one is already in flight */
	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");
	queue_work(gt->ordered_wq, &gt->reset.worker);
}

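/**
 * xe_gt_suspend_prepare() - Prepare the GT's uC for suspend
 * @gt: the GT object
 */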
void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL));

	xe_uc_stop_prepare(&gt->uc);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
}

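/**
 * xe_gt_suspend() - Suspend the GT
 * @gt: the GT object
 *
 * Sanitizes the GT, suspends the uC, disables power-gating and undoes the
 * host L2 VRAM workaround state.
 *
 * Return: 0 on success, negative error code on failure.
 */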
int xe_gt_suspend(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "suspending\n");
	xe_gt_sanitize(gt);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = xe_uc_suspend(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_gt_idle_disable_pg(gt);

	xe_gt_disable_host_l2_vram(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_dbg(gt, "suspended\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));

	return err;
}

/**
 * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
 * @gt: the GT object
 *
 * Called after driver init/GSC load completes to restore GT frequencies if we
 * limited them for any WAs.
 */
int xe_gt_sanitize_freq(struct xe_gt *gt)
{
	int ret = 0;

	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw)) && XE_WA(gt, 22019338487))
		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);

	return ret;
}

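/**
 * xe_gt_resume() - Resume the GT
 * @gt: the GT object
 *
 * Re-runs the GT restart sequence and re-enables power-gating.
 *
 * Return: 0 on success, negative error code on failure.
 */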
int xe_gt_resume(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "resuming\n");
	err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (err)
		goto err_msg;

	err = do_gt_restart(gt);
	if (err)
		goto err_force_wake;

	xe_gt_idle_enable_pg(gt);

	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
	xe_gt_dbg(gt, "resumed\n");

	return 0;

err_force_wake:
	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));

	return err;
}

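/**
 * xe_gt_hw_engine() - Look up a hw engine by class and instance
 * @gt: the GT object
 * @class: engine class
 * @instance: engine instance number
 * @logical: if true, match against the logical instance rather than the
 *           physical one
 *
 * Return: the matching hw engine, or NULL if none is found.
 */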
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		    (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

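/**
 * xe_gt_any_hw_engine_by_reset_domain() - Pick a hw engine sharing a reset
 * domain with @class
 * @gt: the GT object
 * @class: engine class
 *
 * Render and compute engines share a reset domain, so for either class any
 * render or compute engine qualifies; for other classes the class must match
 * exactly.
 *
 * Return: a matching hw engine, or NULL if none is found.
 */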
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							 enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}

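/**
 * xe_gt_any_hw_engine() - Return the first available hw engine on the GT
 * @gt: the GT object
 *
 * Return: a hw engine, or NULL if the GT has none.
 */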
struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		return hwe;

	return NULL;
}

/**
 * xe_gt_declare_wedged() - Declare GT wedged
 * @gt: the GT object
 *
 * Wedge the GT which stops all submission, saves desired debug state, and
 * cleans up anything which could timeout.
 */
void xe_gt_declare_wedged(struct xe_gt *gt)
{
	xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);

	xe_uc_declare_wedged(&gt->uc);
	xe_gt_tlb_invalidation_reset(gt);
}
976