xref: /linux/drivers/gpu/drm/xe/xe_gt.c (revision f5bd7da05a5988506dedcb3e67aecb3a13a4cdf0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_gt.h"
7 
8 #include <linux/minmax.h>
9 
10 #include <drm/drm_managed.h>
11 #include <uapi/drm/xe_drm.h>
12 
13 #include <generated/xe_wa_oob.h>
14 
15 #include "instructions/xe_alu_commands.h"
16 #include "instructions/xe_mi_commands.h"
17 #include "regs/xe_engine_regs.h"
18 #include "regs/xe_gt_regs.h"
19 #include "xe_assert.h"
20 #include "xe_bb.h"
21 #include "xe_device.h"
22 #include "xe_eu_stall.h"
23 #include "xe_exec_queue.h"
24 #include "xe_execlist.h"
25 #include "xe_force_wake.h"
26 #include "xe_ggtt.h"
27 #include "xe_gsc.h"
28 #include "xe_gt_ccs_mode.h"
29 #include "xe_gt_clock.h"
30 #include "xe_gt_freq.h"
31 #include "xe_gt_idle.h"
32 #include "xe_gt_mcr.h"
33 #include "xe_gt_printk.h"
34 #include "xe_gt_sriov_pf.h"
35 #include "xe_gt_sriov_vf.h"
36 #include "xe_gt_stats.h"
37 #include "xe_gt_sysfs.h"
38 #include "xe_gt_topology.h"
39 #include "xe_guc_exec_queue_types.h"
40 #include "xe_guc_pc.h"
41 #include "xe_guc_submit.h"
42 #include "xe_hw_fence.h"
43 #include "xe_hw_engine_class_sysfs.h"
44 #include "xe_irq.h"
45 #include "xe_lmtt.h"
46 #include "xe_lrc.h"
47 #include "xe_map.h"
48 #include "xe_migrate.h"
49 #include "xe_mmio.h"
50 #include "xe_pagefault.h"
51 #include "xe_pat.h"
52 #include "xe_pm.h"
53 #include "xe_mocs.h"
54 #include "xe_reg_sr.h"
55 #include "xe_ring_ops.h"
56 #include "xe_sa.h"
57 #include "xe_sched_job.h"
58 #include "xe_sriov.h"
59 #include "xe_tlb_inval.h"
60 #include "xe_tuning.h"
61 #include "xe_uc.h"
62 #include "xe_uc_fw.h"
63 #include "xe_vm.h"
64 #include "xe_wa.h"
65 #include "xe_wopcm.h"
66 
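/**
 * xe_gt_alloc() - Allocate a GT structure for a tile
 * @tile: the tile this GT belongs to
 *
 * Allocate the GT with drmm and set up its ordered workqueue. VFs that need a
 * shared GT workqueue reuse the primary GT's queue instead of allocating a
 * new one.
 *
 * Return: pointer to the new GT, or an ERR_PTR() on failure.
 */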
67 struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
68 {
69 	struct xe_device *xe = tile_to_xe(tile);
70 	struct drm_device *drm = &xe->drm;
71 	bool shared_wq = xe->info.needs_shared_vf_gt_wq && tile->primary_gt &&
72 		IS_SRIOV_VF(xe);
73 	struct workqueue_struct *ordered_wq;
74 	struct xe_gt *gt;
75 
76 	gt = drmm_kzalloc(drm, sizeof(*gt), GFP_KERNEL);
77 	if (!gt)
78 		return ERR_PTR(-ENOMEM);
79 
80 	gt->tile = tile;
81 	if (shared_wq && tile->primary_gt->ordered_wq)
82 		ordered_wq = tile->primary_gt->ordered_wq;
83 	else
84 		ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq",
85 							  WQ_MEM_RECLAIM);
86 	if (IS_ERR(ordered_wq))
87 		return ERR_CAST(ordered_wq);
88 
89 	gt->ordered_wq = ordered_wq;
90 
91 	return gt;
92 }
93 
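/* Disable GuC submission to quiesce the GT; used on the reset and suspend paths */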
94 void xe_gt_sanitize(struct xe_gt *gt)
95 {
96 	/*
97 	 * FIXME: if xe_uc_sanitize is called here, the driver will not
98 	 * reload on TGL
99 	 */
100 	xe_guc_submit_disable(&gt->uc.guc);
101 }
102 
103 static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
104 {
105 	u32 reg;
106 
107 	if (!XE_GT_WA(gt, 16023588340))
108 		return;
109 
110 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
111 	if (!fw_ref.domains)
112 		return;
113 
114 	if (xe_gt_is_main_type(gt)) {
115 		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
116 		reg |= CG_DIS_CNTLBUS;
117 		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
118 	}
119 
120 	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
121 }
122 
123 static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
124 {
125 	u32 reg;
126 
127 	if (!XE_GT_WA(gt, 16023588340))
128 		return;
129 
130 	if (xe_gt_is_media_type(gt))
131 		return;
132 
133 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
134 	if (!fw_ref.domains)
135 		return;
136 
137 	reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
138 	reg &= ~CG_DIS_CNTLBUS;
139 	xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
140 }
141 
142 static void xe_gt_enable_comp_1wcoh(struct xe_gt *gt)
143 {
144 	struct xe_device *xe = gt_to_xe(gt);
145 	u32 reg;
146 
147 	if (IS_SRIOV_VF(xe))
148 		return;
149 
150 	if (GRAPHICS_VER(xe) >= 30 && xe->info.has_flat_ccs) {
151 		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
152 		if (!fw_ref.domains)
153 			return;
154 
155 		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
156 		reg |= EN_CMP_1WCOH;
157 		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
158 
159 		if (xe_gt_is_media_type(gt)) {
160 			xe_mmio_rmw32(&gt->mmio, XE2_GAMWALK_CTRL_MEDIA, 0, EN_CMP_1WCOH_GW);
161 		} else {
162 			reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMWALK_CTRL_3D);
163 			reg |= EN_CMP_1WCOH_GW;
164 			xe_gt_mcr_multicast_write(gt, XE2_GAMWALK_CTRL_3D, reg);
165 		}
166 	}
167 }
168 
169 static void gt_reset_worker(struct work_struct *w);
170 
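/*
 * Create a job for @bb on @q, submit it and wait up to @timeout_jiffies for
 * completion. Returns 0 on success, -ETIME on timeout, or a negative error
 * code from job creation or the wait.
 */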
171 static int emit_job_sync(struct xe_exec_queue *q, struct xe_bb *bb,
172 			 long timeout_jiffies)
173 {
174 	struct xe_sched_job *job;
175 	struct dma_fence *fence;
176 	long timeout;
177 
178 	job = xe_bb_create_job(q, bb);
179 	if (IS_ERR(job))
180 		return PTR_ERR(job);
181 
182 	xe_sched_job_arm(job);
183 	fence = dma_fence_get(&job->drm.s_fence->finished);
184 	xe_sched_job_push(job);
185 
186 	timeout = dma_fence_wait_timeout(fence, false, timeout_jiffies);
187 	dma_fence_put(fence);
188 	if (timeout < 0)
189 		return timeout;
190 	else if (!timeout)
191 		return -ETIME;
192 
193 	return 0;
194 }
195 
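/* Submit a small empty batch on @q and wait for it to complete */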
196 static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
197 {
198 	struct xe_bb *bb;
199 	int ret;
200 
201 	bb = xe_bb_new(gt, 4, false);
202 	if (IS_ERR(bb))
203 		return PTR_ERR(bb);
204 
205 	ret = emit_job_sync(q, bb, HZ);
206 	xe_bb_free(bb, NULL);
207 
208 	return ret;
209 }
210 
211 /* Dwords required to emit a RMW of a register */
212 #define EMIT_RMW_DW 20
213 
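/*
 * Build and synchronously submit a batch that applies the engine's LRC
 * save/restore entries: masked and clear-all registers via a single LRI,
 * everything else via GPR-based read-modify-write sequences, plus any
 * 3DSTATE emitted for the render class.
 */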
214 static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
215 {
216 	struct xe_hw_engine *hwe = q->hwe;
217 	struct xe_reg_sr *sr = &hwe->reg_lrc;
218 	struct xe_reg_sr_entry *entry;
219 	int count_rmw = 0, count_rmw_mcr = 0, count = 0, ret;
220 	unsigned long idx;
221 	struct xe_bb *bb;
222 	size_t bb_len = 0;
223 	u32 *cs;
224 
225 	/* count RMW registers as those will be handled separately */
226 	xa_for_each(&sr->xa, idx, entry) {
227 		if (entry->reg.masked || entry->clr_bits == ~0)
228 			++count;
229 		else if (entry->reg.mcr)
230 			++count_rmw_mcr;
231 		else
232 			++count_rmw;
233 	}
234 
235 	if (count)
236 		bb_len += count * 2 + 1;
237 
238 	/*
239 	 * RMW of MCR registers is the same as a normal RMW, except an
240 	 * additional LRI (3 dwords) is required per register to steer the read
241 	 * to a non-terminated instance.
242 	 *
243 	 * We could probably shorten the batch slightly by eliding the
244 	 * steering for consecutive MCR registers that have the same
245 	 * group/instance target, but it's not worth the extra complexity to do
246 	 * so.
247 	 */
248 	bb_len += count_rmw * EMIT_RMW_DW;
249 	bb_len += count_rmw_mcr * (EMIT_RMW_DW + 3);
250 
251 	/*
252 	 * After doing all RMW, we need 7 trailing dwords to clean up,
253 	 * plus an additional 3 dwords to reset steering if any of the
254 	 * registers were MCR.
255 	 */
256 	if (count_rmw || count_rmw_mcr)
257 		bb_len += 7 + (count_rmw_mcr ? 3 : 0);
258 
259 	if (hwe->class == XE_ENGINE_CLASS_RENDER)
260 		/*
261 		 * Big enough to emit all of the context's 3DSTATE via
262 		 * xe_lrc_emit_hwe_state_instructions()
263 		 */
264 		bb_len += xe_gt_lrc_size(gt, hwe->class) / sizeof(u32);
265 
266 	xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", hwe->name, bb_len);
267 
268 	bb = xe_bb_new(gt, bb_len, false);
269 	if (IS_ERR(bb))
270 		return PTR_ERR(bb);
271 
272 	cs = bb->cs;
273 
274 	if (count) {
275 		/*
276 		 * Emit a single LRI with all non-RMW regs: 1 leading dword
277 		 * plus 2 dwords per reg
278 		 */
279 
280 		*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
281 
282 		xa_for_each(&sr->xa, idx, entry) {
283 			struct xe_reg reg = entry->reg;
284 			u32 val;
285 
286 			if (reg.masked)
287 				val = entry->clr_bits << 16;
288 			else if (entry->clr_bits == ~0)
289 				val = 0;
290 			else
291 				continue;
292 
293 			val |= entry->set_bits;
294 
295 			*cs++ = reg.addr;
296 			*cs++ = val;
297 			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
298 		}
299 	}
300 
301 	if (count_rmw || count_rmw_mcr) {
302 		xa_for_each(&sr->xa, idx, entry) {
303 			if (entry->reg.masked || entry->clr_bits == ~0)
304 				continue;
305 
306 			if (entry->reg.mcr) {
307 				struct xe_reg_mcr reg = { .__reg.raw = entry->reg.raw };
308 				u8 group, instance;
309 
310 				xe_gt_mcr_get_nonterminated_steering(gt, reg, &group, &instance);
311 				*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
312 				*cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(hwe->mmio_base).addr;
313 				*cs++ = SELECTIVE_READ_ADDRESSING |
314 					REG_FIELD_PREP(SELECTIVE_READ_GROUP, group) |
315 					REG_FIELD_PREP(SELECTIVE_READ_INSTANCE, instance);
316 			}
317 
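			/*
			 * Read the current register value into GPR0, then load
			 * the clear and set masks into GPR1 and GPR2.
			 */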
318 			*cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
319 			*cs++ = entry->reg.addr;
320 			*cs++ = CS_GPR_REG(0, 0).addr;
321 
322 			*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
323 				MI_LRI_LRM_CS_MMIO;
324 			*cs++ = CS_GPR_REG(0, 1).addr;
325 			*cs++ = entry->clr_bits;
326 			*cs++ = CS_GPR_REG(0, 2).addr;
327 			*cs++ = entry->set_bits;
328 
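			/* ALU: GPR0 = (GPR0 & ~GPR1) | GPR2 */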
329 			*cs++ = MI_MATH(8);
330 			*cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
331 			*cs++ = CS_ALU_INSTR_LOADINV(SRCB, REG1);
332 			*cs++ = CS_ALU_INSTR_AND;
333 			*cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
334 			*cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
335 			*cs++ = CS_ALU_INSTR_LOAD(SRCB, REG2);
336 			*cs++ = CS_ALU_INSTR_OR;
337 			*cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
338 
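			/* Write the updated value in GPR0 back to the target register */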
339 			*cs++ = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
340 			*cs++ = CS_GPR_REG(0, 0).addr;
341 			*cs++ = entry->reg.addr;
342 
343 			xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x%s\n",
344 				  entry->reg.addr, entry->clr_bits, entry->set_bits,
345 				  entry->reg.mcr ? " (MCR)" : "");
346 		}
347 
348 		/* reset used GPR */
349 		*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) |
350 			MI_LRI_LRM_CS_MMIO;
351 		*cs++ = CS_GPR_REG(0, 0).addr;
352 		*cs++ = 0;
353 		*cs++ = CS_GPR_REG(0, 1).addr;
354 		*cs++ = 0;
355 		*cs++ = CS_GPR_REG(0, 2).addr;
356 		*cs++ = 0;
357 
358 		/* reset steering */
359 		if (count_rmw_mcr) {
360 			*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
361 			*cs++ = CS_MMIO_GROUP_INSTANCE_SELECT(q->hwe->mmio_base).addr;
362 			*cs++ = 0;
363 		}
364 	}
365 
366 	cs = xe_lrc_emit_hwe_state_instructions(q, cs);
367 
368 	bb->len = cs - bb->cs;
369 
370 	ret = emit_job_sync(q, bb, HZ);
371 
372 	xe_bb_free(bb, NULL);
373 
374 	return ret;
375 }
376 
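/**
 * xe_gt_record_default_lrcs() - Record a default LRC image per engine class
 * @gt: the GT object
 *
 * For each engine class, run a workaround job to prime the context with known
 * good state, switch away with a nop job on a second queue, then copy the
 * resulting context image (from the PPHWSP onwards) into gt->default_lrc[].
 *
 * Return: 0 on success, negative error code otherwise.
 */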
377 int xe_gt_record_default_lrcs(struct xe_gt *gt)
378 {
379 	struct xe_device *xe = gt_to_xe(gt);
380 	struct xe_hw_engine *hwe;
381 	enum xe_hw_engine_id id;
382 	int err = 0;
383 
384 	for_each_hw_engine(hwe, gt, id) {
385 		struct xe_exec_queue *q, *nop_q;
386 		void *default_lrc;
387 
388 		if (gt->default_lrc[hwe->class])
389 			continue;
390 
391 		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
392 		xe_wa_process_lrc(hwe);
393 		xe_hw_engine_setup_default_lrc_state(hwe);
394 		xe_tuning_process_lrc(hwe);
395 
396 		default_lrc = drmm_kzalloc(&xe->drm,
397 					   xe_gt_lrc_size(gt, hwe->class),
398 					   GFP_KERNEL);
399 		if (!default_lrc)
400 			return -ENOMEM;
401 
402 		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
403 					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
404 		if (IS_ERR(q)) {
405 			err = PTR_ERR(q);
406 			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
407 				  hwe->name, q);
408 			return err;
409 		}
410 
411 		/* Prime golden LRC with known good state */
412 		err = emit_wa_job(gt, q);
413 		if (err) {
414 			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
415 				  hwe->name, ERR_PTR(err), q->guc->id);
416 			goto put_exec_queue;
417 		}
418 
419 		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
420 					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
421 		if (IS_ERR(nop_q)) {
422 			err = PTR_ERR(nop_q);
423 			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
424 				  hwe->name, nop_q);
425 			goto put_exec_queue;
426 		}
427 
428 		/* Switch to different LRC */
429 		err = emit_nop_job(gt, nop_q);
430 		if (err) {
431 			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
432 				  hwe->name, ERR_PTR(err), nop_q->guc->id);
433 			goto put_nop_q;
434 		}
435 
436 		xe_map_memcpy_from(xe, default_lrc,
437 				   &q->lrc[0]->bo->vmap,
438 				   xe_lrc_pphwsp_offset(q->lrc[0]),
439 				   xe_gt_lrc_size(gt, hwe->class));
440 
441 		gt->default_lrc[hwe->class] = default_lrc;
442 put_nop_q:
443 		xe_exec_queue_put(nop_q);
444 put_exec_queue:
445 		xe_exec_queue_put(q);
446 		if (err)
447 			break;
448 	}
449 
450 	return err;
451 }
452 
453 int xe_gt_init_early(struct xe_gt *gt)
454 {
455 	int err;
456 
457 	if (IS_SRIOV_PF(gt_to_xe(gt))) {
458 		err = xe_gt_sriov_pf_init_early(gt);
459 		if (err)
460 			return err;
461 	}
462 
463 	if (IS_SRIOV_VF(gt_to_xe(gt))) {
464 		err = xe_gt_sriov_vf_init_early(gt);
465 		if (err)
466 			return err;
467 	}
468 
469 	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));
470 
471 	err = xe_wa_gt_init(gt);
472 	if (err)
473 		return err;
474 
475 	err = xe_tuning_init(gt);
476 	if (err)
477 		return err;
478 
479 	xe_wa_process_gt_oob(gt);
480 
481 	xe_force_wake_init_gt(gt, gt_to_fw(gt));
482 	spin_lock_init(&gt->global_invl_lock);
483 
484 	err = xe_gt_tlb_inval_init_early(gt);
485 	if (err)
486 		return err;
487 
488 	xe_mocs_init_early(gt);
489 
490 	/*
491 	 * Only after this point can GT-specific MMIO operations
492 	 * (including things like communication with the GuC)
493 	 * be performed.
494 	 */
495 	xe_gt_mmio_init(gt);
496 
497 	err = xe_uc_init_noalloc(&gt->uc);
498 	if (err)
499 		return err;
500 
501 	err = xe_gt_stats_init(gt);
502 	if (err)
503 		return err;
504 
505 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
506 	if (!fw_ref.domains)
507 		return -ETIMEDOUT;
508 
509 	xe_gt_mcr_init_early(gt);
510 	xe_pat_init(gt);
511 
512 	return 0;
513 }
514 
515 static void dump_pat_on_error(struct xe_gt *gt)
516 {
517 	struct drm_printer p;
518 	char prefix[32];
519 
520 	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
521 	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);
522 
523 	xe_pat_dump(gt, &p);
524 }
525 
526 static int gt_init_with_gt_forcewake(struct xe_gt *gt)
527 {
528 	int err;
529 
530 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
531 	if (!fw_ref.domains)
532 		return -ETIMEDOUT;
533 
534 	err = xe_uc_init(&gt->uc);
535 	if (err)
536 		return err;
537 
538 	xe_gt_topology_init(gt);
539 	xe_gt_mcr_init(gt);
540 	xe_gt_enable_host_l2_vram(gt);
541 	xe_gt_enable_comp_1wcoh(gt);
542 
543 	if (xe_gt_is_main_type(gt)) {
544 		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
545 		if (err)
546 			return err;
547 		if (IS_SRIOV_PF(gt_to_xe(gt)))
548 			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
549 	}
550 
551 	/* Enable per hw engine IRQs */
552 	xe_irq_enable_hwe(gt);
553 
554 	/* Rerun MCR init as we now have the hw engine list */
555 	xe_gt_mcr_init(gt);
556 
557 	err = xe_hw_engines_init_early(gt);
558 	if (err) {
559 		dump_pat_on_error(gt);
560 		return err;
561 	}
562 
563 	err = xe_hw_engine_class_sysfs_init(gt);
564 	if (err)
565 		return err;
566 
567 	/* Initialize CCS mode sysfs after early initialization of HW engines */
568 	err = xe_gt_ccs_mode_sysfs_init(gt);
569 	if (err)
570 		return err;
571 
572 	/*
573 	 * Stash hardware-reported version.  Since this register does not exist
574 	 * on pre-MTL platforms, reading it there will (correctly) return 0.
575 	 */
576 	gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);
577 
578 	return 0;
579 }
580 
581 static int gt_init_with_all_forcewake(struct xe_gt *gt)
582 {
583 	int err;
584 
585 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
586 	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL))
587 		return -ETIMEDOUT;
588 
589 	xe_gt_mcr_set_implicit_defaults(gt);
590 	xe_wa_process_gt(gt);
591 	xe_tuning_process_gt(gt);
592 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
593 
594 	err = xe_gt_clock_init(gt);
595 	if (err)
596 		return err;
597 
598 	xe_mocs_init(gt);
599 	err = xe_execlist_init(gt);
600 	if (err)
601 		return err;
602 
603 	err = xe_hw_engines_init(gt);
604 	if (err)
605 		return err;
606 
607 	err = xe_uc_init_post_hwconfig(&gt->uc);
608 	if (err)
609 		return err;
610 
611 	if (xe_gt_is_main_type(gt)) {
612 		/*
613 		 * USM has its own SA pool so it does not block behind user operations
614 		 */
615 		if (gt_to_xe(gt)->info.has_usm) {
616 			struct xe_device *xe = gt_to_xe(gt);
617 
618 			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
619 								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
620 			if (IS_ERR(gt->usm.bb_pool))
621 				return PTR_ERR(gt->usm.bb_pool);
622 		}
623 	}
624 
625 	if (xe_gt_is_main_type(gt)) {
626 		struct xe_tile *tile = gt_to_tile(gt);
627 
628 		err = xe_migrate_init(tile->migrate);
629 		if (err)
630 			return err;
631 	}
632 
633 	err = xe_uc_load_hw(&gt->uc);
634 	if (err)
635 		return err;
636 
637 	/* Configure default CCS mode of 1 engine with all resources */
638 	if (xe_gt_ccs_mode_enabled(gt)) {
639 		gt->ccs_mode = 1;
640 		xe_gt_apply_ccs_mode(gt);
641 	}
642 
643 	if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
644 		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
645 
646 	if (IS_SRIOV_PF(gt_to_xe(gt)))
647 		xe_gt_sriov_pf_init_hw(gt);
648 
649 	return 0;
650 }
651 
652 static void xe_gt_fini(void *arg)
653 {
654 	struct xe_gt *gt = arg;
655 	int i;
656 
657 	if (disable_work_sync(&gt->reset.worker))
658 		/*
659 		 * If gt_reset_worker was halted from executing, take care of
660 		 * releasing the rpm reference here.
661 		 */
662 		xe_pm_runtime_put(gt_to_xe(gt));
663 
664 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
665 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
666 
667 	xe_gt_disable_host_l2_vram(gt);
668 }
669 
670 int xe_gt_init(struct xe_gt *gt)
671 {
672 	int err;
673 	int i;
674 
675 	INIT_WORK(&gt->reset.worker, gt_reset_worker);
676 
677 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
678 		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
679 		xe_hw_fence_irq_init(&gt->fence_irq[i]);
680 	}
681 
682 	err = devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, xe_gt_fini, gt);
683 	if (err)
684 		return err;
685 
686 	err = xe_gt_sysfs_init(gt);
687 	if (err)
688 		return err;
689 
690 	err = gt_init_with_gt_forcewake(gt);
691 	if (err)
692 		return err;
693 
694 	err = xe_gt_idle_init(&gt->gtidle);
695 	if (err)
696 		return err;
697 
698 	err = xe_gt_freq_init(gt);
699 	if (err)
700 		return err;
701 
702 	xe_force_wake_init_engines(gt, gt_to_fw(gt));
703 
704 	err = gt_init_with_all_forcewake(gt);
705 	if (err)
706 		return err;
707 
708 	xe_gt_record_user_engines(gt);
709 
710 	err = xe_eu_stall_init(gt);
711 	if (err)
712 		return err;
713 
714 	if (IS_SRIOV_VF(gt_to_xe(gt))) {
715 		err = xe_gt_sriov_vf_init(gt);
716 		if (err)
717 			return err;
718 	}
719 
720 	return 0;
721 }
722 
723 /**
724  * xe_gt_mmio_init() - Initialize GT's MMIO access
725  * @gt: the GT object
726  *
727  * Initialize GT's MMIO accessor, which will be used to access registers inside
728  * this GT.
729  */
730 void xe_gt_mmio_init(struct xe_gt *gt)
731 {
732 	struct xe_tile *tile = gt_to_tile(gt);
733 	struct xe_device *xe = tile_to_xe(tile);
734 
735 	xe_mmio_init(&gt->mmio, tile, tile->mmio.regs, tile->mmio.regs_size);
736 
737 	if (gt->info.type == XE_GT_TYPE_MEDIA) {
738 		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
739 		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
740 	} else {
741 		gt->mmio.adj_offset = 0;
742 		gt->mmio.adj_limit = 0;
743 	}
744 
745 	if (IS_SRIOV_VF(xe))
746 		gt->mmio.sriov_vf_gt = gt;
747 }
748 
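/*
 * Record the engines exposed to userspace: all engines on the GT except those
 * reserved for kernel use.
 */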
749 void xe_gt_record_user_engines(struct xe_gt *gt)
750 {
751 	struct xe_hw_engine *hwe;
752 	enum xe_hw_engine_id id;
753 
754 	gt->user_engines.mask = 0;
755 	memset(gt->user_engines.instances_per_class, 0,
756 	       sizeof(gt->user_engines.instances_per_class));
757 
758 	for_each_hw_engine(hwe, gt, id) {
759 		if (xe_hw_engine_is_reserved(hwe))
760 			continue;
761 
762 		gt->user_engines.mask |= BIT_ULL(id);
763 		gt->user_engines.instances_per_class[hwe->class]++;
764 	}
765 
766 	xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
767 		     == gt->info.engine_mask);
768 }
769 
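/*
 * Trigger a full GT reset via GDRST and wait for the hardware to clear the
 * reset bit; VFs delegate the reset through xe_gt_sriov_vf_reset() instead.
 */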
770 static int do_gt_reset(struct xe_gt *gt)
771 {
772 	int err;
773 
774 	if (IS_SRIOV_VF(gt_to_xe(gt)))
775 		return xe_gt_sriov_vf_reset(gt);
776 
777 	xe_gsc_wa_14015076503(gt, true);
778 
779 	xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
780 	err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
781 	if (err)
782 		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
783 			  ERR_PTR(err));
784 
785 	xe_gsc_wa_14015076503(gt, false);
786 
787 	return err;
788 }
789 
790 static int vf_gt_restart(struct xe_gt *gt)
791 {
792 	int err;
793 
794 	err = xe_uc_sanitize_reset(&gt->uc);
795 	if (err)
796 		return err;
797 
798 	err = xe_uc_load_hw(&gt->uc);
799 	if (err)
800 		return err;
801 
802 	err = xe_uc_start(&gt->uc);
803 	if (err)
804 		return err;
805 
806 	return 0;
807 }
808 
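/*
 * Reprogram GT state that is lost across a reset: PAT, MCR defaults,
 * save/restore registers, WOPCM, ring enables, uC firmware, MOCS, CCS mode
 * and stashed frequencies.
 */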
809 static int do_gt_restart(struct xe_gt *gt)
810 {
811 	struct xe_hw_engine *hwe;
812 	enum xe_hw_engine_id id;
813 	int err;
814 
815 	if (IS_SRIOV_VF(gt_to_xe(gt)))
816 		return vf_gt_restart(gt);
817 
818 	xe_pat_init(gt);
819 
820 	xe_gt_enable_host_l2_vram(gt);
821 	xe_gt_enable_comp_1wcoh(gt);
822 
823 	xe_gt_mcr_set_implicit_defaults(gt);
824 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
825 
826 	err = xe_wopcm_init(&gt->uc.wopcm);
827 	if (err)
828 		return err;
829 
830 	for_each_hw_engine(hwe, gt, id)
831 		xe_hw_engine_enable_ring(hwe);
832 
833 	err = xe_uc_sanitize_reset(&gt->uc);
834 	if (err)
835 		return err;
836 
837 	err = xe_uc_load_hw(&gt->uc);
838 	if (err)
839 		return err;
840 
841 	if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
842 		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
843 
844 	if (IS_SRIOV_PF(gt_to_xe(gt)))
845 		xe_gt_sriov_pf_init_hw(gt);
846 
847 	xe_mocs_init(gt);
848 
849 	for_each_hw_engine(hwe, gt, id)
850 		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
851 
852 	/* Get CCS mode in sync between sw/hw */
853 	xe_gt_apply_ccs_mode(gt);
854 
855 	err = xe_uc_start(&gt->uc);
856 	if (err)
857 		return err;
858 
859 	/* Restore GT freq to expected values */
860 	xe_gt_sanitize_freq(gt);
861 
862 	if (IS_SRIOV_PF(gt_to_xe(gt)))
863 		xe_gt_sriov_pf_restart(gt);
864 
865 	return 0;
866 }
867 
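/*
 * Full GT reset path: quiesce the uC and pagefault handling, reset TLB
 * invalidation state, perform the hardware reset and then restart the GT.
 * Declares the device wedged if any step fails.
 */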
868 static void gt_reset_worker(struct work_struct *w)
869 {
870 	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
871 	unsigned int fw_ref;
872 	int err;
873 
874 	if (xe_device_wedged(gt_to_xe(gt)))
875 		goto err_pm_put;
876 
877 	/* We only support GT resets with GuC submission */
878 	if (!xe_device_uc_enabled(gt_to_xe(gt)))
879 		goto err_pm_put;
880 
881 	xe_gt_info(gt, "reset started\n");
882 
883 	if (xe_fault_inject_gt_reset()) {
884 		err = -ECANCELED;
885 		goto err_fail;
886 	}
887 
888 	xe_gt_sanitize(gt);
889 
890 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
891 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
892 		err = -ETIMEDOUT;
893 		goto err_out;
894 	}
895 
896 	if (IS_SRIOV_PF(gt_to_xe(gt)))
897 		xe_gt_sriov_pf_stop_prepare(gt);
898 
899 	xe_uc_stop_prepare(&gt->uc);
900 	xe_pagefault_reset(gt_to_xe(gt), gt);
901 
902 	xe_uc_stop(&gt->uc);
903 
904 	xe_tlb_inval_reset(&gt->tlb_inval);
905 
906 	err = do_gt_reset(gt);
907 	if (err)
908 		goto err_out;
909 
910 	err = do_gt_restart(gt);
911 	if (err)
912 		goto err_out;
913 
914 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
915 
916 	/* Pair with get while enqueueing the work in xe_gt_reset_async() */
917 	xe_pm_runtime_put(gt_to_xe(gt));
918 
919 	xe_gt_info(gt, "reset done\n");
920 
921 	return;
922 
923 err_out:
924 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
925 	XE_WARN_ON(xe_uc_start(&gt->uc));
926 
927 err_fail:
928 	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
929 	xe_device_declare_wedged(gt_to_xe(gt));
930 err_pm_put:
931 	xe_pm_runtime_put(gt_to_xe(gt));
932 }
933 
934 void xe_gt_reset_async(struct xe_gt *gt)
935 {
936 	xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0));
937 
938 	/* Don't do a reset while one is already in flight */
939 	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
940 		return;
941 
942 	xe_gt_info(gt, "reset queued\n");
943 
944 	/* Pair with put in gt_reset_worker() if work is enqueued */
945 	xe_pm_runtime_get_noresume(gt_to_xe(gt));
946 	if (!queue_work(gt->ordered_wq, &gt->reset.worker))
947 		xe_pm_runtime_put(gt_to_xe(gt));
948 }
949 
950 void xe_gt_suspend_prepare(struct xe_gt *gt)
951 {
952 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
953 	xe_uc_suspend_prepare(&gt->uc);
954 }
955 
956 int xe_gt_suspend(struct xe_gt *gt)
957 {
958 	int err;
959 
960 	xe_gt_dbg(gt, "suspending\n");
961 	xe_gt_sanitize(gt);
962 
963 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
964 	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
965 		xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
966 		return -ETIMEDOUT;
967 	}
968 
969 	err = xe_uc_suspend(&gt->uc);
970 	if (err) {
971 		xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
972 		return err;
973 	}
974 
975 	xe_gt_idle_disable_pg(gt);
976 
977 	xe_gt_disable_host_l2_vram(gt);
978 
979 	xe_gt_dbg(gt, "suspended\n");
980 
981 	return 0;
982 }
983 
984 void xe_gt_shutdown(struct xe_gt *gt)
985 {
986 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
987 	do_gt_reset(gt);
988 }
989 
990 /**
991  * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
992  * @gt: the GT object
993  *
994  * Called after driver init/GSC load completes to restore GT frequencies if we
995  * limited them for any WAs.
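 *
 * Return: 0 on success, negative error code otherwise.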
996  */
997 int xe_gt_sanitize_freq(struct xe_gt *gt)
998 {
999 	int ret = 0;
1000 
1001 	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
1002 	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
1003 	     xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
1004 	    XE_GT_WA(gt, 22019338487))
1005 		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
1006 
1007 	return ret;
1008 }
1009 
1010 int xe_gt_resume(struct xe_gt *gt)
1011 {
1012 	int err;
1013 
1014 	xe_gt_dbg(gt, "resuming\n");
1015 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
1016 	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
1017 		xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
1018 		return -ETIMEDOUT;
1019 	}
1020 
1021 	err = do_gt_restart(gt);
1022 	if (err)
1023 		return err;
1024 
1025 	xe_gt_idle_enable_pg(gt);
1026 
1027 	xe_gt_dbg(gt, "resumed\n");
1028 
1029 	return 0;
1030 }
1031 
1032 /**
1033  * xe_gt_runtime_suspend() - GT runtime suspend
1034  * @gt: the GT object
1035  *
1036  * Return: 0 on success, negative error code otherwise.
1037  */
1038 int xe_gt_runtime_suspend(struct xe_gt *gt)
1039 {
1040 	xe_gt_dbg(gt, "runtime suspending\n");
1041 
1042 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
1043 	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
1044 		xe_gt_err(gt, "runtime suspend failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
1045 		return -ETIMEDOUT;
1046 	}
1047 
1048 	xe_uc_runtime_suspend(&gt->uc);
1049 	xe_gt_disable_host_l2_vram(gt);
1050 
1051 	xe_gt_dbg(gt, "runtime suspended\n");
1052 
1053 	return 0;
1054 }
1055 
1056 /**
1057  * xe_gt_runtime_resume() - GT runtime resume
1058  * @gt: the GT object
1059  *
1060  * Return: 0 on success, negative error code otherwise.
1061  */
1062 int xe_gt_runtime_resume(struct xe_gt *gt)
1063 {
1064 	xe_gt_dbg(gt, "runtime resuming\n");
1065 
1066 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
1067 	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
1068 		xe_gt_err(gt, "runtime resume failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
1069 		return -ETIMEDOUT;
1070 	}
1071 
1072 	xe_gt_enable_host_l2_vram(gt);
1073 	xe_uc_runtime_resume(&gt->uc);
1074 
1075 	xe_gt_dbg(gt, "runtime resumed\n");
1076 
1077 	return 0;
1078 }
1079 
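/*
 * Look up a hardware engine on @gt by class and instance; @logical selects
 * whether @instance refers to the logical or the physical instance number.
 */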
1080 struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
1081 				     enum xe_engine_class class,
1082 				     u16 instance, bool logical)
1083 {
1084 	struct xe_hw_engine *hwe;
1085 	enum xe_hw_engine_id id;
1086 
1087 	for_each_hw_engine(hwe, gt, id)
1088 		if (hwe->class == class &&
1089 		    ((!logical && hwe->instance == instance) ||
1090 		    (logical && hwe->logical_instance == instance)))
1091 			return hwe;
1092 
1093 	return NULL;
1094 }
1095 
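/*
 * Return any engine that shares a reset domain with @class; render and
 * compute engines are treated as a single domain.
 */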
1096 struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
1097 							 enum xe_engine_class class)
1098 {
1099 	struct xe_hw_engine *hwe;
1100 	enum xe_hw_engine_id id;
1101 
1102 	for_each_hw_engine(hwe, gt, id) {
1103 		switch (class) {
1104 		case XE_ENGINE_CLASS_RENDER:
1105 		case XE_ENGINE_CLASS_COMPUTE:
1106 			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
1107 			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
1108 				return hwe;
1109 			break;
1110 		default:
1111 			if (hwe->class == class)
1112 				return hwe;
1113 		}
1114 	}
1115 
1116 	return NULL;
1117 }
1118 
1119 struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
1120 {
1121 	struct xe_hw_engine *hwe;
1122 	enum xe_hw_engine_id id;
1123 
1124 	for_each_hw_engine(hwe, gt, id)
1125 		return hwe;
1126 
1127 	return NULL;
1128 }
1129 
1130 /**
1131  * xe_gt_declare_wedged() - Declare GT wedged
1132  * @gt: the GT object
1133  *
1134  * Wedge the GT which stops all submission, saves desired debug state, and
1135  * cleans up anything which could timeout.
1136  */
1137 void xe_gt_declare_wedged(struct xe_gt *gt)
1138 {
1139 	xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);
1140 
1141 	xe_uc_declare_wedged(&gt->uc);
1142 	xe_tlb_inval_reset(&gt->tlb_inval);
1143 }
1144