xref: /linux/drivers/gpu/drm/xe/xe_gt.c (revision 5f2b6c5f6b692c696a232d12c43b8e41c0d393b9)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_gt.h"
7 
8 #include <linux/minmax.h>
9 
10 #include <drm/drm_managed.h>
11 #include <uapi/drm/xe_drm.h>
12 
13 #include <generated/xe_wa_oob.h>
14 
15 #include "instructions/xe_alu_commands.h"
16 #include "instructions/xe_gfxpipe_commands.h"
17 #include "instructions/xe_mi_commands.h"
18 #include "regs/xe_engine_regs.h"
19 #include "regs/xe_gt_regs.h"
20 #include "xe_assert.h"
21 #include "xe_bb.h"
22 #include "xe_bo.h"
23 #include "xe_device.h"
24 #include "xe_eu_stall.h"
25 #include "xe_exec_queue.h"
26 #include "xe_execlist.h"
27 #include "xe_force_wake.h"
28 #include "xe_ggtt.h"
29 #include "xe_gsc.h"
30 #include "xe_gt_ccs_mode.h"
31 #include "xe_gt_clock.h"
32 #include "xe_gt_freq.h"
33 #include "xe_gt_idle.h"
34 #include "xe_gt_mcr.h"
35 #include "xe_gt_pagefault.h"
36 #include "xe_gt_printk.h"
37 #include "xe_gt_sriov_pf.h"
38 #include "xe_gt_sriov_vf.h"
39 #include "xe_gt_sysfs.h"
40 #include "xe_gt_tlb_invalidation.h"
41 #include "xe_gt_topology.h"
42 #include "xe_guc_exec_queue_types.h"
43 #include "xe_guc_pc.h"
44 #include "xe_hw_fence.h"
45 #include "xe_hw_engine_class_sysfs.h"
46 #include "xe_irq.h"
47 #include "xe_lmtt.h"
48 #include "xe_lrc.h"
49 #include "xe_map.h"
50 #include "xe_migrate.h"
51 #include "xe_mmio.h"
52 #include "xe_pat.h"
53 #include "xe_pm.h"
54 #include "xe_mocs.h"
55 #include "xe_reg_sr.h"
56 #include "xe_ring_ops.h"
57 #include "xe_sa.h"
58 #include "xe_sched_job.h"
59 #include "xe_sriov.h"
60 #include "xe_tuning.h"
61 #include "xe_uc.h"
62 #include "xe_uc_fw.h"
63 #include "xe_vm.h"
64 #include "xe_wa.h"
65 #include "xe_wopcm.h"
66 
67 static void gt_fini(struct drm_device *drm, void *arg)
68 {
69 	struct xe_gt *gt = arg;
70 
71 	destroy_workqueue(gt->ordered_wq);
72 }
73 
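/**
 * xe_gt_alloc - Allocate and minimally initialize a GT
 * @tile: the &xe_tile this GT belongs to
 *
 * The GT is allocated as DRM-managed memory and an ordered workqueue is
 * created for it; the workqueue is torn down by the gt_fini() action
 * registered here.
 *
 * Return: pointer to the new GT, or an ERR_PTR() on failure.
 */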
74 struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
75 {
76 	struct xe_gt *gt;
77 	int err;
78 
79 	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
80 	if (!gt)
81 		return ERR_PTR(-ENOMEM);
82 
83 	gt->tile = tile;
84 	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq",
85 						 WQ_MEM_RECLAIM);
86 
87 	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
88 	if (err)
89 		return ERR_PTR(err);
90 
91 	return gt;
92 }
93 
94 void xe_gt_sanitize(struct xe_gt *gt)
95 {
96 	/*
97 	 * FIXME: if xe_uc_sanitize is called here, the driver will not
98 	 * reload on TGL
99 	 */
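	/*
	 * Mark GuC submission as disabled so nothing is submitted to a stale
	 * GuC; submission is re-enabled once the GuC is reloaded on the next
	 * reset/resume.
	 */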
100 	gt->uc.guc.submission_state.enabled = false;
101 }
102 
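/*
 * Helpers for WA 16023588340 (host L2 access to VRAM): the enable path sets
 * CG_DIS_CNTLBUS in XE2_GAMREQSTRM_CTRL (primary GT only) and opens L3 CLOS
 * set 3, while the disable path, used on suspend/teardown, clears the bit
 * again.
 */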
103 static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
104 {
105 	unsigned int fw_ref;
106 	u32 reg;
107 
108 	if (!XE_WA(gt, 16023588340))
109 		return;
110 
111 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
112 	if (!fw_ref)
113 		return;
114 
115 	if (!xe_gt_is_media_type(gt)) {
116 		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
117 		reg |= CG_DIS_CNTLBUS;
118 		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
119 	}
120 
121 	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
122 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
123 }
124 
125 static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
126 {
127 	unsigned int fw_ref;
128 	u32 reg;
129 
130 	if (!XE_WA(gt, 16023588340))
131 		return;
132 
133 	if (xe_gt_is_media_type(gt))
134 		return;
135 
136 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
137 	if (!fw_ref)
138 		return;
139 
140 	reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
141 	reg &= ~CG_DIS_CNTLBUS;
142 	xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
143 
144 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
145 }
146 
147 static void gt_reset_worker(struct work_struct *w);
148 
149 static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
150 {
151 	struct xe_sched_job *job;
152 	struct xe_bb *bb;
153 	struct dma_fence *fence;
154 	long timeout;
155 
156 	bb = xe_bb_new(gt, 4, false);
157 	if (IS_ERR(bb))
158 		return PTR_ERR(bb);
159 
160 	job = xe_bb_create_job(q, bb);
161 	if (IS_ERR(job)) {
162 		xe_bb_free(bb, NULL);
163 		return PTR_ERR(job);
164 	}
165 
166 	xe_sched_job_arm(job);
167 	fence = dma_fence_get(&job->drm.s_fence->finished);
168 	xe_sched_job_push(job);
169 
170 	timeout = dma_fence_wait_timeout(fence, false, HZ);
171 	dma_fence_put(fence);
172 	xe_bb_free(bb, NULL);
173 	if (timeout < 0)
174 		return timeout;
175 	else if (!timeout)
176 		return -ETIME;
177 
178 	return 0;
179 }
180 
181 static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
182 {
183 	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
184 	struct xe_reg_sr_entry *entry;
185 	unsigned long idx;
186 	struct xe_sched_job *job;
187 	struct xe_bb *bb;
188 	struct dma_fence *fence;
189 	long timeout;
190 	int count_rmw = 0;
191 	int count = 0;
192 
193 	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
194 		/* Big enough to emit all of the context's 3DSTATE */
195 		bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false);
196 	else
197 		/* Just pick a large BB size */
198 		bb = xe_bb_new(gt, SZ_4K, false);
199 
200 	if (IS_ERR(bb))
201 		return PTR_ERR(bb);
202 
203 	/* count RMW registers as those will be handled separately */
204 	xa_for_each(&sr->xa, idx, entry) {
205 		if (entry->reg.masked || entry->clr_bits == ~0)
206 			++count;
207 		else
208 			++count_rmw;
209 	}
210 
211 	if (count || count_rmw)
212 		xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);
213 
214 	if (count) {
215 		/* emit a single LRI with all non-RMW regs */
216 
217 		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);
218 
219 		xa_for_each(&sr->xa, idx, entry) {
220 			struct xe_reg reg = entry->reg;
221 			u32 val;
222 
223 			if (reg.masked)
224 				val = entry->clr_bits << 16;
225 			else if (entry->clr_bits == ~0)
226 				val = 0;
227 			else
228 				continue;
229 
230 			val |= entry->set_bits;
231 
232 			bb->cs[bb->len++] = reg.addr;
233 			bb->cs[bb->len++] = val;
234 			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
235 		}
236 	}
237 
238 	if (count_rmw) {
239 		/* emit MI_MATH for each RMW reg */
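		/*
		 * For each register, the batch below reads the current value
		 * into GPR0 via MI_LOAD_REGISTER_REG, loads clr_bits/set_bits
		 * into GPR1/GPR2, then uses MI_MATH to compute
		 *
		 *	GPR0 = (GPR0 & ~GPR1) | GPR2
		 *
		 * i.e. new = (old & ~clr_bits) | set_bits, and finally writes
		 * GPR0 back to the target register.
		 */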
240 
241 		xa_for_each(&sr->xa, idx, entry) {
242 			if (entry->reg.masked || entry->clr_bits == ~0)
243 				continue;
244 
245 			bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
246 			bb->cs[bb->len++] = entry->reg.addr;
247 			bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
248 
249 			bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
250 					    MI_LRI_LRM_CS_MMIO;
251 			bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
252 			bb->cs[bb->len++] = entry->clr_bits;
253 			bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
254 			bb->cs[bb->len++] = entry->set_bits;
255 
256 			bb->cs[bb->len++] = MI_MATH(8);
257 			bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
258 			bb->cs[bb->len++] = CS_ALU_INSTR_LOADINV(SRCB, REG1);
259 			bb->cs[bb->len++] = CS_ALU_INSTR_AND;
260 			bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
261 			bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0);
262 			bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCB, REG2);
263 			bb->cs[bb->len++] = CS_ALU_INSTR_OR;
264 			bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU);
265 
266 			bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
267 			bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
268 			bb->cs[bb->len++] = entry->reg.addr;
269 
270 			xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
271 				  entry->reg.addr, entry->clr_bits, entry->set_bits);
272 		}
273 
274 		/* reset used GPR */
275 		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | MI_LRI_LRM_CS_MMIO;
276 		bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr;
277 		bb->cs[bb->len++] = 0;
278 		bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr;
279 		bb->cs[bb->len++] = 0;
280 		bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr;
281 		bb->cs[bb->len++] = 0;
282 	}
283 
284 	xe_lrc_emit_hwe_state_instructions(q, bb);
285 
286 	job = xe_bb_create_job(q, bb);
287 	if (IS_ERR(job)) {
288 		xe_bb_free(bb, NULL);
289 		return PTR_ERR(job);
290 	}
291 
292 	xe_sched_job_arm(job);
293 	fence = dma_fence_get(&job->drm.s_fence->finished);
294 	xe_sched_job_push(job);
295 
296 	timeout = dma_fence_wait_timeout(fence, false, HZ);
297 	dma_fence_put(fence);
298 	xe_bb_free(bb, NULL);
299 	if (timeout < 0)
300 		return timeout;
301 	else if (!timeout)
302 		return -ETIME;
303 
304 	return 0;
305 }
306 
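/**
 * xe_gt_record_default_lrcs - Record the default ("golden") LRC per class
 * @gt: the GT
 *
 * For each engine class, run a workaround batch to prime a context, switch to
 * a second context and back so indirect state is also captured, then copy the
 * resulting LRC image into gt->default_lrc[class], which is used to seed new
 * contexts of that class.
 *
 * Return: 0 on success, negative error code on failure.
 */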
307 int xe_gt_record_default_lrcs(struct xe_gt *gt)
308 {
309 	struct xe_device *xe = gt_to_xe(gt);
310 	struct xe_hw_engine *hwe;
311 	enum xe_hw_engine_id id;
312 	int err = 0;
313 
314 	for_each_hw_engine(hwe, gt, id) {
315 		struct xe_exec_queue *q, *nop_q;
316 		void *default_lrc;
317 
318 		if (gt->default_lrc[hwe->class])
319 			continue;
320 
321 		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
322 		xe_wa_process_lrc(hwe);
323 		xe_hw_engine_setup_default_lrc_state(hwe);
324 		xe_tuning_process_lrc(hwe);
325 
326 		default_lrc = drmm_kzalloc(&xe->drm,
327 					   xe_gt_lrc_size(gt, hwe->class),
328 					   GFP_KERNEL);
329 		if (!default_lrc)
330 			return -ENOMEM;
331 
332 		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
333 					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
334 		if (IS_ERR(q)) {
335 			err = PTR_ERR(q);
336 			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
337 				  hwe->name, q);
338 			return err;
339 		}
340 
341 		/* Prime golden LRC with known good state */
342 		err = emit_wa_job(gt, q);
343 		if (err) {
344 			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
345 				  hwe->name, ERR_PTR(err), q->guc->id);
346 			goto put_exec_queue;
347 		}
348 
349 		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
350 					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
351 		if (IS_ERR(nop_q)) {
352 			err = PTR_ERR(nop_q);
353 			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
354 				  hwe->name, nop_q);
355 			goto put_exec_queue;
356 		}
357 
358 		/* Switch to different LRC */
359 		err = emit_nop_job(gt, nop_q);
360 		if (err) {
361 			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
362 				  hwe->name, ERR_PTR(err), nop_q->guc->id);
363 			goto put_nop_q;
364 		}
365 
366 		/* Reload golden LRC to record the effect of any indirect W/A */
367 		err = emit_nop_job(gt, q);
368 		if (err) {
369 			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
370 				  hwe->name, ERR_PTR(err), q->guc->id);
371 			goto put_nop_q;
372 		}
373 
374 		xe_map_memcpy_from(xe, default_lrc,
375 				   &q->lrc[0]->bo->vmap,
376 				   xe_lrc_pphwsp_offset(q->lrc[0]),
377 				   xe_gt_lrc_size(gt, hwe->class));
378 
379 		gt->default_lrc[hwe->class] = default_lrc;
380 put_nop_q:
381 		xe_exec_queue_put(nop_q);
382 put_exec_queue:
383 		xe_exec_queue_put(q);
384 		if (err)
385 			break;
386 	}
387 
388 	return err;
389 }
390 
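/**
 * xe_gt_init_early - Early, software-only GT initialization
 * @gt: the GT
 *
 * Set up state that does not require hardware access: SR-IOV PF data (if
 * applicable), the GT save/restore list, workaround and tuning tables, OOB
 * workarounds, force-wake bookkeeping and the TLB invalidation machinery.
 *
 * Return: 0 on success, negative error code on failure.
 */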
391 int xe_gt_init_early(struct xe_gt *gt)
392 {
393 	int err;
394 
395 	if (IS_SRIOV_PF(gt_to_xe(gt))) {
396 		err = xe_gt_sriov_pf_init_early(gt);
397 		if (err)
398 			return err;
399 	}
400 
401 	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));
402 
403 	err = xe_wa_init(gt);
404 	if (err)
405 		return err;
406 
407 	err = xe_tuning_init(gt);
408 	if (err)
409 		return err;
410 
411 	xe_wa_process_oob(gt);
412 
413 	xe_force_wake_init_gt(gt, gt_to_fw(gt));
414 	spin_lock_init(&gt->global_invl_lock);
415 
416 	err = xe_gt_tlb_invalidation_init_early(gt);
417 	if (err)
418 		return err;
419 
420 	return 0;
421 }
422 
423 static void dump_pat_on_error(struct xe_gt *gt)
424 {
425 	struct drm_printer p;
426 	char prefix[32];
427 
428 	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
429 	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);
430 
431 	xe_pat_dump(gt, &p);
432 }
433 
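/*
 * Initialization that only needs the GT force-wake domain: GGTT and (for the
 * PF) LMTT setup on the primary GT, per-engine IRQ enabling, MCR re-init once
 * the engine list exists, early HW engine init, sysfs, CCS mode sysfs and
 * stashing the GMD_ID version.
 */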
434 static int gt_fw_domain_init(struct xe_gt *gt)
435 {
436 	unsigned int fw_ref;
437 	int err;
438 
439 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
440 	if (!fw_ref)
441 		return -ETIMEDOUT;
442 
443 	if (!xe_gt_is_media_type(gt)) {
444 		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
445 		if (err)
446 			goto err_force_wake;
447 		if (IS_SRIOV_PF(gt_to_xe(gt)))
448 			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
449 	}
450 
451 	/* Enable per hw engine IRQs */
452 	xe_irq_enable_hwe(gt);
453 
454 	/* Rerun MCR init as we now have the hw engine list */
455 	xe_gt_mcr_init(gt);
456 
457 	err = xe_hw_engines_init_early(gt);
458 	if (err)
459 		goto err_force_wake;
460 
461 	err = xe_hw_engine_class_sysfs_init(gt);
462 	if (err)
463 		goto err_force_wake;
464 
465 	/* Initialize CCS mode sysfs after early initialization of HW engines */
466 	err = xe_gt_ccs_mode_sysfs_init(gt);
467 	if (err)
468 		goto err_force_wake;
469 
470 	/*
471 	 * Stash hardware-reported version.  Since this register does not exist
472 	 * on pre-MTL platforms, reading it there will (correctly) return 0.
473 	 */
474 	gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);
475 
476 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
477 	return 0;
478 
479 err_force_wake:
480 	dump_pat_on_error(gt);
481 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
482 
483 	return err;
484 }
485 
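/*
 * Initialization that needs all force-wake domains: GT workarounds and
 * tunings, clocks, MOCS, execlists, HW engines, the post-hwconfig and HW
 * parts of the uC, the USM suballocator and migration context on the primary
 * GT, the default CCS mode and SR-IOV PF hardware setup.
 */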
486 static int all_fw_domain_init(struct xe_gt *gt)
487 {
488 	unsigned int fw_ref;
489 	int err;
490 
491 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
492 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
493 		err = -ETIMEDOUT;
494 		goto err_force_wake;
495 	}
496 
497 	xe_gt_mcr_set_implicit_defaults(gt);
498 	xe_wa_process_gt(gt);
499 	xe_tuning_process_gt(gt);
500 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
501 
502 	err = xe_gt_clock_init(gt);
503 	if (err)
504 		goto err_force_wake;
505 
506 	xe_mocs_init(gt);
507 	err = xe_execlist_init(gt);
508 	if (err)
509 		goto err_force_wake;
510 
511 	err = xe_hw_engines_init(gt);
512 	if (err)
513 		goto err_force_wake;
514 
515 	err = xe_uc_init_post_hwconfig(&gt->uc);
516 	if (err)
517 		goto err_force_wake;
518 
519 	if (!xe_gt_is_media_type(gt)) {
520 		/*
521 		 * USM has its own SA pool so that it does not block behind user operations
522 		 */
523 		if (gt_to_xe(gt)->info.has_usm) {
524 			struct xe_device *xe = gt_to_xe(gt);
525 
526 			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
527 								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
528 			if (IS_ERR(gt->usm.bb_pool)) {
529 				err = PTR_ERR(gt->usm.bb_pool);
530 				goto err_force_wake;
531 			}
532 		}
533 	}
534 
535 	if (!xe_gt_is_media_type(gt)) {
536 		struct xe_tile *tile = gt_to_tile(gt);
537 
538 		tile->migrate = xe_migrate_init(tile);
539 		if (IS_ERR(tile->migrate)) {
540 			err = PTR_ERR(tile->migrate);
541 			goto err_force_wake;
542 		}
543 	}
544 
545 	err = xe_uc_init_hw(&gt->uc);
546 	if (err)
547 		goto err_force_wake;
548 
549 	/* Configure default CCS mode of 1 engine with all resources */
550 	if (xe_gt_ccs_mode_enabled(gt)) {
551 		gt->ccs_mode = 1;
552 		xe_gt_apply_ccs_mode(gt);
553 	}
554 
555 	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
556 		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
557 
558 	if (IS_SRIOV_PF(gt_to_xe(gt))) {
559 		xe_gt_sriov_pf_init(gt);
560 		xe_gt_sriov_pf_init_hw(gt);
561 	}
562 
563 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
564 
565 	return 0;
566 
567 err_force_wake:
568 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
569 
570 	return err;
571 }
572 
573 /*
574  * Initialize enough of the GT to be able to load the GuC, obtain the hwconfig
575  * and enable CTB communication.
576  */
577 int xe_gt_init_hwconfig(struct xe_gt *gt)
578 {
579 	unsigned int fw_ref;
580 	int err;
581 
582 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
583 	if (!fw_ref)
584 		return -ETIMEDOUT;
585 
586 	xe_gt_mcr_init_early(gt);
587 	xe_pat_init(gt);
588 
589 	err = xe_uc_init(&gt->uc);
590 	if (err)
591 		goto out_fw;
592 
593 	err = xe_uc_init_hwconfig(&gt->uc);
594 	if (err)
595 		goto out_fw;
596 
597 	xe_gt_topology_init(gt);
598 	xe_gt_mcr_init(gt);
599 	xe_gt_enable_host_l2_vram(gt);
600 
601 out_fw:
602 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
603 	return err;
604 }
605 
606 static void xe_gt_fini(void *arg)
607 {
608 	struct xe_gt *gt = arg;
609 	int i;
610 
611 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
612 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
613 
614 	xe_gt_disable_host_l2_vram(gt);
615 }
616 
617 int xe_gt_init(struct xe_gt *gt)
618 {
619 	int err;
620 	int i;
621 
622 	INIT_WORK(&gt->reset.worker, gt_reset_worker);
623 
624 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
625 		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
626 		xe_hw_fence_irq_init(&gt->fence_irq[i]);
627 	}
628 
629 	err = devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, xe_gt_fini, gt);
630 	if (err)
631 		return err;
632 
633 	err = xe_gt_pagefault_init(gt);
634 	if (err)
635 		return err;
636 
637 	xe_mocs_init_early(gt);
638 
639 	err = xe_gt_sysfs_init(gt);
640 	if (err)
641 		return err;
642 
643 	err = gt_fw_domain_init(gt);
644 	if (err)
645 		return err;
646 
647 	err = xe_gt_idle_init(&gt->gtidle);
648 	if (err)
649 		return err;
650 
651 	err = xe_gt_freq_init(gt);
652 	if (err)
653 		return err;
654 
655 	xe_force_wake_init_engines(gt, gt_to_fw(gt));
656 
657 	err = all_fw_domain_init(gt);
658 	if (err)
659 		return err;
660 
661 	xe_gt_record_user_engines(gt);
662 
663 	err = xe_eu_stall_init(gt);
664 	if (err)
665 		return err;
666 
667 	return 0;
668 }
669 
670 /**
671  * xe_gt_mmio_init() - Initialize GT's MMIO access
672  * @gt: the GT object
673  *
674  * Initialize GT's MMIO accessor, which will be used to access registers inside
675  * this GT.
676  */
677 void xe_gt_mmio_init(struct xe_gt *gt)
678 {
679 	struct xe_tile *tile = gt_to_tile(gt);
680 	struct xe_device *xe = tile_to_xe(tile);
681 
682 	xe_mmio_init(&gt->mmio, tile, tile->mmio.regs, tile->mmio.regs_size);
683 
684 	if (gt->info.type == XE_GT_TYPE_MEDIA) {
685 		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
686 		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
687 	} else {
688 		gt->mmio.adj_offset = 0;
689 		gt->mmio.adj_limit = 0;
690 	}
691 
692 	if (IS_SRIOV_VF(xe))
693 		gt->mmio.sriov_vf_gt = gt;
694 }
695 
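/**
 * xe_gt_record_user_engines - Record the engines exposed to userspace
 * @gt: the GT
 *
 * Build the mask of non-reserved hardware engines and the per-class instance
 * counts used when reporting engines to userspace; the result must be a
 * subset of the GT's full engine mask.
 */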
696 void xe_gt_record_user_engines(struct xe_gt *gt)
697 {
698 	struct xe_hw_engine *hwe;
699 	enum xe_hw_engine_id id;
700 
701 	gt->user_engines.mask = 0;
702 	memset(gt->user_engines.instances_per_class, 0,
703 	       sizeof(gt->user_engines.instances_per_class));
704 
705 	for_each_hw_engine(hwe, gt, id) {
706 		if (xe_hw_engine_is_reserved(hwe))
707 			continue;
708 
709 		gt->user_engines.mask |= BIT_ULL(id);
710 		gt->user_engines.instances_per_class[hwe->class]++;
711 	}
712 
713 	xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
714 		     == gt->info.engine_mask);
715 }
716 
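/*
 * Full GT reset through GDRST: request GRDOM_FULL and poll (up to 5ms) for
 * the hardware to clear it. VFs instead go through xe_gt_sriov_vf_reset(),
 * and the GSC workaround 14015076503 is applied before and reverted after
 * the reset.
 */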
717 static int do_gt_reset(struct xe_gt *gt)
718 {
719 	int err;
720 
721 	if (IS_SRIOV_VF(gt_to_xe(gt)))
722 		return xe_gt_sriov_vf_reset(gt);
723 
724 	xe_gsc_wa_14015076503(gt, true);
725 
726 	xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
727 	err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
728 	if (err)
729 		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
730 			  ERR_PTR(err));
731 
732 	xe_gsc_wa_14015076503(gt, false);
733 
734 	return err;
735 }
736 
737 static int vf_gt_restart(struct xe_gt *gt)
738 {
739 	int err;
740 
741 	err = xe_uc_sanitize_reset(&gt->uc);
742 	if (err)
743 		return err;
744 
745 	err = xe_uc_init_hw(&gt->uc);
746 	if (err)
747 		return err;
748 
749 	err = xe_uc_start(&gt->uc);
750 	if (err)
751 		return err;
752 
753 	return 0;
754 }
755 
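/*
 * Bring the GT back up after a reset or resume: re-apply PAT, the host L2
 * VRAM workaround, MCR defaults and the GT/engine save-restore lists, re-init
 * WOPCM, re-enable the rings, reload and start the uC, restore LMTT/SR-IOV PF
 * hardware state, MOCS, the CCS mode and the stashed GT frequency.
 */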
756 static int do_gt_restart(struct xe_gt *gt)
757 {
758 	struct xe_hw_engine *hwe;
759 	enum xe_hw_engine_id id;
760 	int err;
761 
762 	if (IS_SRIOV_VF(gt_to_xe(gt)))
763 		return vf_gt_restart(gt);
764 
765 	xe_pat_init(gt);
766 
767 	xe_gt_enable_host_l2_vram(gt);
768 
769 	xe_gt_mcr_set_implicit_defaults(gt);
770 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
771 
772 	err = xe_wopcm_init(&gt->uc.wopcm);
773 	if (err)
774 		return err;
775 
776 	for_each_hw_engine(hwe, gt, id)
777 		xe_hw_engine_enable_ring(hwe);
778 
779 	err = xe_uc_sanitize_reset(&gt->uc);
780 	if (err)
781 		return err;
782 
783 	err = xe_uc_init_hw(&gt->uc);
784 	if (err)
785 		return err;
786 
787 	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
788 		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
789 
790 	if (IS_SRIOV_PF(gt_to_xe(gt)))
791 		xe_gt_sriov_pf_init_hw(gt);
792 
793 	xe_mocs_init(gt);
794 	err = xe_uc_start(&gt->uc);
795 	if (err)
796 		return err;
797 
798 	for_each_hw_engine(hwe, gt, id)
799 		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
800 
801 	/* Get CCS mode in sync between sw/hw */
802 	xe_gt_apply_ccs_mode(gt);
803 
804 	/* Restore GT freq to expected values */
805 	xe_gt_sanitize_freq(gt);
806 
807 	if (IS_SRIOV_PF(gt_to_xe(gt)))
808 		xe_gt_sriov_pf_restart(gt);
809 
810 	return 0;
811 }
812 
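/*
 * Synchronous GT reset, run from the GT's ordered workqueue: sanitize
 * software state, grab all force-wake domains, stop the uC and page-fault
 * handling, reset outstanding TLB invalidation fences, perform the hardware
 * reset and then restart the GT. Any failure wedges the device.
 */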
813 static int gt_reset(struct xe_gt *gt)
814 {
815 	unsigned int fw_ref;
816 	int err;
817 
818 	if (xe_device_wedged(gt_to_xe(gt)))
819 		return -ECANCELED;
820 
821 	/* We only support GT resets with GuC submission */
822 	if (!xe_device_uc_enabled(gt_to_xe(gt)))
823 		return -ENODEV;
824 
825 	xe_gt_info(gt, "reset started\n");
826 
827 	xe_pm_runtime_get(gt_to_xe(gt));
828 
829 	if (xe_fault_inject_gt_reset()) {
830 		err = -ECANCELED;
831 		goto err_fail;
832 	}
833 
834 	xe_gt_sanitize(gt);
835 
836 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
837 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
838 		err = -ETIMEDOUT;
839 		goto err_out;
840 	}
841 
842 	xe_uc_gucrc_disable(&gt->uc);
843 	xe_uc_stop_prepare(&gt->uc);
844 	xe_gt_pagefault_reset(gt);
845 
846 	xe_uc_stop(&gt->uc);
847 
848 	xe_gt_tlb_invalidation_reset(gt);
849 
850 	err = do_gt_reset(gt);
851 	if (err)
852 		goto err_out;
853 
854 	err = do_gt_restart(gt);
855 	if (err)
856 		goto err_out;
857 
858 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
859 	xe_pm_runtime_put(gt_to_xe(gt));
860 
861 	xe_gt_info(gt, "reset done\n");
862 
863 	return 0;
864 
865 err_out:
866 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
867 	XE_WARN_ON(xe_uc_start(&gt->uc));
868 err_fail:
869 	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
870 
871 	xe_device_declare_wedged(gt_to_xe(gt));
872 	xe_pm_runtime_put(gt_to_xe(gt));
873 
874 	return err;
875 }
876 
877 static void gt_reset_worker(struct work_struct *w)
878 {
879 	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
880 
881 	gt_reset(gt);
882 }
883 
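/**
 * xe_gt_reset_async - Queue a GT reset
 * @gt: the GT to reset
 *
 * Queue the reset worker on the GT's ordered workqueue unless a reset is
 * already in flight, as reported by xe_uc_reset_prepare().
 */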
884 void xe_gt_reset_async(struct xe_gt *gt)
885 {
886 	xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0));
887 
888 	/* Don't do a reset while one is already in flight */
889 	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
890 		return;
891 
892 	xe_gt_info(gt, "reset queued\n");
893 	queue_work(gt->ordered_wq, &gt->reset.worker);
894 }
895 
896 void xe_gt_suspend_prepare(struct xe_gt *gt)
897 {
898 	unsigned int fw_ref;
899 
900 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
901 
902 	xe_uc_suspend_prepare(&gt->uc);
903 
904 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
905 }
906 
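/**
 * xe_gt_suspend - Suspend a GT
 * @gt: the GT
 *
 * Sanitize software state, then, with all force-wake domains held, suspend
 * the uC, disable power gating and drop host L2 VRAM access before the
 * device is powered down.
 *
 * Return: 0 on success, negative error code on failure.
 */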
907 int xe_gt_suspend(struct xe_gt *gt)
908 {
909 	unsigned int fw_ref;
910 	int err;
911 
912 	xe_gt_dbg(gt, "suspending\n");
913 	xe_gt_sanitize(gt);
914 
915 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
916 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
917 		goto err_msg;
918 
919 	err = xe_uc_suspend(&gt->uc);
920 	if (err)
921 		goto err_force_wake;
922 
923 	xe_gt_idle_disable_pg(gt);
924 
925 	xe_gt_disable_host_l2_vram(gt);
926 
927 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
928 	xe_gt_dbg(gt, "suspended\n");
929 
930 	return 0;
931 
932 err_msg:
933 	err = -ETIMEDOUT;
934 err_force_wake:
935 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
936 	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
937 
938 	return err;
939 }
940 
941 void xe_gt_shutdown(struct xe_gt *gt)
942 {
943 	unsigned int fw_ref;
944 
945 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
946 	do_gt_reset(gt);
947 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
948 }
949 
950 /**
951  * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
952  * @gt: the GT object
953  *
954  * Called after driver init/GSC load completes to restore GT frequencies if we
955  * limited them for any WAs.
956  */
957 int xe_gt_sanitize_freq(struct xe_gt *gt)
958 {
959 	int ret = 0;
960 
961 	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
962 	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
963 	     xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
964 	    XE_WA(gt, 22019338487))
965 		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
966 
967 	return ret;
968 }
969 
970 int xe_gt_resume(struct xe_gt *gt)
971 {
972 	unsigned int fw_ref;
973 	int err;
974 
975 	xe_gt_dbg(gt, "resuming\n");
976 	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
977 	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
978 		goto err_msg;
979 
980 	err = do_gt_restart(gt);
981 	if (err)
982 		goto err_force_wake;
983 
984 	xe_gt_idle_enable_pg(gt);
985 
986 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
987 	xe_gt_dbg(gt, "resumed\n");
988 
989 	return 0;
990 
991 err_msg:
992 	err = -ETIMEDOUT;
993 err_force_wake:
994 	xe_force_wake_put(gt_to_fw(gt), fw_ref);
995 	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));
996 
997 	return err;
998 }
999 
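/*
 * Look up a hardware engine by class and instance; @logical selects whether
 * @instance is interpreted as a logical or a physical instance number.
 */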
1000 struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
1001 				     enum xe_engine_class class,
1002 				     u16 instance, bool logical)
1003 {
1004 	struct xe_hw_engine *hwe;
1005 	enum xe_hw_engine_id id;
1006 
1007 	for_each_hw_engine(hwe, gt, id)
1008 		if (hwe->class == class &&
1009 		    ((!logical && hwe->instance == instance) ||
1010 		    (logical && hwe->logical_instance == instance)))
1011 			return hwe;
1012 
1013 	return NULL;
1014 }
1015 
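/*
 * Return any engine sharing a reset domain with @class. Render and compute
 * engines share a reset domain, so a request for either may be satisfied by
 * the other; all other classes must match exactly.
 */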
1016 struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
1017 							 enum xe_engine_class class)
1018 {
1019 	struct xe_hw_engine *hwe;
1020 	enum xe_hw_engine_id id;
1021 
1022 	for_each_hw_engine(hwe, gt, id) {
1023 		switch (class) {
1024 		case XE_ENGINE_CLASS_RENDER:
1025 		case XE_ENGINE_CLASS_COMPUTE:
1026 			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
1027 			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
1028 				return hwe;
1029 			break;
1030 		default:
1031 			if (hwe->class == class)
1032 				return hwe;
1033 		}
1034 	}
1035 
1036 	return NULL;
1037 }
1038 
1039 struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
1040 {
1041 	struct xe_hw_engine *hwe;
1042 	enum xe_hw_engine_id id;
1043 
1044 	for_each_hw_engine(hwe, gt, id)
1045 		return hwe;
1046 
1047 	return NULL;
1048 }
1049 
1050 /**
1051  * xe_gt_declare_wedged() - Declare GT wedged
1052  * @gt: the GT object
1053  *
1054  * Wedge the GT, which stops all submission, saves desired debug state, and
1055  * cleans up anything which could time out.
1056  */
1057 void xe_gt_declare_wedged(struct xe_gt *gt)
1058 {
1059 	xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);
1060 
1061 	xe_uc_declare_wedged(&gt->uc);
1062 	xe_gt_tlb_invalidation_reset(gt);
1063 }
1064