// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include <generated/xe_wa_oob.h>

#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
#include "xe_lmtt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

static void gt_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;

	destroy_workqueue(gt->ordered_wq);
}

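/**
 * xe_gt_alloc() - Allocate a GT structure for a tile
 * @tile: the tile this GT belongs to
 *
 * Allocates the GT with DRM-managed memory, creates its ordered workqueue and
 * registers a cleanup action that destroys the workqueue on driver teardown.
 *
 * Return: pointer to the new GT on success, ERR_PTR() on failure.
 */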
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_gt *gt;
	int err;

	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq",
						 WQ_MEM_RECLAIM);

	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
	if (err)
		return ERR_PTR(err);

	return gt;
}

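/**
 * xe_gt_sanitize() - Sanitize the GT's software state ahead of a reset/resume
 * @gt: the GT object
 *
 * Marks GuC submission as disabled so that stale submission state is not
 * trusted across the following reset or resume.
 */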
void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, on TGL the driver will not
	 * reload
	 */
	gt->uc.guc.submission_state.enabled = false;
}

static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
{
	unsigned int fw_ref;
	u32 reg;

	if (!XE_WA(gt, 16023588340))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;

	if (!xe_gt_is_media_type(gt)) {
		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
		reg |= CG_DIS_CNTLBUS;
		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
	}

	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
{
	unsigned int fw_ref;
	u32 reg;

	if (!XE_WA(gt, 16023588340))
		return;

	if (xe_gt_is_media_type(gt))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
	reg &= ~CG_DIS_CNTLBUS;
	xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

static void gt_reset_worker(struct work_struct *w);

static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

/*
 * Convert back from the encoded value to the type-safe MCR register; only to
 * be used when reg.mcr is true.
 */
static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg)
{
	return (const struct xe_reg_mcr){.__reg.raw = reg.raw };
}

static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	unsigned long idx;
	struct xe_sched_job *job;
	struct xe_bb *bb;
	struct dma_fence *fence;
	long timeout;
	int count = 0;

	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
		/* Big enough to emit all of the context's 3DSTATE */
		bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false);
	else
		/* Just pick a large BB size */
		bb = xe_bb_new(gt, SZ_4K, false);

	if (IS_ERR(bb))
		return PTR_ERR(bb);

	xa_for_each(&sr->xa, idx, entry)
		++count;

	if (count) {
		xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name);

		bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);

		xa_for_each(&sr->xa, idx, entry) {
			struct xe_reg reg = entry->reg;
			struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg);
			u32 val;

			/*
			 * Skip reading the register if it's not really needed
			 */
			if (reg.masked)
				val = entry->clr_bits << 16;
			else if (entry->clr_bits + 1)
				val = (reg.mcr ?
				       xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
				       xe_mmio_read32(&gt->mmio, reg)) & (~entry->clr_bits);
			else
				val = 0;

			val |= entry->set_bits;

			bb->cs[bb->len++] = reg.addr;
			bb->cs[bb->len++] = val;
			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
		}
	}

	xe_lrc_emit_hwe_state_instructions(q, bb);

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job)) {
		xe_bb_free(bb, NULL);
		return PTR_ERR(job);
	}

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	xe_bb_free(bb, NULL);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

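/**
 * xe_gt_record_default_lrcs() - Record the default ("golden") LRC per class
 * @gt: the GT object
 *
 * For each hardware engine class, submit a workaround batch followed by nop
 * jobs so the context image settles into its default state, then copy that
 * image into gt->default_lrc[class] for later use when creating contexts.
 *
 * Return: 0 on success, negative error code on failure.
 */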
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_exec_queue *q, *nop_q;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_gt_lrc_size(gt, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(q)) {
			err = PTR_ERR(q);
			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
				  hwe->name, q);
			return err;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_exec_queue;
		}

		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(nop_q)) {
			err = PTR_ERR(nop_q);
			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
				  hwe->name, nop_q);
			goto put_exec_queue;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_q);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_q->guc->id);
			goto put_nop_q;
		}

		/* Reload golden LRC to record the effect of any indirect W/A */
		err = emit_nop_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_nop_q;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &q->lrc[0]->bo->vmap,
				   xe_lrc_pphwsp_offset(q->lrc[0]),
				   xe_gt_lrc_size(gt, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
		xe_exec_queue_put(nop_q);
put_exec_queue:
		xe_exec_queue_put(q);
		if (err)
			break;
	}

	return err;
}

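/**
 * xe_gt_init_early() - Early GT initialization
 * @gt: the GT object
 *
 * Performs the initialization that must happen before the GuC hwconfig is
 * available: SR-IOV PF setup, the save-restore, workaround and tuning tables,
 * forcewake and the early TLB invalidation state.
 *
 * Return: 0 on success, negative error code on failure.
 */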
int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_PF(gt_to_xe(gt))) {
		err = xe_gt_sriov_pf_init_early(gt);
		if (err)
			return err;
	}

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_init(gt);
	if (err)
		return err;

	err = xe_tuning_init(gt);
	if (err)
		return err;

	xe_wa_process_oob(gt);

	xe_force_wake_init_gt(gt, gt_to_fw(gt));
	spin_lock_init(&gt->global_invl_lock);

	err = xe_gt_tlb_invalidation_init_early(gt);
	if (err)
		return err;

	return 0;
}

static void dump_pat_on_error(struct xe_gt *gt)
{
	struct drm_printer p;
	char prefix[32];

	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);

	xe_pat_dump(gt, &p);
}

static int gt_fw_domain_init(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	if (!xe_gt_is_media_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			goto err_force_wake;
		if (IS_SRIOV_PF(gt_to_xe(gt)))
			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
	}

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engine_class_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/* Initialize CCS mode sysfs after early initialization of HW engines */
	err = xe_gt_ccs_mode_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/*
	 * Stash hardware-reported version. Since this register does not exist
	 * on pre-MTL platforms, reading it there will (correctly) return 0.
	 */
	gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;

err_force_wake:
	dump_pat_on_error(gt);
	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return err;
}

static int all_fw_domain_init(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		err = -ETIMEDOUT;
		goto err_force_wake;
	}

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_wa_process_gt(gt);
	xe_tuning_process_gt(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		goto err_force_wake;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engines_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	if (!xe_gt_is_media_type(gt)) {
		/*
		 * USM has its own SA pool so that it does not block behind
		 * user operations
		 */
		if (gt_to_xe(gt)->info.has_usm) {
			struct xe_device *xe = gt_to_xe(gt);

			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
			if (IS_ERR(gt->usm.bb_pool)) {
				err = PTR_ERR(gt->usm.bb_pool);
				goto err_force_wake;
			}
		}
	}

	if (!xe_gt_is_media_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		tile->migrate = xe_migrate_init(tile);
		if (IS_ERR(tile->migrate)) {
			err = PTR_ERR(tile->migrate);
			goto err_force_wake;
		}
	}

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		goto err_force_wake;

	/* Configure default CCS mode of 1 engine with all resources */
	if (xe_gt_ccs_mode_enabled(gt)) {
		gt->ccs_mode = 1;
		xe_gt_apply_ccs_mode(gt);
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt))) {
		xe_gt_sriov_pf_init(gt);
		xe_gt_sriov_pf_init_hw(gt);
	}

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return err;
}

/*
 * Initialize enough GT to be able to load GuC in order to obtain hwconfig and
 * enable CTB communication.
 */
int xe_gt_init_hwconfig(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	xe_gt_mcr_init_early(gt);
	xe_pat_init(gt);

	err = xe_uc_init(&gt->uc);
	if (err)
		goto out_fw;

	err = xe_uc_init_hwconfig(&gt->uc);
	if (err)
		goto out_fw;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);
	xe_gt_enable_host_l2_vram(gt);

out_fw:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return err;
}

static void xe_gt_fini(void *arg)
{
	struct xe_gt *gt = arg;
	int i;

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	xe_gt_disable_host_l2_vram(gt);
}

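/**
 * xe_gt_init() - Main GT initialization
 * @gt: the GT object
 *
 * Sets up the reset worker, ring ops and fence IRQs, then brings up page
 * fault handling, MOCS, sysfs, the forcewake domains, GT idle/freq support
 * and finally the full hardware initialization via all_fw_domain_init().
 *
 * Return: 0 on success, negative error code on failure.
 */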
int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, xe_gt_fini, gt);
	if (err)
		return err;

	err = xe_gt_pagefault_init(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	err = xe_gt_sysfs_init(gt);
	if (err)
		return err;

	err = gt_fw_domain_init(gt);
	if (err)
		return err;

	err = xe_gt_idle_init(&gt->gtidle);
	if (err)
		return err;

	err = xe_gt_freq_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = all_fw_domain_init(gt);
	if (err)
		return err;

	xe_gt_record_user_engines(gt);

	err = xe_eu_stall_init(gt);
	if (err)
		return err;

	return 0;
}

/**
 * xe_gt_mmio_init() - Initialize GT's MMIO access
 * @gt: the GT object
 *
 * Initialize GT's MMIO accessor, which will be used to access registers inside
 * this GT.
 */
void xe_gt_mmio_init(struct xe_gt *gt)
{
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);

	xe_mmio_init(&gt->mmio, tile, tile->mmio.regs, tile->mmio.regs_size);

	if (gt->info.type == XE_GT_TYPE_MEDIA) {
		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
	} else {
		gt->mmio.adj_offset = 0;
		gt->mmio.adj_limit = 0;
	}

	if (IS_SRIOV_VF(xe))
		gt->mmio.sriov_vf_gt = gt;
}

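/**
 * xe_gt_record_user_engines() - Record the engines exposed to userspace
 * @gt: the GT object
 *
 * Rebuilds the user-visible engine mask and the per-class instance counts,
 * skipping engines reserved for kernel use.
 */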
void xe_gt_record_user_engines(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	gt->user_engines.mask = 0;
	memset(gt->user_engines.instances_per_class, 0,
	       sizeof(gt->user_engines.instances_per_class));

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		gt->user_engines.mask |= BIT_ULL(id);
		gt->user_engines.instances_per_class[hwe->class]++;
	}

	xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
		     == gt->info.engine_mask);
}

static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return xe_gt_sriov_vf_reset(gt);

	xe_gsc_wa_14015076503(gt, true);

	xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	xe_gsc_wa_14015076503(gt, false);

	return err;
}

static int vf_gt_restart(struct xe_gt *gt)
{
	int err;

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	return 0;
}

static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return vf_gt_restart(gt);

	xe_pat_init(gt);

	xe_gt_enable_host_l2_vram(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_init_hw(&gt->uc);
	if (err)
		return err;

	if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	xe_mocs_init(gt);
	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);

	/* Get CCS mode in sync between sw/hw */
	xe_gt_apply_ccs_mode(gt);

	/* Restore GT freq to expected values */
	xe_gt_sanitize_freq(gt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_restart(gt);

	return 0;
}

static int gt_reset(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	if (xe_device_wedged(gt_to_xe(gt)))
		return -ECANCELED;

	/* We only support GT resets with GuC submission */
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		return -ENODEV;

	xe_gt_info(gt, "reset started\n");

	xe_pm_runtime_get(gt_to_xe(gt));

	if (xe_fault_inject_gt_reset()) {
		err = -ECANCELED;
		goto err_fail;
	}

	xe_gt_sanitize(gt);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		err = -ETIMEDOUT;
		goto err_out;
	}

	xe_uc_gucrc_disable(&gt->uc);
	xe_uc_stop_prepare(&gt->uc);
	xe_gt_pagefault_reset(gt);

	xe_uc_stop(&gt->uc);

	xe_gt_tlb_invalidation_reset(gt);

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_pm_runtime_put(gt_to_xe(gt));

	xe_gt_info(gt, "reset done\n");

	return 0;

err_out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	XE_WARN_ON(xe_uc_start(&gt->uc));
err_fail:
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));

	xe_device_declare_wedged(gt_to_xe(gt));
	xe_pm_runtime_put(gt_to_xe(gt));

	return err;
}

static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);

	gt_reset(gt);
}

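/**
 * xe_gt_reset_async() - Queue an asynchronous GT reset
 * @gt: the GT object
 *
 * Queues the GT reset worker on the GT's ordered workqueue unless a reset is
 * already in flight.
 */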
void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0));

	/* Don't do a reset while one is already in flight */
	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");
	queue_work(gt->ordered_wq, &gt->reset.worker);
}

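/**
 * xe_gt_suspend_prepare() - Prepare the GT for suspend
 * @gt: the GT object
 *
 * Takes forcewake and asks the microcontrollers to prepare for being stopped.
 */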
void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);

	xe_uc_stop_prepare(&gt->uc);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

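/**
 * xe_gt_suspend() - Suspend the GT
 * @gt: the GT object
 *
 * Sanitizes the GT software state, suspends the microcontrollers and disables
 * power gating and host L2 caching of VRAM before releasing forcewake.
 *
 * Return: 0 on success, negative error code on failure.
 */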
int xe_gt_suspend(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	xe_gt_dbg(gt, "suspending\n");
	xe_gt_sanitize(gt);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		goto err_msg;

	err = xe_uc_suspend(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_gt_idle_disable_pg(gt);

	xe_gt_disable_host_l2_vram(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_dbg(gt, "suspended\n");

	return 0;

err_msg:
	err = -ETIMEDOUT;
err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));

	return err;
}

void xe_gt_shutdown(struct xe_gt *gt)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	do_gt_reset(gt);
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

/**
 * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
 * @gt: the GT object
 *
 * Called after driver init/GSC load completes to restore GT frequencies if we
 * limited them for any WAs.
 */
int xe_gt_sanitize_freq(struct xe_gt *gt)
{
	int ret = 0;

	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
	    XE_WA(gt, 22019338487))
		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);

	return ret;
}

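/**
 * xe_gt_resume() - Resume the GT
 * @gt: the GT object
 *
 * Takes forcewake, restarts the GT via do_gt_restart() and re-enables power
 * gating.
 *
 * Return: 0 on success, negative error code on failure.
 */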
int xe_gt_resume(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	xe_gt_dbg(gt, "resuming\n");
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		goto err_msg;

	err = do_gt_restart(gt);
	if (err)
		goto err_force_wake;

	xe_gt_idle_enable_pg(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_dbg(gt, "resumed\n");

	return 0;

err_msg:
	err = -ETIMEDOUT;
err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));

	return err;
}

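/**
 * xe_gt_hw_engine() - Look up a hardware engine on this GT
 * @gt: the GT object
 * @class: engine class to match
 * @instance: engine instance to match
 * @logical: if true, match @instance against the logical instance, otherwise
 *           against the physical instance
 *
 * Return: pointer to the matching engine, or NULL if none is found.
 */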
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		     (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

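/**
 * xe_gt_any_hw_engine_by_reset_domain() - Find an engine in the same reset
 * domain as the given class
 * @gt: the GT object
 * @class: engine class of interest
 *
 * Render and compute engines share a reset domain, so a request for either
 * class may return an engine of the other; all other classes only match
 * themselves.
 *
 * Return: any matching engine, or NULL if none is found.
 */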
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							  enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}

struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		return hwe;

	return NULL;
}

/**
 * xe_gt_declare_wedged() - Declare GT wedged
 * @gt: the GT object
 *
 * Wedge the GT which stops all submission, saves desired debug state, and
 * cleans up anything which could timeout.
 */
void xe_gt_declare_wedged(struct xe_gt *gt)
{
	xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);

	xe_uc_declare_wedged(&gt->uc);
	xe_gt_tlb_invalidation_reset(gt);
}