// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include <generated/xe_wa_oob.h>

#include "instructions/xe_alu_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_guc_submit.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
#include "xe_lmtt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pagefault.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tlb_inval.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

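/**
 * xe_gt_alloc - Allocate a GT structure for a tile
 * @tile: the &struct xe_tile this GT belongs to
 *
 * Allocate the GT with DRM-managed lifetime, together with its ordered
 * workqueue. On SR-IOV VFs that need a shared workqueue, the primary GT's
 * ordered workqueue is reused instead of allocating a new one.
 *
 * Return: pointer to the new GT on success, ERR_PTR() on failure.
 */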
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct drm_device *drm = &xe->drm;
	bool shared_wq = xe->info.needs_shared_vf_gt_wq && tile->primary_gt &&
		IS_SRIOV_VF(xe);
	struct workqueue_struct *ordered_wq;
	struct xe_gt *gt;

	gt = drmm_kzalloc(drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	if (shared_wq && tile->primary_gt->ordered_wq)
		ordered_wq = tile->primary_gt->ordered_wq;
	else
		ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq",
							  WQ_MEM_RECLAIM);
	if (IS_ERR(ordered_wq))
		return ERR_CAST(ordered_wq);

	gt->ordered_wq = ordered_wq;

	return gt;
}

void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, the driver will not
	 * reload on TGL
	 */
	xe_guc_submit_disable(&gt->uc.guc);
}

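/*
 * Wa_16023588340: enable host L2 caching of VRAM by disabling clock gating
 * on the GAM request stream (primary GT only) and opening up the XEHPC L3
 * CLOS 3 mask. Undone in xe_gt_disable_host_l2_vram().
 */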
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
{
	unsigned int fw_ref;
	u32 reg;

	if (!XE_GT_WA(gt, 16023588340))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;

	if (xe_gt_is_main_type(gt)) {
		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
		reg |= CG_DIS_CNTLBUS;
		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
	}

	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
{
	unsigned int fw_ref;
	u32 reg;

	if (!XE_GT_WA(gt, 16023588340))
		return;

	if (xe_gt_is_media_type(gt))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
	reg &= ~CG_DIS_CNTLBUS;
	xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

static void gt_reset_worker(struct work_struct *w);

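/*
 * Create a job for @bb on @q, submit it and synchronously wait up to
 * @timeout_jiffies for it to complete.
 */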
static int emit_job_sync(struct xe_exec_queue *q, struct xe_bb *bb,
			 long timeout_jiffies)
{
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job))
		return PTR_ERR(job);

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, timeout_jiffies);
	dma_fence_put(fence);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

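/* Submit a small empty batch buffer on @q and wait for it to complete. */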
static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_bb *bb;
	int ret;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	ret = emit_job_sync(q, bb, HZ);
	xe_bb_free(bb, NULL);

	return ret;
}

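/*
 * Build and submit a batch that applies all LRC save/restore entries for
 * @q's hw engine: masked and clear-all registers are written with a single
 * MI_LOAD_REGISTER_IMM, while read-modify-write registers are updated via
 * MI_MATH on the CS GPRs. Render engine batches also reserve space for the
 * context's 3DSTATE, emitted via xe_lrc_emit_hwe_state_instructions().
 */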
static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	int count_rmw = 0, count = 0, ret;
	unsigned long idx;
	struct xe_bb *bb;
	size_t bb_len = 0;
	u32 *cs;

	/* count RMW registers as those will be handled separately */
	xa_for_each(&sr->xa, idx, entry) {
		if (entry->reg.masked || entry->clr_bits == ~0)
			++count;
		else
			++count_rmw;
	}

	if (count)
		bb_len += count * 2 + 1;

	if (count_rmw)
		bb_len += count_rmw * 20 + 7;

	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
		/*
		 * Big enough to emit all of the context's 3DSTATE via
		 * xe_lrc_emit_hwe_state_instructions()
		 */
		bb_len += xe_gt_lrc_size(gt, q->hwe->class) / sizeof(u32);

	xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", q->hwe->name, bb_len);

	bb = xe_bb_new(gt, bb_len, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	cs = bb->cs;

	if (count) {
		/*
		 * Emit single LRI with all non RMW regs: 1 leading dw + 2dw per
		 * reg + 1
		 */

		*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);

		xa_for_each(&sr->xa, idx, entry) {
			struct xe_reg reg = entry->reg;
			u32 val;

			if (reg.masked)
				val = entry->clr_bits << 16;
			else if (entry->clr_bits == ~0)
				val = 0;
			else
				continue;

			val |= entry->set_bits;

			*cs++ = reg.addr;
			*cs++ = val;
			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
		}
	}

	if (count_rmw) {
		/* Emit MI_MATH for each RMW reg: 20dw per reg + 7 trailing dw */

		xa_for_each(&sr->xa, idx, entry) {
			if (entry->reg.masked || entry->clr_bits == ~0)
				continue;

			*cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
			*cs++ = entry->reg.addr;
			*cs++ = CS_GPR_REG(0, 0).addr;

			*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
				MI_LRI_LRM_CS_MMIO;
			*cs++ = CS_GPR_REG(0, 1).addr;
			*cs++ = entry->clr_bits;
			*cs++ = CS_GPR_REG(0, 2).addr;
			*cs++ = entry->set_bits;

			*cs++ = MI_MATH(8);
			*cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
			*cs++ = CS_ALU_INSTR_LOADINV(SRCB, REG1);
			*cs++ = CS_ALU_INSTR_AND;
			*cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
			*cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
			*cs++ = CS_ALU_INSTR_LOAD(SRCB, REG2);
			*cs++ = CS_ALU_INSTR_OR;
			*cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);

			*cs++ = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
			*cs++ = CS_GPR_REG(0, 0).addr;
			*cs++ = entry->reg.addr;

			xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
				  entry->reg.addr, entry->clr_bits, entry->set_bits);
		}

		/* reset used GPR */
		*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) |
			MI_LRI_LRM_CS_MMIO;
		*cs++ = CS_GPR_REG(0, 0).addr;
		*cs++ = 0;
		*cs++ = CS_GPR_REG(0, 1).addr;
		*cs++ = 0;
		*cs++ = CS_GPR_REG(0, 2).addr;
		*cs++ = 0;
	}

	cs = xe_lrc_emit_hwe_state_instructions(q, cs);

	bb->len = cs - bb->cs;

	ret = emit_job_sync(q, bb, HZ);

	xe_bb_free(bb, NULL);

	return ret;
}

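/**
 * xe_gt_record_default_lrcs - Record the default ("golden") LRC per class
 * @gt: the GT object
 *
 * For each engine class, run a workaround job to prime the LRC with known
 * good state, run a nop job on a second queue to force a context switch (and
 * hence a context save), then copy the resulting LRC image into
 * gt->default_lrc[].
 *
 * Return: 0 on success, negative error code otherwise.
 */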
int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_exec_queue *q, *nop_q;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_gt_lrc_size(gt, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(q)) {
			err = PTR_ERR(q);
			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
				  hwe->name, q);
			return err;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_exec_queue;
		}

		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(nop_q)) {
			err = PTR_ERR(nop_q);
			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
				  hwe->name, nop_q);
			goto put_exec_queue;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_q);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_q->guc->id);
			goto put_nop_q;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &q->lrc[0]->bo->vmap,
				   xe_lrc_pphwsp_offset(q->lrc[0]),
				   xe_gt_lrc_size(gt, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
		xe_exec_queue_put(nop_q);
put_exec_queue:
		xe_exec_queue_put(q);
		if (err)
			break;
	}

	return err;
}

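/**
 * xe_gt_init_early - Early GT initialization
 * @gt: the GT object
 *
 * Set up SR-IOV state, workaround and tuning tables, force wake, TLB
 * invalidation and MMIO access for this GT so that later init stages can
 * talk to the hardware.
 *
 * Return: 0 on success, negative error code otherwise.
 */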
int xe_gt_init_early(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	if (IS_SRIOV_PF(gt_to_xe(gt))) {
		err = xe_gt_sriov_pf_init_early(gt);
		if (err)
			return err;
	}

	if (IS_SRIOV_VF(gt_to_xe(gt))) {
		err = xe_gt_sriov_vf_init_early(gt);
		if (err)
			return err;
	}

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_gt_init(gt);
	if (err)
		return err;

	err = xe_tuning_init(gt);
	if (err)
		return err;

	xe_wa_process_gt_oob(gt);

	xe_force_wake_init_gt(gt, gt_to_fw(gt));
	spin_lock_init(&gt->global_invl_lock);

	err = xe_gt_tlb_inval_init_early(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	/*
	 * Only after this point can GT-specific MMIO operations
	 * (including things like communication with the GuC)
	 * be performed.
	 */
	xe_gt_mmio_init(gt);

	err = xe_uc_init_noalloc(&gt->uc);
	if (err)
		return err;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	xe_gt_mcr_init_early(gt);
	xe_pat_init(gt);
	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return 0;
}

static void dump_pat_on_error(struct xe_gt *gt)
{
	struct drm_printer p;
	char prefix[32];

	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);

	xe_pat_dump(gt, &p);
}

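/* Initialization steps that only need the GT force wake domain awake. */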
static int gt_init_with_gt_forcewake(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;

	err = xe_uc_init(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);
	xe_gt_enable_host_l2_vram(gt);

	if (xe_gt_is_main_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			goto err_force_wake;
		if (IS_SRIOV_PF(gt_to_xe(gt)))
			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
	}

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err) {
		dump_pat_on_error(gt);
		goto err_force_wake;
	}

	err = xe_hw_engine_class_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/* Initialize CCS mode sysfs after early initialization of HW engines */
	err = xe_gt_ccs_mode_sysfs_init(gt);
	if (err)
		goto err_force_wake;

	/*
	 * Stash hardware-reported version. Since this register does not exist
	 * on pre-MTL platforms, reading it there will (correctly) return 0.
	 */
	gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return err;
}

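/* Initialization steps that need all force wake domains awake. */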
static int gt_init_with_all_forcewake(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		err = -ETIMEDOUT;
		goto err_force_wake;
	}

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_wa_process_gt(gt);
	xe_tuning_process_gt(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		goto err_force_wake;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_hw_engines_init(gt);
	if (err)
		goto err_force_wake;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		goto err_force_wake;

	if (xe_gt_is_main_type(gt)) {
		/*
		 * USM has its own SA pool so that it does not block behind
		 * user operations
		 */
		if (gt_to_xe(gt)->info.has_usm) {
			struct xe_device *xe = gt_to_xe(gt);

			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
			if (IS_ERR(gt->usm.bb_pool)) {
				err = PTR_ERR(gt->usm.bb_pool);
				goto err_force_wake;
			}
		}
	}

	if (xe_gt_is_main_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		err = xe_migrate_init(tile->migrate);
		if (err)
			goto err_force_wake;
	}

	err = xe_uc_load_hw(&gt->uc);
	if (err)
		goto err_force_wake;

	/* Configure default CCS mode of 1 engine with all resources */
	if (xe_gt_ccs_mode_enabled(gt)) {
		gt->ccs_mode = 1;
		xe_gt_apply_ccs_mode(gt);
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return 0;

err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	return err;
}

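/* Device-managed cleanup action registered in xe_gt_init(). */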
static void xe_gt_fini(void *arg)
{
	struct xe_gt *gt = arg;
	int i;

	if (disable_work_sync(&gt->reset.worker))
		/*
		 * If gt_reset_worker was halted from executing, take care of
		 * releasing the rpm reference here.
		 */
		xe_pm_runtime_put(gt_to_xe(gt));

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	xe_gt_disable_host_l2_vram(gt);
}

int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, xe_gt_fini, gt);
	if (err)
		return err;

	err = xe_gt_sysfs_init(gt);
	if (err)
		return err;

	err = gt_init_with_gt_forcewake(gt);
	if (err)
		return err;

	err = xe_gt_idle_init(&gt->gtidle);
	if (err)
		return err;

	err = xe_gt_freq_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = gt_init_with_all_forcewake(gt);
	if (err)
		return err;

	xe_gt_record_user_engines(gt);

	err = xe_eu_stall_init(gt);
	if (err)
		return err;

	if (IS_SRIOV_VF(gt_to_xe(gt))) {
		err = xe_gt_sriov_vf_init(gt);
		if (err)
			return err;
	}

	return 0;
}

/**
 * xe_gt_mmio_init() - Initialize GT's MMIO access
 * @gt: the GT object
 *
 * Initialize GT's MMIO accessor, which will be used to access registers inside
 * this GT.
 */
void xe_gt_mmio_init(struct xe_gt *gt)
{
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);

	xe_mmio_init(&gt->mmio, tile, tile->mmio.regs, tile->mmio.regs_size);

	if (gt->info.type == XE_GT_TYPE_MEDIA) {
		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
	} else {
		gt->mmio.adj_offset = 0;
		gt->mmio.adj_limit = 0;
	}

	if (IS_SRIOV_VF(xe))
		gt->mmio.sriov_vf_gt = gt;
}

void xe_gt_record_user_engines(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	gt->user_engines.mask = 0;
	memset(gt->user_engines.instances_per_class, 0,
	       sizeof(gt->user_engines.instances_per_class));

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		gt->user_engines.mask |= BIT_ULL(id);
		gt->user_engines.instances_per_class[hwe->class]++;
	}

	xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
		     == gt->info.engine_mask);
}

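/*
 * Trigger a full GT reset through GDRST (or the VF reset path on a VF) and
 * wait for the hardware to complete it.
 */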
static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return xe_gt_sriov_vf_reset(gt);

	xe_gsc_wa_14015076503(gt, true);

	xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	xe_gsc_wa_14015076503(gt, false);

	return err;
}

static int vf_gt_restart(struct xe_gt *gt)
{
	int err;

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_load_hw(&gt->uc);
	if (err)
		return err;

	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	return 0;
}

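/*
 * Re-initialize GT hardware state after a reset or resume: PAT, MCR
 * defaults, saved registers, WOPCM, rings, uC firmware reload and CCS/freq
 * restore. VFs only reload and restart the uC.
 */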
static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return vf_gt_restart(gt);

	xe_pat_init(gt);

	xe_gt_enable_host_l2_vram(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_load_hw(&gt->uc);
	if (err)
		return err;

	if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	xe_mocs_init(gt);
	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);

	/* Get CCS mode in sync between sw/hw */
	xe_gt_apply_ccs_mode(gt);

	/* Restore GT freq to expected values */
	xe_gt_sanitize_freq(gt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_restart(gt);

	return 0;
}

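/*
 * Worker that performs the GT reset queued by xe_gt_reset_async(): stop the
 * uC and invalidate TLBs, reset the GT, restart it, and declare the device
 * wedged if any step fails.
 */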
static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
	unsigned int fw_ref;
	int err;

	if (xe_device_wedged(gt_to_xe(gt)))
		goto err_pm_put;

	/* We only support GT resets with GuC submission */
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		goto err_pm_put;

	xe_gt_info(gt, "reset started\n");

	if (xe_fault_inject_gt_reset()) {
		err = -ECANCELED;
		goto err_fail;
	}

	xe_gt_sanitize(gt);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		err = -ETIMEDOUT;
		goto err_out;
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_stop_prepare(gt);

	xe_uc_gucrc_disable(&gt->uc);
	xe_uc_stop_prepare(&gt->uc);
	xe_pagefault_reset(gt_to_xe(gt), gt);

	xe_uc_stop(&gt->uc);

	xe_tlb_inval_reset(&gt->tlb_inval);

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	/* Pair with get while enqueueing the work in xe_gt_reset_async() */
	xe_pm_runtime_put(gt_to_xe(gt));

	xe_gt_info(gt, "reset done\n");

	return;

err_out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	XE_WARN_ON(xe_uc_start(&gt->uc));

err_fail:
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
	xe_device_declare_wedged(gt_to_xe(gt));
err_pm_put:
	xe_pm_runtime_put(gt_to_xe(gt));
}

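/**
 * xe_gt_reset_async - Queue an asynchronous GT reset
 * @gt: the GT object
 *
 * Kick the GT reset worker on the GT's ordered workqueue unless a reset is
 * already in flight. Takes a runtime PM reference that the worker releases.
 */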
void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0));

	/* Don't do a reset while one is already in flight */
	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");

	/* Pair with put in gt_reset_worker() if work is enqueued */
	xe_pm_runtime_get_noresume(gt_to_xe(gt));
	if (!queue_work(gt->ordered_wq, &gt->reset.worker))
		xe_pm_runtime_put(gt_to_xe(gt));
}

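/**
 * xe_gt_suspend_prepare - Prepare the GT for an upcoming suspend
 * @gt: the GT object
 *
 * Take force wake and let the uC prepare for suspend.
 */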
void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);

	xe_uc_suspend_prepare(&gt->uc);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

int xe_gt_suspend(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	xe_gt_dbg(gt, "suspending\n");
	xe_gt_sanitize(gt);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		goto err_msg;

	err = xe_uc_suspend(&gt->uc);
	if (err)
		goto err_force_wake;

	xe_gt_idle_disable_pg(gt);

	xe_gt_disable_host_l2_vram(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_dbg(gt, "suspended\n");

	return 0;

err_msg:
	err = -ETIMEDOUT;
err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));

	return err;
}

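/* Reset the GT on shutdown so the hardware is left in a quiescent state. */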
void xe_gt_shutdown(struct xe_gt *gt)
{
	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	do_gt_reset(gt);
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}

/**
 * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
 * @gt: the GT object
 *
 * Called after driver init/GSC load completes to restore GT frequencies if we
 * limited them for any WAs.
 */
int xe_gt_sanitize_freq(struct xe_gt *gt)
{
	int ret = 0;

	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
	    XE_GT_WA(gt, 22019338487))
		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);

	return ret;
}

int xe_gt_resume(struct xe_gt *gt)
{
	unsigned int fw_ref;
	int err;

	xe_gt_dbg(gt, "resuming\n");
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		goto err_msg;

	err = do_gt_restart(gt);
	if (err)
		goto err_force_wake;

	xe_gt_idle_enable_pg(gt);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_dbg(gt, "resumed\n");

	return 0;

err_msg:
	err = -ETIMEDOUT;
err_force_wake:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err));

	return err;
}

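/*
 * Look up a hw engine on @gt by class and either physical or logical
 * instance number.
 */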
struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		     (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

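/*
 * Return any hw engine sharing a reset domain with @class: render and
 * compute count as one domain, other classes map to themselves.
 */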
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							  enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}

struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		return hwe;

	return NULL;
}

/**
 * xe_gt_declare_wedged() - Declare GT wedged
 * @gt: the GT object
 *
 * Wedge the GT which stops all submission, saves desired debug state, and
 * cleans up anything which could timeout.
 */
void xe_gt_declare_wedged(struct xe_gt *gt)
{
	xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);

	xe_uc_declare_wedged(&gt->uc);
	xe_tlb_inval_reset(&gt->tlb_inval);
}
1070