// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt.h"

#include <linux/minmax.h>

#include <drm/drm_managed.h>
#include <uapi/drm/xe_drm.h>

#include <generated/xe_wa_oob.h>

#include "instructions/xe_alu_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_device.h"
#include "xe_eu_stall.h"
#include "xe_exec_queue.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gsc.h"
#include "xe_gt_ccs_mode.h"
#include "xe_gt_clock.h"
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
#include "xe_guc_submit.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
#include "xe_lmtt.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pagefault.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
#include "xe_ring_ops.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
#include "xe_tlb_inval.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_vm.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct drm_device *drm = &xe->drm;
	bool shared_wq = xe->info.needs_shared_vf_gt_wq && tile->primary_gt &&
		IS_SRIOV_VF(xe);
	struct workqueue_struct *ordered_wq;
	struct xe_gt *gt;

	gt = drmm_kzalloc(drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	if (shared_wq && tile->primary_gt->ordered_wq)
		ordered_wq = tile->primary_gt->ordered_wq;
	else
		ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq",
							  WQ_MEM_RECLAIM);
	if (IS_ERR(ordered_wq))
		return ERR_CAST(ordered_wq);

	gt->ordered_wq = ordered_wq;

	return gt;
}

void xe_gt_sanitize(struct xe_gt *gt)
{
	/*
	 * FIXME: if xe_uc_sanitize is called here, the driver will not
	 * reload on TGL
	 */
	xe_guc_submit_disable(&gt->uc.guc);
}

static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
{
	u32 reg;

	if (!XE_GT_WA(gt, 16023588340))
		return;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return;

	if (xe_gt_is_main_type(gt)) {
		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
		reg |= CG_DIS_CNTLBUS;
		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
	}

	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
}

static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
{
	u32 reg;

	if (!XE_GT_WA(gt, 16023588340))
		return;

	if (xe_gt_is_media_type(gt))
		return;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
	reg &= ~CG_DIS_CNTLBUS;
	xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
}

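/*
 * Enable 1-way compression coherency on platforms with graphics version 30 or
 * newer that use flat CCS: set EN_CMP_1WCOH in GAMREQSTRM_CTRL and the
 * matching EN_CMP_1WCOH_GW bit in the GAM walker control register (media or
 * 3D flavor, depending on the GT type). No-op on VFs.
 */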
static void xe_gt_enable_comp_1wcoh(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref;
	u32 reg;

	if (IS_SRIOV_VF(xe))
		return;

	if (GRAPHICS_VER(xe) >= 30 && xe->info.has_flat_ccs) {
		fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
		if (!fw_ref)
			return;

		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
		reg |= EN_CMP_1WCOH;
		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);

		if (xe_gt_is_media_type(gt)) {
			xe_mmio_rmw32(&gt->mmio, XE2_GAMWALK_CTRL_MEDIA, 0, EN_CMP_1WCOH_GW);
		} else {
			reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMWALK_CTRL_3D);
			reg |= EN_CMP_1WCOH_GW;
			xe_gt_mcr_multicast_write(gt, XE2_GAMWALK_CTRL_3D, reg);
		}

		xe_force_wake_put(gt_to_fw(gt), fw_ref);
	}
}

static void gt_reset_worker(struct work_struct *w);

static int emit_job_sync(struct xe_exec_queue *q, struct xe_bb *bb,
			 long timeout_jiffies)
{
	struct xe_sched_job *job;
	struct dma_fence *fence;
	long timeout;

	job = xe_bb_create_job(q, bb);
	if (IS_ERR(job))
		return PTR_ERR(job);

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	timeout = dma_fence_wait_timeout(fence, false, timeout_jiffies);
	dma_fence_put(fence);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;

	return 0;
}

static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_bb *bb;
	int ret;

	bb = xe_bb_new(gt, 4, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	ret = emit_job_sync(q, bb, HZ);
	xe_bb_free(bb, NULL);

	return ret;
}

static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
{
	struct xe_reg_sr *sr = &q->hwe->reg_lrc;
	struct xe_reg_sr_entry *entry;
	int count_rmw = 0, count = 0, ret;
	unsigned long idx;
	struct xe_bb *bb;
	size_t bb_len = 0;
	u32 *cs;

	/* count RMW registers as those will be handled separately */
	xa_for_each(&sr->xa, idx, entry) {
		if (entry->reg.masked || entry->clr_bits == ~0)
			++count;
		else
			++count_rmw;
	}

	if (count)
		bb_len += count * 2 + 1;

	if (count_rmw)
		bb_len += count_rmw * 20 + 7;

	if (q->hwe->class == XE_ENGINE_CLASS_RENDER)
		/*
		 * Big enough to emit all of the context's 3DSTATE via
		 * xe_lrc_emit_hwe_state_instructions()
		 */
		bb_len += xe_gt_lrc_size(gt, q->hwe->class) / sizeof(u32);

	xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", q->hwe->name, bb_len);

	bb = xe_bb_new(gt, bb_len, false);
	if (IS_ERR(bb))
		return PTR_ERR(bb);

	cs = bb->cs;

	if (count) {
		/*
		 * Emit single LRI with all non RMW regs: 1 leading dw + 2dw per
		 * reg + 1
		 */

		*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count);

		xa_for_each(&sr->xa, idx, entry) {
			struct xe_reg reg = entry->reg;
			u32 val;

			if (reg.masked)
				val = entry->clr_bits << 16;
			else if (entry->clr_bits == ~0)
				val = 0;
			else
				continue;

			val |= entry->set_bits;

			*cs++ = reg.addr;
			*cs++ = val;
			xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val);
		}
	}

	if (count_rmw) {
		/* Emit MI_MATH for each RMW reg: 20dw per reg + 7 trailing dw */
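		/*
		 * Each register is updated via a read-modify-write through the
		 * CS GPRs: MI_LOAD_REGISTER_REG copies the current value into
		 * GPR0, MI_LOAD_REGISTER_IMM loads the clear mask into GPR1
		 * and the set mask into GPR2, MI_MATH computes
		 * (GPR0 & ~GPR1) | GPR2, and a final MI_LOAD_REGISTER_REG
		 * writes the result back to the target register.
		 */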

		xa_for_each(&sr->xa, idx, entry) {
			if (entry->reg.masked || entry->clr_bits == ~0)
				continue;

			*cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO;
			*cs++ = entry->reg.addr;
			*cs++ = CS_GPR_REG(0, 0).addr;

			*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) |
				MI_LRI_LRM_CS_MMIO;
			*cs++ = CS_GPR_REG(0, 1).addr;
			*cs++ = entry->clr_bits;
			*cs++ = CS_GPR_REG(0, 2).addr;
			*cs++ = entry->set_bits;

			*cs++ = MI_MATH(8);
			*cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
			*cs++ = CS_ALU_INSTR_LOADINV(SRCB, REG1);
			*cs++ = CS_ALU_INSTR_AND;
			*cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);
			*cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0);
			*cs++ = CS_ALU_INSTR_LOAD(SRCB, REG2);
			*cs++ = CS_ALU_INSTR_OR;
			*cs++ = CS_ALU_INSTR_STORE(REG0, ACCU);

			*cs++ = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO;
			*cs++ = CS_GPR_REG(0, 0).addr;
			*cs++ = entry->reg.addr;

			xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n",
				  entry->reg.addr, entry->clr_bits, entry->set_bits);
		}

		/* reset used GPR */
		*cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) |
			MI_LRI_LRM_CS_MMIO;
		*cs++ = CS_GPR_REG(0, 0).addr;
		*cs++ = 0;
		*cs++ = CS_GPR_REG(0, 1).addr;
		*cs++ = 0;
		*cs++ = CS_GPR_REG(0, 2).addr;
		*cs++ = 0;
	}

	cs = xe_lrc_emit_hwe_state_instructions(q, cs);

	bb->len = cs - bb->cs;

	ret = emit_job_sync(q, bb, HZ);

	xe_bb_free(bb, NULL);

	return ret;
}

int xe_gt_record_default_lrcs(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err = 0;

	for_each_hw_engine(hwe, gt, id) {
		struct xe_exec_queue *q, *nop_q;
		void *default_lrc;

		if (gt->default_lrc[hwe->class])
			continue;

		xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe);
		xe_wa_process_lrc(hwe);
		xe_hw_engine_setup_default_lrc_state(hwe);
		xe_tuning_process_lrc(hwe);

		default_lrc = drmm_kzalloc(&xe->drm,
					   xe_gt_lrc_size(gt, hwe->class),
					   GFP_KERNEL);
		if (!default_lrc)
			return -ENOMEM;

		q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
					 hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(q)) {
			err = PTR_ERR(q);
			xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
				  hwe->name, q);
			return err;
		}

		/* Prime golden LRC with known good state */
		err = emit_wa_job(gt, q);
		if (err) {
			xe_gt_err(gt, "hwe %s: emit_wa_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), q->guc->id);
			goto put_exec_queue;
		}

		nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
					     1, hwe, EXEC_QUEUE_FLAG_KERNEL, 0);
		if (IS_ERR(nop_q)) {
			err = PTR_ERR(nop_q);
			xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
				  hwe->name, nop_q);
			goto put_exec_queue;
		}

		/* Switch to different LRC */
		err = emit_nop_job(gt, nop_q);
		if (err) {
			xe_gt_err(gt, "hwe %s: nop emit_nop_job failed (%pe) guc_id=%u\n",
				  hwe->name, ERR_PTR(err), nop_q->guc->id);
			goto put_nop_q;
		}

		xe_map_memcpy_from(xe, default_lrc,
				   &q->lrc[0]->bo->vmap,
				   xe_lrc_pphwsp_offset(q->lrc[0]),
				   xe_gt_lrc_size(gt, hwe->class));

		gt->default_lrc[hwe->class] = default_lrc;
put_nop_q:
		xe_exec_queue_put(nop_q);
put_exec_queue:
		xe_exec_queue_put(q);
		if (err)
			break;
	}

	return err;
}

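/**
 * xe_gt_init_early() - Early initialization of the GT
 * @gt: the GT object
 *
 * Perform the GT setup that must happen before full hardware initialization:
 * SR-IOV early init, workaround and tuning processing, force wake and TLB
 * invalidation setup, and early MMIO, MCR and PAT initialization.
 *
 * Return: 0 on success, negative error code otherwise.
 */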
int xe_gt_init_early(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_PF(gt_to_xe(gt))) {
		err = xe_gt_sriov_pf_init_early(gt);
		if (err)
			return err;
	}

	if (IS_SRIOV_VF(gt_to_xe(gt))) {
		err = xe_gt_sriov_vf_init_early(gt);
		if (err)
			return err;
	}

	xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));

	err = xe_wa_gt_init(gt);
	if (err)
		return err;

	err = xe_tuning_init(gt);
	if (err)
		return err;

	xe_wa_process_gt_oob(gt);

	xe_force_wake_init_gt(gt, gt_to_fw(gt));
	spin_lock_init(&gt->global_invl_lock);

	err = xe_gt_tlb_inval_init_early(gt);
	if (err)
		return err;

	xe_mocs_init_early(gt);

	/*
	 * Only after this point can GT-specific MMIO operations
	 * (including things like communication with the GuC)
	 * be performed.
	 */
	xe_gt_mmio_init(gt);

	err = xe_uc_init_noalloc(&gt->uc);
	if (err)
		return err;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return -ETIMEDOUT;

	xe_gt_mcr_init_early(gt);
	xe_pat_init(gt);

	return 0;
}

static void dump_pat_on_error(struct xe_gt *gt)
{
	struct drm_printer p;
	char prefix[32];

	snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id);
	p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix);

	xe_pat_dump(gt, &p);
}

static int gt_init_with_gt_forcewake(struct xe_gt *gt)
{
	int err;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return -ETIMEDOUT;

	err = xe_uc_init(&gt->uc);
	if (err)
		return err;

	xe_gt_topology_init(gt);
	xe_gt_mcr_init(gt);
	xe_gt_enable_host_l2_vram(gt);
	xe_gt_enable_comp_1wcoh(gt);

	if (xe_gt_is_main_type(gt)) {
		err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt);
		if (err)
			return err;
		if (IS_SRIOV_PF(gt_to_xe(gt)))
			xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt);
	}

	/* Enable per hw engine IRQs */
	xe_irq_enable_hwe(gt);

	/* Rerun MCR init as we now have hw engine list */
	xe_gt_mcr_init(gt);

	err = xe_hw_engines_init_early(gt);
	if (err) {
		dump_pat_on_error(gt);
		return err;
	}

	err = xe_hw_engine_class_sysfs_init(gt);
	if (err)
		return err;

	/* Initialize CCS mode sysfs after early initialization of HW engines */
	err = xe_gt_ccs_mode_sysfs_init(gt);
	if (err)
		return err;

	/*
	 * Stash hardware-reported version. Since this register does not exist
	 * on pre-MTL platforms, reading it there will (correctly) return 0.
	 */
	gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);

	return 0;
}

static int gt_init_with_all_forcewake(struct xe_gt *gt)
{
	int err;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL))
		return -ETIMEDOUT;

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_wa_process_gt(gt);
	xe_tuning_process_gt(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_gt_clock_init(gt);
	if (err)
		return err;

	xe_mocs_init(gt);
	err = xe_execlist_init(gt);
	if (err)
		return err;

	err = xe_hw_engines_init(gt);
	if (err)
		return err;

	err = xe_uc_init_post_hwconfig(&gt->uc);
	if (err)
		return err;

	if (xe_gt_is_main_type(gt)) {
		/*
		 * USM has its own SA pool so that it does not block behind
		 * user operations
		 */
		if (gt_to_xe(gt)->info.has_usm) {
			struct xe_device *xe = gt_to_xe(gt);

			gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt),
								IS_DGFX(xe) ? SZ_1M : SZ_512K, 16);
			if (IS_ERR(gt->usm.bb_pool))
				return PTR_ERR(gt->usm.bb_pool);
		}
	}

	if (xe_gt_is_main_type(gt)) {
		struct xe_tile *tile = gt_to_tile(gt);

		err = xe_migrate_init(tile->migrate);
		if (err)
			return err;
	}

	err = xe_uc_load_hw(&gt->uc);
	if (err)
		return err;

	/* Configure default CCS mode of 1 engine with all resources */
	if (xe_gt_ccs_mode_enabled(gt)) {
		gt->ccs_mode = 1;
		xe_gt_apply_ccs_mode(gt);
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	return 0;
}

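/*
 * devm teardown action registered in xe_gt_init(): drop the runtime PM
 * reference held by a cancelled reset worker, finish the per-class fence
 * IRQs and disable host L2 VRAM caching.
 */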
static void xe_gt_fini(void *arg)
{
	struct xe_gt *gt = arg;
	int i;

	if (disable_work_sync(&gt->reset.worker))
		/*
		 * If gt_reset_worker was halted from executing, take care of
		 * releasing the rpm reference here.
		 */
		xe_pm_runtime_put(gt_to_xe(gt));

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
		xe_hw_fence_irq_finish(&gt->fence_irq[i]);

	xe_gt_disable_host_l2_vram(gt);
}

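/**
 * xe_gt_init() - Initialize a GT
 * @gt: the GT object
 *
 * Set up the reset worker and fence IRQs, then run the force-wake-guarded
 * initialization steps: sysfs, idle and frequency handling, hardware engines,
 * the microcontrollers and, on VFs, the SR-IOV VF state.
 *
 * Return: 0 on success, negative error code otherwise.
 */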
int xe_gt_init(struct xe_gt *gt)
{
	int err;
	int i;

	INIT_WORK(&gt->reset.worker, gt_reset_worker);

	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) {
		gt->ring_ops[i] = xe_ring_ops_get(gt, i);
		xe_hw_fence_irq_init(&gt->fence_irq[i]);
	}

	err = devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, xe_gt_fini, gt);
	if (err)
		return err;

	err = xe_gt_sysfs_init(gt);
	if (err)
		return err;

	err = gt_init_with_gt_forcewake(gt);
	if (err)
		return err;

	err = xe_gt_idle_init(&gt->gtidle);
	if (err)
		return err;

	err = xe_gt_freq_init(gt);
	if (err)
		return err;

	xe_force_wake_init_engines(gt, gt_to_fw(gt));

	err = gt_init_with_all_forcewake(gt);
	if (err)
		return err;

	xe_gt_record_user_engines(gt);

	err = xe_eu_stall_init(gt);
	if (err)
		return err;

	if (IS_SRIOV_VF(gt_to_xe(gt))) {
		err = xe_gt_sriov_vf_init(gt);
		if (err)
			return err;
	}

	return 0;
}

/**
 * xe_gt_mmio_init() - Initialize GT's MMIO access
 * @gt: the GT object
 *
 * Initialize GT's MMIO accessor, which will be used to access registers inside
 * this GT.
 */
void xe_gt_mmio_init(struct xe_gt *gt)
{
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);

	xe_mmio_init(&gt->mmio, tile, tile->mmio.regs, tile->mmio.regs_size);

	if (gt->info.type == XE_GT_TYPE_MEDIA) {
		gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET;
		gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH;
	} else {
		gt->mmio.adj_offset = 0;
		gt->mmio.adj_limit = 0;
	}

	if (IS_SRIOV_VF(xe))
		gt->mmio.sriov_vf_gt = gt;
}

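/**
 * xe_gt_record_user_engines() - Record engines available to userspace
 * @gt: the GT object
 *
 * Rebuild the mask and per-class instance counts of hardware engines exposed
 * to userspace, skipping engines reserved for kernel use.
 */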
void xe_gt_record_user_engines(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	gt->user_engines.mask = 0;
	memset(gt->user_engines.instances_per_class, 0,
	       sizeof(gt->user_engines.instances_per_class));

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		gt->user_engines.mask |= BIT_ULL(id);
		gt->user_engines.instances_per_class[hwe->class]++;
	}

	xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask)
		     == gt->info.engine_mask);
}

static int do_gt_reset(struct xe_gt *gt)
{
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return xe_gt_sriov_vf_reset(gt);

	xe_gsc_wa_14015076503(gt, true);

	xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL);
	err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false);
	if (err)
		xe_gt_err(gt, "failed to clear GRDOM_FULL (%pe)\n",
			  ERR_PTR(err));

	xe_gsc_wa_14015076503(gt, false);

	return err;
}

static int vf_gt_restart(struct xe_gt *gt)
{
	int err;

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_load_hw(&gt->uc);
	if (err)
		return err;

	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	return 0;
}

static int do_gt_restart(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	int err;

	if (IS_SRIOV_VF(gt_to_xe(gt)))
		return vf_gt_restart(gt);

	xe_pat_init(gt);

	xe_gt_enable_host_l2_vram(gt);
	xe_gt_enable_comp_1wcoh(gt);

	xe_gt_mcr_set_implicit_defaults(gt);
	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);

	err = xe_wopcm_init(&gt->uc.wopcm);
	if (err)
		return err;

	for_each_hw_engine(hwe, gt, id)
		xe_hw_engine_enable_ring(hwe);

	err = xe_uc_sanitize_reset(&gt->uc);
	if (err)
		return err;

	err = xe_uc_load_hw(&gt->uc);
	if (err)
		return err;

	if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
		xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_init_hw(gt);

	xe_mocs_init(gt);

	for_each_hw_engine(hwe, gt, id)
		xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);

	/* Get CCS mode in sync between sw/hw */
	xe_gt_apply_ccs_mode(gt);

	err = xe_uc_start(&gt->uc);
	if (err)
		return err;

	/* Restore GT freq to expected values */
	xe_gt_sanitize_freq(gt);

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_restart(gt);

	return 0;
}

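/*
 * Full GT reset, run from the GT's ordered workqueue: sanitize the GT, stop
 * the GuC and pending page faults, reset the hardware, then restart the
 * firmware and engines. The runtime PM reference taken in xe_gt_reset_async()
 * is dropped on every exit path.
 */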
static void gt_reset_worker(struct work_struct *w)
{
	struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
	unsigned int fw_ref;
	int err;

	if (xe_device_wedged(gt_to_xe(gt)))
		goto err_pm_put;

	/* We only support GT resets with GuC submission */
	if (!xe_device_uc_enabled(gt_to_xe(gt)))
		goto err_pm_put;

	xe_gt_info(gt, "reset started\n");

	if (xe_fault_inject_gt_reset()) {
		err = -ECANCELED;
		goto err_fail;
	}

	xe_gt_sanitize(gt);

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
		err = -ETIMEDOUT;
		goto err_out;
	}

	if (IS_SRIOV_PF(gt_to_xe(gt)))
		xe_gt_sriov_pf_stop_prepare(gt);

	xe_uc_gucrc_disable(&gt->uc);
	xe_uc_stop_prepare(&gt->uc);
	xe_pagefault_reset(gt_to_xe(gt), gt);

	xe_uc_stop(&gt->uc);

	xe_tlb_inval_reset(&gt->tlb_inval);

	err = do_gt_reset(gt);
	if (err)
		goto err_out;

	err = do_gt_restart(gt);
	if (err)
		goto err_out;

	xe_force_wake_put(gt_to_fw(gt), fw_ref);

	/* Pair with get while enqueueing the work in xe_gt_reset_async() */
	xe_pm_runtime_put(gt_to_xe(gt));

	xe_gt_info(gt, "reset done\n");

	return;

err_out:
	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	XE_WARN_ON(xe_uc_start(&gt->uc));

err_fail:
	xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
	xe_device_declare_wedged(gt_to_xe(gt));
err_pm_put:
	xe_pm_runtime_put(gt_to_xe(gt));
}

void xe_gt_reset_async(struct xe_gt *gt)
{
	xe_gt_info(gt, "trying reset from %ps\n", __builtin_return_address(0));

	/* Don't do a reset while one is already in flight */
	if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc))
		return;

	xe_gt_info(gt, "reset queued\n");

	/* Pair with put in gt_reset_worker() if work is enqueued */
	xe_pm_runtime_get_noresume(gt_to_xe(gt));
	if (!queue_work(gt->ordered_wq, &gt->reset.worker))
		xe_pm_runtime_put(gt_to_xe(gt));
}

void xe_gt_suspend_prepare(struct xe_gt *gt)
{
	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	xe_uc_suspend_prepare(&gt->uc);
}

int xe_gt_suspend(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "suspending\n");
	xe_gt_sanitize(gt);

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
		xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
		return -ETIMEDOUT;
	}

	err = xe_uc_suspend(&gt->uc);
	if (err) {
		xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err));
		return err;
	}

	xe_gt_idle_disable_pg(gt);

	xe_gt_disable_host_l2_vram(gt);

	xe_gt_dbg(gt, "suspended\n");

	return 0;
}

void xe_gt_shutdown(struct xe_gt *gt)
{
	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	do_gt_reset(gt);
}

/**
 * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
 * @gt: the GT object
 *
 * Called after driver init/GSC load completes to restore GT frequencies if we
 * limited them for any WAs.
 */
int xe_gt_sanitize_freq(struct xe_gt *gt)
{
	int ret = 0;

	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
	     xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
	    XE_GT_WA(gt, 22019338487))
		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);

	return ret;
}

int xe_gt_resume(struct xe_gt *gt)
{
	int err;

	xe_gt_dbg(gt, "resuming\n");
	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
		xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
		return -ETIMEDOUT;
	}

	err = do_gt_restart(gt);
	if (err)
		return err;

	xe_gt_idle_enable_pg(gt);

	xe_gt_dbg(gt, "resumed\n");

	return 0;
}

/**
 * xe_gt_runtime_suspend() - GT runtime suspend
 * @gt: the GT object
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_gt_runtime_suspend(struct xe_gt *gt)
{
	xe_gt_dbg(gt, "runtime suspending\n");

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
		xe_gt_err(gt, "runtime suspend failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
		return -ETIMEDOUT;
	}

	xe_uc_runtime_suspend(&gt->uc);
	xe_gt_disable_host_l2_vram(gt);

	xe_gt_dbg(gt, "runtime suspended\n");

	return 0;
}

/**
 * xe_gt_runtime_resume() - GT runtime resume
 * @gt: the GT object
 *
 * Return: 0 on success, negative error code otherwise.
 */
int xe_gt_runtime_resume(struct xe_gt *gt)
{
	xe_gt_dbg(gt, "runtime resuming\n");

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FORCEWAKE_ALL)) {
		xe_gt_err(gt, "runtime resume failed (%pe)\n", ERR_PTR(-ETIMEDOUT));
		return -ETIMEDOUT;
	}

	xe_gt_enable_host_l2_vram(gt);
	xe_uc_runtime_resume(&gt->uc);

	xe_gt_dbg(gt, "runtime resumed\n");

	return 0;
}

struct xe_hw_engine *xe_gt_hw_engine(struct xe_gt *gt,
				     enum xe_engine_class class,
				     u16 instance, bool logical)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		if (hwe->class == class &&
		    ((!logical && hwe->instance == instance) ||
		     (logical && hwe->logical_instance == instance)))
			return hwe;

	return NULL;
}

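/**
 * xe_gt_any_hw_engine_by_reset_domain() - Get an engine in the reset domain of @class
 * @gt: the GT object
 * @class: engine class to match against
 *
 * Render and compute engines share a reset domain and are treated as one.
 *
 * Return: any hardware engine in the same reset domain as @class, or NULL if
 * none is present.
 */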
struct xe_hw_engine *xe_gt_any_hw_engine_by_reset_domain(struct xe_gt *gt,
							  enum xe_engine_class class)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		switch (class) {
		case XE_ENGINE_CLASS_RENDER:
		case XE_ENGINE_CLASS_COMPUTE:
			if (hwe->class == XE_ENGINE_CLASS_RENDER ||
			    hwe->class == XE_ENGINE_CLASS_COMPUTE)
				return hwe;
			break;
		default:
			if (hwe->class == class)
				return hwe;
		}
	}

	return NULL;
}

struct xe_hw_engine *xe_gt_any_hw_engine(struct xe_gt *gt)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id)
		return hwe;

	return NULL;
}

/**
 * xe_gt_declare_wedged() - Declare GT wedged
 * @gt: the GT object
 *
 * Wedge the GT which stops all submission, saves desired debug state, and
 * cleans up anything which could timeout.
 */
void xe_gt_declare_wedged(struct xe_gt *gt)
{
	xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);

	xe_uc_declare_wedged(&gt->uc);
	xe_tlb_inval_reset(&gt->tlb_inval);
}