// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
#include "gt/intel_reset.h"
#include "intel_gsc_fw.h"
#include "intel_gsc_uc.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_print.h"
#include "intel_guc_submission.h"
#include "gt/intel_rps.h"
#include "intel_uc.h"

#include "i915_drv.h"

static const struct intel_uc_ops uc_ops_off;
static const struct intel_uc_ops uc_ops_on;

static void uc_expand_default_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	if (i915->params.enable_guc != -1)
		return;

	/* Don't enable GuC/HuC on pre-Gen12 */
	if (GRAPHICS_VER(i915) < 12) {
		i915->params.enable_guc = 0;
		return;
	}

	/* Don't enable GuC/HuC on older Gen12 platforms */
	if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
		i915->params.enable_guc = 0;
		return;
	}

	/* Intermediate platforms are HuC authentication only */
	if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) {
		i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
		return;
	}

	/* Default: enable HuC authentication and GuC submission */
	i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;

	/* XEHPSDV and PVC do not use HuC */
	if (IS_XEHPSDV(i915) || IS_PONTEVECCHIO(i915))
		i915->params.enable_guc &= ~ENABLE_GUC_LOAD_HUC;
}

/*
 * Reset GuC providing us with fresh state for both GuC and HuC.
 */
static int __intel_uc_reset_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int ret;
	u32 guc_status;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_reset_guc(gt);
	if (ret) {
		gt_err(gt, "Failed to reset GuC, ret = %d\n", ret);
		return ret;
	}

	guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
	gt_WARN(gt, !(guc_status & GS_MIA_IN_RESET),
		"GuC status: 0x%x, MIA core expected to be in reset\n",
		guc_status);

	return ret;
}

static void __confirm_options(struct intel_uc *uc)
{
	struct drm_i915_private *i915 = uc_to_gt(uc)->i915;

	drm_dbg(&i915->drm,
		"enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
		i915->params.enable_guc,
		str_yes_no(intel_uc_wants_guc(uc)),
		str_yes_no(intel_uc_wants_guc_submission(uc)),
		str_yes_no(intel_uc_wants_huc(uc)),
		str_yes_no(intel_uc_wants_guc_slpc(uc)));

	if (i915->params.enable_guc == 0) {
		GEM_BUG_ON(intel_uc_wants_guc(uc));
		GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
		GEM_BUG_ON(intel_uc_wants_huc(uc));
		GEM_BUG_ON(intel_uc_wants_guc_slpc(uc));
		return;
	}

	if (!intel_uc_supports_guc(uc))
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "GuC is not supported!");

	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
	    !intel_uc_supports_huc(uc))
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "HuC is not supported!");

	if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
	    !intel_uc_supports_guc_submission(uc))
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "GuC submission is N/A");

	if (i915->params.enable_guc & ~ENABLE_GUC_MASK)
		drm_info(&i915->drm,
			 "Incompatible option enable_guc=%d - %s\n",
			 i915->params.enable_guc, "undocumented flag");
}
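
/*
 * For reference (a sketch based on the checks above, not an exhaustive
 * description of the modparam): enable_guc is a bitmask built from
 * ENABLE_GUC_SUBMISSION and ENABLE_GUC_LOAD_HUC, so e.g. booting with
 * i915.enable_guc=2 requests HuC load/authentication only, while
 * i915.enable_guc=3 requests GuC submission plus HuC.
 */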
"undocumented flag"); 125 } 126 127 void intel_uc_init_early(struct intel_uc *uc) 128 { 129 uc_expand_default_options(uc); 130 131 intel_guc_init_early(&uc->guc); 132 intel_huc_init_early(&uc->huc); 133 intel_gsc_uc_init_early(&uc->gsc); 134 135 __confirm_options(uc); 136 137 if (intel_uc_wants_guc(uc)) 138 uc->ops = &uc_ops_on; 139 else 140 uc->ops = &uc_ops_off; 141 } 142 143 void intel_uc_init_late(struct intel_uc *uc) 144 { 145 intel_guc_init_late(&uc->guc); 146 } 147 148 void intel_uc_driver_late_release(struct intel_uc *uc) 149 { 150 } 151 152 /** 153 * intel_uc_init_mmio - setup uC MMIO access 154 * @uc: the intel_uc structure 155 * 156 * Setup minimal state necessary for MMIO accesses later in the 157 * initialization sequence. 158 */ 159 void intel_uc_init_mmio(struct intel_uc *uc) 160 { 161 intel_guc_init_send_regs(&uc->guc); 162 } 163 164 static void __uc_capture_load_err_log(struct intel_uc *uc) 165 { 166 struct intel_guc *guc = &uc->guc; 167 168 if (guc->log.vma && !uc->load_err_log) 169 uc->load_err_log = i915_gem_object_get(guc->log.vma->obj); 170 } 171 172 static void __uc_free_load_err_log(struct intel_uc *uc) 173 { 174 struct drm_i915_gem_object *log = fetch_and_zero(&uc->load_err_log); 175 176 if (log) 177 i915_gem_object_put(log); 178 } 179 180 void intel_uc_driver_remove(struct intel_uc *uc) 181 { 182 intel_uc_fini_hw(uc); 183 intel_uc_fini(uc); 184 __uc_free_load_err_log(uc); 185 } 186 187 /* 188 * Events triggered while CT buffers are disabled are logged in the SCRATCH_15 189 * register using the same bits used in the CT message payload. Since our 190 * communication channel with guc is turned off at this point, we can save the 191 * message and handle it after we turn it back on. 192 */ 193 static void guc_clear_mmio_msg(struct intel_guc *guc) 194 { 195 intel_uncore_write(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15), 0); 196 } 197 198 static void guc_get_mmio_msg(struct intel_guc *guc) 199 { 200 u32 val; 201 202 spin_lock_irq(&guc->irq_lock); 203 204 val = intel_uncore_read(guc_to_gt(guc)->uncore, SOFT_SCRATCH(15)); 205 guc->mmio_msg |= val & guc->msg_enabled_mask; 206 207 /* 208 * clear all events, including the ones we're not currently servicing, 209 * to make sure we don't try to process a stale message if we enable 210 * handling of more events later. 
static void guc_handle_mmio_msg(struct intel_guc *guc)
{
	/* we need communication to be enabled to reply to GuC */
	GEM_BUG_ON(!intel_guc_ct_enabled(&guc->ct));

	spin_lock_irq(&guc->irq_lock);
	if (guc->mmio_msg) {
		intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1);
		guc->mmio_msg = 0;
	}
	spin_unlock_irq(&guc->irq_lock);
}

static int guc_enable_communication(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	GEM_BUG_ON(intel_guc_ct_enabled(&guc->ct));

	ret = i915_inject_probe_error(i915, -ENXIO);
	if (ret)
		return ret;

	ret = intel_guc_ct_enable(&guc->ct);
	if (ret)
		return ret;

	/* check for mmio messages received before/during the CT enable */
	guc_get_mmio_msg(guc);
	guc_handle_mmio_msg(guc);

	intel_guc_enable_interrupts(guc);

	/* check for CT messages received before we enabled interrupts */
	spin_lock_irq(gt->irq_lock);
	intel_guc_ct_event_handler(&guc->ct);
	spin_unlock_irq(gt->irq_lock);

	guc_dbg(guc, "communication enabled\n");

	return 0;
}

static void guc_disable_communication(struct intel_guc *guc)
{
	/*
	 * Events generated during or after CT disable are logged by the GuC
	 * via mmio. Make sure the register is clear before disabling CT since
	 * all events we cared about have already been processed via CT.
	 */
	guc_clear_mmio_msg(guc);

	intel_guc_disable_interrupts(guc);

	intel_guc_ct_disable(&guc->ct);

	/*
	 * Check for messages received during/after the CT disable. We do not
	 * expect any messages to have arrived via CT between the interrupt
	 * disable and the CT disable because GuC should've been idle until we
	 * triggered the CT disable protocol.
	 */
	guc_get_mmio_msg(guc);

	guc_dbg(guc, "communication disabled\n");
}
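
/*
 * Note on ordering below: the GuC firmware is fetched first and is mandatory
 * for the overall uC mode; if it cannot be fetched, the HuC and GSC firmwares
 * are marked as errored out too instead of being left in their transient
 * "SELECTED" state.
 */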
static void __uc_fetch_firmwares(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	int err;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	err = intel_uc_fw_fetch(&uc->guc.fw);
	if (err) {
		/* Make sure we transition out of transient "SELECTED" state */
		if (intel_uc_wants_huc(uc)) {
			gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling HuC\n", ERR_PTR(err));
			intel_uc_fw_change_status(&uc->huc.fw,
						  INTEL_UC_FIRMWARE_ERROR);
		}

		if (intel_uc_wants_gsc_uc(uc)) {
			gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling GSC\n", ERR_PTR(err));
			intel_uc_fw_change_status(&uc->gsc.fw,
						  INTEL_UC_FIRMWARE_ERROR);
		}

		return;
	}

	if (intel_uc_wants_huc(uc))
		intel_uc_fw_fetch(&uc->huc.fw);

	if (intel_uc_wants_gsc_uc(uc))
		intel_uc_fw_fetch(&uc->gsc.fw);
}

static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
	intel_uc_fw_cleanup_fetch(&uc->gsc.fw);
	intel_uc_fw_cleanup_fetch(&uc->huc.fw);
	intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}

static int __uc_init(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret;

	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	if (!intel_uc_uses_guc(uc))
		return 0;

	if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
		return -ENOMEM;

	ret = intel_guc_init(guc);
	if (ret)
		return ret;

	if (intel_uc_uses_huc(uc))
		intel_huc_init(huc);

	if (intel_uc_uses_gsc_uc(uc))
		intel_gsc_uc_init(&uc->gsc);

	return 0;
}

static void __uc_fini(struct intel_uc *uc)
{
	intel_gsc_uc_fini(&uc->gsc);
	intel_huc_fini(&uc->huc);
	intel_guc_fini(&uc->guc);
}

static int __uc_sanitize(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));

	intel_huc_sanitize(huc);
	intel_guc_sanitize(guc);

	return __intel_uc_reset_hw(uc);
}
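
/*
 * The WOPCM registers written below are expected to lock after the first
 * valid write (GUC_WOPCM_SIZE_LOCKED / GUC_WOPCM_OFFSET_VALID), which is why
 * uc_init_wopcm() uses write-and-verify: if a previous driver instance
 * already programmed a different partitioning, the verify fails and we bail
 * out rather than run with a mismatched layout.
 */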
/* Initialize and verify the uC regs related to uC positioning in WOPCM */
static int uc_init_wopcm(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;
	u32 base = intel_wopcm_guc_base(&gt->wopcm);
	u32 size = intel_wopcm_guc_size(&gt->wopcm);
	u32 huc_agent = intel_uc_uses_huc(uc) ? HUC_LOADING_AGENT_GUC : 0;
	u32 mask;
	int err;

	if (unlikely(!base || !size)) {
		gt_probe_error(gt, "Unsuccessful WOPCM partitioning\n");
		return -E2BIG;
	}

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!(base & GUC_WOPCM_OFFSET_MASK));
	GEM_BUG_ON(base & ~GUC_WOPCM_OFFSET_MASK);
	GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK));
	GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);

	err = i915_inject_probe_error(gt->i915, -ENXIO);
	if (err)
		return err;

	mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
	err = intel_uncore_write_and_verify(uncore, GUC_WOPCM_SIZE, size, mask,
					    size | GUC_WOPCM_SIZE_LOCKED);
	if (err)
		goto err_out;

	mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
	err = intel_uncore_write_and_verify(uncore, DMA_GUC_WOPCM_OFFSET,
					    base | huc_agent, mask,
					    base | huc_agent |
					    GUC_WOPCM_OFFSET_VALID);
	if (err)
		goto err_out;

	return 0;

err_out:
	gt_probe_error(gt, "Failed to init uC WOPCM registers!\n");
	gt_probe_error(gt, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
		       i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
		       intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
	gt_probe_error(gt, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
		       i915_mmio_reg_offset(GUC_WOPCM_SIZE),
		       intel_uncore_read(uncore, GUC_WOPCM_SIZE));

	return err;
}

static bool uc_is_wopcm_locked(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct intel_uncore *uncore = gt->uncore;

	return (intel_uncore_read(uncore, GUC_WOPCM_SIZE) & GUC_WOPCM_SIZE_LOCKED) ||
	       (intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET) & GUC_WOPCM_OFFSET_VALID);
}

static int __uc_check_hw(struct intel_uc *uc)
{
	if (!intel_uc_supports_guc(uc))
		return 0;

	/*
	 * We can silently continue without GuC only if it was never enabled
	 * before on this system after reboot, otherwise we risk GPU hangs.
	 * To check if GuC was loaded before we look at WOPCM registers.
	 */
	if (uc_is_wopcm_locked(uc))
		return -EIO;

	return 0;
}

static void print_fw_ver(struct intel_gt *gt, struct intel_uc_fw *fw)
{
	gt_info(gt, "%s firmware %s version %u.%u.%u\n",
		intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
		fw->file_selected.ver.major,
		fw->file_selected.ver.minor,
		fw->file_selected.ver.patch);
}
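
/*
 * Bring the uC hardware up. Rough sequence (see the body below): partition
 * WOPCM, reset/sanitize, upload the HuC and GuC images (with retries on
 * Gen9), enable communication, authenticate the HuC and finally turn on
 * GuC submission and SLPC where requested.
 */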
static int __uc_init_hw(struct intel_uc *uc)
{
	struct intel_gt *gt = uc_to_gt(uc);
	struct drm_i915_private *i915 = gt->i915;
	struct intel_guc *guc = &uc->guc;
	struct intel_huc *huc = &uc->huc;
	int ret, attempts;

	GEM_BUG_ON(!intel_uc_supports_guc(uc));
	GEM_BUG_ON(!intel_uc_wants_guc(uc));

	print_fw_ver(gt, &guc->fw);

	if (intel_uc_uses_huc(uc))
		print_fw_ver(gt, &huc->fw);

	if (!intel_uc_fw_is_loadable(&guc->fw)) {
		ret = __uc_check_hw(uc) ||
		      intel_uc_fw_is_overridden(&guc->fw) ||
		      intel_uc_wants_guc_submission(uc) ?
		      intel_uc_fw_status_to_error(guc->fw.status) : 0;
		goto err_out;
	}

	ret = uc_init_wopcm(uc);
	if (ret)
		goto err_out;

	intel_guc_reset_interrupts(guc);

	/* WaEnableuKernelHeaderValidFix:skl */
	/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
	if (GRAPHICS_VER(i915) == 9)
		attempts = 3;
	else
		attempts = 1;

	intel_rps_raise_unslice(&uc_to_gt(uc)->rps);

	while (attempts--) {
		/*
		 * Always reset the GuC just before (re)loading, so
		 * that the state and timing are fairly predictable.
		 */
		ret = __uc_sanitize(uc);
		if (ret)
			goto err_out;

		intel_huc_fw_upload(huc);
		intel_guc_ads_reset(guc);
		intel_guc_write_params(guc);
		ret = intel_guc_fw_upload(guc);
		if (ret == 0)
			break;

		gt_dbg(gt, "GuC fw load failed (%pe) will reset and retry %d more time(s)\n",
		       ERR_PTR(ret), attempts);
	}

	/* Did we succeed or run out of retries? */
	if (ret)
		goto err_log_capture;

	ret = guc_enable_communication(guc);
	if (ret)
		goto err_log_capture;

	/*
	 * GSC-loaded HuC is authenticated by the GSC, so we don't need to
	 * trigger the auth here. However, given that the HuC loaded this way
	 * survives GT reset, we still need to update our SW bookkeeping to
	 * make sure it reflects the correct HW status.
	 */
	if (intel_huc_is_loaded_by_gsc(huc))
		intel_huc_update_auth_status(huc);
	else
		intel_huc_auth(huc);

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_enable(guc);

	if (intel_uc_uses_guc_slpc(uc)) {
		ret = intel_guc_slpc_enable(&guc->slpc);
		if (ret)
			goto err_submission;
	} else {
		/* Restore GT back to RPn for non-SLPC path */
		intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
	}

	intel_gsc_uc_load_start(&uc->gsc);

	gt_info(gt, "GuC submission %s\n",
		str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
	gt_info(gt, "GuC SLPC %s\n",
		str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));

	return 0;

	/*
	 * We've failed to load the firmware :(
	 */
err_submission:
	intel_guc_submission_disable(guc);
err_log_capture:
	__uc_capture_load_err_log(uc);
err_out:
	/* Return GT back to RPn */
	intel_rps_lower_unslice(&uc_to_gt(uc)->rps);

	__uc_sanitize(uc);

	if (!ret) {
		gt_notice(gt, "GuC is uninitialized\n");
		/* We want to run without GuC submission */
		return 0;
	}

	gt_probe_error(gt, "GuC initialization failed %pe\n", ERR_PTR(ret));

	/* We want to keep KMS alive */
	return -EIO;
}

static void __uc_fini_hw(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_fw_running(guc))
		return;

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_disable(guc);

	__uc_sanitize(uc);
}
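
/*
 * Reset handling: the intel_uc_reset*() entry points below bracket a GT
 * reset. intel_uc_reset_prepare() runs before the HW reset, intel_uc_reset()
 * runs while the reset is being processed, and intel_uc_reset_finish() runs
 * afterwards, keeping GuC submission state consistent across the reset.
 */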
/**
 * intel_uc_reset_prepare - Prepare for reset
 * @uc: the intel_uc structure
 *
 * Prepare for a full GPU reset.
 */
void intel_uc_reset_prepare(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	uc->reset_in_progress = true;

	/* Nothing to do if GuC isn't supported */
	if (!intel_uc_supports_guc(uc))
		return;

	/* Firmware expected to be running when this function is called */
	if (!intel_guc_is_ready(guc))
		goto sanitize;

	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset_prepare(guc);

sanitize:
	__uc_sanitize(uc);
}

void intel_uc_reset(struct intel_uc *uc, intel_engine_mask_t stalled)
{
	struct intel_guc *guc = &uc->guc;

	/* Firmware can not be running when this function is called */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset(guc, stalled);
}

void intel_uc_reset_finish(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	uc->reset_in_progress = false;

	/* Firmware expected to be running when this function is called */
	if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc))
		intel_guc_submission_reset_finish(guc);
}

void intel_uc_cancel_requests(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	/* Firmware can not be running when this function is called */
	if (intel_uc_uses_guc_submission(uc))
		intel_guc_submission_cancel_requests(guc);
}

void intel_uc_runtime_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;

	if (!intel_guc_is_ready(guc)) {
		guc->interrupts.enabled = false;
		return;
	}

	/*
	 * Wait for any outstanding CTB messages before tearing down
	 * communication with the GuC.
	 */
#define OUTSTANDING_CTB_TIMEOUT_PERIOD (HZ / 5)
	intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
				       false, OUTSTANDING_CTB_TIMEOUT_PERIOD);
	GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));

	guc_disable_communication(guc);
}

void intel_uc_suspend(struct intel_uc *uc)
{
	struct intel_guc *guc = &uc->guc;
	intel_wakeref_t wakeref;
	int err;

	/* flush the GSC worker */
	intel_gsc_uc_suspend(&uc->gsc);

	if (!intel_guc_is_ready(guc)) {
		guc->interrupts.enabled = false;
		return;
	}

	with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
		err = intel_guc_suspend(guc);
		if (err)
			guc_dbg(guc, "Failed to suspend, %pe\n", ERR_PTR(err));
	}
}
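
/*
 * Note the asymmetry between the two suspend paths above: runtime suspend
 * keeps the firmware in place and only tears down communication, while full
 * suspend also calls intel_guc_suspend(), since the GT is sanitized and the
 * firmware reloaded on the way out of S3/S4 (see intel_uc_resume() below).
 */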
static int __uc_resume(struct intel_uc *uc, bool enable_communication)
{
	struct intel_guc *guc = &uc->guc;
	struct intel_gt *gt = guc_to_gt(guc);
	int err;

	if (!intel_guc_is_fw_running(guc))
		return 0;

	/* Make sure we enable communication if and only if it's disabled */
	GEM_BUG_ON(enable_communication == intel_guc_ct_enabled(&guc->ct));

	if (enable_communication)
		guc_enable_communication(guc);

	/*
	 * If we are only resuming GuC communication but not reloading
	 * GuC, we need to ensure the ARAT timer interrupt is enabled
	 * again. In case of GuC reload, it is enabled during SLPC enable.
	 */
	if (enable_communication && intel_uc_uses_guc_slpc(uc))
		intel_guc_pm_intrmsk_enable(gt);

	err = intel_guc_resume(guc);
	if (err) {
		guc_dbg(guc, "Failed to resume, %pe\n", ERR_PTR(err));
		return err;
	}

	return 0;
}

int intel_uc_resume(struct intel_uc *uc)
{
	/*
	 * When coming out of S3/S4 we sanitize and re-init the HW, so
	 * communication is already re-enabled at this point.
	 */
	return __uc_resume(uc, false);
}

int intel_uc_runtime_resume(struct intel_uc *uc)
{
	/*
	 * During runtime resume we don't sanitize, so we need to re-init
	 * communication as well.
	 */
	return __uc_resume(uc, true);
}

static const struct intel_uc_ops uc_ops_off = {
	.init_hw = __uc_check_hw,
	.fini = __uc_fini, /* to clean up the init_early initialization */
};

static const struct intel_uc_ops uc_ops_on = {
	.sanitize = __uc_sanitize,

	.init_fw = __uc_fetch_firmwares,
	.fini_fw = __uc_cleanup_firmwares,

	.init = __uc_init,
	.fini = __uc_fini,

	.init_hw = __uc_init_hw,
	.fini_hw = __uc_fini_hw,
};
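
/*
 * For context (a sketch, not the actual definitions): the ops tables above
 * are invoked through thin wrappers generated in intel_uc.h, conceptually
 * along the lines of:
 *
 *	int intel_uc_init_hw(struct intel_uc *uc)
 *	{
 *		if (uc->ops->init_hw)
 *			return uc->ops->init_hw(uc);
 *
 *		return 0;
 *	}
 *
 * so callers always go through uc->ops, which intel_uc_init_early() pointed
 * at either uc_ops_on or uc_ops_off.
 */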