// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc.h"

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_errors_abi.h"
#include "generated/xe_wa_oob.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_guc_ads.h"
#include "xe_guc_ct.h"
#include "xe_guc_hwconfig.h"
#include "xe_guc_log.h"
#include "xe_guc_pc.h"
#include "xe_guc_submit.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
#include "xe_wopcm.h"

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000
static u32 guc_bo_ggtt_addr(struct xe_guc *guc,
			    struct xe_bo *bo)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 addr = xe_bo_ggtt_addr(bo);

	xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));
	xe_assert(xe, addr < GUC_GGTT_TOP);
	xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr);

	return addr;
}

static u32 guc_ctl_debug_flags(struct xe_guc *guc)
{
	u32 level = xe_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
	return GUC_CTL_ENABLE_SLPC;
}

static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
{
	u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define LOG_UNIT SZ_1M
	#define LOG_FLAG GUC_LOG_LOG_ALLOC_UNITS
	#else
	#define LOG_UNIT SZ_4K
	#define LOG_FLAG 0
	#endif

	#if (((CAPTURE_BUFFER_SIZE) % SZ_1M) == 0)
	#define CAPTURE_UNIT SZ_1M
	#define CAPTURE_FLAG GUC_LOG_CAPTURE_ALLOC_UNITS
	#else
	#define CAPTURE_UNIT SZ_4K
	#define CAPTURE_FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, LOG_UNIT));
	BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
		     (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
		     (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
	BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
		     (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		CAPTURE_FLAG |
		LOG_FLAG |
		((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
		((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) <<
		 GUC_LOG_CAPTURE_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef LOG_UNIT
	#undef LOG_FLAG
	#undef CAPTURE_UNIT
	#undef CAPTURE_FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct xe_guc *guc)
{
	u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

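/*
 * Workaround flags passed to the GuC firmware through the GUC_CTL_WA boot
 * parameter (see guc_init_params_post_hwconfig() below).
 */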
static u32 guc_ctl_wa_flags(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 flags = 0;

	if (XE_WA(gt, 22012773006))
		flags |= GUC_WA_POLLCS;

	if (XE_WA(gt, 16011759253))
		flags |= GUC_WA_GAM_CREDITS;

	if (XE_WA(gt, 14014475959))
		flags |= GUC_WA_HOLD_CCS_SWITCHOUT;

	if (XE_WA(gt, 22011391025) || XE_WA(gt, 14012197797))
		flags |= GUC_WA_DUAL_QUEUE;

	/*
	 * Wa_22011802037: FIXME - there's more to be done than simply setting
	 * this flag: make sure each CS is stopped when preparing for GT reset
	 * and wait for pending MI_FW.
	 */
	if (GRAPHICS_VERx100(xe) < 1270)
		flags |= GUC_WA_PRE_PARSER;

	if (XE_WA(gt, 16011777198))
		flags |= GUC_WA_RCS_RESET_BEFORE_RC6;

	if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
		flags |= GUC_WA_CONTEXT_ISOLATION;

	if ((XE_WA(gt, 16015675438) || XE_WA(gt, 18020744125)) &&
	    !xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
		flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;

	if (XE_WA(gt, 1509372804))
		flags |= GUC_WA_RENDER_RST_RC6_EXIT;

	return flags;
}

static u32 guc_ctl_devid(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);

	return (((u32)xe->info.devid) << 16) | xe->info.revid;
}

static void guc_init_params(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = 0;
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = 0;
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
}

static void guc_init_params_post_hwconfig(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
	BUILD_BUG_ON(GUC_CTL_MAX_DWORDS + 2 != SOFT_SCRATCH_COUNT);

	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);
	params[GUC_CTL_WA] = guc_ctl_wa_flags(guc);
	params[GUC_CTL_DEVID] = guc_ctl_devid(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		drm_dbg(&xe->drm, "GuC param[%2d] = 0x%08x\n", i, params[i]);
}

/*
 * Initialize the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_write_params(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	int i;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(gt, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]);
}

static void guc_fini(struct drm_device *drm, void *arg)
{
	struct xe_guc *guc = arg;

	xe_force_wake_get(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
	xe_guc_pc_fini(&guc->pc);
	xe_uc_fini_hw(&guc_to_gt(guc)->uc);
	xe_force_wake_put(gt_to_fw(guc_to_gt(guc)), XE_FORCEWAKE_ALL);
}

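/**
 * xe_guc_init - Initialize the GuC software state
 * @guc: The GuC object
 *
 * Initialize the GuC firmware descriptor and the log, ADS, CT and PC
 * sub-components, register the cleanup action, compute the boot parameters
 * and select the notify (doorbell) register for this GT. On success the
 * firmware status is set to loadable.
 *
 * Return: 0 on success, negative error code on error.
 */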
int xe_guc_init(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	int ret;

	guc->fw.type = XE_UC_FW_TYPE_GUC;
	ret = xe_uc_fw_init(&guc->fw);
	if (ret)
		goto out;

	if (!xe_uc_fw_is_enabled(&guc->fw))
		return 0;

	ret = xe_guc_log_init(&guc->log);
	if (ret)
		goto out;

	ret = xe_guc_ads_init(&guc->ads);
	if (ret)
		goto out;

	ret = xe_guc_ct_init(&guc->ct);
	if (ret)
		goto out;

	ret = xe_guc_pc_init(&guc->pc);
	if (ret)
		goto out;

	ret = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, guc_fini, guc);
	if (ret)
		goto out;

	guc_init_params(guc);

	if (xe_gt_is_media_type(gt))
		guc->notify_reg = MED_GUC_HOST_INTERRUPT;
	else
		guc->notify_reg = GUC_HOST_INTERRUPT;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out:
	drm_err(&xe->drm, "GuC init failed with %d", ret);
	return ret;
}

/**
 * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_init_post_hwconfig(struct xe_guc *guc)
{
	guc_init_params_post_hwconfig(guc);

	return xe_guc_ads_init_post_hwconfig(&guc->ads);
}

int xe_guc_post_load_init(struct xe_guc *guc)
{
	xe_guc_ads_populate_post_load(&guc->ads);
	guc->submission_state.enabled = true;

	return 0;
}

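/**
 * xe_guc_reset - Reset the GuC via the GDRST register
 * @guc: The GuC object
 *
 * Trigger a GuC-only domain reset, wait for the hardware to clear the reset
 * bit and verify that the MIA core reports being in reset.
 *
 * Return: 0 on success, negative error code on error.
 */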
int xe_guc_reset(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 guc_status, gdrst;
	int ret;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	xe_mmio_write32(gt, GDRST, GRDOM_GUC);

	ret = xe_mmio_wait32(gt, GDRST, GRDOM_GUC, 0, 5000, &gdrst, false);
	if (ret) {
		drm_err(&xe->drm, "GuC reset timed out, GDRST=0x%8x\n",
			gdrst);
		goto err_out;
	}

	guc_status = xe_mmio_read32(gt, GUC_STATUS);
	if (!(guc_status & GS_MIA_IN_RESET)) {
		drm_err(&xe->drm,
			"GuC status: 0x%x, MIA core expected to be in reset\n",
			guc_status);
		ret = -EIO;
		goto err_out;
	}

	return 0;

err_out:

	return ret;
}

static void guc_prepare_xfer(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	struct xe_device *xe = guc_to_xe(guc);
	u32 shim_flags = GUC_ENABLE_READ_CACHE_LOGIC |
		GUC_ENABLE_READ_CACHE_FOR_SRAM_DATA |
		GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
		GUC_ENABLE_MIA_CLOCK_GATING;

	if (GRAPHICS_VERx100(xe) < 1250)
		shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
				GUC_ENABLE_MIA_CACHING;

	if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC)
		shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index);

	/* Must program this register before loading the ucode with DMA */
	xe_mmio_write32(gt, GUC_SHIM_CONTROL, shim_flags);

	xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
}

/*
 * Supporting MMIO & in memory RSA
 */
static int guc_xfer_rsa(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 rsa[UOS_RSA_SCRATCH_COUNT];
	size_t copied;
	int i;

	if (guc->fw.rsa_size > 256) {
		u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
				    xe_uc_fw_rsa_offset(&guc->fw);
		xe_mmio_write32(gt, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
		return 0;
	}

	copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa));
	if (copied < sizeof(rsa))
		return -ENOMEM;

	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
		xe_mmio_write32(gt, UOS_RSA_SCRATCH(i), rsa[i]);

	return 0;
}

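/* Poll GUC_STATUS until the GuC microkernel reports the READY load status */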
static int guc_wait_ucode(struct xe_guc *guc)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 status;
	int ret;

	/*
	 * Wait for the GuC to start up.
	 * NB: Docs recommend not using the interrupt for completion.
	 * Measurements indicate this should take no more than 20ms
	 * (assuming the GT clock is at maximum frequency). So, a
	 * timeout here indicates that the GuC has failed and is unusable.
	 * (Higher levels of the driver may decide to reset the GuC and
	 * attempt the ucode load again if this happens.)
	 *
	 * FIXME: There is a known (but exceedingly unlikely) race condition
	 * where the asynchronous frequency management code could reduce
	 * the GT clock while a GuC reload is in progress (during a full
	 * GT reset). A fix is in progress but there are complex locking
	 * issues to be resolved. In the meantime bump the timeout to
	 * 200ms. Even at slowest clock, this should be sufficient. And
	 * in the working case, a larger timeout makes no difference.
	 */
	ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS, GS_UKERNEL_MASK,
			     FIELD_PREP(GS_UKERNEL_MASK, XE_GUC_LOAD_STATUS_READY),
			     200000, &status, false);

	if (ret) {
		struct drm_device *drm = &xe->drm;
		struct drm_printer p = drm_info_printer(drm->dev);

		drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
		drm_info(drm, "GuC load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
			 REG_FIELD_GET(GS_MIA_IN_RESET, status),
			 REG_FIELD_GET(GS_BOOTROM_MASK, status),
			 REG_FIELD_GET(GS_UKERNEL_MASK, status),
			 REG_FIELD_GET(GS_MIA_MASK, status),
			 REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));

		if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
			drm_info(drm, "GuC firmware signature verification failed\n");
			ret = -ENOEXEC;
		}

		if (REG_FIELD_GET(GS_UKERNEL_MASK, status) ==
		    XE_GUC_LOAD_STATUS_EXCEPTION) {
			drm_info(drm, "GuC firmware exception. EIP: %#x\n",
				 xe_mmio_read32(guc_to_gt(guc),
						SOFT_SCRATCH(13)));
			ret = -ENXIO;
		}

		xe_guc_log_print(&guc->log, &p);
	} else {
		drm_dbg(&xe->drm, "GuC successfully loaded");
	}

	return ret;
}

static int __xe_guc_upload(struct xe_guc *guc)
{
	int ret;

	guc_write_params(guc);
	guc_prepare_xfer(guc);

	/*
	 * Note that GuC needs the CSS header plus uKernel code to be copied
	 * by the DMA engine in one operation, whereas the RSA signature is
	 * loaded separately, either by copying it to the UOS_RSA_SCRATCH
	 * register (if key size <= 256) or through a ggtt-pinned vma (if key
	 * size > 256). The RSA size and therefore the way we provide it to the
	 * HW is fixed for each platform and hard-coded in the bootrom.
	 */
	ret = guc_xfer_rsa(guc);
	if (ret)
		goto out;
	/*
	 * Current uCode expects the code to be loaded at 8k; locations below
	 * this are used for the stack.
	 */
	ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE);
	if (ret)
		goto out;

	/* Wait for authentication */
	ret = guc_wait_ucode(guc);
	if (ret)
		goto out;

	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
	return 0;

out:
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
	return 0 /* FIXME: ret, don't want to stop load currently */;
}

/**
 * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
 * @guc: The GuC object
 *
 * This function uploads a minimal GuC that does not support submissions but
 * is in a state where the hwconfig table can be read. Next, it reads and
 * parses the hwconfig table so it can be used for subsequent steps in the
 * driver load. Lastly, it enables CT communication (XXX: this is needed for
 * PFs/VFs only).
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
{
	int ret;

	xe_guc_ads_populate_minimal(&guc->ads);

	ret = __xe_guc_upload(guc);
	if (ret)
		return ret;

	ret = xe_guc_hwconfig_init(guc);
	if (ret)
		return ret;

	ret = xe_guc_enable_communication(guc);
	if (ret)
		return ret;

	return 0;
}

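/**
 * xe_guc_upload - populate the full ADS and upload the GuC firmware
 * @guc: The GuC object
 *
 * Return: 0 on success, negative error code on error.
 */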
int xe_guc_upload(struct xe_guc *guc)
{
	xe_guc_ads_populate(&guc->ads);

	return __xe_guc_upload(guc);
}

static void guc_handle_mmio_msg(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 msg;

	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);

	msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
	msg &= XE_GUC_RECV_MSG_EXCEPTION |
		XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
	xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);

	if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
		drm_err(&guc_to_xe(guc)->drm,
			"Received early GuC crash dump notification!\n");

	if (msg & XE_GUC_RECV_MSG_EXCEPTION)
		drm_err(&guc_to_xe(guc)->drm,
			"Received early GuC exception notification!\n");
}

static void guc_enable_irq(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 events = xe_gt_is_media_type(gt) ?
		REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
		REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

	/* Primary GuC and media GuC share a single enable bit */
	xe_mmio_write32(gt, GUC_SG_INTR_ENABLE,
			REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));

	/*
	 * There are separate mask bits for primary and media GuCs, so use
	 * a RMW operation to avoid clobbering the other GuC's setting.
	 */
	xe_mmio_rmw32(gt, GUC_SG_INTR_MASK, events, 0);
}

int xe_guc_enable_communication(struct xe_guc *guc)
{
	int err;

	guc_enable_irq(guc);

	xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
		      ARAT_EXPIRED_INTRMSK, 0);

	err = xe_guc_ct_enable(&guc->ct);
	if (err)
		return err;

	guc_handle_mmio_msg(guc);

	return 0;
}

int xe_guc_suspend(struct xe_guc *guc)
{
	int ret;
	u32 action[] = {
		XE_GUC_ACTION_CLIENT_SOFT_RESET,
	};

	ret = xe_guc_mmio_send(guc, action, ARRAY_SIZE(action));
	if (ret) {
		drm_err(&guc_to_xe(guc)->drm,
			"GuC suspend: CLIENT_SOFT_RESET fail: %d!\n", ret);
		return ret;
	}

	xe_guc_sanitize(guc);
	return 0;
}

void xe_guc_notify(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	const u32 default_notify_data = 0;

	/*
	 * Both GUC_HOST_INTERRUPT and MED_GUC_HOST_INTERRUPT can pass
	 * additional payload data to the GuC but this capability is not
	 * used by the firmware yet. Use default value in the meantime.
	 */
	xe_mmio_write32(gt, guc->notify_reg, default_notify_data);
}

int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr)
{
	u32 action[] = {
		XE_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_addr
	};

	return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action));
}

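/**
 * xe_guc_mmio_send_recv - send a GuC HXG request over MMIO and read the reply
 * @guc: The GuC object
 * @request: the HXG request; request[0] must carry origin HOST and type REQUEST
 * @len: length of @request in dwords
 * @response_buf: optional buffer of VF_SW_FLAG_COUNT dwords for the full reply
 *
 * Only usable while CT communication is disabled. The request is written to
 * the VF_SW_FLAG (or MED_VF_SW_FLAG) scratch registers and the GuC is then
 * notified via its doorbell register; BUSY and RETRY replies are handled
 * internally.
 *
 * Return: DATA0 from the GuC response on success, negative error code on
 * failure.
 */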
int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
			  u32 len, u32 *response_buf)
{
	struct xe_device *xe = guc_to_xe(guc);
	struct xe_gt *gt = guc_to_gt(guc);
	u32 header, reply;
	struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
		MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
	const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
	int ret;
	int i;

	BUILD_BUG_ON(VF_SW_FLAG_COUNT != MED_VF_SW_FLAG_COUNT);

	xe_assert(xe, !guc->ct.enabled);
	xe_assert(xe, len);
	xe_assert(xe, len <= VF_SW_FLAG_COUNT);
	xe_assert(xe, len <= MED_VF_SW_FLAG_COUNT);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) ==
		  GUC_HXG_ORIGIN_HOST);
	xe_assert(xe, FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) ==
		  GUC_HXG_TYPE_REQUEST);

retry:
	/* Not in critical data-path, just do if else for GT type */
	if (xe_gt_is_media_type(gt)) {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(gt, MED_VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX));
	} else {
		for (i = 0; i < len; ++i)
			xe_mmio_write32(gt, VF_SW_FLAG(i),
					request[i]);
		xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX));
	}

	xe_guc_notify(guc);

	ret = xe_mmio_wait32(gt, reply_reg, GUC_HXG_MSG_0_ORIGIN,
			     FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
			     50000, &reply, false);
	if (ret) {
timeout:
		drm_err(&xe->drm, "mmio request %#x: no reply %#x\n",
			request[0], reply);
		return ret;
	}

	header = xe_mmio_read32(gt, reply_reg);
	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
		/*
		 * Once we get a BUSY reply we must wait again for the final
		 * response, but this time we can't use the ORIGIN mask
		 * anymore. To spot the right change in the reply, we take
		 * advantage of the fact that the SUCCESS and FAILURE response
		 * types differ only by a single bit and all other bits are
		 * set, so they can be used as a new mask.
		 */
		u32 resp_bits = GUC_HXG_TYPE_RESPONSE_SUCCESS & GUC_HXG_TYPE_RESPONSE_FAILURE;
		u32 resp_mask = FIELD_PREP(GUC_HXG_MSG_0_TYPE, resp_bits);

		BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS);
		BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);

		ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask,
				     1000000, &header, false);

		if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
			     GUC_HXG_ORIGIN_GUC))
			goto proto;
		if (unlikely(ret))
			goto timeout;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
		u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);

		drm_dbg(&xe->drm, "mmio request %#x: retrying, reason %#x\n",
			request[0], reason);
		goto retry;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) ==
	    GUC_HXG_TYPE_RESPONSE_FAILURE) {
		u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
		u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);

		drm_err(&xe->drm, "mmio request %#x: failure %#x/%#x\n",
			request[0], error, hint);
		return -ENXIO;
	}

	if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) !=
	    GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
		drm_err(&xe->drm, "mmio request %#x: unexpected reply %#x\n",
			request[0], header);
		return -EPROTO;
	}

	/* Just copy entire possible message response */
	if (response_buf) {
		response_buf[0] = header;

		for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
			reply_reg.addr += sizeof(u32);
			response_buf[i] = xe_mmio_read32(gt, reply_reg);
		}
	}

	/* Use data from the GuC response as our return value */
	return FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
}

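/**
 * xe_guc_mmio_send - send a GuC HXG request over MMIO, ignoring the payload
 * @guc: The GuC object
 * @request: the HXG request message
 * @len: length of @request in dwords
 *
 * Wrapper around xe_guc_mmio_send_recv() that does not capture the full
 * response message.
 *
 * Return: DATA0 from the GuC response on success, negative error code on
 * failure.
 */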
int xe_guc_mmio_send(struct xe_guc *guc, const u32 *request, u32 len)
{
	return xe_guc_mmio_send_recv(guc, request, len, NULL);
}

static int guc_self_cfg(struct xe_guc *guc, u16 key, u16 len, u64 val)
{
	struct xe_device *xe = guc_to_xe(guc);
	u32 request[HOST2GUC_SELF_CFG_REQUEST_MSG_LEN] = {
		FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			   GUC_ACTION_HOST2GUC_SELF_CFG),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_KEY, key) |
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_1_KLV_LEN, len),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_2_VALUE32,
			   lower_32_bits(val)),
		FIELD_PREP(HOST2GUC_SELF_CFG_REQUEST_MSG_3_VALUE64,
			   upper_32_bits(val)),
	};
	int ret;

	xe_assert(xe, len <= 2);
	xe_assert(xe, len != 1 || !upper_32_bits(val));

	/* Self config must go over MMIO */
	ret = xe_guc_mmio_send(guc, request, ARRAY_SIZE(request));

	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret > 1))
		return -EPROTO;
	if (unlikely(!ret))
		return -ENOKEY;

	return 0;
}

int xe_guc_self_cfg32(struct xe_guc *guc, u16 key, u32 val)
{
	return guc_self_cfg(guc, key, 1, val);
}

int xe_guc_self_cfg64(struct xe_guc *guc, u16 key, u64 val)
{
	return guc_self_cfg(guc, key, 2, val);
}

void xe_guc_irq_handler(struct xe_guc *guc, const u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		xe_guc_ct_irq_handler(&guc->ct);
}

void xe_guc_sanitize(struct xe_guc *guc)
{
	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
	xe_guc_ct_disable(&guc->ct);
	guc->submission_state.enabled = false;
}

int xe_guc_reset_prepare(struct xe_guc *guc)
{
	return xe_guc_submit_reset_prepare(guc);
}

void xe_guc_reset_wait(struct xe_guc *guc)
{
	xe_guc_submit_reset_wait(guc);
}

void xe_guc_stop_prepare(struct xe_guc *guc)
{
	XE_WARN_ON(xe_guc_pc_stop(&guc->pc));
}

int xe_guc_stop(struct xe_guc *guc)
{
	int ret;

	xe_guc_ct_disable(&guc->ct);

	ret = xe_guc_submit_stop(guc);
	if (ret)
		return ret;

	return 0;
}

int xe_guc_start(struct xe_guc *guc)
{
	int ret;

	ret = xe_guc_pc_start(&guc->pc);
	XE_WARN_ON(ret);

	return xe_guc_submit_start(guc);
}

void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int err;
	int i;

	xe_uc_fw_print(&guc->fw, p);

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return;

	status = xe_mmio_read32(gt, GUC_STATUS);

	drm_printf(p, "\nGuC status 0x%08x:\n", status);
	drm_printf(p, "\tBootrom status = 0x%x\n",
		   REG_FIELD_GET(GS_BOOTROM_MASK, status));
	drm_printf(p, "\tuKernel status = 0x%x\n",
		   REG_FIELD_GET(GS_UKERNEL_MASK, status));
	drm_printf(p, "\tMIA Core status = 0x%x\n",
		   REG_FIELD_GET(GS_MIA_MASK, status));
	drm_printf(p, "\tLog level = %d\n",
		   xe_guc_log_get_level(&guc->log));

	drm_puts(p, "\nScratch registers:\n");
	for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
		drm_printf(p, "\t%2d: \t0x%x\n",
			   i, xe_mmio_read32(gt, SOFT_SCRATCH(i)));
	}

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);

	xe_guc_ct_print(&guc->ct, p, false);
	xe_guc_submit_print(guc, p);
}

/**
 * xe_guc_in_reset() - Detect if GuC MIA is in reset.
 * @guc: The GuC object
 *
 * This function detects runtime resume from d3cold by leveraging
 * GUC_STATUS; the GuC does not get reset during d3hot. It is strictly
 * to be called from the RPM resume handler.
 *
 * Return: true if forcewake could not be acquired or the GuC MIA is in
 * reset, otherwise false.
 */
bool xe_guc_in_reset(struct xe_guc *guc)
{
	struct xe_gt *gt = guc_to_gt(guc);
	u32 status;
	int err;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return true;

	status = xe_mmio_read32(gt, GUC_STATUS);
	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);

	return status & GS_MIA_IN_RESET;
}