// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/types.h>

#include "gt/intel_gt.h"
#include "gt/intel_rps.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "intel_huc_print.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "pxp/intel_pxp_cmd_interface_43.h"

#include <linux/device/bus.h>
#include <linux/mei_aux.h>

/**
 * DOC: HuC
 *
 * The HuC is a dedicated microcontroller for usage in media HEVC (High
 * Efficiency Video Coding) operations. Userspace can directly use the firmware
 * capabilities by adding HuC specific commands to batch buffers.
 *
 * The kernel driver is only responsible for loading the HuC firmware and
 * triggering its security authentication. This is done differently depending
 * on the platform:
 *
 * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
 *   and the authentication via GuC
 * - DG2: load and authentication are both performed via GSC.
 * - MTL and newer platforms: the load is performed via DMA (same as with
 *   not-DG2 older platforms), while the authentication is done in 2-steps,
 *   a first auth for clear-media workloads via GuC and a second one for all
 *   workloads via GSC.
 *
 * On platforms where the GuC does the authentication, to correctly do so the
 * HuC binary must be loaded before the GuC one.
 * Loading the HuC is optional; however, not using the HuC might negatively
 * impact power usage and/or performance of media workloads, depending on the
 * use-cases.
 * HuC must be reloaded on events that cause the WOPCM to lose its contents
 * (S3/S4, FLR); on older platforms the HuC must also be reloaded on GuC/GT
 * reset, while on newer ones it will survive that.
 *
 * See https://github.com/intel/media-driver for the latest details on HuC
 * functionality.
 */

/**
 * DOC: HuC Memory Management
 *
 * Similarly to the GuC, the HuC can't do any memory allocations on its own,
 * with the difference being that the allocations for HuC usage are handled by
 * the userspace driver instead of the kernel one. The HuC accesses the memory
 * via the PPGTT belonging to the context loaded on the VCS executing the
 * HuC-specific commands.
 */

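/*
 * Illustrative sketch (not kernel code): userspace is expected to check that
 * the HuC is usable before relying on HuC commands. One way to do that is the
 * I915_PARAM_HUC_STATUS getparam, whose values come from
 * intel_huc_check_status() below. Assuming libdrm's drmIoctl() and the uAPI
 * definitions from drm/i915_drm.h, with fd being the opened DRM device, the
 * check could look roughly like:
 *
 *	int val = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_HUC_STATUS,
 *		.value = &val,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && val > 0)
 *		... use the HuC (1 = all workloads, 2 = clear media only) ...
 */
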
/*
 * MEI-GSC load is an async process. The probing of the exposed aux device
 * (see intel_gsc.c) usually happens a few seconds after i915 probe, depending
 * on when the kernel schedules it. Unless something goes terribly wrong, we're
 * guaranteed for this to happen during boot, so the big timeout is a safety net
 * that we never expect to need.
 * MEI-PXP + HuC load usually takes ~300ms, but if the GSC needs to be resumed
 * and/or reset, this can take longer. Note that the kernel might schedule
 * other work between the i915 init/resume and the MEI one, which can add to
 * the delay.
 */
#define GSC_INIT_TIMEOUT_MS 10000
#define PXP_INIT_TIMEOUT_MS 5000

static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
				 enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

static void __delayed_huc_load_complete(struct intel_huc *huc)
{
	if (!i915_sw_fence_done(&huc->delayed_load.fence))
		i915_sw_fence_complete(&huc->delayed_load.fence);
}

static void delayed_huc_load_complete(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__delayed_huc_load_complete(huc);
}

static void __gsc_init_error(struct intel_huc *huc)
{
	huc->delayed_load.status = INTEL_HUC_DELAYED_LOAD_ERROR;
	__delayed_huc_load_complete(huc);
}

static void gsc_init_error(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__gsc_init_error(huc);
}

static void gsc_init_done(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);

	/* MEI-GSC init is done, now we wait for MEI-PXP to bind */
	huc->delayed_load.status = INTEL_HUC_WAITING_ON_PXP;
	if (!i915_sw_fence_done(&huc->delayed_load.fence))
		hrtimer_start(&huc->delayed_load.timer,
			      ms_to_ktime(PXP_INIT_TIMEOUT_MS),
			      HRTIMER_MODE_REL);
}

static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrtimer)
{
	struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer);

	if (!intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) {
		if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
			huc_notice(huc, "timed out waiting for MEI GSC\n");
		else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
			huc_notice(huc, "timed out waiting for MEI PXP\n");
		else
			MISSING_CASE(huc->delayed_load.status);

		__gsc_init_error(huc);
	}

	return HRTIMER_NORESTART;
}

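/*
 * Re-arm the delayed-load tracking: the fence is re-initialized so that
 * pending users of the HuC can wait on it, and the timer is started with the
 * timeout for the step (MEI-GSC probe or MEI-PXP bind) we're currently
 * waiting on. Called from intel_huc_update_auth_status() when the HuC is
 * loaded by the GSC but not yet fully authenticated.
 */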
static void huc_delayed_load_start(struct intel_huc *huc)
{
	ktime_t delay;

	GEM_BUG_ON(intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC));

	/*
	 * On resume we don't have to wait for MEI-GSC to be re-probed, but we
	 * do need to wait for MEI-PXP to reset & re-bind
	 */
	switch (huc->delayed_load.status) {
	case INTEL_HUC_WAITING_ON_GSC:
		delay = ms_to_ktime(GSC_INIT_TIMEOUT_MS);
		break;
	case INTEL_HUC_WAITING_ON_PXP:
		delay = ms_to_ktime(PXP_INIT_TIMEOUT_MS);
		break;
	default:
		gsc_init_error(huc);
		return;
	}

	/*
	 * This fence is always complete unless we're waiting for the
	 * GSC device to come up to load the HuC. We arm the fence here
	 * and complete it when we confirm that the HuC is loaded from
	 * the PXP bind callback.
	 */
	GEM_BUG_ON(!i915_sw_fence_done(&huc->delayed_load.fence));
	i915_sw_fence_fini(&huc->delayed_load.fence);
	i915_sw_fence_reinit(&huc->delayed_load.fence);
	i915_sw_fence_await(&huc->delayed_load.fence);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_start(&huc->delayed_load.timer, delay, HRTIMER_MODE_REL);
}

static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct intel_huc *huc = container_of(nb, struct intel_huc, delayed_load.nb);
	struct intel_gsc_intf *intf = &huc_to_gt(huc)->gsc.intf[0];

	if (!intf->adev || &intf->adev->aux_dev.dev != dev)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BOUND_DRIVER: /* mei driver bound to aux device */
		gsc_init_done(huc);
		break;

	case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
	case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
		huc_info(huc, "MEI driver not bound, disabling load\n");
		gsc_init_error(huc);
		break;
	}

	return 0;
}

void intel_huc_register_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
	int ret;

	if (!intel_huc_is_loaded_by_gsc(huc))
		return;

	huc->delayed_load.nb.notifier_call = gsc_notifier;
	ret = bus_register_notifier(bus, &huc->delayed_load.nb);
	if (ret) {
		huc_err(huc, "failed to register GSC notifier %pe\n", ERR_PTR(ret));
		huc->delayed_load.nb.notifier_call = NULL;
		gsc_init_error(huc);
	}
}

void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
	if (!huc->delayed_load.nb.notifier_call)
		return;

	delayed_huc_load_complete(huc);

	bus_unregister_notifier(bus, &huc->delayed_load.nb);
	huc->delayed_load.nb.notifier_call = NULL;
}

static void delayed_huc_load_init(struct intel_huc *huc)
{
	/*
	 * Initialize fence to be complete as this is expected to be complete
	 * unless there is a delayed HuC load in progress.
	 */
	i915_sw_fence_init(&huc->delayed_load.fence,
			   sw_fence_dummy_notify);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
}

static void delayed_huc_load_fini(struct intel_huc *huc)
{
	/*
	 * the fence is initialized in init_early, so we need to clean it up
	 * even if HuC loading is off.
	 */
	delayed_huc_load_complete(huc);
	i915_sw_fence_fini(&huc->delayed_load.fence);
}

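/*
 * Reset the HuC SW state, e.g. when the HW state has been lost (see the
 * reload conditions in the DOC section above): any delayed load still in
 * flight is completed, since there is nothing left to wait for, and the
 * firmware status is rolled back via intel_uc_fw_sanitize() so that a fresh
 * load can be performed afterwards.
 */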
int intel_huc_sanitize(struct intel_huc *huc)
{
	delayed_huc_load_complete(huc);
	intel_uc_fw_sanitize(&huc->fw);
	return 0;
}

static bool vcs_supported(struct intel_gt *gt)
{
	intel_engine_mask_t mask = gt->info.engine_mask;

	/*
	 * We reach here from i915_driver_early_probe for the primary GT before
	 * its engine mask is set, so we use the device info engine mask for it;
	 * this means we're not taking VCS fusing into account, but if the
	 * primary GT supports VCS engines we expect at least one of them to
	 * remain unfused so we're fine.
	 * For other GTs we expect the GT-specific mask to be set before we
	 * call this function.
	 */
	GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);

	if (gt_is_root(gt))
		mask = INTEL_INFO(gt->i915)->platform_engine_mask;
	else
		mask = gt->info.engine_mask;

	return __ENGINE_INSTANCES_MASK(mask, VCS0, I915_MAX_VCS);
}

void intel_huc_init_early(struct intel_huc *huc)
{
	struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
	struct intel_gt *gt = huc_to_gt(huc);

	intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, true);

	/*
	 * we always init the fence as already completed, even if HuC is not
	 * supported. This way we don't have to distinguish between HuC not
	 * supported/disabled or already loaded, and can focus on if the load
	 * is currently in progress (fence not complete) or not, which is what
	 * we care about for stalling userspace submissions.
	 */
	delayed_huc_load_init(huc);

	if (!vcs_supported(gt)) {
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
		return;
	}

	if (GRAPHICS_VER(i915) >= 11) {
		huc->status[INTEL_HUC_AUTH_BY_GUC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_LOAD_SUCCESSFUL;
		huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status[INTEL_HUC_AUTH_BY_GUC].reg = HUC_STATUS2;
		huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_FW_VERIFIED;
		huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_FW_VERIFIED;
	}

	if (IS_DG2(i915)) {
		huc->status[INTEL_HUC_AUTH_BY_GSC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL;
		huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS(MTL_GSC_HECI1_BASE, 5);
		huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI1_FWSTS5_HUC_AUTH_DONE;
		huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI1_FWSTS5_HUC_AUTH_DONE;
	}
}

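/*
 * Combinations of HW load mode (GSC_LOADS_HUC fuse) and blob format handled
 * by check_huc_loading_mode() below:
 *
 *   fuse = DMA, blob = legacy      -> OK (legacy load, GuC auth)
 *   fuse = DMA, blob = GSC-enabled -> OK only if the legacy-style binary
 *                                     inside the blob was located at fetch
 *                                     time (fw.dma_start_offset set)
 *   fuse = GSC, blob = GSC-enabled -> OK, provided the GSC is reachable
 *                                     (mei components on DG2, GSCCS on newer
 *                                     platforms)
 *   fuse = GSC, blob = legacy      -> rejected
 */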
#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
static int check_huc_loading_mode(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	bool gsc_enabled = huc->fw.has_gsc_headers;

	/*
	 * The fuse for HuC load via GSC is only valid on platforms that have
	 * GuC deprivilege.
	 */
	if (HAS_GUC_DEPRIVILEGE(gt->i915))
		huc->loaded_via_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) &
				      GSC_LOADS_HUC;

	if (huc->loaded_via_gsc && !gsc_enabled) {
		huc_err(huc, "HW requires a GSC-enabled blob, but we found a legacy one\n");
		return -ENOEXEC;
	}

	/*
	 * On newer platforms we have GSC-enabled binaries but we load the HuC
	 * via DMA. To do so we need to find the location of the legacy-style
	 * binary inside the GSC-enabled one, which we do at fetch time. Make
	 * sure that we were able to do so if the fuse says we need to load via
	 * DMA and the binary is GSC-enabled.
	 */
	if (!huc->loaded_via_gsc && gsc_enabled && !huc->fw.dma_start_offset) {
		huc_err(huc, "HW in DMA mode, but we have an incompatible GSC-enabled blob\n");
		return -ENOEXEC;
	}

	/*
	 * If the HuC is loaded via GSC, we need to be able to access the GSC.
	 * On DG2 this is done via the mei components, while on newer platforms
	 * it is done via the GSCCS.
	 */
	if (huc->loaded_via_gsc) {
		if (IS_DG2(gt->i915)) {
			if (!IS_ENABLED(CONFIG_INTEL_MEI_PXP) ||
			    !IS_ENABLED(CONFIG_INTEL_MEI_GSC)) {
				huc_info(huc, "can't load due to missing mei modules\n");
				return -EIO;
			}
		} else {
			if (!HAS_ENGINE(gt, GSC0)) {
				huc_info(huc, "can't load due to missing GSCCS\n");
				return -EIO;
			}
		}
	}

	huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(huc->loaded_via_gsc));

	return 0;
}

int intel_huc_init(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	int err;

	err = check_huc_loading_mode(huc);
	if (err)
		goto out;

	if (HAS_ENGINE(gt, GSC0)) {
		struct i915_vma *vma;

		vma = intel_guc_allocate_vma(gt_to_guc(gt), PXP43_HUC_AUTH_INOUT_SIZE * 2);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			huc_info(huc, "Failed to allocate heci pkt\n");
			goto out;
		}

		huc->heci_pkt = vma;
	}

	err = intel_uc_fw_init(&huc->fw);
	if (err)
		goto out_pkt;

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

out_pkt:
	if (huc->heci_pkt)
		i915_vma_unpin_and_release(&huc->heci_pkt, 0);
out:
	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	huc_info(huc, "initialization failed %pe\n", ERR_PTR(err));
	return err;
}

void intel_huc_fini(struct intel_huc *huc)
{
	/*
	 * the fence is initialized in init_early, so we need to clean it up
	 * even if HuC loading is off.
	 */
	delayed_huc_load_fini(huc);

	if (huc->heci_pkt)
		i915_vma_unpin_and_release(&huc->heci_pkt, 0);

	if (intel_uc_fw_is_loadable(&huc->fw))
		intel_uc_fw_fini(&huc->fw);
}

void intel_huc_suspend(struct intel_huc *huc)
{
	if (!intel_uc_fw_is_loadable(&huc->fw))
		return;

	/*
	 * in the unlikely case that we're suspending before the GSC has
	 * completed its loading sequence, just stop waiting. We'll restart
	 * on resume.
	 */
	delayed_huc_load_complete(huc);
}

static const char *auth_mode_string(struct intel_huc *huc,
				    enum intel_huc_authentication_type type)
{
	bool partial = huc->fw.has_gsc_headers && type == INTEL_HUC_AUTH_BY_GUC;

	return partial ? "clear media" : "all workloads";
}

/*
 * Use a longer timeout for debug builds so that problems can be detected
 * and analysed. But a shorter timeout for releases so that users don't
 * wait forever to find out there is a problem. Note that the only reason
 * an end user should hit the timeout is in case of extreme thermal throttling.
 * And a system that is that hot during boot is probably dead anyway!
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define HUC_LOAD_RETRY_LIMIT 20
#else
#define HUC_LOAD_RETRY_LIMIT 3
#endif

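/**
 * intel_huc_wait_for_auth_complete() - wait for HuC authentication to complete
 * @huc: intel_huc structure
 * @type: authentication type (via GuC or via GSC)
 *
 * Polls the platform-specific status register until the selected
 * authentication is reported as done, retrying up to HUC_LOAD_RETRY_LIMIT
 * times (roughly one second per attempt). On success the firmware is marked
 * as RUNNING, otherwise it is marked as LOAD_FAIL; in both cases the delayed
 * load tracking is marked as complete.
 *
 * Return: 0 on success, a negative error code on failure.
 */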
int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
				     enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	struct intel_uncore *uncore = gt->uncore;
	ktime_t before, after, delta;
	int ret, count;
	u64 delta_ms;
	u32 before_freq;

	/*
	 * The KMD requests maximum frequency during driver load, however thermal
	 * throttling can force the frequency down to minimum (although the board
	 * really should never get that hot in real life!). IFWI issues have been
	 * seen to cause sporadic failures to grant the higher frequency. And at
	 * minimum frequency, the authentication time can be in the seconds range.
	 * Note that there is a limit on how long an individual wait_for() can wait.
	 * So wrap it in a loop.
	 */
	before_freq = intel_rps_read_actual_frequency(&gt->rps);
	before = ktime_get();
	for (count = 0; count < HUC_LOAD_RETRY_LIMIT; count++) {
		ret = __intel_wait_for_register(gt->uncore,
						huc->status[type].reg,
						huc->status[type].mask,
						huc->status[type].value,
						2, 1000, NULL);
		if (!ret)
			break;

		huc_dbg(huc, "auth still in progress, count = %d, freq = %dMHz, status = 0x%08X\n",
			count, intel_rps_read_actual_frequency(&gt->rps),
			huc->status[type].reg.reg);
	}
	after = ktime_get();
	delta = ktime_sub(after, before);
	delta_ms = ktime_to_ms(delta);

	if (delta_ms > 50) {
		huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
			 delta_ms, huc->status[type].reg.reg, count, ret);
		huc_warn(huc, "excessive auth time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
			 intel_rps_read_actual_frequency(&gt->rps), before_freq,
			 intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
	} else {
		huc_dbg(huc, "auth took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
			delta_ms, intel_rps_read_actual_frequency(&gt->rps),
			before_freq, huc->status[type].reg.reg, count, ret);
	}

	/* mark the load process as complete even if the wait failed */
	delayed_huc_load_complete(huc);

	if (ret) {
		huc_err(huc, "firmware not verified for %s: %pe\n",
			auth_mode_string(huc, type), ERR_PTR(ret));
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
		return ret;
	}

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
	huc_info(huc, "authenticated for %s\n", auth_mode_string(huc, type));
	return 0;
}

/**
 * intel_huc_auth() - Authenticate HuC uCode
 * @huc: intel_huc structure
 * @type: authentication type (via GuC or via GSC)
 *
 * Called after HuC and GuC firmware loading during intel_uc_init_hw().
 *
 * This function invokes the GuC action to authenticate the HuC firmware,
 * passing the offset of the RSA signature to intel_guc_auth_huc(). It then
 * waits for the firmware verification ACK via
 * intel_huc_wait_for_auth_complete(), which warns if authentication takes
 * longer than 50ms.
 */
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	struct intel_guc *guc = gt_to_guc(gt);
	int ret;

	if (!intel_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	/* GSC will do the auth with the load */
	if (intel_huc_is_loaded_by_gsc(huc))
		return -ENODEV;

	if (intel_huc_is_authenticated(huc, type))
		return -EEXIST;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		goto fail;

	switch (type) {
	case INTEL_HUC_AUTH_BY_GUC:
		ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
		break;
	case INTEL_HUC_AUTH_BY_GSC:
		ret = intel_huc_fw_auth_via_gsccs(huc);
		break;
	default:
		MISSING_CASE(type);
		ret = -EINVAL;
	}
	if (ret)
		goto fail;

	/* Check authentication status, it should be done by now */
	ret = intel_huc_wait_for_auth_complete(huc, type);
	if (ret)
		goto fail;

	return 0;

fail:
	huc_probe_error(huc, "%s authentication failed %pe\n",
			auth_mode_string(huc, type), ERR_PTR(ret));
	return ret;
}

bool intel_huc_is_authenticated(struct intel_huc *huc,
				enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;
	u32 status = 0;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		status = intel_uncore_read(gt->uncore, huc->status[type].reg);

	return (status & huc->status[type].mask) == huc->status[type].value;
}

static bool huc_is_fully_authenticated(struct intel_huc *huc)
{
	struct intel_uc_fw *huc_fw = &huc->fw;

	if (!huc_fw->has_gsc_headers)
		return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC);
	else if (intel_huc_is_loaded_by_gsc(huc) || HAS_ENGINE(huc_to_gt(huc), GSC0))
		return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC);
	else
		return false;
}

/**
 * intel_huc_check_status() - check HuC status
 * @huc: intel_huc structure
 *
 * This function reads the status register to verify if the HuC firmware was
 * successfully loaded and authenticated.
 *
 * The return values match what is expected for the I915_PARAM_HUC_STATUS
 * getparam.
 */
int intel_huc_check_status(struct intel_huc *huc)
{
	struct intel_uc_fw *huc_fw = &huc->fw;

	switch (__intel_uc_fw_status(huc_fw)) {
	case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
		return -ENODEV;
	case INTEL_UC_FIRMWARE_DISABLED:
		return -EOPNOTSUPP;
	case INTEL_UC_FIRMWARE_MISSING:
		return -ENOPKG;
	case INTEL_UC_FIRMWARE_ERROR:
		return -ENOEXEC;
	case INTEL_UC_FIRMWARE_INIT_FAIL:
		return -ENOMEM;
	case INTEL_UC_FIRMWARE_LOAD_FAIL:
		return -EIO;
	default:
		break;
	}

	/*
	 * GSC-enabled binaries loaded via DMA are first partially
	 * authenticated by GuC and then fully authenticated by GSC
	 */
	if (huc_is_fully_authenticated(huc))
		return 1; /* full auth */
	else if (huc_fw->has_gsc_headers && !intel_huc_is_loaded_by_gsc(huc) &&
		 intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC))
		return 2; /* clear media only */
	else
		return 0;
}

static bool huc_has_delayed_load(struct intel_huc *huc)
{
	return intel_huc_is_loaded_by_gsc(huc) &&
	       (huc->delayed_load.status != INTEL_HUC_DELAYED_LOAD_ERROR);
}

void intel_huc_update_auth_status(struct intel_huc *huc)
{
	if (!intel_uc_fw_is_loadable(&huc->fw))
		return;

	if (!huc->fw.has_gsc_headers)
		return;

	if (huc_is_fully_authenticated(huc))
		intel_uc_fw_change_status(&huc->fw,
					  INTEL_UC_FIRMWARE_RUNNING);
	else if (huc_has_delayed_load(huc))
		huc_delayed_load_start(huc);
}

/**
 * intel_huc_load_status - dump information about HuC load status
 * @huc: the HuC
 * @p: the &drm_printer
 *
 * Pretty printer for HuC load status.
 */
void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;

	if (!intel_huc_is_supported(huc)) {
		drm_printf(p, "HuC not supported\n");
		return;
	}

	if (!intel_huc_is_wanted(huc)) {
		drm_printf(p, "HuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&huc->fw, p);

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		drm_printf(p, "HuC status: 0x%08x\n",
			   intel_uncore_read(gt->uncore, huc->status[INTEL_HUC_AUTH_BY_GUC].reg));
}