// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/types.h>

#include "gt/intel_gt.h"
#include "gt/intel_rps.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "intel_huc_print.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "pxp/intel_pxp_cmd_interface_43.h"

#include <linux/device/bus.h>
#include <linux/mei_aux.h>

/**
 * DOC: HuC
 *
 * The HuC is a dedicated microcontroller for usage in media HEVC (High
 * Efficiency Video Coding) operations. Userspace can directly use the firmware
 * capabilities by adding HuC specific commands to batch buffers.
 *
 * The kernel driver is only responsible for loading the HuC firmware and
 * triggering its security authentication. This is done differently depending
 * on the platform:
 *
 * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
 *   and the authentication via GuC
 * - DG2: load and authentication are both performed via GSC.
 * - MTL and newer platforms: the load is performed via DMA (same as with
 *   not-DG2 older platforms), while the authentication is done in 2 steps,
 *   a first auth for clear-media workloads via GuC and a second one for all
 *   workloads via GSC.
 *
 * On platforms where the GuC does the authentication, to correctly do so the
 * HuC binary must be loaded before the GuC one.
 * Loading the HuC is optional; however, not using the HuC might negatively
 * impact power usage and/or performance of media workloads, depending on the
 * use-cases.
 * HuC must be reloaded on events that cause the WOPCM to lose its contents
 * (S3/S4, FLR); on older platforms the HuC must also be reloaded on GuC/GT
 * reset, while on newer ones it will survive that.
 *
 * See https://github.com/intel/media-driver for the latest details on HuC
 * functionality.
 */

/**
 * DOC: HuC Memory Management
 *
 * Similarly to the GuC, the HuC can't do any memory allocations on its own,
 * with the difference being that the allocations for HuC usage are handled by
 * the userspace driver instead of the kernel one. The HuC accesses the memory
 * via the PPGTT belonging to the context loaded on the VCS executing the
 * HuC-specific commands.
 */

/*
 * MEI-GSC load is an async process. The probing of the exposed aux device
 * (see intel_gsc.c) usually happens a few seconds after i915 probe, depending
 * on when the kernel schedules it. Unless something goes terribly wrong, we're
 * guaranteed for this to happen during boot, so the big timeout is a safety net
 * that we never expect to need.
 * MEI-PXP + HuC load usually takes ~300ms, but if the GSC needs to be resumed
 * and/or reset, this can take longer. Note that the kernel might schedule
 * other work between the i915 init/resume and the MEI one, which can add to
 * the delay.
 */
#define GSC_INIT_TIMEOUT_MS 10000
#define PXP_INIT_TIMEOUT_MS 5000
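
/*
 * The delayed-load fence only exists to stall userspace submissions while a
 * HuC load via GSC is in flight; nothing needs to happen when it signals, so
 * the notify callback is a no-op.
 */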
static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
				 enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

static void __delayed_huc_load_complete(struct intel_huc *huc)
{
	if (!i915_sw_fence_done(&huc->delayed_load.fence))
		i915_sw_fence_complete(&huc->delayed_load.fence);
}

static void delayed_huc_load_complete(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__delayed_huc_load_complete(huc);
}

static void __gsc_init_error(struct intel_huc *huc)
{
	huc->delayed_load.status = INTEL_HUC_DELAYED_LOAD_ERROR;
	__delayed_huc_load_complete(huc);
}

static void gsc_init_error(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__gsc_init_error(huc);
}

static void gsc_init_done(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);

	/* MEI-GSC init is done, now we wait for MEI-PXP to bind */
	huc->delayed_load.status = INTEL_HUC_WAITING_ON_PXP;
	if (!i915_sw_fence_done(&huc->delayed_load.fence))
		hrtimer_start(&huc->delayed_load.timer,
			      ms_to_ktime(PXP_INIT_TIMEOUT_MS),
			      HRTIMER_MODE_REL);
}

static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrtimer)
{
	struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer);

	if (!intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) {
		if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
			huc_notice(huc, "timed out waiting for MEI GSC\n");
		else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
			huc_notice(huc, "timed out waiting for MEI PXP\n");
		else
			MISSING_CASE(huc->delayed_load.status);

		__gsc_init_error(huc);
	}

	return HRTIMER_NORESTART;
}

static void huc_delayed_load_start(struct intel_huc *huc)
{
	ktime_t delay;

	GEM_BUG_ON(intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC));

	/*
	 * On resume we don't have to wait for MEI-GSC to be re-probed, but we
	 * do need to wait for MEI-PXP to reset & re-bind
	 */
	switch (huc->delayed_load.status) {
	case INTEL_HUC_WAITING_ON_GSC:
		delay = ms_to_ktime(GSC_INIT_TIMEOUT_MS);
		break;
	case INTEL_HUC_WAITING_ON_PXP:
		delay = ms_to_ktime(PXP_INIT_TIMEOUT_MS);
		break;
	default:
		gsc_init_error(huc);
		return;
	}

	/*
	 * This fence is always complete unless we're waiting for the
	 * GSC device to come up to load the HuC. We arm the fence here
	 * and complete it when we confirm that the HuC is loaded from
	 * the PXP bind callback.
	 */
	GEM_BUG_ON(!i915_sw_fence_done(&huc->delayed_load.fence));
	i915_sw_fence_fini(&huc->delayed_load.fence);
	i915_sw_fence_reinit(&huc->delayed_load.fence);
	i915_sw_fence_await(&huc->delayed_load.fence);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_start(&huc->delayed_load.timer, delay, HRTIMER_MODE_REL);
}
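
/*
 * Bus notifier callback for the GSC auxiliary device: binding of the mei
 * driver tells us the GSC is ready (gsc_init_done), while a bind failure or
 * an unbind means the HuC load via GSC cannot proceed (gsc_init_error).
 */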
static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct intel_huc *huc = container_of(nb, struct intel_huc, delayed_load.nb);
	struct intel_gsc_intf *intf = &huc_to_gt(huc)->gsc.intf[0];

	if (!intf->adev || &intf->adev->aux_dev.dev != dev)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BOUND_DRIVER: /* mei driver bound to aux device */
		gsc_init_done(huc);
		break;

	case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
	case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
		huc_info(huc, "MEI driver not bound, disabling load\n");
		gsc_init_error(huc);
		break;
	}

	return 0;
}

void intel_huc_register_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
	int ret;

	if (!intel_huc_is_loaded_by_gsc(huc))
		return;

	huc->delayed_load.nb.notifier_call = gsc_notifier;
	ret = bus_register_notifier(bus, &huc->delayed_load.nb);
	if (ret) {
		huc_err(huc, "failed to register GSC notifier %pe\n", ERR_PTR(ret));
		huc->delayed_load.nb.notifier_call = NULL;
		gsc_init_error(huc);
	}
}

void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
	if (!huc->delayed_load.nb.notifier_call)
		return;

	delayed_huc_load_complete(huc);

	bus_unregister_notifier(bus, &huc->delayed_load.nb);
	huc->delayed_load.nb.notifier_call = NULL;
}

static void delayed_huc_load_init(struct intel_huc *huc)
{
	/*
	 * Initialize fence to be complete as this is expected to be complete
	 * unless there is a delayed HuC load in progress.
	 */
	i915_sw_fence_init(&huc->delayed_load.fence,
			   sw_fence_dummy_notify);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_init(&huc->delayed_load.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	huc->delayed_load.timer.function = huc_delayed_load_timer_callback;
}

static void delayed_huc_load_fini(struct intel_huc *huc)
{
	/*
	 * the fence is initialized in init_early, so we need to clean it up
	 * even if HuC loading is off.
	 */
	delayed_huc_load_complete(huc);
	i915_sw_fence_fini(&huc->delayed_load.fence);
}

int intel_huc_sanitize(struct intel_huc *huc)
{
	delayed_huc_load_complete(huc);
	intel_uc_fw_sanitize(&huc->fw);
	return 0;
}
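
/*
 * The HuC is only used by media workloads submitted to the VCS engines, so
 * there is no point in loading it on a GT that has no VCS engines at all.
 */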
static bool vcs_supported(struct intel_gt *gt)
{
	intel_engine_mask_t mask = gt->info.engine_mask;

	/*
	 * We reach here from i915_driver_early_probe for the primary GT before
	 * its engine mask is set, so we use the device info engine mask for it;
	 * this means we're not taking VCS fusing into account, but if the
	 * primary GT supports VCS engines we expect at least one of them to
	 * remain unfused so we're fine.
	 * For other GTs we expect the GT-specific mask to be set before we
	 * call this function.
	 */
	GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);

	if (gt_is_root(gt))
		mask = INTEL_INFO(gt->i915)->platform_engine_mask;
	else
		mask = gt->info.engine_mask;

	return __ENGINE_INSTANCES_MASK(mask, VCS0, I915_MAX_VCS);
}

void intel_huc_init_early(struct intel_huc *huc)
{
	struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
	struct intel_gt *gt = huc_to_gt(huc);

	intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, true);

	/*
	 * we always init the fence as already completed, even if HuC is not
	 * supported. This way we don't have to distinguish between HuC not
	 * supported/disabled or already loaded, and can focus on if the load
	 * is currently in progress (fence not complete) or not, which is what
	 * we care about for stalling userspace submissions.
	 */
	delayed_huc_load_init(huc);

	if (!vcs_supported(gt)) {
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
		return;
	}

	if (GRAPHICS_VER(i915) >= 11) {
		huc->status[INTEL_HUC_AUTH_BY_GUC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_LOAD_SUCCESSFUL;
		huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status[INTEL_HUC_AUTH_BY_GUC].reg = HUC_STATUS2;
		huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_FW_VERIFIED;
		huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_FW_VERIFIED;
	}

	if (IS_DG2(i915)) {
		huc->status[INTEL_HUC_AUTH_BY_GSC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL;
		huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS(MTL_GSC_HECI1_BASE, 5);
		huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI1_FWSTS5_HUC_AUTH_DONE;
		huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI1_FWSTS5_HUC_AUTH_DONE;
	}
}
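
/*
 * The HW fuses select whether the HuC is loaded via DMA or via the GSC; make
 * sure the binary we fetched is compatible with the selected mode and that we
 * have a way to reach the GSC when the GSC load path is in use.
 */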
#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
static int check_huc_loading_mode(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	bool gsc_enabled = huc->fw.has_gsc_headers;

	/*
	 * The fuse for HuC load via GSC is only valid on platforms that have
	 * GuC deprivilege.
	 */
	if (HAS_GUC_DEPRIVILEGE(gt->i915))
		huc->loaded_via_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) &
				      GSC_LOADS_HUC;

	if (huc->loaded_via_gsc && !gsc_enabled) {
		huc_err(huc, "HW requires a GSC-enabled blob, but we found a legacy one\n");
		return -ENOEXEC;
	}

	/*
	 * On newer platforms we have GSC-enabled binaries but we load the HuC
	 * via DMA. To do so we need to find the location of the legacy-style
	 * binary inside the GSC-enabled one, which we do at fetch time. Make
	 * sure that we were able to do so if the fuse says we need to load via
	 * DMA and the binary is GSC-enabled.
	 */
	if (!huc->loaded_via_gsc && gsc_enabled && !huc->fw.dma_start_offset) {
		huc_err(huc, "HW in DMA mode, but we have an incompatible GSC-enabled blob\n");
		return -ENOEXEC;
	}

	/*
	 * If the HuC is loaded via GSC, we need to be able to access the GSC.
	 * On DG2 this is done via the mei components, while on newer platforms
	 * it is done via the GSCCS.
	 */
	if (huc->loaded_via_gsc) {
		if (IS_DG2(gt->i915)) {
			if (!IS_ENABLED(CONFIG_INTEL_MEI_PXP) ||
			    !IS_ENABLED(CONFIG_INTEL_MEI_GSC)) {
				huc_info(huc, "can't load due to missing mei modules\n");
				return -EIO;
			}
		} else {
			if (!HAS_ENGINE(gt, GSC0)) {
				huc_info(huc, "can't load due to missing GSCCS\n");
				return -EIO;
			}
		}
	}

	huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(huc->loaded_via_gsc));

	return 0;
}

int intel_huc_init(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	int err;

	err = check_huc_loading_mode(huc);
	if (err)
		goto out;

	if (HAS_ENGINE(gt, GSC0)) {
		struct i915_vma *vma;

		vma = intel_guc_allocate_vma(gt_to_guc(gt), PXP43_HUC_AUTH_INOUT_SIZE * 2);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			huc_info(huc, "Failed to allocate heci pkt\n");
			goto out;
		}

		huc->heci_pkt = vma;
	}

	err = intel_uc_fw_init(&huc->fw);
	if (err)
		goto out_pkt;

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

out_pkt:
	if (huc->heci_pkt)
		i915_vma_unpin_and_release(&huc->heci_pkt, 0);
out:
	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	huc_info(huc, "initialization failed %pe\n", ERR_PTR(err));
	return err;
}

void intel_huc_fini(struct intel_huc *huc)
{
	/*
	 * the fence is initialized in init_early, so we need to clean it up
	 * even if HuC loading is off.
	 */
	delayed_huc_load_fini(huc);

	if (huc->heci_pkt)
		i915_vma_unpin_and_release(&huc->heci_pkt, 0);

	if (intel_uc_fw_is_loadable(&huc->fw))
		intel_uc_fw_fini(&huc->fw);
}

static const char *auth_mode_string(struct intel_huc *huc,
				    enum intel_huc_authentication_type type)
{
	bool partial = huc->fw.has_gsc_headers && type == INTEL_HUC_AUTH_BY_GUC;

	return partial ? "clear media" : "all workloads";
}

/*
 * Use a longer timeout for debug builds so that problems can be detected
 * and analysed. But a shorter timeout for releases so that users don't
 * wait forever to find out there is a problem. Note that the only reason
 * an end user should hit the timeout is in case of extreme thermal throttling.
 * And a system that is that hot during boot is probably dead anyway!
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define HUC_LOAD_RETRY_LIMIT 20
#else
#define HUC_LOAD_RETRY_LIMIT 3
#endif
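
/**
 * intel_huc_wait_for_auth_complete() - wait for the HuC authentication to complete
 * @huc: intel_huc structure
 * @type: authentication type (via GuC or via GSC)
 *
 * Polls the relevant status register until the HW reports that the HuC has
 * been authenticated for the given type (or until we give up), then updates
 * the firmware status and completes the delayed-load fence accordingly.
 *
 * Return: 0 on success, a negative error code if the authentication did not
 * complete.
 */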
int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
				     enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	struct intel_uncore *uncore = gt->uncore;
	ktime_t before, after, delta;
	int ret, count;
	u64 delta_ms;
	u32 before_freq;

	/*
	 * The KMD requests maximum frequency during driver load, however thermal
	 * throttling can force the frequency down to minimum (although the board
	 * really should never get that hot in real life!). IFWI issues have been
	 * seen to cause sporadic failures to grant the higher frequency. And at
	 * minimum frequency, the authentication time can be in the seconds range.
	 * Note that there is a limit on how long an individual wait_for() can wait.
	 * So wrap it in a loop.
	 */
	before_freq = intel_rps_read_actual_frequency(&gt->rps);
	before = ktime_get();
	for (count = 0; count < HUC_LOAD_RETRY_LIMIT; count++) {
		ret = __intel_wait_for_register(gt->uncore,
						huc->status[type].reg,
						huc->status[type].mask,
						huc->status[type].value,
						2, 1000, NULL);
		if (!ret)
			break;

		huc_dbg(huc, "auth still in progress, count = %d, freq = %dMHz, status = 0x%08X\n",
			count, intel_rps_read_actual_frequency(&gt->rps),
			huc->status[type].reg.reg);
	}
	after = ktime_get();
	delta = ktime_sub(after, before);
	delta_ms = ktime_to_ms(delta);

	if (delta_ms > 50) {
		huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
			 delta_ms, huc->status[type].reg.reg, count, ret);
		huc_warn(huc, "excessive auth time: [freq = %dMHz, before = %dMHz, perf_limit_reasons = 0x%08X]\n",
			 intel_rps_read_actual_frequency(&gt->rps), before_freq,
			 intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
	} else {
		huc_dbg(huc, "auth took %lldms, freq = %dMHz, before = %dMHz, status = 0x%08X, count = %d, ret = %d\n",
			delta_ms, intel_rps_read_actual_frequency(&gt->rps),
			before_freq, huc->status[type].reg.reg, count, ret);
	}

	/* mark the load process as complete even if the wait failed */
	delayed_huc_load_complete(huc);

	if (ret) {
		huc_err(huc, "firmware not verified for %s: %pe\n",
			auth_mode_string(huc, type), ERR_PTR(ret));
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
		return ret;
	}

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
	huc_info(huc, "authenticated for %s\n", auth_mode_string(huc, type));
	return 0;
}

/**
 * intel_huc_auth() - Authenticate HuC uCode
 * @huc: intel_huc structure
 * @type: authentication type (via GuC or via GSC)
 *
 * Called after HuC and GuC firmware loading during intel_uc_init_hw().
 *
 * This function triggers the HuC authentication for the requested type: via
 * a GuC action for INTEL_HUC_AUTH_BY_GUC (passing the offset of the RSA
 * signature to intel_guc_auth_huc()), or via the GSCCS for
 * INTEL_HUC_AUTH_BY_GSC. It then waits for the firmware verification ACK via
 * intel_huc_wait_for_auth_complete().
 */
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	struct intel_guc *guc = gt_to_guc(gt);
	int ret;

	if (!intel_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	/* GSC will do the auth with the load */
	if (intel_huc_is_loaded_by_gsc(huc))
		return -ENODEV;

	if (intel_huc_is_authenticated(huc, type))
		return -EEXIST;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		goto fail;

	switch (type) {
	case INTEL_HUC_AUTH_BY_GUC:
		ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
		break;
	case INTEL_HUC_AUTH_BY_GSC:
		ret = intel_huc_fw_auth_via_gsccs(huc);
		break;
	default:
		MISSING_CASE(type);
		ret = -EINVAL;
	}
	if (ret)
		goto fail;

	/* Check authentication status, it should be done by now */
	ret = intel_huc_wait_for_auth_complete(huc, type);
	if (ret)
		goto fail;

	return 0;

fail:
	huc_probe_error(huc, "%s authentication failed %pe\n",
			auth_mode_string(huc, type), ERR_PTR(ret));
	return ret;
}

bool intel_huc_is_authenticated(struct intel_huc *huc,
				enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;
	u32 status = 0;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		status = intel_uncore_read(gt->uncore, huc->status[type].reg);

	return (status & huc->status[type].mask) == huc->status[type].value;
}
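
/*
 * With GSC-enabled binaries the GuC authentication only covers clear-media
 * workloads, so the HuC is only considered fully authenticated once the GSC
 * has done its part (via mei on DG2, via the GSCCS on newer platforms).
 */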
static bool huc_is_fully_authenticated(struct intel_huc *huc)
{
	struct intel_uc_fw *huc_fw = &huc->fw;

	if (!huc_fw->has_gsc_headers)
		return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC);
	else if (intel_huc_is_loaded_by_gsc(huc) || HAS_ENGINE(huc_to_gt(huc), GSC0))
		return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC);
	else
		return false;
}

/**
 * intel_huc_check_status() - check HuC status
 * @huc: intel_huc structure
 *
 * This function reads the status registers to verify whether the HuC
 * firmware was successfully loaded and authenticated.
 *
 * The return values match what is expected for the I915_PARAM_HUC_STATUS
 * getparam.
 */
int intel_huc_check_status(struct intel_huc *huc)
{
	struct intel_uc_fw *huc_fw = &huc->fw;

	switch (__intel_uc_fw_status(huc_fw)) {
	case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
		return -ENODEV;
	case INTEL_UC_FIRMWARE_DISABLED:
		return -EOPNOTSUPP;
	case INTEL_UC_FIRMWARE_MISSING:
		return -ENOPKG;
	case INTEL_UC_FIRMWARE_ERROR:
		return -ENOEXEC;
	case INTEL_UC_FIRMWARE_INIT_FAIL:
		return -ENOMEM;
	case INTEL_UC_FIRMWARE_LOAD_FAIL:
		return -EIO;
	default:
		break;
	}

	/*
	 * GSC-enabled binaries loaded via DMA are first partially
	 * authenticated by GuC and then fully authenticated by GSC
	 */
	if (huc_is_fully_authenticated(huc))
		return 1; /* full auth */
	else if (huc_fw->has_gsc_headers && !intel_huc_is_loaded_by_gsc(huc) &&
		 intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC))
		return 2; /* clear media only */
	else
		return 0;
}

static bool huc_has_delayed_load(struct intel_huc *huc)
{
	return intel_huc_is_loaded_by_gsc(huc) &&
	       (huc->delayed_load.status != INTEL_HUC_DELAYED_LOAD_ERROR);
}

void intel_huc_update_auth_status(struct intel_huc *huc)
{
	if (!intel_uc_fw_is_loadable(&huc->fw))
		return;

	if (!huc->fw.has_gsc_headers)
		return;

	if (huc_is_fully_authenticated(huc))
		intel_uc_fw_change_status(&huc->fw,
					  INTEL_UC_FIRMWARE_RUNNING);
	else if (huc_has_delayed_load(huc))
		huc_delayed_load_start(huc);
}

/**
 * intel_huc_load_status - dump information about HuC load status
 * @huc: the HuC
 * @p: the &drm_printer
 *
 * Pretty printer for HuC load status.
 */
void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;

	if (!intel_huc_is_supported(huc)) {
		drm_printf(p, "HuC not supported\n");
		return;
	}

	if (!intel_huc_is_wanted(huc)) {
		drm_printf(p, "HuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&huc->fw, p);

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		drm_printf(p, "HuC status: 0x%08x\n",
			   intel_uncore_read(gt->uncore, huc->status[INTEL_HUC_AUTH_BY_GUC].reg));
}