// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016-2019 Intel Corporation
 */

#include <linux/types.h>

#include "gt/intel_gt.h"
#include "gt/intel_rps.h"
#include "intel_guc_reg.h"
#include "intel_huc.h"
#include "intel_huc_print.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "pxp/intel_pxp_cmd_interface_43.h"

#include <linux/device/bus.h>
#include <linux/mei_aux.h>

/**
 * DOC: HuC
 *
 * The HuC is a dedicated microcontroller for usage in media HEVC (High
 * Efficiency Video Coding) operations. Userspace can directly use the firmware
 * capabilities by adding HuC specific commands to batch buffers.
 *
 * The kernel driver is only responsible for loading the HuC firmware and
 * triggering its security authentication. This is done differently depending
 * on the platform:
 *
 * - older platforms (from Gen9 to most Gen12s): the load is performed via DMA
 *   and the authentication via GuC
 * - DG2: load and authentication are both performed via GSC.
 * - MTL and newer platforms: the load is performed via DMA (same as with
 *   not-DG2 older platforms), while the authentication is done in 2-steps,
 *   a first auth for clear-media workloads via GuC and a second one for all
 *   workloads via GSC.
 *
 * On platforms where the GuC does the authentication, to correctly do so the
 * HuC binary must be loaded before the GuC one.
 * Loading the HuC is optional; however, not using the HuC might negatively
 * impact power usage and/or performance of media workloads, depending on the
 * use-cases.
 * HuC must be reloaded on events that cause the WOPCM to lose its contents
 * (S3/S4, FLR); on older platforms the HuC must also be reloaded on GuC/GT
 * reset, while on newer ones it will survive that.
 *
 * See https://github.com/intel/media-driver for the latest details on HuC
 * functionality.
 */

/**
 * DOC: HuC Memory Management
 *
 * Similarly to the GuC, the HuC can't do any memory allocations on its own,
 * with the difference being that the allocations for HuC usage are handled by
 * the userspace driver instead of the kernel one. The HuC accesses the memory
 * via the PPGTT belonging to the context loaded on the VCS executing the
 * HuC-specific commands.
 */

/*
 * MEI-GSC load is an async process. The probing of the exposed aux device
 * (see intel_gsc.c) usually happens a few seconds after i915 probe, depending
 * on when the kernel schedules it. Unless something goes terribly wrong, we're
 * guaranteed for this to happen during boot, so the big timeout is a safety net
 * that we never expect to need.
 * MEI-PXP + HuC load usually takes ~300ms, but if the GSC needs to be resumed
 * and/or reset, this can take longer. Note that the kernel might schedule
 * other work between the i915 init/resume and the MEI one, which can add to
 * the delay.
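 * The two timeouts below cover those two phases: GSC_INIT_TIMEOUT_MS bounds
 * the wait for the MEI-GSC aux device to be probed, while PXP_INIT_TIMEOUT_MS
 * bounds the subsequent wait for the MEI-PXP component to bind and the HuC
 * load to complete.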
 */
#define GSC_INIT_TIMEOUT_MS 10000
#define PXP_INIT_TIMEOUT_MS 5000

static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
				 enum i915_sw_fence_notify state)
{
	return NOTIFY_DONE;
}

static void __delayed_huc_load_complete(struct intel_huc *huc)
{
	if (!i915_sw_fence_done(&huc->delayed_load.fence))
		i915_sw_fence_complete(&huc->delayed_load.fence);
}

static void delayed_huc_load_complete(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__delayed_huc_load_complete(huc);
}

static void __gsc_init_error(struct intel_huc *huc)
{
	huc->delayed_load.status = INTEL_HUC_DELAYED_LOAD_ERROR;
	__delayed_huc_load_complete(huc);
}

static void gsc_init_error(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);
	__gsc_init_error(huc);
}

static void gsc_init_done(struct intel_huc *huc)
{
	hrtimer_cancel(&huc->delayed_load.timer);

	/* MEI-GSC init is done, now we wait for MEI-PXP to bind */
	huc->delayed_load.status = INTEL_HUC_WAITING_ON_PXP;
	if (!i915_sw_fence_done(&huc->delayed_load.fence))
		hrtimer_start(&huc->delayed_load.timer,
			      ms_to_ktime(PXP_INIT_TIMEOUT_MS),
			      HRTIMER_MODE_REL);
}

static enum hrtimer_restart huc_delayed_load_timer_callback(struct hrtimer *hrtimer)
{
	struct intel_huc *huc = container_of(hrtimer, struct intel_huc, delayed_load.timer);

	if (!intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC)) {
		if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_GSC)
			huc_notice(huc, "timed out waiting for MEI GSC\n");
		else if (huc->delayed_load.status == INTEL_HUC_WAITING_ON_PXP)
			huc_notice(huc, "timed out waiting for MEI PXP\n");
		else
			MISSING_CASE(huc->delayed_load.status);

		__gsc_init_error(huc);
	}

	return HRTIMER_NORESTART;
}

static void huc_delayed_load_start(struct intel_huc *huc)
{
	ktime_t delay;

	GEM_BUG_ON(intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC));

	/*
	 * On resume we don't have to wait for MEI-GSC to be re-probed, but we
	 * do need to wait for MEI-PXP to reset & re-bind
	 */
	switch (huc->delayed_load.status) {
	case INTEL_HUC_WAITING_ON_GSC:
		delay = ms_to_ktime(GSC_INIT_TIMEOUT_MS);
		break;
	case INTEL_HUC_WAITING_ON_PXP:
		delay = ms_to_ktime(PXP_INIT_TIMEOUT_MS);
		break;
	default:
		gsc_init_error(huc);
		return;
	}

	/*
	 * This fence is always complete unless we're waiting for the
	 * GSC device to come up to load the HuC. We arm the fence here
	 * and complete it when we confirm that the HuC is loaded from
	 * the PXP bind callback.
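	 * The fini + reinit cycle below resets the fence to a fresh state, and
	 * the await call keeps it pending until delayed_huc_load_complete()
	 * signals it (or the timer above expires and flags an error).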
	 */
	GEM_BUG_ON(!i915_sw_fence_done(&huc->delayed_load.fence));
	i915_sw_fence_fini(&huc->delayed_load.fence);
	i915_sw_fence_reinit(&huc->delayed_load.fence);
	i915_sw_fence_await(&huc->delayed_load.fence);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_start(&huc->delayed_load.timer, delay, HRTIMER_MODE_REL);
}

static int gsc_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	struct device *dev = data;
	struct intel_huc *huc = container_of(nb, struct intel_huc, delayed_load.nb);
	struct intel_gsc_intf *intf = &huc_to_gt(huc)->gsc.intf[0];

	if (!intf->adev || &intf->adev->aux_dev.dev != dev)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BOUND_DRIVER: /* mei driver bound to aux device */
		gsc_init_done(huc);
		break;

	case BUS_NOTIFY_DRIVER_NOT_BOUND: /* mei driver fails to be bound */
	case BUS_NOTIFY_UNBIND_DRIVER: /* mei driver about to be unbound */
		huc_info(huc, "MEI driver not bound, disabling load\n");
		gsc_init_error(huc);
		break;
	}

	return 0;
}

void intel_huc_register_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
	int ret;

	if (!intel_huc_is_loaded_by_gsc(huc))
		return;

	huc->delayed_load.nb.notifier_call = gsc_notifier;
	ret = bus_register_notifier(bus, &huc->delayed_load.nb);
	if (ret) {
		huc_err(huc, "failed to register GSC notifier %pe\n", ERR_PTR(ret));
		huc->delayed_load.nb.notifier_call = NULL;
		gsc_init_error(huc);
	}
}

void intel_huc_unregister_gsc_notifier(struct intel_huc *huc, const struct bus_type *bus)
{
	if (!huc->delayed_load.nb.notifier_call)
		return;

	delayed_huc_load_complete(huc);

	bus_unregister_notifier(bus, &huc->delayed_load.nb);
	huc->delayed_load.nb.notifier_call = NULL;
}

static void delayed_huc_load_init(struct intel_huc *huc)
{
	/*
	 * Initialize fence to be complete as this is expected to be complete
	 * unless there is a delayed HuC load in progress.
	 */
	i915_sw_fence_init(&huc->delayed_load.fence,
			   sw_fence_dummy_notify);
	i915_sw_fence_commit(&huc->delayed_load.fence);

	hrtimer_setup(&huc->delayed_load.timer, huc_delayed_load_timer_callback, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}

static void delayed_huc_load_fini(struct intel_huc *huc)
{
	/*
	 * the fence is initialized in init_early, so we need to clean it up
	 * even if HuC loading is off.
	 */
	delayed_huc_load_complete(huc);
	i915_sw_fence_fini(&huc->delayed_load.fence);
}

int intel_huc_sanitize(struct intel_huc *huc)
{
	delayed_huc_load_complete(huc);
	intel_uc_fw_sanitize(&huc->fw);
	return 0;
}

static bool vcs_supported(struct intel_gt *gt)
{
	intel_engine_mask_t mask;

	/*
	 * We reach here from i915_driver_early_probe for the primary GT before
	 * its engine mask is set, so we use the device info engine mask for it;
	 * this means we're not taking VCS fusing into account, but if the
	 * primary GT supports VCS engines we expect at least one of them to
	 * remain unfused so we're fine.
	 * For other GTs we expect the GT-specific mask to be set before we
	 * call this function.
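	 * Either way, __ENGINE_INSTANCES_MASK() below extracts the VCS
	 * instances from the chosen mask, so we return true only if at least
	 * one VCS engine is present.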
	 */
	GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);

	if (gt_is_root(gt))
		mask = INTEL_INFO(gt->i915)->platform_engine_mask;
	else
		mask = gt->info.engine_mask;

	return __ENGINE_INSTANCES_MASK(mask, VCS0, I915_MAX_VCS);
}

void intel_huc_init_early(struct intel_huc *huc)
{
	struct drm_i915_private *i915 = huc_to_gt(huc)->i915;
	struct intel_gt *gt = huc_to_gt(huc);

	intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC, true);

	/*
	 * we always init the fence as already completed, even if HuC is not
	 * supported. This way we don't have to distinguish between HuC not
	 * supported/disabled or already loaded, and can focus on if the load
	 * is currently in progress (fence not complete) or not, which is what
	 * we care about for stalling userspace submissions.
	 */
	delayed_huc_load_init(huc);

	if (!vcs_supported(gt)) {
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
		return;
	}

	if (GRAPHICS_VER(i915) >= 11) {
		huc->status[INTEL_HUC_AUTH_BY_GUC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_LOAD_SUCCESSFUL;
		huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status[INTEL_HUC_AUTH_BY_GUC].reg = HUC_STATUS2;
		huc->status[INTEL_HUC_AUTH_BY_GUC].mask = HUC_FW_VERIFIED;
		huc->status[INTEL_HUC_AUTH_BY_GUC].value = HUC_FW_VERIFIED;
	}

	if (IS_DG2(i915)) {
		huc->status[INTEL_HUC_AUTH_BY_GSC].reg = GEN11_HUC_KERNEL_LOAD_INFO;
		huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HUC_LOAD_SUCCESSFUL;
		huc->status[INTEL_HUC_AUTH_BY_GSC].value = HUC_LOAD_SUCCESSFUL;
	} else {
		huc->status[INTEL_HUC_AUTH_BY_GSC].reg = HECI_FWSTS(MTL_GSC_HECI1_BASE, 5);
		huc->status[INTEL_HUC_AUTH_BY_GSC].mask = HECI1_FWSTS5_HUC_AUTH_DONE;
		huc->status[INTEL_HUC_AUTH_BY_GSC].value = HECI1_FWSTS5_HUC_AUTH_DONE;
	}
}

void intel_huc_fini_late(struct intel_huc *huc)
{
	delayed_huc_load_fini(huc);
}

#define HUC_LOAD_MODE_STRING(x) (x ? "GSC" : "legacy")
static int check_huc_loading_mode(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	bool gsc_enabled = huc->fw.has_gsc_headers;

	/*
	 * The fuse for HuC load via GSC is only valid on platforms that have
	 * GuC deprivilege.
	 */
	if (HAS_GUC_DEPRIVILEGE(gt->i915))
		huc->loaded_via_gsc = intel_uncore_read(gt->uncore, GUC_SHIM_CONTROL2) &
				      GSC_LOADS_HUC;

	if (huc->loaded_via_gsc && !gsc_enabled) {
		huc_err(huc, "HW requires a GSC-enabled blob, but we found a legacy one\n");
		return -ENOEXEC;
	}

	/*
	 * On newer platforms we have GSC-enabled binaries but we load the HuC
	 * via DMA. To do so we need to find the location of the legacy-style
	 * binary inside the GSC-enabled one, which we do at fetch time. Make
	 * sure that we were able to do so if the fuse says we need to load via
	 * DMA and the binary is GSC-enabled.
	 */
	if (!huc->loaded_via_gsc && gsc_enabled && !huc->fw.dma_start_offset) {
		huc_err(huc, "HW in DMA mode, but we have an incompatible GSC-enabled blob\n");
		return -ENOEXEC;
	}

	/*
	 * If the HuC is loaded via GSC, we need to be able to access the GSC.
	 * On DG2 this is done via the mei components, while on newer platforms
	 * it is done via the GSCCS.
	 */
	if (huc->loaded_via_gsc) {
		if (IS_DG2(gt->i915)) {
			if (!IS_ENABLED(CONFIG_INTEL_MEI_PXP) ||
			    !IS_ENABLED(CONFIG_INTEL_MEI_GSC)) {
				huc_info(huc, "can't load due to missing mei modules\n");
				return -EIO;
			}
		} else {
			if (!HAS_ENGINE(gt, GSC0)) {
				huc_info(huc, "can't load due to missing GSCCS\n");
				return -EIO;
			}
		}
	}

	huc_dbg(huc, "loaded by GSC = %s\n", str_yes_no(huc->loaded_via_gsc));

	return 0;
}

int intel_huc_init(struct intel_huc *huc)
{
	struct intel_gt *gt = huc_to_gt(huc);
	int err;

	err = check_huc_loading_mode(huc);
	if (err)
		goto out;

	if (HAS_ENGINE(gt, GSC0)) {
		struct i915_vma *vma;

		vma = intel_guc_allocate_vma(gt_to_guc(gt), PXP43_HUC_AUTH_INOUT_SIZE * 2);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			huc_info(huc, "Failed to allocate heci pkt\n");
			goto out;
		}

		huc->heci_pkt = vma;
	}

	err = intel_uc_fw_init(&huc->fw);
	if (err)
		goto out_pkt;

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);

	return 0;

out_pkt:
	if (huc->heci_pkt)
		i915_vma_unpin_and_release(&huc->heci_pkt, 0);
out:
	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
	huc_info(huc, "initialization failed %pe\n", ERR_PTR(err));
	return err;
}

void intel_huc_fini(struct intel_huc *huc)
{
	if (huc->heci_pkt)
		i915_vma_unpin_and_release(&huc->heci_pkt, 0);

	if (intel_uc_fw_is_loadable(&huc->fw))
		intel_uc_fw_fini(&huc->fw);
}

static const char *auth_mode_string(struct intel_huc *huc,
				    enum intel_huc_authentication_type type)
{
	bool partial = huc->fw.has_gsc_headers && type == INTEL_HUC_AUTH_BY_GUC;

	return partial ? "clear media" : "all workloads";
}

/*
 * Use a longer timeout for debug builds so that problems can be detected
 * and analysed. But a shorter timeout for releases so that users don't
 * wait forever to find out there is a problem. Note that the only reason
 * an end user should hit the timeout is in case of extreme thermal throttling.
 * And a system that is that hot during boot is probably dead anyway!
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
#define HUC_LOAD_RETRY_LIMIT 20
#else
#define HUC_LOAD_RETRY_LIMIT 3
#endif

int intel_huc_wait_for_auth_complete(struct intel_huc *huc,
				     enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	struct intel_uncore *uncore = gt->uncore;
	ktime_t before, after, delta;
	int ret, count;
	u64 delta_ms;
	u32 before_freq;

	/*
	 * The KMD requests maximum frequency during driver load, however thermal
	 * throttling can force the frequency down to minimum (although the board
	 * really should never get that hot in real life!). IFWI issues have been
	 * seen to cause sporadic failures to grant the higher frequency. And at
	 * minimum frequency, the authentication time can be in the seconds range.
	 * Note that there is a limit on how long an individual wait_for() can wait.
	 * So wrap it in a loop.
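	 * Each __intel_wait_for_register() call below waits for up to 1 second
	 * (the 1000ms slow timeout), so the loop bounds the total wait at
	 * roughly HUC_LOAD_RETRY_LIMIT seconds.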
	 */
	before_freq = intel_rps_read_actual_frequency(&gt->rps);
	before = ktime_get();
	for (count = 0; count < HUC_LOAD_RETRY_LIMIT; count++) {
		ret = __intel_wait_for_register(gt->uncore,
						huc->status[type].reg,
						huc->status[type].mask,
						huc->status[type].value,
						2, 1000, NULL);
		if (!ret)
			break;

		huc_dbg(huc, "auth still in progress, count = %d, freq = %dMHz, status = 0x%08X\n",
			count, intel_rps_read_actual_frequency(&gt->rps),
			huc->status[type].reg.reg);
	}
	after = ktime_get();
	delta = ktime_sub(after, before);
	delta_ms = ktime_to_ms(delta);

	if (delta_ms > 50) {
		huc_warn(huc, "excessive auth time: %lldms! [status = 0x%08X, count = %d, ret = %d]\n",
			 delta_ms, huc->status[type].reg.reg, count, ret);
		huc_warn(huc, "excessive auth time: [freq = %dMHz -> %dMHz vs %dMHz, perf_limit_reasons = 0x%08X]\n",
			 before_freq, intel_rps_read_actual_frequency(&gt->rps),
			 intel_rps_get_requested_frequency(&gt->rps),
			 intel_uncore_read(uncore, intel_gt_perf_limit_reasons_reg(gt)));
	} else {
		huc_dbg(huc, "auth took %lldms, freq = %dMHz -> %dMHz vs %dMHz, status = 0x%08X, count = %d, ret = %d\n",
			delta_ms, before_freq, intel_rps_read_actual_frequency(&gt->rps),
			intel_rps_get_requested_frequency(&gt->rps),
			huc->status[type].reg.reg, count, ret);
	}

	/* mark the load process as complete even if the wait failed */
	delayed_huc_load_complete(huc);

	if (ret) {
		huc_err(huc, "firmware not verified for %s: %pe\n",
			auth_mode_string(huc, type), ERR_PTR(ret));
		intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
		return ret;
	}

	intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_RUNNING);
	huc_info(huc, "authenticated for %s\n", auth_mode_string(huc, type));
	return 0;
}

/**
 * intel_huc_auth() - Authenticate HuC uCode
 * @huc: intel_huc structure
 * @type: authentication type (via GuC or via GSC)
 *
 * Called after HuC and GuC firmware loading during intel_uc_init_hw().
 *
 * Depending on @type, this function either invokes the GuC action to
 * authenticate the HuC firmware, passing the offset of the RSA signature to
 * intel_guc_auth_huc(), or triggers the authentication via the GSCCS. It then
 * waits for the authentication to complete (see
 * intel_huc_wait_for_auth_complete()).
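 *
 * Return: 0 on success, -ENOEXEC if the firmware is not loaded, -ENODEV if
 * the HuC is loaded (and authenticated) by the GSC, -EEXIST if the HuC was
 * already authenticated for @type, or another negative errno if the
 * authentication fails.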
 */
int intel_huc_auth(struct intel_huc *huc, enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	struct intel_guc *guc = gt_to_guc(gt);
	int ret;

	if (!intel_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	/* GSC will do the auth with the load */
	if (intel_huc_is_loaded_by_gsc(huc))
		return -ENODEV;

	if (intel_huc_is_authenticated(huc, type))
		return -EEXIST;

	ret = i915_inject_probe_error(gt->i915, -ENXIO);
	if (ret)
		goto fail;

	switch (type) {
	case INTEL_HUC_AUTH_BY_GUC:
		ret = intel_guc_auth_huc(guc, intel_guc_ggtt_offset(guc, huc->fw.rsa_data));
		break;
	case INTEL_HUC_AUTH_BY_GSC:
		ret = intel_huc_fw_auth_via_gsccs(huc);
		break;
	default:
		MISSING_CASE(type);
		ret = -EINVAL;
	}
	if (ret)
		goto fail;

	/* Check authentication status, it should be done by now */
	ret = intel_huc_wait_for_auth_complete(huc, type);
	if (ret)
		goto fail;

	return 0;

fail:
	huc_probe_error(huc, "%s authentication failed %pe\n",
			auth_mode_string(huc, type), ERR_PTR(ret));
	return ret;
}

bool intel_huc_is_authenticated(struct intel_huc *huc,
				enum intel_huc_authentication_type type)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;
	u32 status = 0;

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		status = intel_uncore_read(gt->uncore, huc->status[type].reg);

	return (status & huc->status[type].mask) == huc->status[type].value;
}

static bool huc_is_fully_authenticated(struct intel_huc *huc)
{
	struct intel_uc_fw *huc_fw = &huc->fw;

	if (!huc_fw->has_gsc_headers)
		return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC);
	else if (intel_huc_is_loaded_by_gsc(huc) || HAS_ENGINE(huc_to_gt(huc), GSC0))
		return intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GSC);
	else
		return false;
}

/**
 * intel_huc_check_status() - check HuC status
 * @huc: intel_huc structure
 *
 * This function reads the status register to verify whether the HuC
 * firmware was successfully loaded.
 *
 * The return values match what is expected for the I915_PARAM_HUC_STATUS
 * getparam.
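 *
 * Return: a negative errno if the HuC firmware is unavailable or failed to
 * load or initialize, 0 if the firmware is not (yet) authenticated, 1 if it
 * is fully authenticated, or 2 if only the GuC-based partial authentication
 * for clear-media workloads has completed.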
 */
int intel_huc_check_status(struct intel_huc *huc)
{
	struct intel_uc_fw *huc_fw = &huc->fw;

	switch (__intel_uc_fw_status(huc_fw)) {
	case INTEL_UC_FIRMWARE_NOT_SUPPORTED:
		return -ENODEV;
	case INTEL_UC_FIRMWARE_DISABLED:
		return -EOPNOTSUPP;
	case INTEL_UC_FIRMWARE_MISSING:
		return -ENOPKG;
	case INTEL_UC_FIRMWARE_ERROR:
		return -ENOEXEC;
	case INTEL_UC_FIRMWARE_INIT_FAIL:
		return -ENOMEM;
	case INTEL_UC_FIRMWARE_LOAD_FAIL:
		return -EIO;
	default:
		break;
	}

	/*
	 * GSC-enabled binaries loaded via DMA are first partially
	 * authenticated by GuC and then fully authenticated by GSC
	 */
	if (huc_is_fully_authenticated(huc))
		return 1; /* full auth */
	else if (huc_fw->has_gsc_headers && !intel_huc_is_loaded_by_gsc(huc) &&
		 intel_huc_is_authenticated(huc, INTEL_HUC_AUTH_BY_GUC))
		return 2; /* clear media only */
	else
		return 0;
}

static bool huc_has_delayed_load(struct intel_huc *huc)
{
	return intel_huc_is_loaded_by_gsc(huc) &&
	       (huc->delayed_load.status != INTEL_HUC_DELAYED_LOAD_ERROR);
}

void intel_huc_update_auth_status(struct intel_huc *huc)
{
	if (!intel_uc_fw_is_loadable(&huc->fw))
		return;

	if (!huc->fw.has_gsc_headers)
		return;

	if (huc_is_fully_authenticated(huc))
		intel_uc_fw_change_status(&huc->fw,
					  INTEL_UC_FIRMWARE_RUNNING);
	else if (huc_has_delayed_load(huc))
		huc_delayed_load_start(huc);
}

/**
 * intel_huc_load_status - dump information about HuC load status
 * @huc: the HuC
 * @p: the &drm_printer
 *
 * Pretty printer for HuC load status.
 */
void intel_huc_load_status(struct intel_huc *huc, struct drm_printer *p)
{
	struct intel_gt *gt = huc_to_gt(huc);
	intel_wakeref_t wakeref;

	if (!intel_huc_is_supported(huc)) {
		drm_printf(p, "HuC not supported\n");
		return;
	}

	if (!intel_huc_is_wanted(huc)) {
		drm_printf(p, "HuC disabled\n");
		return;
	}

	intel_uc_fw_dump(&huc->fw, p);

	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		drm_printf(p, "HuC status: 0x%08x\n",
			   intel_uncore_read(gt->uncore, huc->status[INTEL_HUC_AUTH_BY_GUC].reg));
}