Lines Matching +full:- +full:gt

1 // SPDX-License-Identifier: MIT
59 addr = __xe_bo_ggtt_addr(bo, gt_to_tile(guc_to_gt(guc))->id); in guc_bo_ggtt_addr()
64 xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr); in guc_bo_ggtt_addr()
71 u32 level = xe_guc_log_get_level(&guc->log); in guc_ctl_debug_flags()
87 if (!xe->info.skip_guc_pc) in guc_ctl_feature_flags()
90 if (xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev))) in guc_ctl_feature_flags()
98 u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT; in guc_ctl_log_params_flags()
128 FIELD_PREP(GUC_LOG_CRASH, CRASH_BUFFER_SIZE / LOG_UNIT - 1) | in guc_ctl_log_params_flags()
129 FIELD_PREP(GUC_LOG_DEBUG, DEBUG_BUFFER_SIZE / LOG_UNIT - 1) | in guc_ctl_log_params_flags()
130 FIELD_PREP(GUC_LOG_CAPTURE, CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) | in guc_ctl_log_params_flags()
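All three size fields above share a "units minus one" encoding; a minimal worked example, with buffer and unit values assumed purely for illustration:

        /* A 16M buffer counted in 1M units is sent as 16 - 1 = 15;
         * firmware recovers the size as (field + 1) * unit, so a
         * field value of 0 still means one full unit.
         */
        u32 field = SZ_16M / SZ_1M - 1;         /* == 15 */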
143 u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT; in guc_ctl_ads_flags()
149 static bool needs_wa_dual_queue(struct xe_gt *gt) in needs_wa_dual_queue() argument
156 if (XE_GT_WA(gt, 22011391025)) in needs_wa_dual_queue()
165 * can cause complications if the non-stalled submission runs for a long in needs_wa_dual_queue()
171 if (CCS_MASK(gt) && GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) in needs_wa_dual_queue()
180 struct xe_gt *gt = guc_to_gt(guc); in guc_ctl_wa_flags() local
183 if (XE_GT_WA(gt, 22012773006)) in guc_ctl_wa_flags()
186 if (XE_GT_WA(gt, 14014475959)) in guc_ctl_wa_flags()
189 if (needs_wa_dual_queue(gt)) in guc_ctl_wa_flags()
193 * Wa_22011802037: FIXME - there's more to be done than simply setting in guc_ctl_wa_flags()
194 * this flag: make sure each CS is stopped when preparing for GT reset in guc_ctl_wa_flags()
200 if (XE_GT_WA(gt, 22012727170) || XE_GT_WA(gt, 22012727685)) in guc_ctl_wa_flags()
203 if (XE_GT_WA(gt, 18020744125) && in guc_ctl_wa_flags()
204 !xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER)) in guc_ctl_wa_flags()
207 if (XE_GT_WA(gt, 1509372804)) in guc_ctl_wa_flags()
210 if (XE_GT_WA(gt, 14018913170)) in guc_ctl_wa_flags()
213 if (XE_GT_WA(gt, 16023683509)) in guc_ctl_wa_flags()
223 return (((u32)xe->info.devid) << 16) | xe->info.revid; in guc_ctl_devid()
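For illustration, the packing above with made-up IDs:

        /* Hypothetical values: devid 0xABCD, revid 0x01 */
        u32 packed = ((u32)0xABCD << 16) | 0x01;        /* 0xABCD0001 */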
228 struct xe_gt *gt = guc_to_gt(guc); in guc_print_params() local
229 u32 *params = guc->params; in guc_print_params()
232 BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32)); in guc_print_params()
236 xe_gt_dbg(gt, "GuC param[%2d] = 0x%08x\n", i, params[i]); in guc_print_params()
241 u32 *params = guc->params; in guc_init_params()
255 u32 *params = guc->params; in guc_init_params_post_hwconfig()
274 struct xe_gt *gt = guc_to_gt(guc); in guc_write_params() local
277 xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); in guc_write_params()
279 xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(0), 0); in guc_write_params()
282 xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]); in guc_write_params()
288 struct xe_gt *gt = guc_to_gt(guc); in guc_action_register_g2g_buffer() local
289 struct xe_device *xe = gt_to_xe(gt); in guc_action_register_g2g_buffer()
292 FIELD_PREP(XE_G2G_REGISTER_SIZE, size / SZ_4K - 1) | in guc_action_register_g2g_buffer()
303 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in guc_action_register_g2g_buffer()
308 struct xe_gt *gt = guc_to_gt(guc); in guc_action_deregister_g2g_buffer() local
309 struct xe_device *xe = gt_to_xe(gt); in guc_action_deregister_g2g_buffer()
319 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in guc_action_deregister_g2g_buffer()
322 #define G2G_DEV(gt) (((gt)->info.type == XE_GT_TYPE_MAIN) ? 0 : 1) argument
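When a tile carries both a main and a media GT, G2G_DEV() supplies the second bit of the 0.0/0.1/1.0/1.1 labels used in the matrix below; presumably the (tile, device) pair is flattened like this (my reconstruction, consistent with the g2g_slot() fragments further down):

        /* e.g. tile 1, media GT: (1 << 1) | 1 = 3, entry "1.1" */
        u32 flat_id = (tile_id << 1) | G2G_DEV(gt);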
329 * Generate a unique id for each bi-directional CTB for each pair of
331 * a single allocation that is sub-divided into multiple CTBs.
337 * N 0.0 --/-- 00/01 02/03 04/05
338 * e 0.1 01/00 --/-- 06/07 08/09
339 * a 1.0 03/02 07/06 --/-- 10/11
340 * r 1.1 05/04 09/08 11/10 --/--
362 return -1; in g2g_slot()
377 direction = (1 - type); in g2g_slot()
381 for (i = y; i > 0; i--) in g2g_slot()
382 idx += max_inst - i; in g2g_slot()
385 idx += (x - 1 - y); in g2g_slot()
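Only fragments of the index computation match the search pattern, so here is a self-contained sketch of the triangular numbering the matrix above implies. This is a reconstruction under assumptions, not the verbatim driver code: each unordered GT pair gets two consecutive slots (one per direction), pairs are enumerated across the strict upper triangle row by row, and a caller sitting below the diagonal flips the direction bit.

        /*
         * Sketch of the slot numbering; 'near'/'far' are flattened GT ids,
         * 'type' is 0 or 1 (in/out), 'max_inst' is the number of GTs.
         */
        static int g2g_slot_sketch(u32 near, u32 far, u32 type, u32 max_inst)
        {
                u32 x, y, direction, idx = 0, i;

                /* No channel to one's self: the --/-- diagonal */
                if (near == far)
                        return -1;

                if (far > near) {
                        x = far;                /* upper-right table half */
                        y = near;
                        direction = type;       /* 'forwards' */
                } else {
                        x = near;               /* lower-left table half */
                        y = far;
                        direction = 1 - type;   /* 'backwards' */
                }

                /* Skip all pairs in the rows above this one... */
                for (i = y; i > 0; i--)
                        idx += max_inst - i;

                /* ...then walk along the row to column x */
                idx += x - 1 - y;

                /* Two CTBs per pair, one per direction */
                return idx * 2 + direction;
        }

For the 4-GT table above, g2g_slot_sketch(0, 2, 0, 4) returns 2 and g2g_slot_sketch(2, 0, 0, 4) returns 3, matching the 02/03 cell of the matrix.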
401 u32 near_tile = gt_to_tile(near_gt)->id; in guc_g2g_register()
403 u32 far_tile = gt_to_tile(far_gt)->id; in guc_g2g_register()
405 u32 max = xe->info.gt_count; in guc_g2g_register()
412 g2g_bo = near_guc->g2g.bo; in guc_g2g_register()
422 xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE); in guc_g2g_register()
423 xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(g2g_bo)); in guc_g2g_register()
436 struct xe_gt *gt = guc_to_gt(guc); in guc_g2g_size() local
437 struct xe_device *xe = gt_to_xe(gt); in guc_g2g_size()
438 unsigned int count = xe->info.gt_count; in guc_g2g_size()
439 u32 num_channels = (count * (count - 1)) / 2; in guc_g2g_size()
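The channel count above is just n-choose-2; a worked check against the matrix comment earlier (the total-size expression is illustrative, built from the G2G_* names visible in the asserts):

        /* 4 GTs: 4 * 3 / 2 = 6 bi-directional channels, 12 CTBs,
         * matching the twelve numbered slots 00..11 in the matrix.
         */
        u32 num_channels = (4 * (4 - 1)) / 2;           /* 6  */
        u32 num_ctbs = num_channels * 2;                /* 12 */
        u32 size = G2G_DESC_AREA_SIZE + num_ctbs * G2G_BUFFER_SIZE;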
449 if (xe->info.gt_count <= 1) in xe_guc_g2g_wanted()
458 struct xe_gt *gt = guc_to_gt(guc); in guc_g2g_alloc() local
459 struct xe_device *xe = gt_to_xe(gt); in guc_g2g_alloc()
460 struct xe_tile *tile = gt_to_tile(gt); in guc_g2g_alloc()
464 if (guc->g2g.bo) in guc_g2g_alloc()
467 if (gt->info.id != 0) { in guc_g2g_alloc()
469 struct xe_guc *root_guc = &root_gt->uc.guc; in guc_g2g_alloc()
472 bo = xe_bo_get(root_guc->g2g.bo); in guc_g2g_alloc()
474 return -ENODEV; in guc_g2g_alloc()
476 guc->g2g.bo = bo; in guc_g2g_alloc()
477 guc->g2g.owned = false; in guc_g2g_alloc()
491 xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size); in guc_g2g_alloc()
492 guc->g2g.bo = bo; in guc_g2g_alloc()
493 guc->g2g.owned = true; in guc_g2g_alloc()
500 if (!guc->g2g.bo) in guc_g2g_fini()
504 if (!guc->g2g.owned) in guc_g2g_fini()
505 xe_bo_put(guc->g2g.bo); in guc_g2g_fini()
507 guc->g2g.bo = NULL; in guc_g2g_fini()
512 struct xe_gt *far_gt, *gt = guc_to_gt(guc); in guc_g2g_start() local
513 struct xe_device *xe = gt_to_xe(gt); in guc_g2g_start()
518 if (!guc->g2g.bo) { in guc_g2g_start()
526 /* GuC interface will need extending if more GT device types are ever created. */ in guc_g2g_start()
527 xe_gt_assert(gt, (gt->info.type == XE_GT_TYPE_MAIN) || (gt->info.type == XE_GT_TYPE_MEDIA)); in guc_g2g_start()
530 have_dev = xe->info.gt_count > xe->info.tile_count; in guc_g2g_start()
535 if (far_gt->info.id == gt->info.id) in guc_g2g_start()
538 far_tile = gt_to_tile(far_gt)->id; in guc_g2g_start()
544 while (--t >= 0) in guc_g2g_start()
557 if (far_gt->info.id == gt->info.id) in guc_g2g_start()
563 tile = gt_to_tile(far_gt)->id; in guc_g2g_start()
582 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in __guc_opt_in_features_enable()
588 struct xe_gt *gt = guc_to_gt(guc); in supports_dynamic_ics() local
591 if (xe->info.platform != XE_PVC && GRAPHICS_VER(xe) < 20) in supports_dynamic_ics()
595 * The feature is currently not compatible with multi-lrc, so the GuC in supports_dynamic_ics()
597 * users of mlrc). On the primary GT side, to avoid it being used in in supports_dynamic_ics()
601 if (xe_gt_is_media_type(gt) || gt->ccs_mode > 1) in supports_dynamic_ics()
615 CLASS(xe_guc_buf, buf)(&guc->buf, OPT_IN_MAX_DWORDS); in xe_guc_opt_in_features_enable()
621 return -ENOBUFS; in xe_guc_opt_in_features_enable()
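The CLASS() line above comes from include/linux/cleanup.h: it declares buf via a constructor call and registers a destructor that runs automatically when buf leaves scope, so none of the later error paths need an explicit release. A userspace-style illustration of the underlying mechanism (the kernel macros expand to the same attribute; all names here are mine):

        #include <stdio.h>
        #include <stdlib.h>

        static void release_buf(char **p)
        {
                free(*p);
                printf("buffer released\n");
        }

        int main(void)
        {
                /* analogous to CLASS(xe_guc_buf, buf)(...): cleanup
                 * runs on every exit from the enclosing scope
                 */
                __attribute__((cleanup(release_buf))) char *buf = malloc(64);

                if (!buf)
                        return 1;
                return 0;       /* release_buf() fires here too */
        }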
626 * The extra CAT error type opt-in was added in GuC v70.17.0, which maps in xe_guc_opt_in_features_enable()
644 "failed to enable GuC opt-in features: %pe\n", in xe_guc_opt_in_features_enable()
656 struct xe_gt *gt = guc_to_gt(guc); in guc_fini_hw() local
659 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); in guc_fini_hw()
660 xe_uc_sanitize_reset(&guc_to_gt(guc)->uc); in guc_fini_hw()
661 xe_force_wake_put(gt_to_fw(gt), fw_ref); in guc_fini_hw()
667 * xe_guc_comm_init_early - early initialization of GuC communication
674 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_comm_init_early() local
676 if (xe_gt_is_media_type(gt)) in xe_guc_comm_init_early()
677 guc->notify_reg = MED_GUC_HOST_INTERRUPT; in xe_guc_comm_init_early()
679 guc->notify_reg = GUC_HOST_INTERRUPT; in xe_guc_comm_init_early()
691 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo); in xe_guc_realloc_post_hwconfig()
695 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo); in xe_guc_realloc_post_hwconfig()
699 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo); in xe_guc_realloc_post_hwconfig()
708 struct xe_gt *gt = guc_to_gt(guc); in vf_guc_init_noalloc() local
711 err = xe_gt_sriov_vf_bootstrap(gt); in vf_guc_init_noalloc()
715 err = xe_gt_sriov_vf_query_config(gt); in vf_guc_init_noalloc()
725 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_init_noalloc() local
730 ret = xe_guc_ct_init_noalloc(&guc->ct); in xe_guc_init_noalloc()
734 ret = xe_guc_relay_init(&guc->relay); in xe_guc_init_noalloc()
747 xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret)); in xe_guc_init_noalloc()
754 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_init() local
757 guc->fw.type = XE_UC_FW_TYPE_GUC; in xe_guc_init()
758 ret = xe_uc_fw_init(&guc->fw); in xe_guc_init()
762 if (!xe_uc_fw_is_enabled(&guc->fw)) in xe_guc_init()
766 ret = xe_guc_ct_init(&guc->ct); in xe_guc_init()
772 ret = xe_guc_log_init(&guc->log); in xe_guc_init()
780 ret = xe_guc_ads_init(&guc->ads); in xe_guc_init()
784 ret = xe_guc_ct_init(&guc->ct); in xe_guc_init()
788 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE); in xe_guc_init()
790 ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc); in xe_guc_init()
799 xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret)); in xe_guc_init()
811 err = xe_guc_buf_cache_init(&guc->buf); in vf_guc_init_post_hwconfig()
821 * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
837 ret = xe_guc_ct_init_post_hwconfig(&guc->ct); in xe_guc_init_post_hwconfig()
847 ret = xe_guc_db_mgr_init(&guc->dbm, ~0); in xe_guc_init_post_hwconfig()
851 ret = xe_guc_pc_init(&guc->pc); in xe_guc_init_post_hwconfig()
859 ret = xe_guc_buf_cache_init(&guc->buf); in xe_guc_init_post_hwconfig()
863 return xe_guc_ads_init_post_hwconfig(&guc->ads); in xe_guc_init_post_hwconfig()
870 xe_guc_ads_populate_post_load(&guc->ads); in xe_guc_post_load_init()
887 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_reset() local
888 struct xe_mmio *mmio = &gt->mmio; in xe_guc_reset()
892 xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); in xe_guc_reset()
894 if (IS_SRIOV_VF(gt_to_xe(gt))) in xe_guc_reset()
895 return xe_gt_sriov_vf_bootstrap(gt); in xe_guc_reset()
901 xe_gt_err(gt, "GuC reset timed out, GDRST=%#x\n", gdrst); in xe_guc_reset()
907 xe_gt_err(gt, "GuC status: %#x, MIA core expected to be in reset\n", in xe_guc_reset()
909 ret = -EIO; in xe_guc_reset()
922 struct xe_gt *gt = guc_to_gt(guc); in guc_prepare_xfer() local
923 struct xe_mmio *mmio = &gt->mmio; in guc_prepare_xfer()
934 if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) in guc_prepare_xfer()
935 shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index); in guc_prepare_xfer()
951 struct xe_gt *gt = guc_to_gt(guc); in guc_xfer_rsa() local
956 if (guc->fw.rsa_size > 256) { in guc_xfer_rsa()
957 u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) + in guc_xfer_rsa()
958 xe_uc_fw_rsa_offset(&guc->fw); in guc_xfer_rsa()
959 xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr); in guc_xfer_rsa()
963 copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa)); in guc_xfer_rsa()
965 return -ENOMEM; in guc_xfer_rsa()
968 xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(i), rsa[i]); in guc_xfer_rsa()
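Condensed, the two RSA hand-off paths visible in the guc_xfer_rsa() fragments look like this (the ARRAY_SIZE loop bound is an assumption; the 256-byte threshold is from the fragment above):

        if (guc->fw.rsa_size > 256) {
                /* Large key: publish its GGTT address and let the
                 * bootrom fetch the key itself.
                 */
                xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
        } else {
                /* Small key: copy the words straight into the
                 * scratch register bank.
                 */
                for (i = 0; i < ARRAY_SIZE(rsa); i++)
                        xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(i), rsa[i]);
        }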
977 * successful completion, -1 for failure and 0 for any intermediate state.
1000 return -1; in guc_load_done()
1014 return -1; in guc_load_done()
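The +1/0/-1 contract described above invites a simple poll loop; a minimal sketch, with the helper signature and the 10ms step being assumptions (the real guc_wait_ucode() below is considerably more elaborate):

        #include <linux/delay.h>        /* msleep() */
        #include <linux/errno.h>

        static int wait_tristate(int (*done)(void *ctx), void *ctx,
                                 unsigned int timeout_ms)
        {
                unsigned int waited_ms = 0;

                while (waited_ms < timeout_ms) {
                        int state = done(ctx);

                        if (state > 0)
                                return 0;       /* fully loaded */
                        if (state < 0)
                                return -EIO;    /* terminal failure */

                        msleep(10);             /* 0: still loading */
                        waited_ms += 10;
                }

                return -ETIMEDOUT;
        }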
1031 * Measurements indicate this should take no more than 20ms (assuming the GT
1037 * lead to even longer times. E.g. if the GT is clamped to minimum frequency then
1060 struct xe_gt *gt = guc_to_gt(guc); in guc_wait_ucode() local
1061 struct xe_mmio *mmio = &gt->mmio; in guc_wait_ucode()
1062 struct xe_guc_pc *guc_pc = &gt->uc.guc.pc; in guc_wait_ucode()
1103 …xe_gt_dbg(gt, "load still in progress, timeouts = %d, freq = %dMHz (req %dMHz), status = 0x%08X [0… in guc_wait_ucode()
1114 …xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz), done = %d\n", in guc_wait_ucode()
1117 …xe_gt_err(gt, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, … in guc_wait_ucode()
1125 xe_gt_err(gt, "invalid key requested, header = 0x%08X\n", in guc_wait_ucode()
1130 xe_gt_err(gt, "firmware signature verification failed\n"); in guc_wait_ucode()
1134 xe_gt_err(gt, "firmware production part check failure\n"); in guc_wait_ucode()
1140 xe_gt_err(gt, "still extracting hwconfig table.\n"); in guc_wait_ucode()
1144 xe_gt_err(gt, "firmware exception. EIP: %#x\n", in guc_wait_ucode()
1149 xe_gt_err(gt, "illegal init/ADS data\n"); in guc_wait_ucode()
1153 xe_gt_err(gt, "illegal register in save/restore workaround list\n"); in guc_wait_ucode()
1157 xe_gt_err(gt, "illegal workaround KLV data\n"); in guc_wait_ucode()
1161 xe_gt_err(gt, "illegal feature flag specified\n"); in guc_wait_ucode()
1165 return -EPROTO; in guc_wait_ucode()
1167 xe_gt_warn(gt, "excessive init time: %lldms! [status = 0x%08X, timeouts = %d]\n", in guc_wait_ucode()
1169 …xe_gt_warn(gt, "excessive init time: [freq = %dMHz (req = %dMHz), before = %dMHz, perf_limit_reaso… in guc_wait_ucode()
1171 before_freq, xe_gt_throttle_get_limit_reasons(gt)); in guc_wait_ucode()
1173 …xe_gt_dbg(gt, "init took %lldms, freq = %dMHz (req = %dMHz), before = %dMHz, status = 0x%08X, time… in guc_wait_ucode()
1186 /* Raise GT freq to speed up HuC/GuC load */ in __xe_guc_upload()
1187 xe_guc_pc_raise_unslice(&guc->pc); in __xe_guc_upload()
1196 * register (if key size <= 256) or through a ggtt-pinned vma (if key in __xe_guc_upload()
1198 * HW is fixed for each platform and hard-coded in the bootrom. in __xe_guc_upload()
1207 ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE); in __xe_guc_upload()
1216 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING); in __xe_guc_upload()
1220 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL); in __xe_guc_upload()
1226 struct xe_gt *gt = guc_to_gt(guc); in vf_guc_min_load_for_hwconfig() local
1237 ret = xe_gt_sriov_vf_connect(gt); in vf_guc_min_load_for_hwconfig()
1241 ret = xe_gt_sriov_vf_query_runtime(gt); in vf_guc_min_load_for_hwconfig()
1253 * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
1270 xe_guc_ads_populate_minimal(&guc->ads); in xe_guc_min_load_for_hwconfig()
1272 xe_guc_pc_init_early(&guc->pc); in xe_guc_min_load_for_hwconfig()
1291 xe_guc_ads_populate(&guc->ads); in xe_guc_upload()
1298 struct xe_gt *gt = guc_to_gt(guc); in guc_handle_mmio_msg() local
1304 xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT); in guc_handle_mmio_msg()
1306 msg = xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(15)); in guc_handle_mmio_msg()
1309 xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(15), 0); in guc_handle_mmio_msg()
1312 xe_gt_err(gt, "Received early GuC crash dump notification!\n"); in guc_handle_mmio_msg()
1315 xe_gt_err(gt, "Received early GuC exception notification!\n"); in guc_handle_mmio_msg()
1320 struct xe_gt *gt = guc_to_gt(guc); in guc_enable_irq() local
1321 u32 events = xe_gt_is_media_type(gt) ? in guc_enable_irq()
1326 xe_mmio_write32(&gt->mmio, GUC_SG_INTR_ENABLE, in guc_enable_irq()
1333 xe_mmio_rmw32(&gt->mmio, GUC_SG_INTR_MASK, events, 0); in guc_enable_irq()
1342 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_enable_communication() local
1343 struct xe_tile *tile = gt_to_tile(gt); in xe_guc_enable_communication()
1345 err = xe_memirq_init_guc(&tile->memirq, guc); in xe_guc_enable_communication()
1352 err = xe_guc_ct_enable(&guc->ct); in xe_guc_enable_communication()
1363 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_suspend() local
1371 xe_gt_err(gt, "GuC suspend failed: %pe\n", ERR_PTR(ret)); in xe_guc_suspend()
1381 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_notify() local
1389 xe_mmio_write32(&gt->mmio, guc->notify_reg, default_notify_data); in xe_guc_notify()
1399 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in xe_guc_auth_huc()
1406 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_mmio_send_recv() local
1407 struct xe_mmio *mmio = &gt->mmio; in xe_guc_mmio_send_recv()
1409 struct xe_reg reply_reg = xe_gt_is_media_type(gt) ? in xe_guc_mmio_send_recv()
1411 const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1; in xe_guc_mmio_send_recv()
1427 /* Not in critical data-path, just do if else for GT type */ in xe_guc_mmio_send_recv()
1428 if (xe_gt_is_media_type(gt)) { in xe_guc_mmio_send_recv()
1448 xe_gt_dbg(gt, "GuC mmio request %#x: lost, trying again\n", request[0]); in xe_guc_mmio_send_recv()
1453 xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n", in xe_guc_mmio_send_recv()
1492 xe_gt_dbg(gt, "GuC mmio request %#x: retrying, reason %#x\n", in xe_guc_mmio_send_recv()
1502 xe_gt_err(gt, "GuC mmio request %#x: failure %#x hint %#x\n", in xe_guc_mmio_send_recv()
1504 return -ENXIO; in xe_guc_mmio_send_recv()
1510 xe_gt_err(gt, "GuC mmio request %#x: unexpected reply %#x\n", in xe_guc_mmio_send_recv()
1512 return -EPROTO; in xe_guc_mmio_send_recv()
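The -ENXIO/-EPROTO returns above fall out of the type field in the first reply dword; condensed, the dispatch is roughly the following (the GUC_HXG_* constants are from the GuC ABI headers, the surrounding resend loop is elided, and -EAGAIN stands in for "go around again"):

        static int handle_reply_header_sketch(u32 header)
        {
                switch (FIELD_GET(GUC_HXG_MSG_0_TYPE, header)) {
                case GUC_HXG_TYPE_RESPONSE_SUCCESS:
                        return 0;
                case GUC_HXG_TYPE_NO_RESPONSE_RETRY:
                        return -EAGAIN; /* "retrying, reason %#x" */
                case GUC_HXG_TYPE_RESPONSE_FAILURE:
                        return -ENXIO;  /* "failure %#x hint %#x" */
                default:
                        return -EPROTO; /* "unexpected reply %#x" */
                }
        }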
1561 return -EPROTO; in guc_self_cfg()
1563 return -ENOKEY; in guc_self_cfg()
1580 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_sw_0_irq_handler() local
1582 if (IS_SRIOV_VF(gt_to_xe(gt))) in xe_guc_sw_0_irq_handler()
1583 xe_gt_sriov_vf_migrated_event_handler(gt); in xe_guc_sw_0_irq_handler()
1589 xe_guc_ct_irq_handler(&guc->ct); in xe_guc_irq_handler()
1597 xe_uc_fw_sanitize(&guc->fw); in xe_guc_sanitize()
1598 xe_guc_ct_disable(&guc->ct); in xe_guc_sanitize()
1617 err = xe_guc_pc_stop(&guc->pc); in xe_guc_stop_prepare()
1625 xe_guc_ct_stop(&guc->ct); in xe_guc_stop()
1637 struct xe_gt *gt = guc_to_gt(guc); in xe_guc_print_info() local
1642 xe_uc_fw_print(&guc->fw, p); in xe_guc_print_info()
1644 if (!IS_SRIOV_VF(gt_to_xe(gt))) { in xe_guc_print_info()
1645 fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); in xe_guc_print_info()
1649 status = xe_mmio_read32(&gt->mmio, GUC_STATUS); in xe_guc_print_info()
1659 xe_guc_log_get_level(&guc->log)); in xe_guc_print_info()
1664 i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i))); in xe_guc_print_info()
1667 xe_force_wake_put(gt_to_fw(gt), fw_ref); in xe_guc_print_info()
1671 xe_guc_ct_print(&guc->ct, p, false); in xe_guc_print_info()
1678 * xe_guc_declare_wedged() - Declare GuC wedged
1686 xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode); in xe_guc_declare_wedged()
1689 xe_guc_ct_stop(&guc->ct); in xe_guc_declare_wedged()