Lines Matching +full:p +full:- +full:tile
1 // SPDX-License-Identifier: MIT
54 * For most BOs, the address on the allocating tile is fine. However, for in guc_bo_ggtt_addr()
55 * some, e.g. G2G CTB, the address on a specific tile is required as it in guc_bo_ggtt_addr()
56 * might be different for each tile. So, just always ask for the address in guc_bo_ggtt_addr()
59 addr = __xe_bo_ggtt_addr(bo, gt_to_tile(guc_to_gt(guc))->id); in guc_bo_ggtt_addr()
64 xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr); in guc_bo_ggtt_addr()
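The comment above captures the key constraint: the same BO can resolve to a different GGTT address on each tile, and the GuC can only reach GGTT addresses below GUC_GGTT_TOP. A minimal sketch of the lookup-plus-range-check pattern, using only the helpers named in the fragments (illustrative, not a verbatim copy of the file):

	/* Illustrative sketch only; helper names are taken from the fragments above */
	static u32 example_guc_addr(struct xe_guc *guc, struct xe_bo *bo)
	{
		struct xe_device *xe = guc_to_xe(guc);
		u32 addr = __xe_bo_ggtt_addr(bo, gt_to_tile(guc_to_gt(guc))->id);

		/* The GuC can only reach GGTT addresses below GUC_GGTT_TOP */
		xe_assert(xe, addr < GUC_GGTT_TOP);
		xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr);

		return addr;
	}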
71 u32 level = xe_guc_log_get_level(&guc->log); in guc_ctl_debug_flags()
87 if (!xe->info.skip_guc_pc) in guc_ctl_feature_flags()
90 if (xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev))) in guc_ctl_feature_flags()
98 u32 offset = guc_bo_ggtt_addr(guc, guc->log.bo) >> PAGE_SHIFT; in guc_ctl_log_params_flags()
128 FIELD_PREP(GUC_LOG_CRASH, CRASH_BUFFER_SIZE / LOG_UNIT - 1) | in guc_ctl_log_params_flags()
129 FIELD_PREP(GUC_LOG_DEBUG, DEBUG_BUFFER_SIZE / LOG_UNIT - 1) | in guc_ctl_log_params_flags()
130 FIELD_PREP(GUC_LOG_CAPTURE, CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) | in guc_ctl_log_params_flags()
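Each of the fields above stores the buffer size as a count of allocation units minus one. A worked example with hypothetical sizes (the real sizes and units are chosen elsewhere in this file):

	/*
	 * Hypothetical sizes for illustration: a 2M debug buffer with a 1M
	 * allocation unit encodes as 2M / 1M - 1 = 1, i.e. the field stores
	 * "number of units minus one".
	 */
	u32 dbg = FIELD_PREP(GUC_LOG_DEBUG, SZ_2M / SZ_1M - 1);	/* field value 1 */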
143 u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT; in guc_ctl_ads_flags()
165 * can cause complications if the non-stalled submission runs for a long in needs_wa_dual_queue()
193 * Wa_22011802037: FIXME - there's more to be done than simply setting in guc_ctl_wa_flags()
223 return (((u32)xe->info.devid) << 16) | xe->info.revid; in guc_ctl_devid()
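The return value is a simple shift-and-or packing of device id and revision id; with hypothetical ids:

	/* Hypothetical ids: devid 0x56c0, revid 0x01 */
	u32 packed = ((u32)0x56c0 << 16) | 0x01;	/* == 0x56c00001 */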
229 u32 *params = guc->params; in guc_print_params()
232 BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32)); in guc_print_params()
241 u32 *params = guc->params; in guc_init_params()
255 u32 *params = guc->params; in guc_init_params_post_hwconfig()
279 xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(0), 0); in guc_write_params()
282 xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(1 + i), guc->params[i]); in guc_write_params()
292 FIELD_PREP(XE_G2G_REGISTER_SIZE, size / SZ_4K - 1) | in guc_action_register_g2g_buffer()
303 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in guc_action_register_g2g_buffer()
319 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in guc_action_deregister_g2g_buffer()
322 #define G2G_DEV(gt) (((gt)->info.type == XE_GT_TYPE_MAIN) ? 0 : 1)
329 * Generate a unique id for each bi-directional CTB for each pair of
330 * near and far GuCs; the id is then used as an index into
331 * a single allocation that is sub-divided into multiple CTBs.
333 * For example, with two devices per tile and two tiles, the table should
334 * look like:
335 *            Far <tile>.<dev>
336 *          0.0   0.1   1.0   1.1
337 * N  0.0  --/-- 00/01 02/03 04/05
338 * e  0.1  01/00 --/-- 06/07 08/09
339 * a  1.0  03/02 07/06 --/-- 10/11
340 * r  1.1  05/04 09/08 11/10 --/--
344 * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
345 * use the 11/10 entry above (row 1.1, column 1.0).
362 return -1; in g2g_slot()
377 direction = (1 - type); in g2g_slot()
381 for (i = y; i > 0; i--) in g2g_slot()
382 idx += max_inst - i; in g2g_slot()
385 idx += (x - 1 - y); in g2g_slot()
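The fragments above are the core of the slot arithmetic. A reconstructed sketch plus a worked check against the table (this is inferred from the fragments and the table, not a verbatim copy of the file; with multiple GTs per tile, <tile>.<dev> flattens to tile * 2 + dev):

	/*
	 * Reconstructed sketch, not verbatim: flatten <tile>.<dev> to a
	 * linear id, then index the strictly-upper-triangular table row by
	 * row; each channel occupies an in/out pair of slots.
	 */
	static int g2g_slot_sketch(u32 near, u32 far, u32 type, u32 max_inst)
	{
		u32 x = max(near, far);		/* column of the upper-triangle entry */
		u32 y = min(near, far);		/* row of the upper-triangle entry */
		u32 direction = (far > near) ? type : (1 - type);
		u32 idx = 0;
		int i;

		if (near == far)
			return -1;		/* no channel to one's self */

		for (i = y; i > 0; i--)
			idx += max_inst - i;	/* skip the full rows above the target */
		idx += x - 1 - y;		/* offset within the target row */

		return idx * 2 + direction;	/* each channel is an in/out pair */
	}

	/*
	 * Worked check against the table: near = 1.1 -> 3, far = 1.0 -> 2,
	 * max_inst = 4. Rows above: (4 - 2) + (4 - 1) = 5; in-row offset:
	 * 3 - 1 - 2 = 0; so idx = 5, giving slots 10 and 11 -- the 11/10
	 * entry at row 1.1, column 1.0.
	 */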
401 u32 near_tile = gt_to_tile(near_gt)->id; in guc_g2g_register()
403 u32 far_tile = gt_to_tile(far_gt)->id; in guc_g2g_register()
405 u32 max = xe->info.gt_count; in guc_g2g_register()
412 g2g_bo = near_guc->g2g.bo; in guc_g2g_register()
422 xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE); in guc_g2g_register()
423 xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(g2g_bo)); in guc_g2g_register()
438 unsigned int count = xe->info.gt_count; in guc_g2g_size()
439 u32 num_channels = (count * (count - 1)) / 2; in guc_g2g_size()
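That pair count lines up with the channel-id table above; as a quick cross-check:

	/*
	 * Cross-check with the table (hypothetical count of 4 GuCs):
	 *   num_channels = (4 * (4 - 1)) / 2 = 6 bi-directional channels
	 *   each channel is an in/out pair of CTBs -> 6 * 2 = 12 slots
	 * which matches the slot ids 00..11 that appear in the table.
	 */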
449 if (xe->info.gt_count <= 1) in xe_guc_g2g_wanted()
460 struct xe_tile *tile = gt_to_tile(gt); in guc_g2g_alloc() local
464 if (guc->g2g.bo) in guc_g2g_alloc()
467 if (gt->info.id != 0) { in guc_g2g_alloc()
469 struct xe_guc *root_guc = &root_gt->uc.guc; in guc_g2g_alloc()
472 bo = xe_bo_get(root_guc->g2g.bo); in guc_g2g_alloc()
474 return -ENODEV; in guc_g2g_alloc()
476 guc->g2g.bo = bo; in guc_g2g_alloc()
477 guc->g2g.owned = false; in guc_g2g_alloc()
482 bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size, in guc_g2g_alloc()
483 XE_BO_FLAG_VRAM_IF_DGFX(tile) | in guc_g2g_alloc()
491 xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size); in guc_g2g_alloc()
492 guc->g2g.bo = bo; in guc_g2g_alloc()
493 guc->g2g.owned = true; in guc_g2g_alloc()
500 if (!guc->g2g.bo) in guc_g2g_fini()
504 if (!guc->g2g.owned) in guc_g2g_fini()
505 xe_bo_put(guc->g2g.bo); in guc_g2g_fini()
507 guc->g2g.bo = NULL; in guc_g2g_fini()
518 if (!guc->g2g.bo) { in guc_g2g_start()
527 xe_gt_assert(gt, (gt->info.type == XE_GT_TYPE_MAIN) || (gt->info.type == XE_GT_TYPE_MEDIA)); in guc_g2g_start()
529 /* Channel numbering depends on whether there are multiple GTs per tile */ in guc_g2g_start()
530 have_dev = xe->info.gt_count > xe->info.tile_count; in guc_g2g_start()
535 if (far_gt->info.id == gt->info.id) in guc_g2g_start()
538 far_tile = gt_to_tile(far_gt)->id; in guc_g2g_start()
544 while (--t >= 0) in guc_g2g_start()
555 u32 tile, dev; in guc_g2g_start() local
557 if (far_gt->info.id == gt->info.id) in guc_g2g_start()
563 tile = gt_to_tile(far_gt)->id; in guc_g2g_start()
567 guc_g2g_deregister(guc, tile, dev, t); in guc_g2g_start()
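The fragments above show guc_g2g_start()'s registration loop and its error path. A hedged sketch of the overall register/unwind pattern; the loop macro, bounds, and helper signatures are inferred from the fragments, and a full implementation also unwinds earlier far GTs as the second loop above does:

	static int g2g_start_sketch(struct xe_guc *guc, bool have_dev)
	{
		struct xe_gt *gt = guc_to_gt(guc);
		struct xe_device *xe = guc_to_xe(guc);
		struct xe_gt *far_gt;
		unsigned int i;
		int t, err;

		for_each_gt(far_gt, xe, i) {
			if (far_gt->info.id == gt->info.id)
				continue;	/* no channel to one's self */

			for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
				err = guc_g2g_register(guc, far_gt, t, have_dev);
				if (err) {
					/* Unwind the types already registered for this far GT */
					while (--t >= 0)
						guc_g2g_deregister(guc, gt_to_tile(far_gt)->id,
								   G2G_DEV(far_gt), t);
					return err;
				}
			}
		}

		return 0;
	}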
582 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in __guc_opt_in_features_enable()
591 if (xe->info.platform != XE_PVC && GRAPHICS_VER(xe) < 20) in supports_dynamic_ics()
595 * The feature is currently not compatible with multi-lrc, so the GuC in supports_dynamic_ics()
601 if (xe_gt_is_media_type(gt) || gt->ccs_mode > 1) in supports_dynamic_ics()
615 CLASS(xe_guc_buf, buf)(&guc->buf, OPT_IN_MAX_DWORDS); in xe_guc_opt_in_features_enable()
621 return -ENOBUFS; in xe_guc_opt_in_features_enable()
626 * The extra CAT error type opt-in was added in GuC v70.17.0, which maps in xe_guc_opt_in_features_enable()
644 "failed to enable GuC opt-in features: %pe\n", in xe_guc_opt_in_features_enable()
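The v70.17.0 note above implies a firmware-version gate on the extra CAT error type opt-in. A heavily hedged sketch, assuming the MAKE_GUC_VER()/GUC_FIRMWARE_VER() helpers from xe_guc.h; the flag name here is a placeholder, not the real bit:

	/* Sketch; OPT_IN_EXT_CAT_ERR_TYPE is a placeholder name, not the real flag */
	if (GUC_FIRMWARE_VER(guc) >= MAKE_GUC_VER(70, 17, 0))
		enable_flags |= OPT_IN_EXT_CAT_ERR_TYPE;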
660 xe_uc_sanitize_reset(&guc_to_gt(guc)->uc); in guc_fini_hw()
667 * xe_guc_comm_init_early - early initialization of GuC communication
677 guc->notify_reg = MED_GUC_HOST_INTERRUPT; in xe_guc_comm_init_early()
679 guc->notify_reg = GUC_HOST_INTERRUPT; in xe_guc_comm_init_early()
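Both doorbell register names appear in the fragments above; the selection reduces to the GT type. A sketch of the flow, reconstructed around the fragments rather than copied verbatim:

	/* Reconstructed around the fragments above; not a verbatim copy */
	static void comm_init_early_sketch(struct xe_guc *guc)
	{
		struct xe_gt *gt = guc_to_gt(guc);

		if (xe_gt_is_media_type(gt))
			guc->notify_reg = MED_GUC_HOST_INTERRUPT;	/* media GT doorbell */
		else
			guc->notify_reg = GUC_HOST_INTERRUPT;		/* primary GT doorbell */
	}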
684 struct xe_tile *tile = gt_to_tile(guc_to_gt(guc)); in xe_guc_realloc_post_hwconfig() local
691 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->fw.bo); in xe_guc_realloc_post_hwconfig()
695 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->log.bo); in xe_guc_realloc_post_hwconfig()
699 ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ads.bo); in xe_guc_realloc_post_hwconfig()
730 ret = xe_guc_ct_init_noalloc(&guc->ct); in xe_guc_init_noalloc()
734 ret = xe_guc_relay_init(&guc->relay); in xe_guc_init_noalloc()
757 guc->fw.type = XE_UC_FW_TYPE_GUC; in xe_guc_init()
758 ret = xe_uc_fw_init(&guc->fw); in xe_guc_init()
762 if (!xe_uc_fw_is_enabled(&guc->fw)) in xe_guc_init()
766 ret = xe_guc_ct_init(&guc->ct); in xe_guc_init()
772 ret = xe_guc_log_init(&guc->log); in xe_guc_init()
780 ret = xe_guc_ads_init(&guc->ads); in xe_guc_init()
784 ret = xe_guc_ct_init(&guc->ct); in xe_guc_init()
788 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE); in xe_guc_init()
790 ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc); in xe_guc_init()
811 err = xe_guc_buf_cache_init(&guc->buf); in vf_guc_init_post_hwconfig()
821 * xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
837 ret = xe_guc_ct_init_post_hwconfig(&guc->ct); in xe_guc_init_post_hwconfig()
847 ret = xe_guc_db_mgr_init(&guc->dbm, ~0); in xe_guc_init_post_hwconfig()
851 ret = xe_guc_pc_init(&guc->pc); in xe_guc_init_post_hwconfig()
859 ret = xe_guc_buf_cache_init(&guc->buf); in xe_guc_init_post_hwconfig()
863 return xe_guc_ads_init_post_hwconfig(&guc->ads); in xe_guc_init_post_hwconfig()
870 xe_guc_ads_populate_post_load(&guc->ads); in xe_guc_post_load_init()
888 struct xe_mmio *mmio = &gt->mmio; in xe_guc_reset()
909 ret = -EIO; in xe_guc_reset()
923 struct xe_mmio *mmio = &gt->mmio; in guc_prepare_xfer()
934 if (GRAPHICS_VER(xe) >= 20 || xe->info.platform == XE_PVC) in guc_prepare_xfer()
935 shim_flags |= REG_FIELD_PREP(GUC_MOCS_INDEX_MASK, gt->mocs.uc_index); in guc_prepare_xfer()
956 if (guc->fw.rsa_size > 256) { in guc_xfer_rsa()
957 u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) + in guc_xfer_rsa()
958 xe_uc_fw_rsa_offset(&guc->fw); in guc_xfer_rsa()
959 xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(0), rsa_ggtt_addr); in guc_xfer_rsa()
963 copied = xe_uc_fw_copy_rsa(&guc->fw, rsa, sizeof(rsa)); in guc_xfer_rsa()
965 return -ENOMEM; in guc_xfer_rsa()
968 xe_mmio_write32(&gt->mmio, UOS_RSA_SCRATCH(i), rsa[i]); in guc_xfer_rsa()
977 * Returns +1 for successful completion, -1 for failure and 0 for any intermediate state.
1000 return -1; in guc_load_done()
1014 return -1; in guc_load_done()
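A hypothetical caller sketch for that tri-state contract, assuming guc_load_done() takes the raw GUC_STATUS value; a real caller bounds this loop with a timeout:

	static int wait_load_sketch(struct xe_mmio *mmio)
	{
		for (;;) {
			int done = guc_load_done(xe_mmio_read32(mmio, GUC_STATUS));

			if (done > 0)
				return 0;	/* firmware finished loading */
			if (done < 0)
				return -EPROTO;	/* terminal bootrom/uKernel error */
			/* done == 0: intermediate state, keep polling */
		}
	}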
1061 struct xe_mmio *mmio = &gt->mmio; in guc_wait_ucode()
1062 struct xe_guc_pc *guc_pc = &gt->uc.guc.pc; in guc_wait_ucode()
1165 return -EPROTO; in guc_wait_ucode()
1187 xe_guc_pc_raise_unslice(&guc->pc); in __xe_guc_upload()
1196 * register (if key size <= 256) or through a ggtt-pinned vma (if key in __xe_guc_upload()
1197 * size > 256). The GGTT address used by the in __xe_guc_upload()
1198 * HW is fixed for each platform and hard-coded in the bootrom. in __xe_guc_upload()
1207 ret = xe_uc_fw_upload(&guc->fw, 0x2000, UOS_MOVE); in __xe_guc_upload()
1216 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING); in __xe_guc_upload()
1220 xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL); in __xe_guc_upload()
1253 * xe_guc_min_load_for_hwconfig - load minimal GuC and read hwconfig table
1270 xe_guc_ads_populate_minimal(&guc->ads); in xe_guc_min_load_for_hwconfig()
1272 xe_guc_pc_init_early(&guc->pc); in xe_guc_min_load_for_hwconfig()
1291 xe_guc_ads_populate(&guc->ads); in xe_guc_upload()
1306 msg = xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(15)); in guc_handle_mmio_msg()
1309 xe_mmio_write32(&gt->mmio, SOFT_SCRATCH(15), 0); in guc_handle_mmio_msg()
1326 xe_mmio_write32(&gt->mmio, GUC_SG_INTR_ENABLE, in guc_enable_irq()
1333 xe_mmio_rmw32(&gt->mmio, GUC_SG_INTR_MASK, events, 0); in guc_enable_irq()
1343 struct xe_tile *tile = gt_to_tile(gt); in xe_guc_enable_communication() local
1345 err = xe_memirq_init_guc(&tile->memirq, guc); in xe_guc_enable_communication()
1352 err = xe_guc_ct_enable(&guc->ct); in xe_guc_enable_communication()
1389 xe_mmio_write32(&gt->mmio, guc->notify_reg, default_notify_data); in xe_guc_notify()
1399 return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); in xe_guc_auth_huc()
1407 struct xe_mmio *mmio = &gt->mmio; in xe_guc_mmio_send_recv()
1411 const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1; in xe_guc_mmio_send_recv()
1427 /* Not in a critical data-path, so just use if/else on the GT type */ in xe_guc_mmio_send_recv()
1504 return -ENXIO; in xe_guc_mmio_send_recv()
1512 return -EPROTO; in xe_guc_mmio_send_recv()
1561 return -EPROTO; in guc_self_cfg()
1563 return -ENOKEY; in guc_self_cfg()
1589 xe_guc_ct_irq_handler(&guc->ct); in xe_guc_irq_handler()
1597 xe_uc_fw_sanitize(&guc->fw); in xe_guc_sanitize()
1598 xe_guc_ct_disable(&guc->ct); in xe_guc_sanitize()
1617 err = xe_guc_pc_stop(&guc->pc); in xe_guc_stop_prepare()
1625 xe_guc_ct_stop(&guc->ct); in xe_guc_stop()
1635 void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) in xe_guc_print_info() argument
1642 xe_uc_fw_print(&guc->fw, p); in xe_guc_print_info()
1649 status = xe_mmio_read32(&gt->mmio, GUC_STATUS); in xe_guc_print_info()
1651 drm_printf(p, "\nGuC status 0x%08x:\n", status); in xe_guc_print_info()
1652 drm_printf(p, "\tBootrom status = 0x%x\n", in xe_guc_print_info()
1654 drm_printf(p, "\tuKernel status = 0x%x\n", in xe_guc_print_info()
1656 drm_printf(p, "\tMIA Core status = 0x%x\n", in xe_guc_print_info()
1658 drm_printf(p, "\tLog level = %d\n", in xe_guc_print_info()
1659 xe_guc_log_get_level(&guc->log)); in xe_guc_print_info()
1661 drm_puts(p, "\nScratch registers:\n"); in xe_guc_print_info()
1663 drm_printf(p, "\t%2d: \t0x%x\n", in xe_guc_print_info()
1664 i, xe_mmio_read32(&gt->mmio, SOFT_SCRATCH(i))); in xe_guc_print_info()
1670 drm_puts(p, "\n"); in xe_guc_print_info()
1671 xe_guc_ct_print(&guc->ct, p, false); in xe_guc_print_info()
1673 drm_puts(p, "\n"); in xe_guc_print_info()
1674 xe_guc_submit_print(guc, p); in xe_guc_print_info()
1678 * xe_guc_declare_wedged() - Declare GuC wedged
1686 xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode); in xe_guc_declare_wedged()
1689 xe_guc_ct_stop(&guc->ct); in xe_guc_declare_wedged()