Lines Matching +full:cs +full:- +full:x
1 // SPDX-License-Identifier: MIT
72 destroy_workqueue(gt->ordered_wq); in gt_fini()
80 gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL); in xe_gt_alloc()
82 return ERR_PTR(-ENOMEM); in xe_gt_alloc()
84 gt->tile = tile; in xe_gt_alloc()
85 gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", in xe_gt_alloc()
88 err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt); in xe_gt_alloc()
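
The xe_gt_alloc() lines above follow the DRM managed-resource pattern: the GT is allocated against the drm_device's lifetime and its teardown is registered as a release action. A minimal sketch of that pattern, assuming only the drmm_* helpers visible above (my_fini()/my_alloc() are illustrative names, and the workqueue flags are guessed as 0 since the real flags fall outside the match window):

#include <drm/drm_managed.h>
#include <linux/workqueue.h>

static void my_fini(struct drm_device *drm, void *arg)
{
	struct xe_gt *gt = arg;

	destroy_workqueue(gt->ordered_wq);	/* mirrors gt_fini() above */
}

static struct xe_gt *my_alloc(struct xe_tile *tile)
{
	struct xe_gt *gt;
	int err;

	/* Freed automatically when the drm_device is released */
	gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
	if (!gt)
		return ERR_PTR(-ENOMEM);

	gt->tile = tile;
	gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq", 0);
	if (!gt->ordered_wq)
		return ERR_PTR(-ENOMEM);

	/* On failure the action runs immediately, so no manual unwind */
	err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, my_fini, gt);
	if (err)
		return ERR_PTR(err);

	return gt;
}
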
101 xe_guc_submit_disable(&gt->uc.guc); in xe_gt_sanitize()
162 fence = dma_fence_get(&job->drm.s_fence->finished); in emit_job_sync()
170 return -ETIME; in emit_job_sync()
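
The two emit_job_sync() lines show a submit-and-wait: a reference is taken on the scheduler's "finished" fence and a timed-out wait maps to -ETIME. A sketch of the likely surrounding flow, assuming the job is armed and pushed between the two matched lines (xe_sched_job_arm()/xe_sched_job_push() are the driver's usual helpers for that step):

	struct dma_fence *fence;
	long timeout;

	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_arm(job);
	xe_sched_job_push(job);

	/* dma_fence_wait_timeout(): <0 error, 0 timed out, >0 jiffies left */
	timeout = dma_fence_wait_timeout(fence, false, HZ);
	dma_fence_put(fence);
	if (timeout < 0)
		return timeout;
	else if (!timeout)
		return -ETIME;
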
192 struct xe_reg_sr *sr = &q->hwe->reg_lrc; in emit_wa_job()
198 u32 *cs; in emit_wa_job() local
201 xa_for_each(&sr->xa, idx, entry) { in emit_wa_job()
202 if (entry->reg.masked || entry->clr_bits == ~0) in emit_wa_job()
214 if (q->hwe->class == XE_ENGINE_CLASS_RENDER) in emit_wa_job()
219 bb_len += xe_gt_lrc_size(gt, q->hwe->class) / sizeof(u32); in emit_wa_job()
221 xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", q->hwe->name, bb_len); in emit_wa_job()
227 cs = bb->cs; in emit_wa_job()
235 *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count); in emit_wa_job()
237 xa_for_each(&sr->xa, idx, entry) { in emit_wa_job()
238 struct xe_reg reg = entry->reg; in emit_wa_job()
242 val = entry->clr_bits << 16; in emit_wa_job()
243 else if (entry->clr_bits == ~0) in emit_wa_job()
248 val |= entry->set_bits; in emit_wa_job()
250 *cs++ = reg.addr; in emit_wa_job()
251 *cs++ = val; in emit_wa_job()
252 xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val); in emit_wa_job()
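
Pulling the branches of this first pass together: the immediate value written by the LRI is built as below (a sketch; lri_value() is an illustrative name, and the entries that reach this pass are exactly the masked or clr_bits == ~0 ones that the MI_MATH loop below skips):

static u32 lri_value(const struct xe_reg_sr_entry *entry)
{
	u32 val = 0;

	if (entry->reg.masked)
		val = entry->clr_bits << 16;	/* upper half: write-enable mask */
	/* else clr_bits == ~0: whole register replaced, so start from 0 */

	return val | entry->set_bits;
}
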
259 xa_for_each(&sr->xa, idx, entry) { in emit_wa_job()
260 if (entry->reg.masked || entry->clr_bits == ~0) in emit_wa_job()
263 *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO; in emit_wa_job()
264 *cs++ = entry->reg.addr; in emit_wa_job()
265 *cs++ = CS_GPR_REG(0, 0).addr; in emit_wa_job()
267 *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) | in emit_wa_job()
269 *cs++ = CS_GPR_REG(0, 1).addr; in emit_wa_job()
270 *cs++ = entry->clr_bits; in emit_wa_job()
271 *cs++ = CS_GPR_REG(0, 2).addr; in emit_wa_job()
272 *cs++ = entry->set_bits; in emit_wa_job()
274 *cs++ = MI_MATH(8); in emit_wa_job()
275 *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0); in emit_wa_job()
276 *cs++ = CS_ALU_INSTR_LOADINV(SRCB, REG1); in emit_wa_job()
277 *cs++ = CS_ALU_INSTR_AND; in emit_wa_job()
278 *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU); in emit_wa_job()
279 *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0); in emit_wa_job()
280 *cs++ = CS_ALU_INSTR_LOAD(SRCB, REG2); in emit_wa_job()
281 *cs++ = CS_ALU_INSTR_OR; in emit_wa_job()
282 *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU); in emit_wa_job()
284 *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO; in emit_wa_job()
285 *cs++ = CS_GPR_REG(0, 0).addr; in emit_wa_job()
286 *cs++ = entry->reg.addr; in emit_wa_job()
288 xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n", in emit_wa_job()
289 entry->reg.addr, entry->clr_bits, entry->set_bits); in emit_wa_job()
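
The MI_MATH program above (LOAD/LOADINV into SRCA/SRCB, AND, OR, STORE through the accumulator on GPR0..2) is a read-modify-write executed by the command streamer's ALU. Its CPU equivalent, for reference:

static u32 rmw(u32 old, u32 clr, u32 set)
{
	/* REG0 = (REG0 & ~REG1) | REG2, with REG1 = clr, REG2 = set */
	return (old & ~clr) | set;
}
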
293 *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | in emit_wa_job()
295 *cs++ = CS_GPR_REG(0, 0).addr; in emit_wa_job()
296 *cs++ = 0; in emit_wa_job()
297 *cs++ = CS_GPR_REG(0, 1).addr; in emit_wa_job()
298 *cs++ = 0; in emit_wa_job()
299 *cs++ = CS_GPR_REG(0, 2).addr; in emit_wa_job()
300 *cs++ = 0; in emit_wa_job()
303 cs = xe_lrc_emit_hwe_state_instructions(q, cs); in emit_wa_job()
305 bb->len = cs - bb->cs; in emit_wa_job()
325 if (gt->default_lrc[hwe->class]) in xe_gt_record_default_lrcs()
328 xe_reg_sr_init(&hwe->reg_lrc, hwe->name, xe); in xe_gt_record_default_lrcs()
333 default_lrc = drmm_kzalloc(&xe->drm, in xe_gt_record_default_lrcs()
334 xe_gt_lrc_size(gt, hwe->class), in xe_gt_record_default_lrcs()
337 return -ENOMEM; in xe_gt_record_default_lrcs()
339 q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1, in xe_gt_record_default_lrcs()
344 hwe->name, q); in xe_gt_record_default_lrcs()
352 hwe->name, ERR_PTR(err), q->guc->id); in xe_gt_record_default_lrcs()
356 nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), in xe_gt_record_default_lrcs()
361 hwe->name, nop_q); in xe_gt_record_default_lrcs()
369 hwe->name, ERR_PTR(err), nop_q->guc->id); in xe_gt_record_default_lrcs()
374 &q->lrc[0]->bo->vmap, in xe_gt_record_default_lrcs()
375 xe_lrc_pphwsp_offset(q->lrc[0]), in xe_gt_record_default_lrcs()
376 xe_gt_lrc_size(gt, hwe->class)); in xe_gt_record_default_lrcs()
378 gt->default_lrc[hwe->class] = default_lrc; in xe_gt_record_default_lrcs()
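
The three argument lines above are the tail of a copy call whose name is cut off by the match window; assuming it is the driver's usual xe_map_memcpy_from(xe, dst, map, offset, size) helper, the snapshot step reads as:

	/* Copy the settled context image out of the queue's LRC BO,
	 * starting at the per-process HWSP (skipping the ring in front). */
	xe_map_memcpy_from(xe, default_lrc,
			   &q->lrc[0]->bo->vmap,
			   xe_lrc_pphwsp_offset(q->lrc[0]),
			   xe_gt_lrc_size(gt, hwe->class));
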
401 xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt)); in xe_gt_init_early()
414 spin_lock_init(&gt->global_invl_lock); in xe_gt_init_early()
423 * Only after this point can GT-specific MMIO operations in xe_gt_init_early()
429 err = xe_uc_init_noalloc(&gt->uc); in xe_gt_init_early()
435 return -ETIMEDOUT; in xe_gt_init_early()
449 snprintf(prefix, sizeof(prefix), "[GT%u Error]", gt->info.id); in dump_pat_on_error()
450 p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, prefix); in dump_pat_on_error()
462 return -ETIMEDOUT; in gt_init_with_gt_forcewake()
464 err = xe_uc_init(&gt->uc); in gt_init_with_gt_forcewake()
473 err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt); in gt_init_with_gt_forcewake()
477 xe_lmtt_init(&gt_to_tile(gt)->sriov.pf.lmtt); in gt_init_with_gt_forcewake()
502 * Stash hardware-reported version. Since this register does not exist in gt_init_with_gt_forcewake()
503 * on pre-MTL platforms, reading it there will (correctly) return 0. in gt_init_with_gt_forcewake()
505 gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID); in gt_init_with_gt_forcewake()
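
gt_init_with_gt_forcewake() implies the MMIO traffic above runs inside a forcewake bracket. A sketch with the xe_force_wake API (the domain choice and error handling are assumptions):

	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return -ETIMEDOUT;	/* matches the error visible above */

	gt->info.gmdid = xe_mmio_read32(&gt->mmio, GMD_ID);	/* GT is awake */

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
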
523 err = -ETIMEDOUT; in gt_init_with_all_forcewake()
530 xe_reg_sr_apply_mmio(&gt->reg_sr, gt); in gt_init_with_all_forcewake()
545 err = xe_uc_init_post_hwconfig(&gt->uc); in gt_init_with_all_forcewake()
551 * USM has its own SA pool so it does not block behind user operations in gt_init_with_all_forcewake()
553 if (gt_to_xe(gt)->info.has_usm) { in gt_init_with_all_forcewake()
556 gt->usm.bb_pool = xe_sa_bo_manager_init(gt_to_tile(gt), in gt_init_with_all_forcewake()
558 if (IS_ERR(gt->usm.bb_pool)) { in gt_init_with_all_forcewake()
559 err = PTR_ERR(gt->usm.bb_pool); in gt_init_with_all_forcewake()
568 err = xe_migrate_init(tile->migrate); in gt_init_with_all_forcewake()
573 err = xe_uc_load_hw(&gt->uc); in gt_init_with_all_forcewake()
579 gt->ccs_mode = 1; in gt_init_with_all_forcewake()
584 xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt); in gt_init_with_all_forcewake()
607 xe_hw_fence_irq_finish(&gt->fence_irq[i]); in xe_gt_fini()
617 INIT_WORK(&gt->reset.worker, gt_reset_worker); in xe_gt_init()
620 gt->ring_ops[i] = xe_ring_ops_get(gt, i); in xe_gt_init()
621 xe_hw_fence_irq_init(&gt->fence_irq[i]); in xe_gt_init()
624 err = devm_add_action_or_reset(gt_to_xe(gt)->drm.dev, xe_gt_fini, gt); in xe_gt_init()
640 err = xe_gt_idle_init(&gt->gtidle); in xe_gt_init()
664 * xe_gt_mmio_init() - Initialize GT's MMIO access
675 xe_mmio_init(&gt->mmio, tile, tile->mmio.regs, tile->mmio.regs_size); in xe_gt_mmio_init()
677 if (gt->info.type == XE_GT_TYPE_MEDIA) { in xe_gt_mmio_init()
678 gt->mmio.adj_offset = MEDIA_GT_GSI_OFFSET; in xe_gt_mmio_init()
679 gt->mmio.adj_limit = MEDIA_GT_GSI_LENGTH; in xe_gt_mmio_init()
681 gt->mmio.adj_offset = 0; in xe_gt_mmio_init()
682 gt->mmio.adj_limit = 0; in xe_gt_mmio_init()
686 gt->mmio.sriov_vf_gt = gt; in xe_gt_mmio_init()
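
The adj_offset/adj_limit pair set above is consumed on every register access: addresses below the limit are relocated into the media GT's GSI window. A sketch of that translation (this mirrors the xe_mmio read path; adjusted_addr() is an illustrative name):

static u32 adjusted_addr(const struct xe_mmio *mmio, u32 addr)
{
	if (addr < mmio->adj_limit)
		addr += mmio->adj_offset;	/* both 0 on a primary GT */
	return addr;
}
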
694 gt->user_engines.mask = 0; in xe_gt_record_user_engines()
695 memset(gt->user_engines.instances_per_class, 0, in xe_gt_record_user_engines()
696 sizeof(gt->user_engines.instances_per_class)); in xe_gt_record_user_engines()
702 gt->user_engines.mask |= BIT_ULL(id); in xe_gt_record_user_engines()
703 gt->user_engines.instances_per_class[hwe->class]++; in xe_gt_record_user_engines()
706 xe_gt_assert(gt, (gt->user_engines.mask | gt->info.engine_mask) in xe_gt_record_user_engines()
707 == gt->info.engine_mask); in xe_gt_record_user_engines()
719 xe_mmio_write32(&gt->mmio, GDRST, GRDOM_FULL); in do_gt_reset()
720 err = xe_mmio_wait32(&gt->mmio, GDRST, GRDOM_FULL, 0, 5000, NULL, false); in do_gt_reset()
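
do_gt_reset() writes GRDOM_FULL into GDRST, then waits for hardware to clear it: with the (mmio, reg, mask, value, timeout_us, out, atomic) argument order visible above, the wait succeeds once (GDRST & GRDOM_FULL) == 0, within 5000 us. An open-coded sketch of the same poll:

static int poll_cleared(struct xe_mmio *mmio, struct xe_reg reg,
			u32 mask, unsigned int timeout_us)
{
	while (timeout_us--) {
		if (!(xe_mmio_read32(mmio, reg) & mask))
			return 0;	/* bit cleared: reset complete */
		udelay(1);
	}
	return -ETIMEDOUT;
}
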
734 err = xe_uc_sanitize_reset(&gt->uc); in vf_gt_restart()
738 err = xe_uc_load_hw(&gt->uc); in vf_gt_restart()
742 err = xe_uc_start(&gt->uc); in vf_gt_restart()
763 xe_reg_sr_apply_mmio(&gt->reg_sr, gt); in do_gt_restart()
765 err = xe_wopcm_init(&gt->uc.wopcm); in do_gt_restart()
772 err = xe_uc_sanitize_reset(&gt->uc); in do_gt_restart()
776 err = xe_uc_load_hw(&gt->uc); in do_gt_restart()
781 xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt); in do_gt_restart()
787 err = xe_uc_start(&gt->uc); in do_gt_restart()
792 xe_reg_sr_apply_mmio(&hwe->reg_sr, gt); in do_gt_restart()
808 return xe_guc_wait_reset_unblock(&gt->uc.guc); in gt_wait_reset_unblock()
817 return -ECANCELED; in gt_reset()
821 return -ENODEV; in gt_reset()
832 err = -ECANCELED; in gt_reset()
840 err = -ETIMEDOUT; in gt_reset()
847 xe_uc_gucrc_disable(&gt->uc); in gt_reset()
848 xe_uc_stop_prepare(&gt->uc); in gt_reset()
851 xe_uc_stop(&gt->uc); in gt_reset()
853 xe_tlb_inval_reset(&gt->tlb_inval); in gt_reset()
872 XE_WARN_ON(xe_uc_start(&gt->uc)); in gt_reset()
894 if (!xe_fault_inject_gt_reset() && xe_uc_reset_prepare(&gt->uc)) in xe_gt_reset_async()
898 queue_work(gt->ordered_wq, &gt->reset.worker); in xe_gt_reset_async()
907 xe_uc_suspend_prepare(&gt->uc); in xe_gt_suspend_prepare()
924 err = xe_uc_suspend(&gt->uc); in xe_gt_suspend()
938 err = -ETIMEDOUT; in xe_gt_suspend()
956 * xe_gt_sanitize_freq() - Restore saved frequencies if necessary.
966 if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) || in xe_gt_sanitize_freq()
967 xe_uc_fw_is_loaded(&gt->uc.gsc.fw) || in xe_gt_sanitize_freq()
968 xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) && in xe_gt_sanitize_freq()
970 ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc); in xe_gt_sanitize_freq()
997 err = -ETIMEDOUT; in xe_gt_resume()
1013 if (hwe->class == class && in xe_gt_hw_engine()
1014 ((!logical && hwe->instance == instance) || in xe_gt_hw_engine()
1015 (logical && hwe->logical_instance == instance))) in xe_gt_hw_engine()
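
xe_gt_hw_engine() resolves a (class, instance) pair to a hardware engine, with the last argument selecting physical vs. logical instance numbering. Usage sketch (the class constant is taken from the names used elsewhere in this file):

	/* Physical instance 0 of the render class, NULL if absent */
	struct xe_hw_engine *phys =
		xe_gt_hw_engine(gt, XE_ENGINE_CLASS_RENDER, 0, false);

	/* Same class, but matched on logical_instance instead */
	struct xe_hw_engine *log =
		xe_gt_hw_engine(gt, XE_ENGINE_CLASS_RENDER, 0, true);
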
1031 if (hwe->class == XE_ENGINE_CLASS_RENDER || in xe_gt_any_hw_engine_by_reset_domain()
1032 hwe->class == XE_ENGINE_CLASS_COMPUTE) in xe_gt_any_hw_engine_by_reset_domain()
1036 if (hwe->class == class) in xe_gt_any_hw_engine_by_reset_domain()
1056 * xe_gt_declare_wedged() - Declare GT wedged
1064 xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode); in xe_gt_declare_wedged()
1066 xe_uc_declare_wedged(&gt->uc); in xe_gt_declare_wedged()
1067 xe_tlb_inval_reset(&gt->tlb_inval); in xe_gt_declare_wedged()