Lines Matching defs:hwe

278 struct xe_hw_engine *hwe = arg;
280 if (hwe->exl_port)
281 xe_execlist_port_destroy(hwe->exl_port);
283 hwe->gt = NULL;
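(Editor's aside: a hedged reconstruction of the devm teardown callback these fragments come from; per the fragments below, it is registered via devm_add_action_or_reset() in hw_engine_init().)

	static void hw_engine_fini(void *arg)
	{
		struct xe_hw_engine *hwe = arg;

		if (hwe->exl_port)
			xe_execlist_port_destroy(hwe->exl_port);

		hwe->gt = NULL;
	}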
288 * @hwe: engine
296 void xe_hw_engine_mmio_write32(struct xe_hw_engine *hwe,
299 xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
300 xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
302 reg.addr += hwe->mmio_base;
304 xe_mmio_write32(&hwe->gt->mmio, reg, val);
309 * @hwe: engine
317 u32 xe_hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
319 xe_gt_assert(hwe->gt, !(reg.addr & hwe->mmio_base));
320 xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
322 reg.addr += hwe->mmio_base;
324 return xe_mmio_read32(&hwe->gt->mmio, reg);
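(Editor's aside: a minimal usage sketch of the two accessors above; the helper name is hypothetical. Both accessors add hwe->mmio_base themselves and assert the engine's forcewake domain, so the caller passes a base-relative register and must already hold the domain.)

	static void ring_reg_rmw_example(struct xe_hw_engine *hwe,
					 struct xe_reg reg, u32 clr, u32 set)
	{
		u32 val = xe_hw_engine_mmio_read32(hwe, reg);

		val &= ~clr;
		val |= set;
		xe_hw_engine_mmio_write32(hwe, reg, val);
	}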
327 void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
330 xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
333 if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
334 xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
337 xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
338 xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
339 xe_bo_ggtt_addr(hwe->hwsp));
341 if (xe_device_has_msix(gt_to_xe(hwe->gt)))
343 xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
344 xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
346 xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
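(Editor's aside: the trailing RING_MI_MODE read above is a posting read that flushes the preceding writes before execution continues. A hedged sketch of the pattern, helper name hypothetical:)

	static void ring_write_and_flush(struct xe_hw_engine *hwe,
					 struct xe_reg reg, u32 val)
	{
		xe_hw_engine_mmio_write32(hwe, reg, val);
		xe_hw_engine_mmio_read32(hwe, reg);	/* posting read */
	}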
350 const struct xe_hw_engine *hwe)
353 xe_rtp_match_first_render_or_compute(gt, hwe);
357 const struct xe_hw_engine *hwe)
362 if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
363 hwe->class != XE_ENGINE_CLASS_RENDER)
366 return xe_mmio_read32(&hwe->gt->mmio, XEHP_FUSE4) & CFEG_WMTP_DISABLE;
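(Editor's aside: both helpers above are RTP match callbacks and share one signature; a minimal hypothetical example of the shape:)

	static bool match_compute_class(const struct xe_gt *gt,
					const struct xe_hw_engine *hwe)
	{
		return hwe->class == XE_ENGINE_CLASS_COMPUTE;
	}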
370 xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe)
372 struct xe_gt *gt = hwe->gt;
377 struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
403 xe_rtp_process_to_sr(&ctx, lrc_setup, ARRAY_SIZE(lrc_setup), &hwe->reg_lrc);
407 hw_engine_setup_default_state(struct xe_hw_engine *hwe)
409 struct xe_gt *gt = hwe->gt;
422 const u8 mocs_read_idx = hwe->class == XE_ENGINE_CLASS_COMPUTE && IS_DGFX(xe) &&
427 struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe);
467 xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr);
494 static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe,
507 xe_gt_assert(gt, !hwe->gt);
509 hwe->gt = gt;
510 hwe->class = info->class;
511 hwe->instance = info->instance;
512 hwe->mmio_base = info->mmio_base;
513 hwe->irq_offset = xe_device_has_msix(gt_to_xe(gt)) ?
516 hwe->domain = info->domain;
517 hwe->name = info->name;
518 hwe->fence_irq = &gt->fence_irq[info->class];
519 hwe->engine_id = id;
521 hwe->eclass = &gt->eclass[hwe->class];
522 if (!hwe->eclass->sched_props.job_timeout_ms) {
523 hwe->eclass->sched_props.job_timeout_ms = 5 * 1000;
524 hwe->eclass->sched_props.job_timeout_min = XE_HW_ENGINE_JOB_TIMEOUT_MIN;
525 hwe->eclass->sched_props.job_timeout_max = XE_HW_ENGINE_JOB_TIMEOUT_MAX;
526 hwe->eclass->sched_props.timeslice_us = 1 * 1000;
527 hwe->eclass->sched_props.timeslice_min = XE_HW_ENGINE_TIMESLICE_MIN;
528 hwe->eclass->sched_props.timeslice_max = XE_HW_ENGINE_TIMESLICE_MAX;
529 hwe->eclass->sched_props.preempt_timeout_us = XE_HW_ENGINE_PREEMPT_TIMEOUT;
530 hwe->eclass->sched_props.preempt_timeout_min = XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN;
531 hwe->eclass->sched_props.preempt_timeout_max = XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX;
550 if (hwe->class == XE_ENGINE_CLASS_OTHER) {
552 if (hwe->eclass->sched_props.preempt_timeout_us < min_preempt_timeout) {
553 hwe->eclass->sched_props.preempt_timeout_us = min_preempt_timeout;
559 hwe->eclass->defaults = hwe->eclass->sched_props;
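(Editor's aside: several engines share one eclass, so the job_timeout_ms guard above lets only the first engine of a class fill in the sched_props; the copy into ->defaults keeps the pristine values around. A hedged sketch of a restore path using that copy, assuming the eclass type from xe_hw_engine_types.h, helper name hypothetical:)

	static void sched_props_restore(struct xe_hw_engine_class_intf *eclass)
	{
		eclass->sched_props = eclass->defaults;
	}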
562 xe_reg_sr_init(&hwe->reg_sr, hwe->name, gt_to_xe(gt));
563 xe_tuning_process_engine(hwe);
564 xe_wa_process_engine(hwe);
565 hw_engine_setup_default_state(hwe);
567 xe_reg_sr_init(&hwe->reg_whitelist, hwe->name, gt_to_xe(gt));
568 xe_reg_whitelist_process_engine(hwe);
571 static void adjust_idledly(struct xe_hw_engine *hwe)
573 struct xe_gt *gt = hwe->gt;
579 if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_WA(gt, 16023105232)) {
580 idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base));
581 maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base));
593 xe_mmio_write32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base), idledly);
598 static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
608 xe_reg_sr_apply_mmio(&hwe->reg_sr, gt);
610 hwe->hwsp = xe_managed_bo_create_pin_map(xe, tile, SZ_4K,
614 if (IS_ERR(hwe->hwsp)) {
615 err = PTR_ERR(hwe->hwsp);
620 hwe->exl_port = xe_execlist_port_create(xe, hwe);
621 if (IS_ERR(hwe->exl_port)) {
622 err = PTR_ERR(hwe->exl_port);
627 if (hwe->class == XE_ENGINE_CLASS_OTHER)
628 hwe->irq_handler = xe_gsc_hwe_irq_handler;
631 xe_hw_engine_enable_ring(hwe);
635 if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY)
636 gt->usm.reserved_bcs_instance = hwe->instance;
639 adjust_idledly(hwe);
641 return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);
644 xe_bo_unpin_map_no_vm(hwe->hwsp);
646 hwe->name = NULL;
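(Editor's aside: a hedged reconstruction of the unwind labels the two fragments above sit under at the end of hw_engine_init():)

	err_hwsp:
		xe_bo_unpin_map_no_vm(hwe->hwsp);
	err_name:
		hwe->name = NULL;

		return err;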
657 struct xe_hw_engine *hwe;
661 for_each_hw_engine(hwe, gt, id)
662 if (hwe->class == class)
663 hwe->logical_instance = logical_instance++;
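(Editor's aside: logical instances are numbered densely per class, independent of the physical instance. A hypothetical lookup built on the same iterator:)

	static struct xe_hw_engine *
	hw_engine_by_logical(struct xe_gt *gt, enum xe_engine_class class,
			     u16 logical_instance)
	{
		struct xe_hw_engine *hwe;
		enum xe_hw_engine_id id;

		for_each_hw_engine(hwe, gt, id)
			if (hwe->class == class &&
			    hwe->logical_instance == logical_instance)
				return hwe;

		return NULL;
	}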
849 struct xe_hw_engine *hwe;
852 for_each_hw_engine(hwe, gt, id) {
853 err = hw_engine_init(gt, hwe, id);
866 void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
868 wake_up_all(&gt_to_xe(hwe->gt)->ufence_wq);
870 if (hwe->irq_handler)
871 hwe->irq_handler(hwe, intr_vec);
874 xe_hw_fence_irq_run(hwe->fence_irq);
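(Editor's aside: ->irq_handler is the optional class-specific hook invoked above; xe_gsc_hwe_irq_handler is the in-tree user for XE_ENGINE_CLASS_OTHER. A hypothetical handler matching the hook's shape:)

	static void example_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec)
	{
		/* decode intr_vec and do class-specific processing */
	}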
879 * @hwe: Xe HW Engine.
889 xe_hw_engine_snapshot_capture(struct xe_hw_engine *hwe, struct xe_exec_queue *q)
894 if (!xe_hw_engine_is_valid(hwe))
902 snapshot->name = kstrdup(hwe->name, GFP_ATOMIC);
903 snapshot->hwe = hwe;
904 snapshot->logical_instance = hwe->logical_instance;
905 snapshot->forcewake.domain = hwe->domain;
906 snapshot->forcewake.ref = xe_force_wake_ref(gt_to_fw(hwe->gt),
907 hwe->domain);
908 snapshot->mmio_base = hwe->mmio_base;
909 snapshot->kernel_reserved = xe_hw_engine_is_reserved(hwe);
912 if (IS_SRIOV_VF(gt_to_xe(hwe->gt)))
919 struct xe_device *xe = gt_to_xe(hwe->gt);
923 xe_gt_dbg(hwe->gt, "Found and locked GuC-err-capture node");
929 xe_engine_manual_capture(hwe, snapshot);
930 xe_gt_dbg(hwe->gt, "Proceeding with manual engine snapshot");
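(Editor's aside: the GFP_ATOMIC kstrdup above means capture is safe from atomic context. The usual lifecycle, mirrored by xe_hw_engine_print() below, assuming the in-tree print/free helpers and a struct drm_printer *p in scope:)

	struct xe_hw_engine_snapshot *s = xe_hw_engine_snapshot_capture(hwe, NULL);

	xe_hw_engine_snapshot_print(s, p);
	xe_hw_engine_snapshot_free(s);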
948 gt = snapshot->hwe->gt;
962 * @hwe: Hardware Engine.
967 void xe_hw_engine_print(struct xe_hw_engine *hwe, struct drm_printer *p)
971 snapshot = xe_hw_engine_snapshot_capture(hwe, NULL);
990 bool xe_hw_engine_is_reserved(struct xe_hw_engine *hwe)
992 struct xe_gt *gt = hwe->gt;
995 if (hwe->class == XE_ENGINE_CLASS_OTHER)
1000 hwe->class == XE_ENGINE_CLASS_COMPUTE &&
1001 hwe->logical_instance >= gt->ccs_mode)
1004 return xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY &&
1005 hwe->instance == gt->usm.reserved_bcs_instance;
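(Editor's aside: a usage sketch; enumeration paths skip reserved engines such as the BCS instance held back for USM page-fault servicing:)

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;
		/* expose hwe to userspace */
	}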
1030 u64 xe_hw_engine_read_timestamp(struct xe_hw_engine *hwe)
1032 return xe_mmio_read64_2x32(&hwe->gt->mmio, RING_TIMESTAMP(hwe->mmio_base));
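(Editor's aside: RING_TIMESTAMP is a free-running counter fetched with a 2x32 read; sampling code takes deltas between reads. Sketch, assuming the engine's forcewake domain is held across both reads:)

	u64 t0 = xe_hw_engine_read_timestamp(hwe);
	/* ... workload ... */
	u64 elapsed = xe_hw_engine_read_timestamp(hwe) - t0;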
1035 enum xe_force_wake_domains xe_hw_engine_to_fw_domain(struct xe_hw_engine *hwe)
1037 return engine_infos[hwe->engine_id].domain;
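(Editor's aside: a pairing sketch; code touching engine registers directly can assert the domain this helper reports, using the same assertion seen in the mmio accessors above:)

	xe_force_wake_assert_held(gt_to_fw(hwe->gt),
				  xe_hw_engine_to_fw_domain(hwe));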