Lines matching defs:engine

260 * intel_engine_context_size() - return the size of the context for an engine
262 * @class: engine class
264 * Each engine class may require a different amount of space for a context
267 * Return: size (in bytes) of an engine class specific context image
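The context image is a fixed per-class cost. A minimal standalone sketch of such a per-class lookup; the class names and byte counts below are assumptions for illustration, not values from the driver:

#include <stdio.h>

enum demo_class { RENDER_DEMO, VIDEO_DEMO, COPY_DEMO, NUM_CLASSES_DEMO };

static unsigned int demo_context_size(enum demo_class class)
{
	/* Assumed sizes, for shape only; the real table is gen-dependent. */
	static const unsigned int size[NUM_CLASSES_DEMO] = {
		[RENDER_DEMO] = 22 * 4096,
		[VIDEO_DEMO]  =  2 * 4096,
		[COPY_DEMO]   =  2 * 4096,
	};

	return size[class];
}

int main(void)
{
	printf("render-class context image: %u bytes\n",
	       demo_context_size(RENDER_DEMO));
	return 0;
}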
358 static void __sprint_engine_name(struct intel_engine_cs *engine)
361 * Before we know what the uABI name for this engine will be,
362 * we still would like to keep track of this engine in the debug logs.
365 GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
366 intel_engine_class_repr(engine->class),
367 engine->instance) >= sizeof(engine->name));
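The format string above yields provisional names such as "rcs'0". A runnable model; the class string and buffer size are assumptions, since intel_engine_class_repr() and the size of engine->name are not in this dump:

#include <stdio.h>

int main(void)
{
	char name[8]; /* stand-in for sizeof(engine->name) */
	int n = snprintf(name, sizeof(name), "%s'%u", "rcs", 0u);

	/* The GEM_WARN_ON() above fires when the name was truncated. */
	printf("%s (truncated: %s)\n", name,
	       n >= (int)sizeof(name) ? "yes" : "no");
	return 0;
}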
370 void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
374 * per-engine HWSTAM until gen6.
376 if (GRAPHICS_VER(engine->i915) < 6 && engine->class != RENDER_CLASS)
379 if (GRAPHICS_VER(engine->i915) >= 3)
380 ENGINE_WRITE(engine, RING_HWSTAM, mask);
382 ENGINE_WRITE16(engine, RING_HWSTAM, mask);
385 static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
388 intel_engine_set_hwsp_writemask(engine, ~0u);
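intel_engine_sanitize_mmio() masks every status-page event by writing all-ones to HWSTAM. A standalone model of the width dispatch at lines 379-382, with the MMIO write stubbed out by printf:

#include <stdint.h>
#include <stdio.h>

static void demo_set_hwstam(int graphics_ver, uint32_t mask)
{
	/* HWSTAM is a 32-bit register from gen3 on, 16-bit before that. */
	if (graphics_ver >= 3)
		printf("write32 HWSTAM = 0x%08x\n", mask);
	else
		printf("write16 HWSTAM = 0x%04x\n", mask & 0xffffu);
}

int main(void)
{
	demo_set_hwstam(2, ~0u); /* write16 HWSTAM = 0xffff */
	demo_set_hwstam(6, ~0u); /* write32 HWSTAM = 0xffffffff */
	return 0;
}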
391 static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
454 struct intel_engine_cs *engine;
462 if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
474 engine = kzalloc(sizeof(*engine), GFP_KERNEL);
475 if (!engine)
478 BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
480 INIT_LIST_HEAD(&engine->pinned_contexts_list);
481 engine->id = id;
482 engine->legacy_idx = INVALID_ENGINE;
483 engine->mask = BIT(id);
484 engine->reset_domain = get_reset_domain(GRAPHICS_VER(gt->i915),
486 engine->i915 = i915;
487 engine->gt = gt;
488 engine->uncore = gt->uncore;
490 engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
491 engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
493 engine->irq_handler = nop_irq_handler;
495 engine->class = info->class;
496 engine->instance = info->instance;
497 engine->logical_mask = BIT(logical_instance);
498 __sprint_engine_name(engine);
500 if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
501 __ffs(CCS_MASK(engine->gt) | RCS_MASK(engine->gt)) == engine->instance)
502 engine->flags |= I915_ENGINE_FIRST_RENDER_COMPUTE;
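A runnable model of the __ffs() test above: the lowest set bit of the combined CCS|RCS fuse mask picks which surviving instance is tagged FIRST_RENDER_COMPUTE. The fuse values are assumed:

#include <stdio.h>

int main(void)
{
	unsigned int rcs_mask = 0x0; /* render fused off (assumed) */
	unsigned int ccs_mask = 0x6; /* ccs1/ccs2 present, ccs0 fused off (assumed) */
	/* __ffs() is the 0-based index of the lowest set bit. */
	unsigned int first = __builtin_ctz(ccs_mask | rcs_mask);

	printf("first surviving render/compute instance: %u\n", first); /* 1 */
	return 0;
}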
505 if (engine->class == RENDER_CLASS || engine->class == COMPUTE_CLASS) {
506 engine->flags |= I915_ENGINE_HAS_RCS_REG_STATE;
507 engine->flags |= I915_ENGINE_HAS_EU_PRIORITY;
510 engine->props.heartbeat_interval_ms =
512 engine->props.max_busywait_duration_ns =
514 engine->props.preempt_timeout_ms =
516 engine->props.stop_timeout_ms =
518 engine->props.timeslice_duration_ms =
527 if (GRAPHICS_VER(i915) == 12 && (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE))
528 engine->props.preempt_timeout_ms = CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE;
533 u64 clamp = intel_clamp_##field(engine, engine->props.field); \
534 if (clamp != engine->props.field) { \
535 drm_notice(&engine->i915->drm, \
538 engine->props.field = clamp; \
550 engine->defaults = engine->props; /* never to change again */
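Lines 533-538 are the body of a per-field sanitize macro. A standalone model of the ##-pasting that binds each props field to its intel_clamp_<field>() helper; the struct, ceiling, and message are assumptions for the demo:

#include <stdio.h>

struct demo_props {
	unsigned long long preempt_timeout_ms;
};

static unsigned long long demo_clamp_preempt_timeout_ms(unsigned long long v)
{
	return v > 100000 ? 100000 : v; /* assumed ceiling */
}

#define SANITIZE(p, field) do { \
	unsigned long long clamp = demo_clamp_##field((p)->field); \
	if (clamp != (p)->field) { \
		printf("clamped " #field " from %llu to %llu\n", \
		       (p)->field, clamp); \
		(p)->field = clamp; \
	} \
} while (0)

int main(void)
{
	struct demo_props props = { .preempt_timeout_ms = 1ull << 32 };

	SANITIZE(&props, preempt_timeout_ms);
	return 0;
}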
552 engine->context_size = intel_engine_context_size(gt, engine->class);
553 if (WARN_ON(engine->context_size > BIT(20)))
554 engine->context_size = 0;
555 if (engine->context_size)
558 ewma__engine_latency_init(&engine->latency);
560 ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
563 intel_engine_sanitize_mmio(engine);
565 gt->engine_class[info->class][info->instance] = engine;
566 gt->engine[id] = engine;
571 u64 intel_clamp_heartbeat_interval_ms(struct intel_engine_cs *engine, u64 value)
578 u64 intel_clamp_max_busywait_duration_ns(struct intel_engine_cs *engine, u64 value)
585 u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value)
591 if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
599 u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value)
606 u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value)
612 if (intel_guc_submission_is_wanted(gt_to_guc(engine->gt)))
620 static void __setup_engine_capabilities(struct intel_engine_cs *engine)
622 struct drm_i915_private *i915 = engine->i915;
624 if (engine->class == VIDEO_DECODE_CLASS) {
626 * HEVC support is present on the first engine instance
630 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
631 engine->uabi_capabilities |=
635 * SFC block is present only on even logical engine
639 (engine->gt->info.vdbox_sfc_access &
640 BIT(engine->instance))) ||
641 (GRAPHICS_VER(i915) >= 9 && engine->instance == 0))
642 engine->uabi_capabilities |=
644 } else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
646 engine->gt->info.sfc_mask & BIT(engine->instance))
647 engine->uabi_capabilities |=
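A runnable model of the per-instance capability tests above: a capability is granted when the engine's instance bit survives in the fuse-derived mask. The mask value is assumed:

#include <stdio.h>

int main(void)
{
	unsigned int vdbox_sfc_access = 0x5; /* SFC on vcs0 and vcs2 (assumed) */
	unsigned int instance;

	for (instance = 0; instance < 4; instance++)
		printf("vcs%u: SFC %s\n", instance,
		       (vdbox_sfc_access & (1u << instance)) ? "yes" : "no");
	return 0;
}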
654 struct intel_engine_cs *engine;
657 for_each_engine(engine, gt, id)
658 __setup_engine_capabilities(engine);
667 struct intel_engine_cs *engine;
671 * Before we release the resources held by engine, we must be certain
684 for_each_engine(engine, gt, id) {
685 if (!engine->release)
688 intel_wakeref_wait_for_idle(&engine->wakeref);
689 GEM_BUG_ON(intel_engine_pm_is_awake(engine));
691 engine->release(engine);
692 engine->release = NULL;
694 memset(&engine->reset, 0, sizeof(engine->reset));
700 void intel_engine_free_request_pool(struct intel_engine_cs *engine)
702 if (!engine->request_pool)
705 kmem_cache_free(i915_request_slab_cache(), engine->request_pool);
710 struct intel_engine_cs *engine;
716 for_each_engine(engine, gt, id) {
717 intel_engine_free_request_pool(engine);
718 kfree(engine);
719 gt->engine[id] = NULL;
834 * engine is not available for use.
845 * the blitter forcewake domain to read the engine fuses, but at the same time
848 * domains based on the full engine mask in the platform capabilities before
880 * All the workloads submitted to the first engine will be shared among
891 * changing the CCS engine configuration
895 /* Mask off all the CCS engines */
897 /* Put back in the first CCS engine */
1011 void intel_engine_init_execlists(struct intel_engine_cs *engine)
1013 struct intel_engine_execlists * const execlists = &engine->execlists;
1024 static void cleanup_status_page(struct intel_engine_cs *engine)
1029 intel_engine_set_hwsp_writemask(engine, ~0u);
1031 vma = fetch_and_zero(&engine->status_page.vma);
1035 if (!HWS_NEEDS_PHYSICAL(engine->i915))
1042 static int pin_ggtt_status_page(struct intel_engine_cs *engine,
1048 if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
1067 static int init_status_page(struct intel_engine_cs *engine)
1075 INIT_LIST_HEAD(&engine->status_page.timelines);
1084 obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
1086 gt_err(engine->gt, "Failed to allocate status page\n");
1092 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
1101 if (!ret && !HWS_NEEDS_PHYSICAL(engine->i915))
1102 ret = pin_ggtt_status_page(engine, &ww, vma);
1112 engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
1113 engine->status_page.vma = vma;
1131 static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
1158 struct drm_i915_private *i915 = engine->i915;
1159 const unsigned int instance = engine->instance;
1160 const unsigned int class = engine->class;
1173 * respective engine registers were moved to masked type. Then after the
1177 if (engine->gt->type == GT_MEDIA) {
1201 if (gt_WARN_ONCE(engine->gt, !num,
1205 if (gt_WARN_ON_ONCE(engine->gt,
1229 engine->tlb_inv.mcr = regs == xehp_regs;
1230 engine->tlb_inv.reg = reg;
1231 engine->tlb_inv.done = val;
1234 (engine->class == VIDEO_DECODE_CLASS ||
1235 engine->class == VIDEO_ENHANCEMENT_CLASS ||
1236 engine->class == COMPUTE_CLASS ||
1237 engine->class == OTHER_CLASS))
1238 engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
1240 engine->tlb_inv.request = val;
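Line 1238 uses i915's masked-register convention: the write carries the change mask in the upper 16 bits, so bits not named in the mask half are left untouched. A runnable model of _MASKED_BIT_ENABLE(); the request bit position is assumed:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MASKED_BIT_ENABLE(a) (((a) << 16) | (a))

int main(void)
{
	uint32_t request = 1u << 0; /* assumed invalidation-request bit */

	printf("plain request:  0x%08x\n", request);
	printf("masked request: 0x%08x\n", DEMO_MASKED_BIT_ENABLE(request));
	return 0;
}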
1245 static int engine_setup_common(struct intel_engine_cs *engine)
1249 init_llist_head(&engine->barrier_tasks);
1251 err = intel_engine_init_tlb_invalidation(engine);
1255 err = init_status_page(engine);
1259 engine->breadcrumbs = intel_breadcrumbs_create(engine);
1260 if (!engine->breadcrumbs) {
1265 engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
1266 if (!engine->sched_engine) {
1270 engine->sched_engine->private_data = engine;
1272 err = intel_engine_init_cmd_parser(engine);
1276 intel_engine_init_execlists(engine);
1277 intel_engine_init__pm(engine);
1278 intel_engine_init_retire(engine);
1281 engine->sseu =
1282 intel_sseu_from_device_info(&engine->gt->info.sseu);
1284 intel_engine_init_workarounds(engine);
1285 intel_engine_init_whitelist(engine);
1286 intel_engine_init_ctx_wa(engine);
1288 if (GRAPHICS_VER(engine->i915) >= 12)
1289 engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO;
1294 i915_sched_engine_put(engine->sched_engine);
1296 intel_breadcrumbs_put(engine->breadcrumbs);
1298 cleanup_status_page(engine);
1310 struct intel_engine_cs *engine = ce->engine;
1314 GEM_BUG_ON(!engine->gt->scratch);
1320 frame->rq.i915 = engine->i915;
1321 frame->rq.engine = engine;
1335 spin_lock_irq(&engine->sched_engine->lock);
1337 dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
1339 spin_unlock_irq(&engine->sched_engine->lock);
1349 intel_engine_create_pinned_context(struct intel_engine_cs *engine,
1359 ce = intel_context_create(engine);
1377 list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);
1392 struct intel_engine_cs *engine = ce->engine;
1393 struct i915_vma *hwsp = engine->status_page.vma;
1407 create_ggtt_bind_context(struct intel_engine_cs *engine)
1415 return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
1421 create_kernel_context(struct intel_engine_cs *engine)
1425 return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
1431 * engine_init_common - initialize engine state which might require hw access
1432 * @engine: Engine to initialize.
1434 * Initializes @engine structure members shared between legacy and execlists
1437 * Typically done at later stages of submission-mode-specific engine setup.
1441 static int engine_init_common(struct intel_engine_cs *engine)
1446 engine->set_default_submission(engine);
1456 ce = create_kernel_context(engine);
1460 * Create a separate pinned context for GGTT update with blitter engine
1462 * engines as well, but BCS should be the least busy engine, so pick that for
1465 if (i915_ggtt_require_binder(engine->i915) && engine->id == BCS0) {
1466 bce = create_ggtt_bind_context(engine);
1477 engine->emit_fini_breadcrumb_dw = ret;
1478 engine->kernel_context = ce;
1479 engine->bind_context = bce;
1493 int (*setup)(struct intel_engine_cs *engine);
1494 struct intel_engine_cs *engine;
1509 for_each_engine(engine, gt, id) {
1510 err = engine_setup_common(engine);
1514 err = setup(engine);
1516 intel_engine_cleanup_common(engine);
1521 GEM_BUG_ON(engine->release == NULL);
1523 err = engine_init_common(engine);
1527 intel_engine_add_user(engine);
1534 * intel_engine_cleanup_common - cleans up the engine state created by
1536 * @engine: Engine to cleanup.
1540 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
1542 GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
1544 i915_sched_engine_put(engine->sched_engine);
1545 intel_breadcrumbs_put(engine->breadcrumbs);
1547 intel_engine_fini_retire(engine);
1548 intel_engine_cleanup_cmd_parser(engine);
1550 if (engine->default_state)
1551 fput(engine->default_state);
1553 if (engine->kernel_context)
1554 intel_engine_destroy_pinned_context(engine->kernel_context);
1556 if (engine->bind_context)
1557 intel_engine_destroy_pinned_context(engine->bind_context);
1560 GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
1561 cleanup_status_page(engine);
1563 intel_wa_list_free(&engine->ctx_wa_list);
1564 intel_wa_list_free(&engine->wa_list);
1565 intel_wa_list_free(&engine->whitelist);
1569 * intel_engine_resume - re-initializes the HW state of the engine
1570 * @engine: Engine to resume.
1574 int intel_engine_resume(struct intel_engine_cs *engine)
1576 intel_engine_apply_workarounds(engine);
1577 intel_engine_apply_whitelist(engine);
1579 return engine->resume(engine);
1582 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
1584 struct drm_i915_private *i915 = engine->i915;
1589 acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
1591 acthd = ENGINE_READ(engine, RING_ACTHD);
1593 acthd = ENGINE_READ(engine, ACTHD);
1598 u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
1602 if (GRAPHICS_VER(engine->i915) >= 8)
1603 bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
1605 bbaddr = ENGINE_READ(engine, RING_BBADDR);
1610 static unsigned long stop_timeout(const struct intel_engine_cs *engine)
1617 * the engine to quiesce. We've stopped submission to the engine, and
1619 * leave the engine idle. So they should not be caught unaware by
1622 return READ_ONCE(engine->props.stop_timeout_ms);
1625 static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
1629 struct intel_uncore *uncore = engine->uncore;
1630 const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
1639 if (intel_engine_reset_needs_wa_22011802037(engine->gt))
1640 intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
1643 err = __intel_wait_for_register_fw(engine->uncore, mode,
1654 int intel_engine_stop_cs(struct intel_engine_cs *engine)
1658 if (GRAPHICS_VER(engine->i915) < 3)
1661 ENGINE_TRACE(engine, "\n");
1674 if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) {
1675 ENGINE_TRACE(engine,
1677 ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR,
1678 ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR);
1685 if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) !=
1686 (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR))
1693 void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
1695 ENGINE_TRACE(engine, "\n");
1697 ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
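The STOP_RING cancel above is the disable half of the same masked-register convention modelled earlier: set only the mask bit and leave the value bit clear. A runnable model of _MASKED_BIT_DISABLE(); the bit position is assumed:

#include <stdint.h>
#include <stdio.h>

#define DEMO_MASKED_BIT_DISABLE(a) ((a) << 16)

int main(void)
{
	uint32_t stop_ring = 1u << 8; /* assumed bit position */

	printf("cancel write: 0x%08x\n", DEMO_MASKED_BIT_DISABLE(stop_ring));
	return 0;
}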
1700 static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
1724 if (!_reg[engine->id].reg)
1727 val = intel_uncore_read(engine->uncore, _reg[engine->id]);
1761 void intel_engine_wait_for_pending_mi_fw(struct intel_engine_cs *engine)
1763 u32 fw_pending = __cs_pending_mi_force_wakes(engine);
1766 __gpm_wait_for_fw_complete(engine->gt, fw_pending);
1770 void intel_engine_get_instdone(const struct intel_engine_cs *engine,
1773 struct drm_i915_private *i915 = engine->i915;
1774 struct intel_uncore *uncore = engine->uncore;
1775 u32 mmio_base = engine->mmio_base;
1786 if (engine->id != RCS0)
1798 for_each_ss_steering(iter, engine->gt, slice, subslice) {
1800 intel_gt_mcr_read(engine->gt,
1804 intel_gt_mcr_read(engine->gt,
1810 for_each_ss_steering(iter, engine->gt, slice, subslice)
1812 intel_gt_mcr_read(engine->gt,
1820 if (engine->id != RCS0)
1832 if (engine->id == RCS0)
1841 static bool ring_is_idle(struct intel_engine_cs *engine)
1845 if (I915_SELFTEST_ONLY(!engine->mmio_base))
1848 if (!intel_engine_pm_get_if_awake(engine))
1852 if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
1853 (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
1857 if (GRAPHICS_VER(engine->i915) > 2 &&
1858 !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
1861 intel_engine_pm_put(engine);
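ring_is_idle() compares only the address fields of HEAD and TAIL, since HEAD carries extra state in its high bits. A runnable model; the register snapshots and the address mask are assumptions standing in for HEAD_ADDR/TAIL_ADDR:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t head = 0x002001a0; /* wrap count in high bits (assumed) */
	uint32_t tail = 0x000001a0;
	uint32_t addr = 0x001ffffc; /* stand-in for HEAD_ADDR/TAIL_ADDR */

	printf("ring is %s\n",
	       (head & addr) == (tail & addr) ? "idle" : "busy");
	return 0;
}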
1866 void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
1868 struct tasklet_struct *t = &engine->sched_engine->tasklet;
1888 * intel_engine_is_idle() - Report if the engine has finished processing all work
1889 * @engine: the intel_engine_cs
1892 * to hardware, and that the engine is idle.
1894 bool intel_engine_is_idle(struct intel_engine_cs *engine)
1897 if (intel_gt_is_wedged(engine->gt))
1900 if (!intel_engine_pm_is_awake(engine))
1904 intel_synchronize_hardirq(engine->i915);
1905 intel_engine_flush_submission(engine);
1908 if (!i915_sched_engine_is_empty(engine->sched_engine))
1912 return ring_is_idle(engine);
1917 struct intel_engine_cs *engine;
1931 for_each_engine(engine, gt, id) {
1932 if (!intel_engine_is_idle(engine))
1939 bool intel_engine_irq_enable(struct intel_engine_cs *engine)
1941 if (!engine->irq_enable)
1945 spin_lock(engine->gt->irq_lock);
1946 engine->irq_enable(engine);
1947 spin_unlock(engine->gt->irq_lock);
1952 void intel_engine_irq_disable(struct intel_engine_cs *engine)
1954 if (!engine->irq_disable)
1958 spin_lock(engine->gt->irq_lock);
1959 engine->irq_disable(engine);
1960 spin_unlock(engine->gt->irq_lock);
1965 struct intel_engine_cs *engine;
1968 for_each_engine(engine, gt, id) {
1969 if (engine->sanitize)
1970 engine->sanitize(engine);
1972 engine->set_default_submission(engine);
1976 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
1978 switch (GRAPHICS_VER(engine->i915)) {
1983 return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
1985 return !IS_I965G(engine->i915); /* who knows! */
1987 return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
1998 * Even though we are holding the engine->sched_engine->lock here, there
2076 static void intel_engine_print_registers(struct intel_engine_cs *engine,
2079 struct drm_i915_private *i915 = engine->i915;
2080 struct intel_engine_execlists * const execlists = &engine->execlists;
2083 if (engine->id == RCS0 && IS_GRAPHICS_VER(i915, 4, 7))
2084 drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
2087 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI));
2089 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO));
2092 ENGINE_READ(engine, RING_START));
2094 ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
2096 ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
2098 ENGINE_READ(engine, RING_CTL),
2099 ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
2100 if (GRAPHICS_VER(engine->i915) > 2) {
2102 ENGINE_READ(engine, RING_MI_MODE),
2103 ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
2108 ENGINE_READ(engine, RING_IMR));
2110 ENGINE_READ(engine, RING_ESR));
2112 ENGINE_READ(engine, RING_EMR));
2114 ENGINE_READ(engine, RING_EIR));
2117 addr = intel_engine_get_active_head(engine);
2120 addr = intel_engine_get_last_batch_head(engine);
2124 addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
2126 addr = ENGINE_READ(engine, RING_DMA_FADD);
2128 addr = ENGINE_READ(engine, DMA_FADD_I8XX);
2133 ENGINE_READ(engine, RING_IPEIR));
2135 ENGINE_READ(engine, RING_IPEHR));
2137 drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
2138 drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
2141 if (HAS_EXECLISTS(i915) && !intel_engine_uses_guc(engine)) {
2144 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
2150 str_yes_no(test_bit(TASKLET_STATE_SCHED, &engine->sched_engine->tasklet.state)),
2151 str_enabled_disabled(!atomic_read(&engine->sched_engine->tasklet.count)),
2152 repr_timer(&engine->execlists.preempt),
2153 repr_timer(&engine->execlists.timer));
2159 ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
2160 ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
2175 i915_sched_engine_active_lock_bh(engine->sched_engine);
2206 i915_sched_engine_active_unlock_bh(engine->sched_engine);
2209 ENGINE_READ(engine, RING_PP_DIR_BASE));
2211 ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
2213 ENGINE_READ(engine, RING_PP_DIR_DCLV));
2256 static void print_properties(struct intel_engine_cs *engine,
2264 .offset = offsetof(typeof(engine->props), x), \
2282 read_ul(&engine->props, p->offset),
2283 read_ul(&engine->defaults, p->offset));
2334 msg = "\t\tactive on engine";
2342 static void engine_dump_active_requests(struct intel_engine_cs *engine,
2349 * No need for an engine->irq_seqno_barrier() before the seqno reads.
2355 intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
2364 if (intel_uc_uses_guc_submission(&engine->gt->uc))
2365 intel_guc_dump_active_requests(engine, hung_rq, m);
2367 intel_execlists_dump_active_requests(engine, hung_rq, m);
2373 void intel_engine_dump(struct intel_engine_cs *engine,
2377 struct i915_gpu_error * const error = &engine->i915->gpu_error;
2390 if (intel_gt_is_wedged(engine->gt))
2393 drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
2395 str_yes_no(!llist_empty(&engine->barrier_tasks)));
2397 ewma__engine_latency_read(&engine->latency));
2398 if (intel_engine_supports_stats(engine))
2400 ktime_to_ms(intel_engine_get_busy_time(engine,
2403 engine->fw_domain, READ_ONCE(engine->fw_active));
2406 rq = READ_ONCE(engine->heartbeat.systole);
2412 i915_reset_engine_count(error, engine),
2414 print_properties(engine, m);
2416 engine_dump_active_requests(engine, m);
2418 drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
2419 wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
2421 intel_engine_print_registers(engine, m);
2422 intel_runtime_pm_put(engine->uncore->rpm, wakeref);
2427 intel_execlists_show_requests(engine, m, i915_request_show, 8);
2430 hexdump(m, engine->status_page.addr, PAGE_SIZE);
2432 drm_printf(m, "Idle? %s\n", str_yes_no(intel_engine_is_idle(engine)));
2434 intel_engine_print_breadcrumbs(engine, m);
2438 * intel_engine_get_busy_time() - Return current accumulated engine busyness
2439 * @engine: engine to report on
2442 * Returns accumulated time @engine was busy since engine stats were enabled.
2444 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
2446 return engine->busyness(engine, now);
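A usage sketch stitched from the fragments at lines 2398-2400 above: only engines that support stats report busyness, with the sample timestamp returned through the ktime_t out-parameter. The drm_printf formatting is assumed:

	if (intel_engine_supports_stats(engine)) {
		ktime_t now;

		drm_printf(m, "\tRuntime: %lldms\n",
			   ktime_to_ms(intel_engine_get_busy_time(engine, &now)));
	}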
2463 static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
2472 GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));
2475 * We are called by error capture, by reset, and when dumping engine
2481 * not need an engine->irq_seqno_barrier() before the seqno reads.
2485 lockdep_assert_held(&engine->sched_engine->lock);
2488 request = execlists_active(&engine->execlists);
2503 list_for_each_entry(request, &engine->sched_engine->requests,
2515 void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
2520 *ce = intel_engine_get_hung_context(engine);
2522 intel_engine_clear_hung_context(engine);
2532 if (intel_uc_uses_guc_submission(&engine->gt->uc))
2535 spin_lock_irqsave(&engine->sched_engine->lock, flags);
2536 *rq = engine_execlist_find_hung_request(engine);
2539 spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
2542 void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
2547 * so for simplicity we'll take care of this in the RCS engine's
2552 if (!CCS_MASK(engine->gt))
2555 intel_uncore_write(engine->uncore, GEN12_RCU_MODE,