Lines matching references to "ce" (struct intel_context *) in the i915 LRC live selftests

78 static int emit_semaphore_signal(struct intel_context *ce, void *slot)  in emit_semaphore_signal()  argument
81 i915_ggtt_offset(ce->engine->status_page.vma) + in emit_semaphore_signal()
86 rq = intel_context_create_request(ce); in emit_semaphore_signal()
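
The emit_semaphore_signal() lines above follow a standard selftest idiom: build a request on the target context and emit a GGTT store into a slot of the engine's status page, which other batches poll as a software semaphore. A minimal sketch of that idiom, assuming the i915 GT internals visible in the listing (intel_context_create_request(), the engine's status_page.vma); the exact dword values are illustrative:

/* Illustrative: signal a software semaphore at @slot from a request on @ce. */
static int emit_semaphore_signal(struct intel_context *ce, void *slot)
{
	const u32 offset =
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;
	u32 *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = offset;
	*cs++ = 0;
	*cs++ = 1;

	intel_ring_advance(rq, cs);
	i915_request_add(rq);
	return 0;
}
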
108 static int context_flush(struct intel_context *ce, long timeout) in context_flush() argument
114 rq = intel_engine_create_kernel_request(ce->engine); in context_flush()
118 fence = i915_active_fence_get(&ce->timeline->last_request); in context_flush()
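
context_flush() waits for everything queued on a context to retire before the test inspects saved state. A simplified sketch of the waiting half, assuming i915's active-tracking API (the real helper additionally submits a kernel request on the engine, as line 114 shows, to force a context switch):

/* Sketch: wait for the last request on @ce's timeline to complete. */
static long flush_context(struct intel_context *ce, long timeout)
{
	struct dma_fence *fence;

	fence = i915_active_fence_get(&ce->timeline->last_request);
	if (!fence)
		return timeout; /* nothing in flight */

	timeout = dma_fence_wait_timeout(fence, false, timeout);
	dma_fence_put(fence);

	return timeout; /* 0 on timeout, remaining jiffies on success */
}
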
405 struct intel_context *ce; in __live_lrc_state() local
418 ce = intel_context_create(engine); in __live_lrc_state()
419 if (IS_ERR(ce)) in __live_lrc_state()
420 return PTR_ERR(ce); in __live_lrc_state()
426 err = intel_context_pin_ww(ce, &ww); in __live_lrc_state()
430 rq = i915_request_create(ce); in __live_lrc_state()
448 expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma); in __live_lrc_state()
463 expected[RING_TAIL_IDX] = ce->ring->tail; in __live_lrc_state()
490 intel_context_unpin(ce); in __live_lrc_state()
498 intel_context_put(ce); in __live_lrc_state()
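
__live_lrc_state() exercises the full context lifecycle: create, pin, submit a request, then unpin and drop the reference. The listing shows the ww-locked pin variant (intel_context_pin_ww() inside a ww transaction); a minimal sketch of the same lifecycle using the plain pin for brevity:

/* Sketch of the create/pin/request/unpin/put lifecycle (i915 internals). */
static int run_nop(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;
	int err = 0;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_pin(ce);
	if (err)
		goto out_put;

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_unpin;
	}
	i915_request_add(rq);

out_unpin:
	intel_context_unpin(ce);
out_put:
	intel_context_put(ce);
	return err;
}
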
532 static int gpr_make_dirty(struct intel_context *ce) in gpr_make_dirty() argument
538 rq = intel_context_create_request(ce); in gpr_make_dirty()
550 *cs++ = CS_GPR(ce->engine, n); in gpr_make_dirty()
564 __gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot) in __gpr_read() argument
567 i915_ggtt_offset(ce->engine->status_page.vma) + in __gpr_read()
574 rq = intel_context_create_request(ce); in __gpr_read()
597 *cs++ = CS_GPR(ce->engine, n); in __gpr_read()
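
gpr_make_dirty() loads known junk into the command streamer's general-purpose registers with MI_LOAD_REGISTER_IMM, and __gpr_read() stores them back out with MI_STORE_REGISTER_MEM so the test can check what survives a context switch. A sketch of the dirtying half; CS_GPR and NUM_GPR_DW mirror local constants of the selftest and are assumptions here:

#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4) /* assumed */
#define NUM_GPR_DW (16 * 2) /* 16 64-bit GPRs, viewed as dwords */

/* Sketch: fill every CS GPR of @ce's engine with a known value. */
static int gpr_make_dirty(struct intel_context *ce)
{
	struct i915_request *rq;
	u32 *cs;
	int n;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

	*cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
	for (n = 0; n < NUM_GPR_DW; n++) {
		*cs++ = CS_GPR(ce->engine, n);
		*cs++ = STACK_MAGIC;
	}
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);
	i915_request_add(rq);
	return 0;
}
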
619 struct intel_context *ce; in __live_lrc_gpr() local
632 ce = intel_context_create(engine); in __live_lrc_gpr()
633 if (IS_ERR(ce)) in __live_lrc_gpr()
634 return PTR_ERR(ce); in __live_lrc_gpr()
636 rq = __gpr_read(ce, scratch, slot); in __live_lrc_gpr()
692 intel_context_put(ce); in __live_lrc_gpr()
737 create_timestamp(struct intel_context *ce, void *slot, int idx) in create_timestamp() argument
740 i915_ggtt_offset(ce->engine->status_page.vma) + in create_timestamp()
746 rq = intel_context_create_request(ce); in create_timestamp()
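
create_timestamp() samples the context-local timestamp register into a status-page slot so the test can compare live and saved values. A sketch, assuming RING_CTX_TIMESTAMP from intel_gt_regs.h (the real helper also makes the request wait on a semaphore so the sample is taken at a controlled point):

/* Sketch: store RING_CTX_TIMESTAMP into status-page slot @idx. */
static struct i915_request *
create_timestamp(struct intel_context *ce, void *slot, int idx)
{
	const u32 offset =
		i915_ggtt_offset(ce->engine->status_page.vma) +
		offset_in_page(slot);
	struct i915_request *rq;
	u32 *cs;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return rq;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return ERR_CAST(cs);
	}

	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(ce->engine->mmio_base));
	*cs++ = offset + idx * sizeof(u32);
	*cs++ = 0;

	intel_ring_advance(rq, cs);

	i915_request_get(rq);
	i915_request_add(rq);
	return rq;
}
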
788 struct intel_context *ce[2]; member
804 arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison; in __lrc_timestamp()
805 rq = create_timestamp(arg->ce[0], slot, 1); in __lrc_timestamp()
814 arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef; in __lrc_timestamp()
815 err = emit_semaphore_signal(arg->ce[1], slot); in __lrc_timestamp()
824 err = context_flush(arg->ce[0], HZ / 2); in __lrc_timestamp()
835 timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]); in __lrc_timestamp()
875 for (i = 0; i < ARRAY_SIZE(data.ce); i++) { in live_lrc_timestamp()
890 data.ce[i] = tmp; in live_lrc_timestamp()
907 for (i = 0; i < ARRAY_SIZE(data.ce); i++) { in live_lrc_timestamp()
908 if (!data.ce[i]) in live_lrc_timestamp()
911 intel_context_unpin(data.ce[i]); in live_lrc_timestamp()
912 intel_context_put(data.ce[i]); in live_lrc_timestamp()
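
The timestamp test poisons CTX_TIMESTAMP in ce[0]'s saved register image, samples the live value from the GPU, and then checks that the value written back on context save has advanced past the poison. A simplified sketch of the invariant; timestamp_advanced() mirrors what I take to be the selftest's local helper, and the exact sequence of checks is an assumption:

/* Assumed helper: monotonicity of a 32-bit timestamp, modulo wraparound. */
static bool timestamp_advanced(u32 start, u32 end)
{
	return (s32)(end - start) > 0;
}

/* Sketch: the saved CTX_TIMESTAMP must have moved past the poison. */
static int check_lrc_timestamp(struct intel_context *ce, u32 poison)
{
	u32 timestamp = READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);

	if (!timestamp_advanced(poison, timestamp))
		return -EINVAL; /* context save never updated the timestamp */

	return 0;
}
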
964 store_context(struct intel_context *ce, struct i915_vma *scratch) in store_context() argument
970 batch = create_user_vma(ce->vm, SZ_64K); in store_context()
980 defaults = shmem_pin_map(ce->engine->default_state); in store_context()
1021 ce->engine->name); in store_context()
1042 shmem_unpin_map(ce->engine->default_state, defaults); in store_context()
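
store_context() (and load_context() below) need to know which registers the hardware context image actually contains. Both discover this by pinning the engine's default context image and walking the MI_LOAD_REGISTER_IMM packets in its register state. A skeleton of that pin/walk/unpin pattern, with the packet parsing elided:

/* Sketch: borrow the engine's default context image while building a batch. */
static int with_default_state(struct intel_context *ce)
{
	u32 *defaults;

	defaults = shmem_pin_map(ce->engine->default_state);
	if (!defaults)
		return -ENOMEM;

	/*
	 * defaults + LRC_STATE_OFFSET / sizeof(u32) is the start of the
	 * saved register state: a series of MI_LOAD_REGISTER_IMM packets
	 * naming every register held in the HW context image.
	 */

	shmem_unpin_map(ce->engine->default_state, defaults);
	return 0;
}
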
1051 record_registers(struct intel_context *ce, in record_registers() argument
1061 b_before = store_context(ce, before); in record_registers()
1065 b_after = store_context(ce, after); in record_registers()
1071 rq = intel_context_create_request(ce); in record_registers()
1108 *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) + in record_registers()
1135 static struct i915_vma *load_context(struct intel_context *ce, u32 poison) in load_context() argument
1141 batch = create_user_vma(ce->vm, SZ_64K); in load_context()
1151 defaults = shmem_pin_map(ce->engine->default_state); in load_context()
1180 ce->engine->name); in load_context()
1190 *cs++ = safe_poison(hw[dw] & get_lri_mask(ce->engine, in load_context()
1200 shmem_unpin_map(ce->engine->default_state, defaults); in load_context()
1208 static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema) in poison_registers() argument
1215 batch = load_context(ce, poison); in poison_registers()
1219 rq = intel_context_create_request(ce); in poison_registers()
1241 *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) + in poison_registers()
1264 struct intel_context *ce, in compare_isolation() argument
1294 lrc = i915_gem_object_pin_map_unlocked(ce->state->obj, in compare_isolation()
1296 ce->state->obj, in compare_isolation()
1304 defaults = shmem_pin_map(ce->engine->default_state); in compare_isolation()
1363 shmem_unpin_map(ce->engine->default_state, defaults); in compare_isolation()
1365 i915_gem_object_unpin_map(ce->state->obj); in compare_isolation()
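
compare_isolation() maps the saved context image of the scrubbed context and compares it, register by register, against the values recorded before and after poisoning. A sketch of the mapping step; I915_MAP_WC is an assumption (the selftest selects a platform-appropriate coherent map type):

/* Sketch: CPU-map @ce's context image and step to the register state. */
static int inspect_lrc_state(struct intel_context *ce)
{
	u32 *lrc;

	lrc = i915_gem_object_pin_map_unlocked(ce->state->obj, I915_MAP_WC);
	if (IS_ERR(lrc))
		return PTR_ERR(lrc);

	lrc += LRC_STATE_OFFSET / sizeof(*lrc); /* skip the per-process HWSP */

	/* ... compare lrc[] against the recorded before/after values ... */

	i915_gem_object_unpin_map(ce->state->obj);
	return 0;
}
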
1558 static int wabb_ctx_submit_req(struct intel_context *ce) in wabb_ctx_submit_req() argument
1563 rq = intel_context_create_request(ce); in wabb_ctx_submit_req()
1582 emit_wabb_ctx_canary(const struct intel_context *ce, in emit_wabb_ctx_canary() argument
1589 *cs++ = i915_ggtt_offset(ce->state) + in emit_wabb_ctx_canary()
1590 context_wa_bb_offset(ce) + in emit_wabb_ctx_canary()
1599 emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs) in emit_indirect_ctx_bb_canary() argument
1601 return emit_wabb_ctx_canary(ce, cs, false); in emit_indirect_ctx_bb_canary()
1605 emit_per_ctx_bb_canary(const struct intel_context *ce, u32 *cs) in emit_per_ctx_bb_canary() argument
1607 return emit_wabb_ctx_canary(ce, cs, true); in emit_per_ctx_bb_canary()
1611 wabb_ctx_setup(struct intel_context *ce, bool per_ctx) in wabb_ctx_setup() argument
1613 u32 *cs = context_wabb(ce, per_ctx); in wabb_ctx_setup()
1618 setup_per_ctx_bb(ce, ce->engine, emit_per_ctx_bb_canary); in wabb_ctx_setup()
1620 setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary); in wabb_ctx_setup()
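
The WA-BB canaries store the live RING_START register into a slot inside the context's own workaround batch page; check_ring_start() below then compares that slot against lrc_reg_state[CTX_RING_START] to prove the batch really ran in this context. A sketch of the canary emitter; CTX_BB_CANARY_OFFSET stands in for a selftest-local constant and is an assumption:

/* Sketch: SRM the engine-relative RING_START into the canary slot. */
static u32 *emit_canary(const struct intel_context *ce, u32 *cs, bool per_ctx)
{
	*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(RING_START(0)); /* CS_MMIO: engine-relative */
	*cs++ = i915_ggtt_offset(ce->state) +
		context_wa_bb_offset(ce) +
		CTX_BB_CANARY_OFFSET +
		(per_ctx ? PAGE_SIZE : 0);
	*cs++ = 0;

	return cs;
}
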
1623 static bool check_ring_start(struct intel_context *ce, bool per_ctx) in check_ring_start() argument
1625 const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) - in check_ring_start()
1626 LRC_STATE_OFFSET + context_wa_bb_offset(ce) + in check_ring_start()
1629 if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START]) in check_ring_start()
1634 ce->lrc_reg_state[CTX_RING_START]); in check_ring_start()
1639 static int wabb_ctx_check(struct intel_context *ce, bool per_ctx) in wabb_ctx_check() argument
1643 err = wabb_ctx_submit_req(ce); in wabb_ctx_check()
1647 if (!check_ring_start(ce, per_ctx)) in wabb_ctx_check()
1760 static struct i915_request *garbage(struct intel_context *ce, in garbage() argument
1766 err = intel_context_pin(ce); in garbage()
1771 ce->lrc_reg_state, in garbage()
1772 ce->engine->context_size - in garbage()
1775 rq = intel_context_create_request(ce); in garbage()
1786 intel_context_unpin(ce); in garbage()
1792 struct intel_context *ce; in __lrc_garbage() local
1796 ce = intel_context_create(engine); in __lrc_garbage()
1797 if (IS_ERR(ce)) in __lrc_garbage()
1798 return PTR_ERR(ce); in __lrc_garbage()
1800 hang = garbage(ce, prng); in __lrc_garbage()
1812 intel_context_set_banned(ce); in __lrc_garbage()
1834 intel_context_put(ce); in __lrc_garbage()
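
garbage() scribbles random bytes over the pinned register state and submits, and __lrc_garbage() then bans the context and checks that the resulting reset is contained. A sketch of the corruption step, assuming the kernel prandom API and the LRC_STATE_OFFSET layout seen elsewhere in the listing:

/* Sketch: corrupt @ce's saved register state, then submit a request on it. */
static struct i915_request *garbage(struct intel_context *ce,
				    struct rnd_state *prng)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (err)
		return ERR_PTR(err);

	prandom_bytes_state(prng,
			    ce->lrc_reg_state,
			    ce->engine->context_size - LRC_STATE_OFFSET);

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq)) {
		intel_context_unpin(ce);
		return rq;
	}

	i915_request_get(rq);
	i915_request_add(rq);

	/* The pin (and mapped lrc_reg_state) is dropped after the reset. */
	return rq;
}
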
1878 struct intel_context *ce; in __live_pphwsp_runtime() local
1883 ce = intel_context_create(engine); in __live_pphwsp_runtime()
1884 if (IS_ERR(ce)) in __live_pphwsp_runtime()
1885 return PTR_ERR(ce); in __live_pphwsp_runtime()
1887 ce->stats.runtime.num_underflow = 0; in __live_pphwsp_runtime()
1888 ce->stats.runtime.max_underflow = 0; in __live_pphwsp_runtime()
1894 rq = intel_context_create_request(ce); in __live_pphwsp_runtime()
1922 intel_context_get_total_runtime_ns(ce), in __live_pphwsp_runtime()
1923 intel_context_get_avg_runtime_ns(ce)); in __live_pphwsp_runtime()
1926 if (ce->stats.runtime.num_underflow) { in __live_pphwsp_runtime()
1929 ce->stats.runtime.num_underflow, in __live_pphwsp_runtime()
1930 ce->stats.runtime.max_underflow); in __live_pphwsp_runtime()
1938 intel_context_put(ce); in __live_pphwsp_runtime()
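
__live_pphwsp_runtime() accumulates runtime over many nop requests and then checks the bookkeeping, using the accessors visible above. A short sketch of the reporting step; the underflow counters under ce->stats.runtime are selftest-only fields:

/* Sketch: report PPHWSP-derived runtime statistics for @ce. */
static void report_runtime(struct intel_context *ce)
{
	pr_info("%s: total %lluns, avg %lluns\n",
		ce->engine->name,
		intel_context_get_total_runtime_ns(ce),
		intel_context_get_avg_runtime_ns(ce));

	if (ce->stats.runtime.num_underflow)
		pr_err("%s: %u underflows, worst %u\n",
		       ce->engine->name,
		       ce->stats.runtime.num_underflow,
		       ce->stats.runtime.max_underflow);
}
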