Lines matching the identifier "engine" (full-identifier search)

35 static void set_hwstam(struct intel_engine_cs *engine, u32 mask)  in set_hwstam()  argument
41 if (engine->class == RENDER_CLASS) { in set_hwstam()
42 if (GRAPHICS_VER(engine->i915) >= 6) in set_hwstam()
48 intel_engine_set_hwsp_writemask(engine, mask); in set_hwstam()
51 static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys) in set_hws_pga() argument
56 if (GRAPHICS_VER(engine->i915) >= 4) in set_hws_pga()
59 intel_uncore_write(engine->uncore, HWS_PGA, addr); in set_hws_pga()
62 static struct page *status_page(struct intel_engine_cs *engine) in status_page() argument
64 struct drm_i915_gem_object *obj = engine->status_page.vma->obj; in status_page()
70 static void ring_setup_phys_status_page(struct intel_engine_cs *engine) in ring_setup_phys_status_page() argument
72 set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine)))); in ring_setup_phys_status_page()
73 set_hwstam(engine, ~0u); in ring_setup_phys_status_page()
76 static void set_hwsp(struct intel_engine_cs *engine, u32 offset) in set_hwsp() argument
84 if (GRAPHICS_VER(engine->i915) == 7) { in set_hwsp()
85 switch (engine->id) { in set_hwsp()
91 GEM_BUG_ON(engine->id); in set_hwsp()
106 } else if (GRAPHICS_VER(engine->i915) == 6) { in set_hwsp()
107 hwsp = RING_HWS_PGA_GEN6(engine->mmio_base); in set_hwsp()
109 hwsp = RING_HWS_PGA(engine->mmio_base); in set_hwsp()
112 intel_uncore_write_fw(engine->uncore, hwsp, offset); in set_hwsp()
113 intel_uncore_posting_read_fw(engine->uncore, hwsp); in set_hwsp()
116 static void flush_cs_tlb(struct intel_engine_cs *engine) in flush_cs_tlb() argument
118 if (!IS_GRAPHICS_VER(engine->i915, 6, 7)) in flush_cs_tlb()
122 if ((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0) in flush_cs_tlb()
123 drm_warn(&engine->i915->drm, "%s not idle before sync flush!\n", in flush_cs_tlb()
124 engine->name); in flush_cs_tlb()
126 ENGINE_WRITE_FW(engine, RING_INSTPM, in flush_cs_tlb()
129 if (__intel_wait_for_register_fw(engine->uncore, in flush_cs_tlb()
130 RING_INSTPM(engine->mmio_base), in flush_cs_tlb()
133 ENGINE_TRACE(engine, in flush_cs_tlb()
137 static void ring_setup_status_page(struct intel_engine_cs *engine) in ring_setup_status_page() argument
139 set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma)); in ring_setup_status_page()
140 set_hwstam(engine, ~0u); in ring_setup_status_page()
142 flush_cs_tlb(engine); in ring_setup_status_page()
158 static void set_pp_dir(struct intel_engine_cs *engine) in set_pp_dir() argument
160 struct i915_address_space *vm = vm_alias(engine->gt->vm); in set_pp_dir()
165 ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G); in set_pp_dir()
166 ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm)); in set_pp_dir()
168 if (GRAPHICS_VER(engine->i915) >= 7) { in set_pp_dir()
169 ENGINE_WRITE_FW(engine, in set_pp_dir()
175 static bool stop_ring(struct intel_engine_cs *engine) in stop_ring() argument
178 ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL)); in stop_ring()
179 ENGINE_POSTING_READ(engine, RING_HEAD); in stop_ring()
182 ENGINE_WRITE_FW(engine, RING_CTL, 0); in stop_ring()
183 ENGINE_POSTING_READ(engine, RING_CTL); in stop_ring()
186 ENGINE_WRITE_FW(engine, RING_HEAD, 0); in stop_ring()
187 ENGINE_WRITE_FW(engine, RING_TAIL, 0); in stop_ring()
189 return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0; in stop_ring()
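The stop_ring() lines above park the hardware ring by copying RING_TAIL into RING_HEAD, clearing RING_CTL, and then zeroing both pointers; xcs_resume() and reset_rewind() further down reprogram the same head/tail pair and wrap offsets to the ring size (intel_ring_wrap at line 414). Underneath, these registers describe an ordinary power-of-two circular buffer. Below is a minimal, self-contained user-space sketch of that head/tail model only; it is not the driver's code, and every name in it (toy_ring, toy_ring_wrap, and so on) is invented for illustration.

#include <assert.h>
#include <stdint.h>

/* Toy ring with a power-of-two size, mirroring the HEAD/TAIL registers:
 * the consumer advances head, the producer advances tail, and both wrap
 * with a mask, as intel_ring_wrap() does for the real ring. */
struct toy_ring {
	uint32_t head;    /* next byte the consumer will read  */
	uint32_t tail;    /* next byte the producer will write */
	uint32_t size;    /* must be a power of two            */
	uint8_t  buf[4096];
};

static uint32_t toy_ring_wrap(const struct toy_ring *r, uint32_t pos)
{
	return pos & (r->size - 1);
}

static uint32_t toy_ring_space(const struct toy_ring *r)
{
	/* Free space, keeping one byte unused so head == tail means "empty". */
	return toy_ring_wrap(r, r->head - r->tail - 1);
}

static void toy_ring_emit(struct toy_ring *r, const void *data, uint32_t len)
{
	assert(toy_ring_space(r) >= len);
	for (uint32_t i = 0; i < len; i++)
		r->buf[toy_ring_wrap(r, r->tail + i)] = ((const uint8_t *)data)[i];
	r->tail = toy_ring_wrap(r, r->tail + len);	/* "write RING_TAIL" */
}

static void toy_ring_stop(struct toy_ring *r)
{
	/* Mirrors the two-step stop_ring() sequence: park head at tail
	 * (only meaningful on real hardware), then reset both to zero. */
	r->head = r->tail;
	r->head = r->tail = 0;
}

int main(void)
{
	struct toy_ring r = { .size = sizeof(r.buf) };
	uint32_t cmd = 0xdeadbeef;

	toy_ring_emit(&r, &cmd, sizeof(cmd));
	assert(r.tail == 4 && r.head == 0);
	toy_ring_stop(&r);
	assert(r.head == 0 && r.tail == 0);
	return 0;
}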
192 static int xcs_resume(struct intel_engine_cs *engine) in xcs_resume() argument
194 struct intel_ring *ring = engine->legacy.ring; in xcs_resume()
196 ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n", in xcs_resume()
203 intel_synchronize_hardirq(engine->i915); in xcs_resume()
204 if (!stop_ring(engine)) in xcs_resume()
207 if (HWS_NEEDS_PHYSICAL(engine->i915)) in xcs_resume()
208 ring_setup_phys_status_page(engine); in xcs_resume()
210 ring_setup_status_page(engine); in xcs_resume()
212 intel_breadcrumbs_reset(engine->breadcrumbs); in xcs_resume()
215 ENGINE_POSTING_READ(engine, RING_HEAD); in xcs_resume()
223 ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma)); in xcs_resume()
230 set_pp_dir(engine); in xcs_resume()
233 ENGINE_WRITE_FW(engine, RING_HEAD, ring->head); in xcs_resume()
234 ENGINE_WRITE_FW(engine, RING_TAIL, ring->head); in xcs_resume()
235 ENGINE_POSTING_READ(engine, RING_TAIL); in xcs_resume()
237 ENGINE_WRITE_FW(engine, RING_CTL, in xcs_resume()
241 if (__intel_wait_for_register_fw(engine->uncore, in xcs_resume()
242 RING_CTL(engine->mmio_base), in xcs_resume()
247 if (GRAPHICS_VER(engine->i915) > 2) in xcs_resume()
248 ENGINE_WRITE_FW(engine, in xcs_resume()
253 ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail); in xcs_resume()
254 ENGINE_POSTING_READ(engine, RING_TAIL); in xcs_resume()
258 intel_engine_signal_breadcrumbs(engine); in xcs_resume()
262 drm_err(&engine->i915->drm, in xcs_resume()
265 engine->name, in xcs_resume()
266 ENGINE_READ(engine, RING_CTL), in xcs_resume()
267 ENGINE_READ(engine, RING_CTL) & RING_VALID, in xcs_resume()
268 ENGINE_READ(engine, RING_HEAD), ring->head, in xcs_resume()
269 ENGINE_READ(engine, RING_TAIL), ring->tail, in xcs_resume()
270 ENGINE_READ(engine, RING_START), in xcs_resume()
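The xcs_resume() lines above program RING_START, RING_HEAD, RING_TAIL and RING_CTL, then use __intel_wait_for_register_fw() to poll RING_CTL until the ring reports valid (the error path at lines 262-270 dumps RING_CTL and RING_VALID when it does not). The general shape is: write a control register, then poll a status bit with a bounded wait. A small self-contained sketch of that pattern, with made-up names and a plain variable standing in for the register, is below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the pattern used around RING_CTL in xcs_resume(): program
 * a control register, then poll it with a bounded wait until the hardware
 * reports the enable bit.  The "register" is a plain variable and all
 * names are invented for illustration. */

#define TOY_RING_VALID	(1u << 0)

static uint32_t fake_ring_ctl;          /* stands in for RING_CTL          */
static unsigned int fake_latency = 3;   /* reads before the enable latches */

static void toy_write_ctl(uint32_t val)
{
	fake_ring_ctl = val;
}

static uint32_t toy_read_ctl(void)
{
	/* Model a device that needs a few reads before the enable latches. */
	if (fake_latency) {
		fake_latency--;
		return fake_ring_ctl & ~TOY_RING_VALID;
	}
	return fake_ring_ctl;
}

static bool toy_wait_for_bit(uint32_t (*read)(void), uint32_t mask,
			     uint32_t value, unsigned int retries)
{
	while (retries--) {
		if ((read() & mask) == value)
			return true;
		/* A real driver sleeps or busy-waits with a timeout here. */
	}
	return false;
}

int main(void)
{
	toy_write_ctl(TOY_RING_VALID);

	if (!toy_wait_for_bit(toy_read_ctl, TOY_RING_VALID,
			      TOY_RING_VALID, 50)) {
		fprintf(stderr, "ring failed to start\n");
		return 1;
	}

	printf("ring reports valid after programming\n");
	return 0;
}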
275 static void sanitize_hwsp(struct intel_engine_cs *engine) in sanitize_hwsp() argument
279 list_for_each_entry(tl, &engine->status_page.timelines, engine_link) in sanitize_hwsp()
283 static void xcs_sanitize(struct intel_engine_cs *engine) in xcs_sanitize() argument
295 memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE); in xcs_sanitize()
302 sanitize_hwsp(engine); in xcs_sanitize()
305 drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE); in xcs_sanitize()
307 intel_engine_reset_pinned_contexts(engine); in xcs_sanitize()
310 static void reset_prepare(struct intel_engine_cs *engine) in reset_prepare() argument
327 ENGINE_TRACE(engine, "\n"); in reset_prepare()
328 intel_engine_stop_cs(engine); in reset_prepare()
330 if (!stop_ring(engine)) { in reset_prepare()
332 ENGINE_TRACE(engine, in reset_prepare()
335 ENGINE_READ_FW(engine, RING_CTL), in reset_prepare()
336 ENGINE_READ_FW(engine, RING_HEAD), in reset_prepare()
337 ENGINE_READ_FW(engine, RING_TAIL), in reset_prepare()
338 ENGINE_READ_FW(engine, RING_START)); in reset_prepare()
339 if (!stop_ring(engine)) { in reset_prepare()
340 drm_err(&engine->i915->drm, in reset_prepare()
343 engine->name, in reset_prepare()
344 ENGINE_READ_FW(engine, RING_CTL), in reset_prepare()
345 ENGINE_READ_FW(engine, RING_HEAD), in reset_prepare()
346 ENGINE_READ_FW(engine, RING_TAIL), in reset_prepare()
347 ENGINE_READ_FW(engine, RING_START)); in reset_prepare()
352 static void reset_rewind(struct intel_engine_cs *engine, bool stalled) in reset_rewind() argument
359 spin_lock_irqsave(&engine->sched_engine->lock, flags); in reset_rewind()
361 list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) { in reset_rewind()
370 * The guilty request will get skipped on a hung engine. in reset_rewind()
409 GEM_BUG_ON(rq->ring != engine->legacy.ring); in reset_rewind()
412 head = engine->legacy.ring->tail; in reset_rewind()
414 engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head); in reset_rewind()
416 spin_unlock_irqrestore(&engine->sched_engine->lock, flags); in reset_rewind()
419 static void reset_finish(struct intel_engine_cs *engine) in reset_finish() argument
423 static void reset_cancel(struct intel_engine_cs *engine) in reset_cancel() argument
428 spin_lock_irqsave(&engine->sched_engine->lock, flags); in reset_cancel()
431 list_for_each_entry(request, &engine->sched_engine->requests, sched.link) in reset_cancel()
433 intel_engine_signal_breadcrumbs(engine); in reset_cancel()
437 spin_unlock_irqrestore(&engine->sched_engine->lock, flags); in reset_cancel()
445 ENGINE_WRITE(request->engine, RING_TAIL, in i9xx_submit_request()
477 shmem_read(ce->default_state, 0, vaddr, ce->engine->context_size); in ring_context_init_default_state()
526 alloc_context_vma(struct intel_engine_cs *engine) in alloc_context_vma() argument
528 struct drm_i915_private *i915 = engine->i915; in alloc_context_vma()
533 obj = i915_gem_object_create_shmem(i915, engine->context_size); in alloc_context_vma()
555 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); in alloc_context_vma()
570 struct intel_engine_cs *engine = ce->engine; in ring_context_alloc() local
573 ce->default_state = engine->default_state; in ring_context_alloc()
576 GEM_BUG_ON(!engine->legacy.ring); in ring_context_alloc()
577 ce->ring = engine->legacy.ring; in ring_context_alloc()
578 ce->timeline = intel_timeline_get(engine->legacy.timeline); in ring_context_alloc()
581 if (engine->context_size) { in ring_context_alloc()
584 vma = alloc_context_vma(engine); in ring_context_alloc()
609 struct intel_engine_cs *engine; in ring_context_revoke() local
614 engine = rq->engine; in ring_context_revoke()
615 lockdep_assert_held(&engine->sched_engine->lock); in ring_context_revoke()
616 list_for_each_entry_continue(rq, &engine->sched_engine->requests, in ring_context_revoke()
627 struct intel_engine_cs *engine = NULL; in ring_context_cancel_request() local
629 i915_request_active_engine(rq, &engine); in ring_context_cancel_request()
631 if (engine && intel_engine_pulse(engine)) in ring_context_cancel_request()
632 intel_gt_handle_error(engine->gt, engine->mask, 0, in ring_context_cancel_request()
660 const struct intel_engine_cs * const engine = rq->engine; in load_pd_dir() local
668 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base)); in load_pd_dir()
672 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); in load_pd_dir()
677 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base)); in load_pd_dir()
678 *cs++ = intel_gt_scratch_offset(engine->gt, in load_pd_dir()
682 *cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base)); in load_pd_dir()
687 return rq->engine->emit_flush(rq, EMIT_FLUSH); in load_pd_dir()
694 struct intel_engine_cs *engine = rq->engine; in mi_set_context() local
695 struct drm_i915_private *i915 = engine->i915; in mi_set_context()
698 IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0; in mi_set_context()
726 for_each_engine(signaller, engine->gt, id) { in mi_set_context()
727 if (signaller == engine) in mi_set_context()
760 *cs++ = i915_ggtt_offset(engine->kernel_context->state) | in mi_set_context()
780 for_each_engine(signaller, engine->gt, id) { in mi_set_context()
781 if (signaller == engine) in mi_set_context()
793 *cs++ = intel_gt_scratch_offset(engine->gt, in mi_set_context()
865 ret = rq->engine->emit_flush(rq, EMIT_FLUSH); in switch_mm()
881 return rq->engine->emit_flush(rq, EMIT_INVALIDATE); in switch_mm()
886 struct intel_engine_cs *engine = rq->engine; in clear_residuals() local
889 ret = switch_mm(rq, vm_alias(engine->kernel_context->vm)); in clear_residuals()
893 if (engine->kernel_context->state) { in clear_residuals()
895 engine->kernel_context, in clear_residuals()
901 ret = engine->emit_bb_start(rq, in clear_residuals()
902 i915_vma_offset(engine->wa_ctx.vma), 0, in clear_residuals()
907 ret = engine->emit_flush(rq, EMIT_FLUSH); in clear_residuals()
912 return engine->emit_flush(rq, EMIT_INVALIDATE); in clear_residuals()
917 struct intel_engine_cs *engine = rq->engine; in switch_context() local
922 GEM_BUG_ON(HAS_EXECLISTS(engine->i915)); in switch_context()
924 if (engine->wa_ctx.vma && ce != engine->kernel_context) { in switch_context()
925 if (engine->wa_ctx.vma->private != ce && in switch_context()
931 residuals = &engine->wa_ctx.vma->private; in switch_context()
942 GEM_BUG_ON(engine->id != RCS0); in switch_context()
995 ret = request->engine->emit_flush(request, EMIT_INVALIDATE); in ring_request_alloc()
1009 struct intel_uncore *uncore = request->engine->uncore; in gen6_bsd_submit_request()
1045 static void i9xx_set_default_submission(struct intel_engine_cs *engine) in i9xx_set_default_submission() argument
1047 engine->submit_request = i9xx_submit_request; in i9xx_set_default_submission()
1050 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine) in gen6_bsd_set_default_submission() argument
1052 engine->submit_request = gen6_bsd_submit_request; in gen6_bsd_set_default_submission()
1055 static void ring_release(struct intel_engine_cs *engine) in ring_release() argument
1057 struct drm_i915_private *i915 = engine->i915; in ring_release()
1060 (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0); in ring_release()
1062 intel_engine_cleanup_common(engine); in ring_release()
1064 if (engine->wa_ctx.vma) { in ring_release()
1065 intel_context_put(engine->wa_ctx.vma->private); in ring_release()
1066 i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); in ring_release()
1069 intel_ring_unpin(engine->legacy.ring); in ring_release()
1070 intel_ring_put(engine->legacy.ring); in ring_release()
1072 intel_timeline_unpin(engine->legacy.timeline); in ring_release()
1073 intel_timeline_put(engine->legacy.timeline); in ring_release()
1076 static void irq_handler(struct intel_engine_cs *engine, u16 iir) in irq_handler() argument
1078 intel_engine_signal_breadcrumbs(engine); in irq_handler()
1081 static void setup_irq(struct intel_engine_cs *engine) in setup_irq() argument
1083 struct drm_i915_private *i915 = engine->i915; in setup_irq()
1085 intel_engine_set_irq_handler(engine, irq_handler); in setup_irq()
1088 engine->irq_enable = gen6_irq_enable; in setup_irq()
1089 engine->irq_disable = gen6_irq_disable; in setup_irq()
1091 engine->irq_enable = gen5_irq_enable; in setup_irq()
1092 engine->irq_disable = gen5_irq_disable; in setup_irq()
1094 engine->irq_enable = gen3_irq_enable; in setup_irq()
1095 engine->irq_disable = gen3_irq_disable; in setup_irq()
1097 engine->irq_enable = gen2_irq_enable; in setup_irq()
1098 engine->irq_disable = gen2_irq_disable; in setup_irq()
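setup_irq() above, like setup_common() and the per-class setup_rcs()/setup_vcs()/setup_bcs()/setup_vecs() below, fills the engine's function pointers once at setup time with the variant that matches the hardware generation (gen6/gen5/gen3/gen2 irq hooks here), so later callers simply invoke engine->irq_enable() without re-checking the version. A minimal stand-alone illustration of that dispatch pattern, with invented names, is below.

#include <stdio.h>

/* Toy version of the pattern in setup_irq()/setup_common(): pick the
 * per-generation implementation once at setup time and store it in
 * function pointers, so the hot path never branches on the version.
 * All names here are invented for the example. */
struct toy_engine {
	int gfx_ver;
	void (*irq_enable)(struct toy_engine *e);
	void (*irq_disable)(struct toy_engine *e);
};

static void newer_enable(struct toy_engine *e)  { printf("gen%d: unmask irq\n", e->gfx_ver); }
static void newer_disable(struct toy_engine *e) { printf("gen%d: mask irq\n", e->gfx_ver); }
static void legacy_enable(struct toy_engine *e) { printf("gen%d: legacy enable\n", e->gfx_ver); }
static void legacy_disable(struct toy_engine *e){ printf("gen%d: legacy disable\n", e->gfx_ver); }

static void toy_setup_irq(struct toy_engine *e)
{
	if (e->gfx_ver >= 6) {
		e->irq_enable  = newer_enable;
		e->irq_disable = newer_disable;
	} else {
		e->irq_enable  = legacy_enable;
		e->irq_disable = legacy_disable;
	}
}

int main(void)
{
	struct toy_engine e = { .gfx_ver = 7 };

	toy_setup_irq(&e);
	e.irq_enable(&e);	/* callers never check gfx_ver again */
	e.irq_disable(&e);
	return 0;
}

Resolving the hooks once keeps the interrupt path free of generation checks, which is the same reason the driver binds emit_flush, emit_bb_start and emit_fini_breadcrumb to generation-specific implementations during setup.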
1104 lockdep_assert_held(&rq->engine->sched_engine->lock); in add_to_engine()
1105 list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests); in add_to_engine()
1110 spin_lock_irq(&rq->engine->sched_engine->lock); in remove_from_engine()
1116 spin_unlock_irq(&rq->engine->sched_engine->lock); in remove_from_engine()
1121 static void setup_common(struct intel_engine_cs *engine) in setup_common() argument
1123 struct drm_i915_private *i915 = engine->i915; in setup_common()
1128 setup_irq(engine); in setup_common()
1130 engine->resume = xcs_resume; in setup_common()
1131 engine->sanitize = xcs_sanitize; in setup_common()
1133 engine->reset.prepare = reset_prepare; in setup_common()
1134 engine->reset.rewind = reset_rewind; in setup_common()
1135 engine->reset.cancel = reset_cancel; in setup_common()
1136 engine->reset.finish = reset_finish; in setup_common()
1138 engine->add_active_request = add_to_engine; in setup_common()
1139 engine->remove_active_request = remove_from_engine; in setup_common()
1141 engine->cops = &ring_context_ops; in setup_common()
1142 engine->request_alloc = ring_request_alloc; in setup_common()
1147 * engine->emit_init_breadcrumb(). in setup_common()
1149 engine->emit_fini_breadcrumb = gen3_emit_breadcrumb; in setup_common()
1151 engine->emit_fini_breadcrumb = gen5_emit_breadcrumb; in setup_common()
1153 engine->set_default_submission = i9xx_set_default_submission; in setup_common()
1156 engine->emit_bb_start = gen6_emit_bb_start; in setup_common()
1158 engine->emit_bb_start = gen4_emit_bb_start; in setup_common()
1160 engine->emit_bb_start = i830_emit_bb_start; in setup_common()
1162 engine->emit_bb_start = gen3_emit_bb_start; in setup_common()
1165 static void setup_rcs(struct intel_engine_cs *engine) in setup_rcs() argument
1167 struct drm_i915_private *i915 = engine->i915; in setup_rcs()
1170 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT; in setup_rcs()
1172 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; in setup_rcs()
1175 engine->emit_flush = gen7_emit_flush_rcs; in setup_rcs()
1176 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs; in setup_rcs()
1178 engine->emit_flush = gen6_emit_flush_rcs; in setup_rcs()
1179 engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs; in setup_rcs()
1181 engine->emit_flush = gen4_emit_flush_rcs; in setup_rcs()
1184 engine->emit_flush = gen2_emit_flush; in setup_rcs()
1186 engine->emit_flush = gen4_emit_flush_rcs; in setup_rcs()
1187 engine->irq_enable_mask = I915_USER_INTERRUPT; in setup_rcs()
1191 engine->emit_bb_start = hsw_emit_bb_start; in setup_rcs()
1194 static void setup_vcs(struct intel_engine_cs *engine) in setup_vcs() argument
1196 struct drm_i915_private *i915 = engine->i915; in setup_vcs()
1201 engine->set_default_submission = gen6_bsd_set_default_submission; in setup_vcs()
1202 engine->emit_flush = gen6_emit_flush_vcs; in setup_vcs()
1203 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; in setup_vcs()
1206 engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs; in setup_vcs()
1208 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs; in setup_vcs()
1210 engine->emit_flush = gen4_emit_flush_vcs; in setup_vcs()
1212 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; in setup_vcs()
1214 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; in setup_vcs()
1218 static void setup_bcs(struct intel_engine_cs *engine) in setup_bcs() argument
1220 struct drm_i915_private *i915 = engine->i915; in setup_bcs()
1222 engine->emit_flush = gen6_emit_flush_xcs; in setup_bcs()
1223 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; in setup_bcs()
1226 engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs; in setup_bcs()
1228 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs; in setup_bcs()
1231 static void setup_vecs(struct intel_engine_cs *engine) in setup_vecs() argument
1233 struct drm_i915_private *i915 = engine->i915; in setup_vecs()
1237 engine->emit_flush = gen6_emit_flush_xcs; in setup_vecs()
1238 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; in setup_vecs()
1239 engine->irq_enable = hsw_irq_enable_vecs; in setup_vecs()
1240 engine->irq_disable = hsw_irq_disable_vecs; in setup_vecs()
1242 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs; in setup_vecs()
1245 static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine, in gen7_ctx_switch_bb_setup() argument
1248 return gen7_setup_clear_gpr_bb(engine, vma); in gen7_ctx_switch_bb_setup()
1251 static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine, in gen7_ctx_switch_bb_init() argument
1265 err = gen7_ctx_switch_bb_setup(engine, vma); in gen7_ctx_switch_bb_init()
1269 engine->wa_ctx.vma = vma; in gen7_ctx_switch_bb_init()
1277 static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine) in gen7_ctx_vma() argument
1283 if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS) in gen7_ctx_vma()
1286 err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */); in gen7_ctx_vma()
1294 obj = i915_gem_object_create_internal(engine->i915, size); in gen7_ctx_vma()
1298 vma = i915_vma_instance(obj, engine->gt->vm, NULL); in gen7_ctx_vma()
1304 vma->private = intel_context_create(engine); /* dummy residuals */ in gen7_ctx_vma()
1315 int intel_ring_submission_setup(struct intel_engine_cs *engine) in intel_ring_submission_setup() argument
1323 setup_common(engine); in intel_ring_submission_setup()
1325 switch (engine->class) { in intel_ring_submission_setup()
1327 setup_rcs(engine); in intel_ring_submission_setup()
1330 setup_vcs(engine); in intel_ring_submission_setup()
1333 setup_bcs(engine); in intel_ring_submission_setup()
1336 setup_vecs(engine); in intel_ring_submission_setup()
1339 MISSING_CASE(engine->class); in intel_ring_submission_setup()
1343 timeline = intel_timeline_create_from_engine(engine, in intel_ring_submission_setup()
1351 ring = intel_engine_create_ring(engine, SZ_16K); in intel_ring_submission_setup()
1357 GEM_BUG_ON(engine->legacy.ring); in intel_ring_submission_setup()
1358 engine->legacy.ring = ring; in intel_ring_submission_setup()
1359 engine->legacy.timeline = timeline; in intel_ring_submission_setup()
1361 gen7_wa_vma = gen7_ctx_vma(engine); in intel_ring_submission_setup()
1374 err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww); in intel_ring_submission_setup()
1385 GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma); in intel_ring_submission_setup()
1388 err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma); in intel_ring_submission_setup()
1406 engine->release = ring_release; in intel_ring_submission_setup()
1420 intel_engine_cleanup_common(engine); in intel_ring_submission_setup()