Lines matching full:cs full:- full:2
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
64 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
96 {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
163 u32 l3cc_table[GEN9_MOCS_SIZE / 2];
176 struct intel_gvt *gvt = engine->i915->gvt; in load_render_mocs()
177 struct intel_uncore *uncore = engine->uncore; in load_render_mocs()
178 u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt; in load_render_mocs()
179 u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list; in load_render_mocs()
188 if (!HAS_ENGINE(engine->gt, ring_id)) in load_render_mocs()
200 for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { in load_render_mocs()
212 u32 *cs; in restore_context_mmio_for_inhibit() local
215 struct intel_gvt *gvt = vgpu->gvt; in restore_context_mmio_for_inhibit()
216 int ring_id = req->engine->id; in restore_context_mmio_for_inhibit()
217 int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id]; in restore_context_mmio_for_inhibit()
222 ret = req->engine->emit_flush(req, EMIT_BARRIER); in restore_context_mmio_for_inhibit()
226 cs = intel_ring_begin(req, count * 2 + 2); in restore_context_mmio_for_inhibit()
227 if (IS_ERR(cs)) in restore_context_mmio_for_inhibit()
228 return PTR_ERR(cs); in restore_context_mmio_for_inhibit()
230 *cs++ = MI_LOAD_REGISTER_IMM(count); in restore_context_mmio_for_inhibit()
231 for (mmio = gvt->engine_mmio_list.mmio; in restore_context_mmio_for_inhibit()
232 i915_mmio_reg_valid(mmio->reg); mmio++) { in restore_context_mmio_for_inhibit()
233 if (mmio->id != ring_id || !mmio->in_context) in restore_context_mmio_for_inhibit()
236 *cs++ = i915_mmio_reg_offset(mmio->reg); in restore_context_mmio_for_inhibit()
237 *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16); in restore_context_mmio_for_inhibit()
239 *(cs-2), *(cs-1), vgpu->id, ring_id); in restore_context_mmio_for_inhibit()
242 *cs++ = MI_NOOP; in restore_context_mmio_for_inhibit()
243 intel_ring_advance(req, cs); in restore_context_mmio_for_inhibit()
245 ret = req->engine->emit_flush(req, EMIT_BARRIER); in restore_context_mmio_for_inhibit()
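For readers without the surrounding code in view: restore_context_mmio_for_inhibit() reserves count * 2 + 2 dwords because an MI_LOAD_REGISTER_IMM sequence is one header dword, then an (offset, value) pair per register, then an MI_NOOP so the allocation stays an even number of dwords. The following is a minimal user-space sketch of that layout only, not driver code; emit_lri() and the DEMO_* opcode values are made-up stand-ins for the real i915 command definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative opcode values only; the real encodings live in the i915 headers. */
#define DEMO_MI_NOOP       0x00000000u
#define DEMO_MI_LRI_HEADER 0x11000000u   /* stand-in for MI_LOAD_REGISTER_IMM */

/*
 * Fill 'cs' with a load-register-immediate sequence for 'count' registers:
 * 1 header dword + count * (offset, value) pairs + 1 MI_NOOP pad, i.e. the
 * "count * 2 + 2" dwords reserved by intel_ring_begin() above.
 */
static uint32_t *emit_lri(uint32_t *cs, const uint32_t *offsets,
			  const uint32_t *values, unsigned int count)
{
	unsigned int i;

	*cs++ = DEMO_MI_LRI_HEADER | count;      /* header: how many writes follow */
	for (i = 0; i < count; i++) {
		*cs++ = offsets[i];              /* register offset */
		*cs++ = values[i];               /* immediate value */
	}
	*cs++ = DEMO_MI_NOOP;                    /* pad to an even dword count */
	return cs;
}

int main(void)
{
	uint32_t buf[2 * 2 + 2];
	uint32_t offs[] = { 0x24d8, 0x24dc };    /* arbitrary example offsets */
	uint32_t vals[] = { 0x1, 0x0 };
	uint32_t *end = emit_lri(buf, offs, vals, 2);
	unsigned int i;

	assert(end - buf == 2 * 2 + 2);          /* matches the ring_begin budget */
	for (i = 0; i < sizeof(buf) / sizeof(buf[0]); i++)
		printf("dword %u: 0x%08x\n", i, buf[i]);
	return 0;
}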
257 u32 *cs; in restore_render_mocs_control_for_inhibit() local
259 cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2); in restore_render_mocs_control_for_inhibit()
260 if (IS_ERR(cs)) in restore_render_mocs_control_for_inhibit()
261 return PTR_ERR(cs); in restore_render_mocs_control_for_inhibit()
263 *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE); in restore_render_mocs_control_for_inhibit()
266 *cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index)); in restore_render_mocs_control_for_inhibit()
267 *cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index)); in restore_render_mocs_control_for_inhibit()
269 *(cs-2), *(cs-1), vgpu->id, req->engine->id); in restore_render_mocs_control_for_inhibit()
273 *cs++ = MI_NOOP; in restore_render_mocs_control_for_inhibit()
274 intel_ring_advance(req, cs); in restore_render_mocs_control_for_inhibit()
284 u32 *cs; in restore_render_mocs_l3cc_for_inhibit() local
286 cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2); in restore_render_mocs_l3cc_for_inhibit()
287 if (IS_ERR(cs)) in restore_render_mocs_l3cc_for_inhibit()
288 return PTR_ERR(cs); in restore_render_mocs_l3cc_for_inhibit()
290 *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2); in restore_render_mocs_l3cc_for_inhibit()
292 for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) { in restore_render_mocs_l3cc_for_inhibit()
293 *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index)); in restore_render_mocs_l3cc_for_inhibit()
294 *cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index)); in restore_render_mocs_l3cc_for_inhibit()
296 *(cs-2), *(cs-1), vgpu->id, req->engine->id); in restore_render_mocs_l3cc_for_inhibit()
300 *cs++ = MI_NOOP; in restore_render_mocs_l3cc_for_inhibit()
301 intel_ring_advance(req, cs); in restore_render_mocs_l3cc_for_inhibit()
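The GEN9_MOCS_SIZE / 2 bound used here and in load_render_mocs() above reflects how the L3 cacheability controls are packed: assuming the usual Gen9 layout, each 32-bit LNCFCMOCS register carries two 16-bit MOCS L3 control entries, so 64 MOCS entries need only 32 register writes. A tiny illustration of that packing; pack_l3cc_pair() is a hypothetical helper, not part of the driver.

#include <stdint.h>

/*
 * Pack two consecutive 16-bit L3 control entries into one 32-bit
 * LNCFCMOCS-style register image: entry 2n in bits 15:0, entry 2n+1
 * in bits 31:16 (assumed layout).
 */
static uint32_t pack_l3cc_pair(uint16_t even_entry, uint16_t odd_entry)
{
	return (uint32_t)even_entry | ((uint32_t)odd_entry << 16);
}

int main(void)
{
	/* 64 MOCS entries -> 32 packed registers, hence GEN9_MOCS_SIZE / 2. */
	uint32_t reg_image = pack_l3cc_pair(0x0010, 0x0030);

	return reg_image == 0x00300010 ? 0 : 1;
}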
315 u32 *cs; in intel_vgpu_restore_inhibit_context() local
317 cs = intel_ring_begin(req, 2); in intel_vgpu_restore_inhibit_context()
318 if (IS_ERR(cs)) in intel_vgpu_restore_inhibit_context()
319 return PTR_ERR(cs); in intel_vgpu_restore_inhibit_context()
321 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; in intel_vgpu_restore_inhibit_context()
322 *cs++ = MI_NOOP; in intel_vgpu_restore_inhibit_context()
323 intel_ring_advance(req, cs); in intel_vgpu_restore_inhibit_context()
330 if (req->engine->id != RCS0) in intel_vgpu_restore_inhibit_context()
342 cs = intel_ring_begin(req, 2); in intel_vgpu_restore_inhibit_context()
343 if (IS_ERR(cs)) in intel_vgpu_restore_inhibit_context()
344 return PTR_ERR(cs); in intel_vgpu_restore_inhibit_context()
346 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; in intel_vgpu_restore_inhibit_context()
347 *cs++ = MI_NOOP; in intel_vgpu_restore_inhibit_context()
348 intel_ring_advance(req, cs); in intel_vgpu_restore_inhibit_context()
364 struct intel_uncore *uncore = engine->uncore; in handle_tlb_pending_event()
365 struct intel_vgpu_submission *s = &vgpu->submission; in handle_tlb_pending_event()
366 u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list; in handle_tlb_pending_event()
367 u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt; in handle_tlb_pending_event()
374 if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt)) in handle_tlb_pending_event()
377 if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending)) in handle_tlb_pending_event()
380 reg = _MMIO(regs[engine->id]); in handle_tlb_pending_event()
389 if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9) in handle_tlb_pending_event()
398 engine->name); in handle_tlb_pending_event()
404 gvt_dbg_core("invalidate TLB for ring %s\n", engine->name); in handle_tlb_pending_event()
417 struct intel_uncore *uncore = engine->uncore; in switch_mocs()
422 if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs))) in switch_mocs()
425 if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) == 9) in switch_mocs()
431 offset.reg = regs[engine->id]; in switch_mocs()
436 old_v = gen9_render_mocs.control_table[engine->id][i]; in switch_mocs()
440 new_v = gen9_render_mocs.control_table[engine->id][i]; in switch_mocs()
448 if (engine->id == RCS0) { in switch_mocs()
450 for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) { in switch_mocs()
472 const u32 *reg_state = ce->lrc_reg_state; in is_inhibit_context()
485 struct intel_uncore *uncore = engine->uncore; in switch_mmio()
490 if (GRAPHICS_VER(engine->i915) >= 9) in switch_mmio()
493 for (mmio = engine->i915->gvt->engine_mmio_list.mmio; in switch_mmio()
494 i915_mmio_reg_valid(mmio->reg); mmio++) { in switch_mmio()
495 if (mmio->id != engine->id) in switch_mmio()
502 if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context) in switch_mmio()
507 vgpu_vreg_t(pre, mmio->reg) = in switch_mmio()
508 intel_uncore_read_fw(uncore, mmio->reg); in switch_mmio()
509 if (mmio->mask) in switch_mmio()
510 vgpu_vreg_t(pre, mmio->reg) &= in switch_mmio()
511 ~(mmio->mask << 16); in switch_mmio()
512 old_v = vgpu_vreg_t(pre, mmio->reg); in switch_mmio()
514 old_v = mmio->value = in switch_mmio()
515 intel_uncore_read_fw(uncore, mmio->reg); in switch_mmio()
520 s = &next->submission; in switch_mmio()
526 if (mmio->in_context && in switch_mmio()
527 !is_inhibit_context(s->shadow[engine->id])) in switch_mmio()
530 if (mmio->mask) in switch_mmio()
531 new_v = vgpu_vreg_t(next, mmio->reg) | in switch_mmio()
532 (mmio->mask << 16); in switch_mmio()
534 new_v = vgpu_vreg_t(next, mmio->reg); in switch_mmio()
536 if (mmio->in_context) in switch_mmio()
538 if (mmio->mask) in switch_mmio()
539 new_v = mmio->value | (mmio->mask << 16); in switch_mmio()
541 new_v = mmio->value; in switch_mmio()
544 intel_uncore_write_fw(uncore, mmio->reg, new_v); in switch_mmio()
546 trace_render_mmio(pre ? pre->id : 0, in switch_mmio()
547 next ? next->id : 0, in switch_mmio()
549 i915_mmio_reg_offset(mmio->reg), in switch_mmio()
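The recurring mmio->mask << 16 in switch_mmio() comes from the i915 masked-register convention: the upper 16 bits of the written value select which of the lower 16 bits the hardware actually updates, which is why a restore ORs the full mask into the top half and a save clears it from the cached vreg. Below is a self-contained sketch of that encoding; the register model is hypothetical and not hardware-accurate, it only demonstrates the bit arithmetic.

#include <assert.h>
#include <stdint.h>

/* Build a masked-register write: mask in bits 31:16 enables the
 * corresponding bits 15:0 of the value. */
static uint32_t masked_write(uint16_t mask, uint16_t value)
{
	return ((uint32_t)mask << 16) | (value & mask);
}

/* How such a write is applied to the register's current 16-bit contents. */
static uint16_t apply_masked_write(uint16_t current, uint32_t write)
{
	uint16_t mask  = write >> 16;
	uint16_t value = write & 0xffff;

	return (current & ~mask) | (value & mask);
}

int main(void)
{
	/* Set bit 3 without disturbing any other bit. */
	uint16_t reg = 0x00f0;
	uint32_t w = masked_write(1u << 3, 1u << 3);

	reg = apply_masked_write(reg, w);
	assert(reg == 0x00f8);
	return 0;
}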
558 * intel_gvt_switch_mmio - switch mmio context of specific engine
571 engine->name)) in intel_gvt_switch_mmio()
574 gvt_dbg_render("switch ring %s from %s to %s\n", engine->name, in intel_gvt_switch_mmio()
582 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); in intel_gvt_switch_mmio()
584 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); in intel_gvt_switch_mmio()
588 * intel_gvt_init_engine_mmio_context - Initiate the engine mmio list
596 if (GRAPHICS_VER(gvt->gt->i915) >= 9) { in intel_gvt_init_engine_mmio_context()
597 gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; in intel_gvt_init_engine_mmio_context()
598 gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; in intel_gvt_init_engine_mmio_context()
599 gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); in intel_gvt_init_engine_mmio_context()
600 gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list; in intel_gvt_init_engine_mmio_context()
601 gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list); in intel_gvt_init_engine_mmio_context()
603 gvt->engine_mmio_list.mmio = gen8_engine_mmio_list; in intel_gvt_init_engine_mmio_context()
604 gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; in intel_gvt_init_engine_mmio_context()
605 gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); in intel_gvt_init_engine_mmio_context()
608 for (mmio = gvt->engine_mmio_list.mmio; in intel_gvt_init_engine_mmio_context()
609 i915_mmio_reg_valid(mmio->reg); mmio++) { in intel_gvt_init_engine_mmio_context()
610 if (mmio->in_context) { in intel_gvt_init_engine_mmio_context()
611 gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++; in intel_gvt_init_engine_mmio_context()
612 intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg); in intel_gvt_init_engine_mmio_context()