Lines matching "cs" in drivers/gpu/drm/i915/gvt/mmio_context.c

2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
176 struct intel_gvt *gvt = engine->i915->gvt;
177 struct intel_uncore *uncore = engine->uncore;
178 u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
179 u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
188 if (!HAS_ENGINE(engine->gt, ring_id))
212 u32 *cs;
215 struct intel_gvt *gvt = vgpu->gvt;
216 int ring_id = req->engine->id;
217 int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
222 ret = req->engine->emit_flush(req, EMIT_BARRIER);
226 cs = intel_ring_begin(req, count * 2 + 2);
227 if (IS_ERR(cs))
228 return PTR_ERR(cs);
230 *cs++ = MI_LOAD_REGISTER_IMM(count);
231 for (mmio = gvt->engine_mmio_list.mmio;
232 i915_mmio_reg_valid(mmio->reg); mmio++) {
233 if (mmio->id != ring_id || !mmio->in_context)
236 *cs++ = i915_mmio_reg_offset(mmio->reg);
237 *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
238 gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
239 *(cs-2), *(cs-1), vgpu->id, ring_id);
242 *cs++ = MI_NOOP;
243 intel_ring_advance(req, cs);
245 ret = req->engine->emit_flush(req, EMIT_BARRIER);
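
The restore path above allocates count * 2 + 2 dwords and then emits one MI_LOAD_REGISTER_IMM header, two dwords (offset, value) per register, and a trailing MI_NOOP, which keeps the block an even number of dwords. Below is a minimal userspace sketch of that dword layout; FAKE_MI_LRI, FAKE_MI_NOOP, struct reg_pair and emit_lri_block are illustrative stand-ins, not the driver's definitions (the real header macro in i915 is MI_INSTR(0x22, 2*(x)-1)).

/*
 * Sketch of the dword layout that intel_ring_begin(req, count * 2 + 2)
 * reserves in the restore path above: one LRI header, two dwords per
 * register/value pair, one trailing NOOP.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_MI_NOOP	0x00000000u
/* Hypothetical header encoding; i915's macro is MI_INSTR(0x22, 2*(x)-1). */
#define FAKE_MI_LRI(x)	((0x22u << 23) | (2u * (x) - 1u))

struct reg_pair {
	uint32_t offset;
	uint32_t value;
};

static size_t emit_lri_block(uint32_t *cs, const struct reg_pair *regs,
			     unsigned int count)
{
	uint32_t *start = cs;

	*cs++ = FAKE_MI_LRI(count);		/* 1 dword: LRI header       */
	for (unsigned int i = 0; i < count; i++) {
		*cs++ = regs[i].offset;		/* 1 dword: register offset  */
		*cs++ = regs[i].value;		/* 1 dword: immediate value  */
	}
	*cs++ = FAKE_MI_NOOP;			/* 1 dword: pad to count*2+2 */

	return (size_t)(cs - start);
}

int main(void)
{
	/* Arbitrary example offsets and values. */
	const struct reg_pair regs[] = {
		{ 0x7004, 0x11 }, { 0x7008, 0x22 }, { 0x700c, 0x33 },
	};
	uint32_t cs[2 * 3 + 2];
	size_t n = emit_lri_block(cs, regs, 3);

	printf("emitted %zu dwords (expected %d)\n", n, 2 * 3 + 2);
	return 0;
}
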
257 u32 *cs;
259 cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
260 if (IS_ERR(cs))
261 return PTR_ERR(cs);
263 *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);
266 *cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
267 *cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
268 gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
269 *(cs-2), *(cs-1), vgpu->id, req->engine->id);
273 *cs++ = MI_NOOP;
274 intel_ring_advance(req, cs);
284 u32 *cs;
286 cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
287 if (IS_ERR(cs))
288 return PTR_ERR(cs);
290 *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);
293 *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
294 *cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
295 gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
296 *(cs-2), *(cs-1), vgpu->id, req->engine->id);
300 *cs++ = MI_NOOP;
301 intel_ring_advance(req, cs);
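
The two MOCS restore fragments above differ only in size: the control table emits one GFX_MOCS register per entry (2 * GEN9_MOCS_SIZE + 2 dwords), while the L3CC table needs half as many LRI pairs because each 32-bit LNCFCMOCS register packs two 16-bit L3CC entries. The sketch below shows that packing and the resulting sizes, assuming GEN9_MOCS_SIZE is 64 (the value the driver defines); pack_l3cc and the table contents are made up for illustration.

/*
 * Sketch of why the L3CC block is half the size of the control-table
 * block: two 16-bit L3CC entries share one 32-bit LNCFCMOCS register.
 */
#include <stdint.h>
#include <stdio.h>

#define MOCS_ENTRIES 64		/* assumption: matches GEN9_MOCS_SIZE */

static uint32_t pack_l3cc(uint16_t lo_entry, uint16_t hi_entry)
{
	/* Two 16-bit entries packed into one 32-bit register. */
	return (uint32_t)lo_entry | ((uint32_t)hi_entry << 16);
}

int main(void)
{
	uint16_t l3cc[MOCS_ENTRIES];
	uint32_t regs[MOCS_ENTRIES / 2];

	for (int i = 0; i < MOCS_ENTRIES; i++)
		l3cc[i] = (uint16_t)(0x10 + i);	/* arbitrary example values */

	for (int i = 0; i < MOCS_ENTRIES / 2; i++)
		regs[i] = pack_l3cc(l3cc[2 * i], l3cc[2 * i + 1]);

	printf("regs[0] = 0x%08x (entries 0x%04x and 0x%04x packed)\n",
	       regs[0], (unsigned int)l3cc[0], (unsigned int)l3cc[1]);
	printf("control table: %d LRI dwords, l3cc table: %d LRI dwords\n",
	       2 * MOCS_ENTRIES + 2, 2 * MOCS_ENTRIES / 2 + 2);
	return 0;
}
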
315 u32 *cs;
317 cs = intel_ring_begin(req, 2);
318 if (IS_ERR(cs))
319 return PTR_ERR(cs);
321 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
322 *cs++ = MI_NOOP;
323 intel_ring_advance(req, cs);
330 if (req->engine->id != RCS0)
342 cs = intel_ring_begin(req, 2);
343 if (IS_ERR(cs))
344 return PTR_ERR(cs);
346 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
347 *cs++ = MI_NOOP;
348 intel_ring_advance(req, cs);
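
The two short fragments above bracket the inhibit-context restore on the render ring: MI_ARB_ON_OFF | MI_ARB_DISABLE is emitted before the LRI blocks and MI_ARB_ON_OFF | MI_ARB_ENABLE afterwards, so that command arbitration does not preempt the restore midway. Below is a sketch of that bracketing pattern; the FAKE_* opcode values and emit_dwords are placeholders, not i915's encodings.

/*
 * Sketch of the arbitration off/on bracket around the restore work.
 * Opcode values are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_MI_ARB_ON_OFF	(0x08u << 23)
#define FAKE_MI_ARB_ENABLE	(1u << 0)
#define FAKE_MI_ARB_DISABLE	(1u << 1)
#define FAKE_MI_NOOP		0x0u

static void emit_dwords(const char *what, uint32_t a, uint32_t b)
{
	printf("%-10s 0x%08x 0x%08x\n", what, a, b);
}

static void restore_with_arb_bracket(void (*restore)(void))
{
	emit_dwords("arb off:", FAKE_MI_ARB_ON_OFF | FAKE_MI_ARB_DISABLE,
		    FAKE_MI_NOOP);
	restore();			/* LRI restore blocks go here */
	emit_dwords("arb on:", FAKE_MI_ARB_ON_OFF | FAKE_MI_ARB_ENABLE,
		    FAKE_MI_NOOP);
}

static void dummy_restore(void)
{
	printf("... restore mmio/mocs LRI blocks ...\n");
}

int main(void)
{
	restore_with_arb_bracket(dummy_restore);
	return 0;
}
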
364 struct intel_uncore *uncore = engine->uncore;
365 struct intel_vgpu_submission *s = &vgpu->submission;
366 u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
367 u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
374 if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
377 if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
380 reg = _MMIO(regs[engine->id]);
389 if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9)
398 engine->name);
404 gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
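
The TLB fragment above shows a deferred-flush pattern: another path sets a per-engine bit in s->tlb_handle_pending, and this function clears it with test_and_clear_bit() and only then writes the engine's invalidate register, so the flush happens at most once per pending request. Below is a simplified, single-threaded sketch of that pattern; fake_test_and_clear_bit and handle_tlb_pending are stand-ins, and the kernel helper is atomic where this one is not.

/*
 * Sketch of the test-and-clear pattern: flush only if the engine's
 * pending bit was actually set, and clear it in the same step.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_ENGINES 4

static unsigned long tlb_handle_pending;

static bool fake_test_and_clear_bit(unsigned int nr, unsigned long *addr)
{
	bool was_set = (*addr >> nr) & 1;

	*addr &= ~(1UL << nr);
	return was_set;
}

static void handle_tlb_pending(unsigned int engine_id)
{
	if (engine_id >= NUM_ENGINES)	/* mirrors the drm_WARN_ON bound check */
		return;

	if (!fake_test_and_clear_bit(engine_id, &tlb_handle_pending))
		return;			/* nothing pending for this engine */

	printf("invalidate TLB for engine %u\n", engine_id);
}

int main(void)
{
	tlb_handle_pending |= 1UL << 2;	/* pretend engine 2 needs a flush */

	handle_tlb_pending(2);		/* flushes once */
	handle_tlb_pending(2);		/* bit already cleared: no-op */
	return 0;
}
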
417 struct intel_uncore *uncore = engine->uncore;
422 if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
425 if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) == 9)
431 offset.reg = regs[engine->id];
436 old_v = gen9_render_mocs.control_table[engine->id][i];
440 new_v = gen9_render_mocs.control_table[engine->id][i];
448 if (engine->id == RCS0) {
472 const u32 *reg_state = ce->lrc_reg_state;
485 struct intel_uncore *uncore = engine->uncore;
490 if (GRAPHICS_VER(engine->i915) >= 9)
493 for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
494 i915_mmio_reg_valid(mmio->reg); mmio++) {
495 if (mmio->id != engine->id)
502 if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context)
507 vgpu_vreg_t(pre, mmio->reg) =
508 intel_uncore_read_fw(uncore, mmio->reg);
509 if (mmio->mask)
510 vgpu_vreg_t(pre, mmio->reg) &=
511 ~(mmio->mask << 16);
512 old_v = vgpu_vreg_t(pre, mmio->reg);
514 old_v = mmio->value =
515 intel_uncore_read_fw(uncore, mmio->reg);
520 s = &next->submission;
526 if (mmio->in_context &&
527 !is_inhibit_context(s->shadow[engine->id]))
530 if (mmio->mask)
531 new_v = vgpu_vreg_t(next, mmio->reg) |
532 (mmio->mask << 16);
534 new_v = vgpu_vreg_t(next, mmio->reg);
536 if (mmio->in_context)
538 if (mmio->mask)
539 new_v = mmio->value | (mmio->mask << 16);
541 new_v = mmio->value;
544 intel_uncore_write_fw(uncore, mmio->reg, new_v);
546 trace_render_mmio(pre ? pre->id : 0,
547 next ? next->id : 0,
549 i915_mmio_reg_offset(mmio->reg),
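
The old_v/new_v logic above relies on the masked-register convention: for entries with mmio->mask set, bits 31:16 of the written value are per-bit write enables for bits 15:0, which is why the new value is composed as value | (mask << 16) and why the saved vreg has mask << 16 stripped again. The sketch below models how such a write behaves; compose_masked_write and apply_masked_write are illustrative models, not driver code.

/*
 * Model of a masked MMIO register: only bits whose enable bit
 * (bits 31:16) is set in the write actually change.
 */
#include <stdint.h>
#include <stdio.h>

/* Compose a write that only touches the bits covered by `mask`. */
static uint32_t compose_masked_write(uint16_t mask, uint16_t value)
{
	return ((uint32_t)mask << 16) | value;
}

/* How the register absorbs the composed write. */
static uint16_t apply_masked_write(uint16_t current, uint32_t write_val)
{
	uint16_t mask = write_val >> 16;
	uint16_t value = write_val & 0xffff;

	return (current & ~mask) | (value & mask);
}

int main(void)
{
	uint16_t reg = 0x00f0;			/* arbitrary current state */
	uint32_t wr = compose_masked_write(0x000f, 0x0005);

	printf("write 0x%08x -> reg 0x%04x becomes 0x%04x\n",
	       wr, (unsigned int)reg,
	       (unsigned int)apply_masked_write(reg, wr));
	return 0;
}
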
558 * intel_gvt_switch_mmio - switch mmio context of specific engine
571 engine->name))
574 gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
582 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
584 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
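
The wrapper above takes FORCEWAKE_ALL once around the whole switch because switch_mmio() uses the raw intel_uncore_read_fw()/write_fw() accessors, which leave forcewake handling to the caller. A toy sketch of that bracket follows; everything in it is illustrative scaffolding, not the uncore API.

/*
 * Sketch of the forcewake bracket: take it once, do all raw accesses,
 * release it afterwards.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool forcewake_held;

static void fake_forcewake_get(void) { forcewake_held = true; }
static void fake_forcewake_put(void) { forcewake_held = false; }

static void fake_write_fw(unsigned int reg, unsigned int val)
{
	assert(forcewake_held);		/* raw accessors rely on the caller */
	printf("write 0x%05x = 0x%08x\n", reg, val);
}

int main(void)
{
	fake_forcewake_get();		/* one bracket for the whole switch */
	fake_write_fw(0x7004, 0x1);
	fake_write_fw(0x7008, 0x2);
	fake_forcewake_put();
	return 0;
}
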
588 * intel_gvt_init_engine_mmio_context - Initiate the engine mmio list
596 if (GRAPHICS_VER(gvt->gt->i915) >= 9) {
597 gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
598 gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
599 gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
600 gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list;
601 gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list);
603 gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
604 gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
605 gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
608 for (mmio = gvt->engine_mmio_list.mmio;
609 i915_mmio_reg_valid(mmio->reg); mmio++) {
610 if (mmio->in_context) {
611 gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
612 intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg);
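
The init fragment above walks a sentinel-terminated engine MMIO table (the loop ends where i915_mmio_reg_valid() fails) and counts the in-context registers each engine owns. Below is a sketch of that walk; struct fake_engine_mmio, is_valid() and the table contents are simplified stand-ins for struct engine_mmio and the real list.

/*
 * Sketch of a sentinel-terminated descriptor-table walk with per-engine
 * counting of in-context registers.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_ENGINES 4

struct fake_engine_mmio {
	unsigned int id;	/* owning engine */
	unsigned int reg;	/* 0 terminates the list */
	bool in_context;	/* saved/restored via the context image? */
};

static bool is_valid(const struct fake_engine_mmio *m)
{
	return m->reg != 0;	/* stand-in for i915_mmio_reg_valid() */
}

int main(void)
{
	static const struct fake_engine_mmio list[] = {
		{ 0, 0x7004, true }, { 0, 0x7008, false },
		{ 1, 0x1c080, true }, { 0, 0, false },	/* sentinel */
	};
	unsigned int ctx_mmio_count[NUM_ENGINES] = { 0 };

	for (const struct fake_engine_mmio *m = list; is_valid(m); m++)
		if (m->in_context)
			ctx_mmio_count[m->id]++;

	for (unsigned int i = 0; i < NUM_ENGINES; i++)
		printf("engine %u: %u in-context regs\n", i, ctx_mmio_count[i]);
	return 0;
}
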