/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include <drm/drm_print.h>
#include <drm/intel/intel_gmd_misc_regs.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_ring.h"

#include "gvt.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_wait_util.h"
#include "trace.h"

#define GEN9_MOCS_SIZE		64

struct engine_mmio {
	enum intel_engine_id id;
	i915_reg_t reg;
	u32 mask;
	bool in_context;
	u32 value;
};
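/*
 * Editorial note on struct engine_mmio (not from the original sources,
 * derived from how the fields are used later in this file): @id is the
 * engine that owns the register; a non-zero @mask marks a "masked"
 * register whose definition bits live in 15:0 and whose write-enable
 * bits live in 31:16, so a restore writes (vreg | (mask << 16)) and a
 * save clears the stale mask half with vreg &= ~(mask << 16);
 * @in_context flags registers that are part of the logical ring context
 * image and are normally reloaded by a context restore; @value caches
 * the host's setting while a vGPU owns the engine.
 */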
/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminator */
};
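/*
 * Editorial note: the gen9 list below repeats the gen8 render/blitter
 * entries and then appends the SKL+ additions, e.g. the private PAT
 * registers, the TRTT registers, extra chicken/scratch registers, and
 * per-engine EXCC entries for VCS1 and VECS0.
 */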
static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
	{RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
	{RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
	{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
	{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
	{RCS0, _MMIO(0xb118), 0, false}, /* GEN8_L3SQCREG4 */
	{RCS0, _MMIO(0xb11c), 0, false}, /* GEN9_SCRATCH1 */
	{RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */
	{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
	{RCS0, _MMIO(0xe180), 0xffff, true}, /* HALF_SLICE_CHICKEN2 */
	{RCS0, _MMIO(0xe184), 0xffff, true}, /* GEN8_HALF_SLICE_CHICKEN3 */
	{RCS0, _MMIO(0xe188), 0xffff, true}, /* GEN9_HALF_SLICE_CHICKEN5 */
	{RCS0, _MMIO(0xe194), 0xffff, true}, /* GEN9_HALF_SLICE_CHICKEN7 */
	{RCS0, _MMIO(0xe4f0), 0xffff, true}, /* GEN8_ROW_CHICKEN */
	{RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */
	{RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */
	{RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */
	{RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */
	{RCS0, TRVADR, 0, true}, /* 0x4df0 */
	{RCS0, TRTTE, 0, true}, /* 0x4df4 */
	{RCS0, _MMIO(0x4dfc), 0, true},

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */

	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */

	{VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */

	{RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
	{RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
	{RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */

	{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
	{RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminator */
};

static struct {
	bool initialized;
	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;

static u32 gen9_mocs_mmio_offset_list[] = {
	[RCS0] = 0xc800,
	[VCS0] = 0xc900,
	[VCS1] = 0xca00,
	[BCS0] = 0xcc00,
	[VECS0] = 0xcb00,
};
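/*
 * A sketch of the MOCS register layout assumed by load_render_mocs()
 * and switch_mocs() below (editorial, derived from the offsets used in
 * this file): each engine exposes GEN9_MOCS_SIZE (64) consecutive
 * 32-bit control registers starting at its base from
 * gen9_mocs_mmio_offset_list, e.g. for the render engine:
 *
 *	control_table[RCS0][i]  <->  MMIO 0xc800 + 4 * i,  i = 0..63
 *
 * The LNCF l3cc table at 0xb020 only needs GEN9_MOCS_SIZE / 2 (32)
 * registers, presumably because each register packs two 16-bit l3cc
 * entries.
 */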
static void load_render_mocs(const struct intel_engine_cs *engine)
{
	struct intel_gvt *gvt = engine->i915->gvt;
	struct intel_uncore *uncore = engine->uncore;
	u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
	u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
	i915_reg_t offset;
	int ring_id, i;

	/* Platform doesn't have MOCS mmios. */
	if (!regs)
		return;

	for (ring_id = 0; ring_id < cnt; ring_id++) {
		if (!HAS_ENGINE(engine->gt, ring_id))
			continue;

		offset.reg = regs[ring_id];
		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				intel_uncore_read_fw(uncore, offset);
			offset.reg += 4;
		}
	}

	offset.reg = 0xb020;
	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
		gen9_render_mocs.l3cc_table[i] =
			intel_uncore_read_fw(uncore, offset);
		offset.reg += 4;
	}
	gen9_render_mocs.initialized = true;
}

static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
				 struct i915_request *req)
{
	u32 *cs;
	int ret;
	struct engine_mmio *mmio;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = req->engine->id;
	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];

	if (count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->id != ring_id || !mmio->in_context)
			continue;

		*cs++ = i915_mmio_reg_offset(mmio->reg);
		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, ring_id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
					struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);

	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
				     struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);

	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs - 2), *(cs - 1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
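/*
 * Editorial note on the ring-space arithmetic in the three helpers
 * above: every intel_ring_begin() reservation is <payload dwords> + 2,
 * i.e. one dword for the MI_LOAD_REGISTER_IMM() header, two dwords
 * (offset, value) per register, and one trailing MI_NOOP, presumably to
 * keep the number of emitted dwords even.
 */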
/*
 * Use the LRI command to initialize the MMIO that lives in the context
 * state image for an inhibit context. This covers the tracked engine
 * MMIO, render MOCS and render MOCS l3cc registers.
 */
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
				       struct i915_request *req)
{
	int ret;
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = restore_context_mmio_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	/* no MOCS registers in context except render engine */
	if (req->engine->id != RCS0)
		goto out;

	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
	if (ret)
		goto out;

out:
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return ret;
}

static u32 gen8_tlb_mmio_offset_list[] = {
	[RCS0] = 0x4260,
	[VCS0] = 0x4264,
	[VCS1] = 0x4268,
	[BCS0] = 0x426c,
	[VECS0] = 0x4270,
};

static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
				     const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	struct intel_vgpu_submission *s = &vgpu->submission;
	u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
	u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
	enum forcewake_domains fw;
	i915_reg_t reg;

	if (!regs)
		return;

	if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
		return;

	if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
		return;

	reg = _MMIO(regs[engine->id]);

	/*
	 * WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold a forcewake when invalidating RCS TLB caches;
	 * otherwise the device can enter RC6 and interrupt the
	 * invalidation process.
	 */
	fw = intel_uncore_forcewake_for_reg(uncore, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9)
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(uncore, fw);

	intel_uncore_write_fw(uncore, reg, 0x1);

	if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
		gvt_vgpu_err("timeout in invalidate ring %s tlb\n",
			     engine->name);
	else
		vgpu_vreg_t(vgpu, reg) = 0;

	intel_uncore_forcewake_put(uncore, fw);

	gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
}
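/*
 * Editorial note: the per-engine registers in gen8_tlb_mmio_offset_list
 * follow a write-1-to-invalidate protocol as used above: writing 0x1
 * kicks off the TLB flush, and the hardware clears the register back to
 * 0 once the invalidation has completed, which is what the bounded poll
 * waits for before clearing the vGPU's virtual copy of the register.
 */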
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
			const struct intel_engine_cs *engine)
{
	u32 regs[] = {
		[RCS0] = 0xc800,
		[VCS0] = 0xc900,
		[VCS1] = 0xca00,
		[BCS0] = 0xcc00,
		[VECS0] = 0xcb00,
	};
	struct intel_uncore *uncore = engine->uncore;
	i915_reg_t offset, l3_offset;
	u32 old_v, new_v;
	int i;

	if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
		return;

	if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) == 9)
		return;

	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(engine);

	offset.reg = regs[engine->id];
	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
			old_v = gen9_render_mocs.control_table[engine->id][i];
		if (next)
			new_v = vgpu_vreg_t(next, offset);
		else
			new_v = gen9_render_mocs.control_table[engine->id][i];

		if (old_v != new_v)
			intel_uncore_write_fw(uncore, offset, new_v);

		offset.reg += 4;
	}

	if (engine->id == RCS0) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
				old_v = gen9_render_mocs.l3cc_table[i];
			if (next)
				new_v = vgpu_vreg_t(next, l3_offset);
			else
				new_v = gen9_render_mocs.l3cc_table[i];

			if (old_v != new_v)
				intel_uncore_write_fw(uncore, l3_offset, new_v);

			l3_offset.reg += 4;
		}
	}
}

#define CTX_CONTEXT_CONTROL_VAL	0x03

bool is_inhibit_context(struct intel_context *ce)
{
	const u32 *reg_state = ce->lrc_reg_state;
	u32 inhibit_mask =
		REG_MASKED_FIELD_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	return inhibit_mask ==
		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
}

/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
			const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	struct intel_vgpu_submission *s;
	struct engine_mmio *mmio;
	u32 old_v, new_v;

	if (GRAPHICS_VER(engine->i915) >= 9)
		switch_mocs(pre, next, engine);

	for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->id != engine->id)
			continue;
		/*
		 * No need to save or restore the MMIO that is part of the
		 * context state image on gen9; it is initialized by the LRI
		 * command and saved/restored together with the context.
		 */
		if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context)
			continue;

		/* save */
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) =
				intel_uncore_read_fw(uncore, mmio->reg);
			if (mmio->mask)
				vgpu_vreg_t(pre, mmio->reg) &=
					~(mmio->mask << 16);
			old_v = vgpu_vreg_t(pre, mmio->reg);
		} else {
			old_v = mmio->value =
				intel_uncore_read_fw(uncore, mmio->reg);
		}

		/* restore */
		if (next) {
			s = &next->submission;
			/*
			 * No need to restore the MMIO that is in the context
			 * state image if this is not an inhibit context; the
			 * context restore will reload it by itself.
			 */
			if (mmio->in_context &&
			    !is_inhibit_context(s->shadow[engine->id]))
				continue;

			if (mmio->mask)
				new_v = vgpu_vreg_t(next, mmio->reg) |
					(mmio->mask << 16);
			else
				new_v = vgpu_vreg_t(next, mmio->reg);
		} else {
			if (mmio->in_context)
				continue;
			if (mmio->mask)
				new_v = mmio->value | (mmio->mask << 16);
			else
				new_v = mmio->value;
		}

		intel_uncore_write_fw(uncore, mmio->reg, new_v);

		trace_render_mmio(pre ? pre->id : 0,
				  next ? next->id : 0,
				  "switch",
				  i915_mmio_reg_offset(mmio->reg),
				  old_v, new_v);
	}

	if (next)
		handle_tlb_pending_event(next, engine);
}
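/*
 * Editorial summary of switch_mmio() above: the outgoing owner's
 * register values are saved into its vregs (or into mmio->value when
 * the host was the owner), then the incoming owner's values are written
 * back, skipping in-context registers that a normal (non-inhibit)
 * context restore would reload anyway. Any TLB invalidation the
 * incoming vGPU requested while it did not own the engine is replayed
 * at the end.
 */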
"vGPU" : "HOST"); 581 582 /** 583 * We are using raw mmio access wrapper to improve the 584 * performance for batch mmio read/write, so we need 585 * handle forcewake manually. 586 */ 587 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL); 588 switch_mmio(pre, next, engine); 589 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); 590 } 591 592 /** 593 * intel_gvt_init_engine_mmio_context - Initiate the engine mmio list 594 * @gvt: GVT device 595 * 596 */ 597 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt) 598 { 599 struct engine_mmio *mmio; 600 601 if (GRAPHICS_VER(gvt->gt->i915) >= 9) { 602 gvt->engine_mmio_list.mmio = gen9_engine_mmio_list; 603 gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; 604 gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); 605 gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list; 606 gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list); 607 } else { 608 gvt->engine_mmio_list.mmio = gen8_engine_mmio_list; 609 gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list; 610 gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list); 611 } 612 613 for (mmio = gvt->engine_mmio_list.mmio; 614 i915_mmio_reg_valid(mmio->reg); mmio++) { 615 if (mmio->in_context) { 616 gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++; 617 intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg); 618 } 619 } 620 } 621