// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2018 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_ring.h"
#include "intel_workarounds.h"

/**
 * DOC: Hardware workarounds
 *
 * Hardware workarounds are register programming documented to be executed in
 * the driver that falls outside of the normal programming sequences for a
 * platform. There are some basic categories of workarounds, depending on
 * how/when they are applied:
 *
 * - Context workarounds: workarounds that touch registers that are
 *   saved/restored to/from the HW context image. The list is emitted (via Load
 *   Register Immediate commands) once when initializing the device and saved in
 *   the default context. That default context is then used on every context
 *   creation to have a "primed golden context", i.e. a context image that
 *   already contains the changes needed to all the registers.
 *
 *   Context workarounds should be implemented in the \*_ctx_workarounds_init()
 *   variants respective to the targeted platforms.
 *
 * - Engine workarounds: the list of these WAs is applied whenever the specific
 *   engine is reset. It's also possible that a set of engine classes share a
 *   common power domain and they are reset together. This happens on some
 *   platforms with render and compute engines. In this case (at least) one of
 *   them needs to keep the workaround programming: the approach taken in the
 *   driver is to tie those workarounds to the first compute/render engine that
 *   is registered. When executing with GuC submission, engine resets are
 *   outside of kernel driver control, hence the list of registers involved is
 *   written once, on engine initialization, and then passed to GuC, which
 *   saves/restores their values before/after the reset takes place. See
 *   ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
 *
 *   Workarounds for registers specific to RCS and CCS should be implemented in
 *   rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
 *   registers belonging to BCS, VCS or VECS should be implemented in
 *   xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
 *   engine's MMIO range but that are part of the common RCS/CCS reset domain
 *   should be implemented in general_render_compute_wa_init().
 *
 * - GT workarounds: the list of these WAs is applied whenever these registers
 *   revert to their default values: on GPU reset, suspend/resume [1]_, etc.
 *
 *   GT workarounds should be implemented in the \*_gt_workarounds_init()
 *   variants respective to the targeted platforms.
 *
 * - Register whitelist: some workarounds need to be implemented in userspace,
 *   but need to touch privileged registers. The whitelist in the kernel
 *   instructs the hardware to allow the access to happen. From the kernel side,
 *   this is just a special case of a MMIO workaround (as we write the list of
 *   these to-be-whitelisted registers to some special HW registers).
 *
 *   Register whitelisting should be done in the \*_whitelist_build() variants
 *   respective to the targeted platforms.
 *
 * - Workaround batchbuffers: buffers that get executed automatically by the
 *   hardware on every HW context restore. These buffers are created and
 *   programmed in the default context so the hardware always goes through those
 *   programming sequences when switching contexts. The support for workaround
 *   batchbuffers is enabled by these hardware mechanisms:
 *
 *   #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
 *      context, pointing the hardware to jump to that location when that offset
 *      is reached in the context restore. The workaround batchbuffer in the
 *      driver currently uses this mechanism for all platforms.
 *
 *   #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
 *      pointing the hardware to a buffer to continue executing after the
 *      engine registers are restored in a context restore sequence. This is
 *      currently not used in the driver.
 *
 * - Other: There are WAs that, due to their nature, cannot be applied from a
 *   central place. Those are peppered around the rest of the code, as needed.
 *   Workarounds related to the display IP are the main example.
 *
 * .. [1] Technically, some registers are powercontext saved & restored, so they
 *    survive a suspend/resume. In practice, writing them again is not too
 *    costly and simplifies things, so it's the approach taken in the driver.
 */
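
/*
 * Illustrative sketch (not functional driver code): when the context
 * workaround list is emitted into the default context, it takes the shape
 * of a single Load Register Immediate packet followed by (offset, value)
 * pairs, as intel_engine_emit_ctx_wa() below builds it:
 *
 *	MI_LOAD_REGISTER_IMM(count)
 *	reg_offset[0]; value[0];
 *	...
 *	reg_offset[count - 1]; value[count - 1];
 *	MI_NOOP
 */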

static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
			  const char *name, const char *engine_name)
{
	wal->gt = gt;
	wal->name = name;
	wal->engine_name = engine_name;
}

#define WA_LIST_CHUNK (1 << 4)

static void wa_init_finish(struct i915_wa_list *wal)
{
	/* Trim unused entries. */
	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
		struct i915_wa *list = kmemdup(wal->list,
					       wal->count * sizeof(*list),
					       GFP_KERNEL);

		if (list) {
			kfree(wal->list);
			wal->list = list;
		}
	}

	if (!wal->count)
		return;

	gt_dbg(wal->gt, "Initialized %u %s workarounds on %s\n",
	       wal->wa_count, wal->name, wal->engine_name);
}

static enum forcewake_domains
wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
	enum forcewake_domains fw = 0;
	struct i915_wa *wa;
	unsigned int i;

	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
		fw |= intel_uncore_forcewake_for_reg(uncore,
						     wa->reg,
						     FW_REG_READ |
						     FW_REG_WRITE);

	return fw;
}

static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
{
	unsigned int addr = i915_mmio_reg_offset(wa->reg);
	struct drm_i915_private *i915 = wal->gt->i915;
	unsigned int start = 0, end = wal->count;
	const unsigned int grow = WA_LIST_CHUNK;
	struct i915_wa *wa_;

	GEM_BUG_ON(!is_power_of_2(grow));

	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
		struct i915_wa *list;

		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
				     GFP_KERNEL);
		if (!list) {
			drm_err(&i915->drm, "No space for workaround init!\n");
			return;
		}

		if (wal->list) {
			memcpy(list, wal->list, sizeof(*wa) * wal->count);
			kfree(wal->list);
		}

		wal->list = list;
	}

	while (start < end) {
		unsigned int mid = start + (end - start) / 2;

		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
			start = mid + 1;
		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
			end = mid;
		} else {
			wa_ = &wal->list[mid];

			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
				drm_err(&i915->drm,
					"Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
					i915_mmio_reg_offset(wa_->reg),
					wa_->clr, wa_->set);

				wa_->set &= ~wa->clr;
			}

			wal->wa_count++;
			wa_->set |= wa->set;
			wa_->clr |= wa->clr;
			wa_->read |= wa->read;
			return;
		}
	}

	wal->wa_count++;
	wa_ = &wal->list[wal->count++];
	*wa_ = *wa;

	while (wa_-- > wal->list) {
		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
			   i915_mmio_reg_offset(wa_[1].reg));
		if (i915_mmio_reg_offset(wa_[1].reg) >
		    i915_mmio_reg_offset(wa_[0].reg))
			break;

		swap(wa_[1], wa_[0]);
	}
}
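
/*
 * A minimal sketch of how _wa_add() merges duplicate entries (REG, BIT_A
 * and BIT_B are hypothetical names for illustration only): two
 * wa_write_or() calls against the same offset collapse into one entry
 * whose clr/set/read masks are the union of both requests:
 *
 *	wa_write_or(wal, REG, BIT_A); // entry: clr = set = read = BIT_A
 *	wa_write_or(wal, REG, BIT_B); // entry: clr = set = read = BIT_A | BIT_B
 *
 * Only when clear masks collide (the new entry's clear mask is contained
 * within the existing one) are the earlier set bits covered by the new
 * clear mask dropped, with the "Discarding overwritten w/a" error above
 * flagging the conflict.
 */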

static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
		   u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
	struct i915_wa wa = {
		.reg = reg,
		.clr = clear,
		.set = set,
		.read = read_mask,
		.masked_reg = masked_reg,
	};

	_wa_add(wal, &wa);
}

static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
		       u32 clear, u32 set, u32 read_mask, bool masked_reg)
{
	struct i915_wa wa = {
		.mcr_reg = reg,
		.clr = clear,
		.set = set,
		.read = read_mask,
		.masked_reg = masked_reg,
		.is_mcr = 1,
	};

	_wa_add(wal, &wa);
}

static void
wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
	wa_add(wal, reg, clear, set, clear | set, false);
}

static void
wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
{
	wa_mcr_add(wal, reg, clear, set, clear | set, false);
}

static void
wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, ~0, set);
}

static void
wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
{
	wa_mcr_write_clr_set(wal, reg, ~0, set);
}

static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
	wa_write_clr_set(wal, reg, set, set);
}

static void
wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
{
	wa_mcr_write_clr_set(wal, reg, set, set);
}

static void
wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
{
	wa_write_clr_set(wal, reg, clr, 0);
}

static void
wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
{
	wa_mcr_write_clr_set(wal, reg, clr, 0);
}

/*
 * WA operations on "masked register". A masked register has the upper 16 bits
 * documented as "masked" in the bspec. Its purpose is to allow writing to just
 * a portion of the register without a rmw: you simply write in the upper 16
 * bits the mask of bits you are going to modify.
 *
 * The wa_masked_* family of functions already does the necessary operations to
 * calculate the mask based on the parameters passed, so the user only has to
 * provide the lower 16 bits of that register.
 */
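
/*
 * A worked example of the masked-register encoding (bit chosen purely for
 * illustration): _MASKED_BIT_ENABLE(bit) replicates the bit into the upper
 * 16-bit mask, so for BIT(2):
 *
 *	_MASKED_BIT_ENABLE(BIT(2))  == 0x00040004  (mask | value)
 *	_MASKED_BIT_DISABLE(BIT(2)) == 0x00040000  (mask only, value cleared)
 *
 * wa_masked_en()/wa_masked_dis() store that full 32-bit pattern in wa->set,
 * while wa->read keeps only the lower 16 bits actually verified on readback.
 */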

static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}

static void
wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
}

static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}

static void
wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
}

static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
		    u32 mask, u32 val)
{
	wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}

static void
wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
			u32 mask, u32 val)
{
	wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
}

static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
}

static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);

	/* WaDisableAsyncFlipPerfMode:bdw,chv */
	wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);

	/* WaDisablePartialInstShootdown:bdw,chv */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	/* WaForceEnableNonCoherent:bdw,chv */
	/* WaHdcDisableFetchWhenMasked:bdw,chv */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
		     HDC_FORCE_NON_COHERENT);

	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
	 * polygons in the same 8x4 pixel/sample area to be processed without
	 * stalling waiting for the earlier ones to write to Hierarchical Z
	 * buffer."
	 *
	 * This optimization is off by default for BDW and CHV; turn it on.
	 */
	wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw,chv */
	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN6_WIZ_HASHING_MASK,
			    GEN6_WIZ_HASHING_16x4);
}

static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* WaDisableDopClockGating:bdw
	 *
	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
	 * to disable EUTC clock gating.
	 */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
			 DOP_CLOCK_GATING_DISABLE);

	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
			 GEN8_SAMPLER_POWER_BYPASS_DIS);

	wa_masked_en(wal, HDC_CHICKEN0,
		     /* WaForceContextSaveRestoreNonCoherent:bdw */
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
		     (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
}

static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen8_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:chv */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);

	/* Improve HiZ throughput on CHV. */
	wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
}

static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
				      struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN9_PBE_COMPRESSED_HASH_SELECTION);
		wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
				 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
	}

	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 FLOW_CONTROL_ENABLE |
			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);

	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
	wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
			 GEN9_ENABLE_YV12_BUGFIX |
			 GEN9_ENABLE_GPGPU_PREEMPTION);

	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
	wa_masked_en(wal, CACHE_MODE_1,
		     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
		     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);

	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
			  GEN9_CCS_TLB_PREFETCH_ENABLE);

	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
		     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);

	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
	 * both tied to WaForceContextSaveRestoreNonCoherent
	 * in some hsds for skl. We keep the tie for all gen9. The
	 * documentation is a bit hazy and so we want to get common behaviour,
	 * even though there is no clear evidence we would need both on kbl/bxt.
	 * This area has been a source of system hangs so we play it safe
	 * and mimic the skl regardless of what bspec says.
	 *
	 * Use Force Non-Coherent whenever executing a 3D context. This
	 * is a workaround for a possible hang in the unlikely event
	 * a TLB invalidation occurs during a PSD flush.
	 */

	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
	wa_masked_en(wal, HDC_CHICKEN0,
		     HDC_FORCE_NON_COHERENT);

	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
	if (IS_SKYLAKE(i915) ||
	    IS_KABYLAKE(i915) ||
	    IS_COFFEELAKE(i915) ||
	    IS_COMETLAKE(i915))
		wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
				 GEN8_SAMPLER_POWER_BYPASS_DIS);

	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
	wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);

	/*
	 * Supporting preemption with fine-granularity requires changes in the
	 * batch buffer programming. Since we can't break old userspace, we
	 * need to set our default preemption level to a safe value. Userspace
	 * is still able to use more fine-grained preemption levels, since in
	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
	 * not real HW workarounds, but merely a way to start using preemption
	 * while maintaining the old contract with userspace.
	 */

	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);

	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,blk,cfl,[cnl] */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);

	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
	if (IS_GEN9_LP(i915))
		wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
}

static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
				struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;
	u8 vals[3] = { 0, 0, 0 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		u8 ss;

		/*
		 * Only consider slices where one, and only one, subslice has 7
		 * EUs
		 */
		if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
			continue;

		/*
		 * subslice_7eu[i] != 0 (because of the check above) and
		 * ss_max == 4 (maximum number of subslices possible per slice)
		 *
		 * ->    0 <= ss <= 3;
		 */
		ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
		vals[i] = 3 - ss;
	}

	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
		return;

	/* Tune IZ hashing. See intel_device_info_runtime_init() */
	wa_masked_field_set(wal, GEN7_GT_MODE,
			    GEN9_IZ_HASHING_MASK(2) |
			    GEN9_IZ_HASHING_MASK(1) |
			    GEN9_IZ_HASHING_MASK(0),
			    GEN9_IZ_HASHING(2, vals[2]) |
			    GEN9_IZ_HASHING(1, vals[1]) |
			    GEN9_IZ_HASHING(0, vals[0]));
}
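
/*
 * A worked example of the tuning above (fuse values invented for
 * illustration): if slice 0 reports subslice_7eu[0] == BIT(2), exactly one
 * subslice in that slice has 7 EUs, so ss = ffs(BIT(2)) - 1 = 2 and
 * vals[0] = 3 - ss = 1, which becomes the GEN9_IZ_HASHING value programmed
 * for slice 0.
 */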

static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);
	skl_tune_iz_hashing(engine, wal);
}

static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaDisableThreadStallDopClockGating:bxt */
	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
			 STALL_DOP_GATING_DISABLE);

	/* WaToEnableHwFixForPushConstHWBug:bxt */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:kbl */
	if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
			     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:kbl */
	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:glk */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
}

static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen9_ctx_workarounds_init(engine, wal);

	/* WaToEnableHwFixForPushConstHWBug:cfl */
	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);

	/* WaDisableSbeCacheDispatchPortSharing:cfl */
	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
}

static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	/* Wa_1406697149 (WaDisableBankHangMode:icl) */
	wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);

	/* WaForceEnableNonCoherent:icl
	 * This is not the same workaround as in early Gen9 platforms, where
	 * lacking this could cause system hangs, but coherency performance
	 * overhead is high and only a few compute workloads really need it
	 * (the register is whitelisted in hardware now, so UMDs can opt in
	 * for coherency if they have a good reason).
	 */
	wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);

	/* WaEnableFloatBlendOptimization:icl */
	wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
		   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
		   0 /* write-only, so skip validation */,
		   true);

	/* WaDisableGPGPUMidThreadPreemption:icl */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/* allow headerless messages for preemptible GPGPU context */
	wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
			 GEN11_SAMPLER_ENABLE_HEADLESS_MSG);

	/* Wa_1604278689:icl,ehl */
	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
	wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
			 0,
			 0xFFFFFFFF);

	/* Wa_1406306137:icl,ehl */
	wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}

/*
 * These settings aren't actually workarounds, but general tuning settings that
 * need to be programmed on the dg2 platform.
 */
static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
	wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
			     REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
	wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
			     FF_MODE2_TDS_TIMER_128);
}

static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = engine->i915;

	/*
	 * Wa_1409142259:tgl,dg1,adl-p
	 * Wa_1409347922:tgl,dg1,adl-p
	 * Wa_1409252684:tgl,dg1,adl-p
	 * Wa_1409217633:tgl,dg1,adl-p
	 * Wa_1409207793:tgl,dg1,adl-p
	 * Wa_1409178076:tgl,dg1,adl-p
	 * Wa_1408979724:tgl,dg1,adl-p
	 * Wa_14010443199:tgl,rkl,dg1,adl-p
	 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
	 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
	 */
	wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);

	/* WaDisableGPGPUMidThreadPreemption:gen12 */
	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);

	/*
	 * Wa_16011163337 - GS_TIMER
	 *
	 * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
	 * need to program it even on those that don't explicitly list that
	 * workaround.
	 *
	 * Note that the programming of GEN12_FF_MODE2 is further modified
	 * according to the FF_MODE2 guidance given by Wa_1608008084.
	 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
	 * value when read from the CPU.
	 *
	 * The default value for this register is zero for all fields.
	 * So instead of doing a RMW we should just write the desired values
	 * for TDS and GS timers. Note that since the readback can't be trusted,
	 * the clear mask is just set to ~0 to make sure other bits are not
	 * inadvertently set. For the same reason read verification is ignored.
	 */
	wa_add(wal,
	       GEN12_FF_MODE2,
	       ~0,
	       FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
	       0, false);

	if (!IS_DG1(i915)) {
		/* Wa_1806527549 */
		wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);

		/* Wa_1606376872 */
		wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
	}
}
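
/*
 * A minimal sketch of the list entry produced by the GEN12_FF_MODE2
 * programming above (shown for illustration only): with clr == ~0, the
 * emission path sees (clr | set) == U32_MAX and skips the untrustworthy
 * readback, and read == 0 means wa_verify() has no bits to check:
 *
 *	{ .reg = GEN12_FF_MODE2, .clr = ~0u,
 *	  .set = FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224, .read = 0 }
 */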

static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	gen12_ctx_workarounds_init(engine, wal);

	/* Wa_1409044764 */
	wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
		      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);

	/* Wa_22010493298 */
	wa_masked_en(wal, HIZ_CHICKEN,
		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}

static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	dg2_ctx_gt_tuning_init(engine, wal);

	/* Wa_16013271637:dg2 */
	wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
			 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

	/* Wa_14014947963:dg2 */
	wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);

	/* Wa_18018764978:dg2 */
	wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);

	/* Wa_18019271663:dg2 */
	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);

	/* Wa_14019877138:dg2 */
	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
}

static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
				     struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;

	dg2_ctx_gt_tuning_init(engine, wal);

	/*
	 * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in
	 * gen12_emit_indirect_ctx_rcs() rather than here on some early
	 * steppings.
	 */
	if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	      IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)))
		wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
}

static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
				       struct i915_wa_list *wal)
{
	struct intel_gt *gt = engine->gt;

	xelpg_ctx_gt_tuning_init(engine, wal);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
		/* Wa_14014947963 */
		wa_masked_field_set(wal, VF_PREEMPTION,
				    PREEMPTION_VERTEX_COUNT, 0x4000);

		/* Wa_16013271637 */
		wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
				 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);

		/* Wa_18019627453 */
		wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);

		/* Wa_18018764978 */
		wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
	}

	/* Wa_18019271663 */
	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);

	/* Wa_14019877138 */
	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
}

static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
					 struct i915_wa_list *wal)
{
	/*
	 * This is a "fake" workaround defined by software to ensure we
	 * maintain reliable, backward-compatible behavior for userspace with
	 * regards to how nested MI_BATCH_BUFFER_START commands are handled.
	 *
	 * The per-context setting of MI_MODE[12] determines whether the bits
	 * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
	 * in the traditional manner or whether they should instead use a new
	 * tgl+ meaning that breaks backward compatibility, but allows nesting
	 * into 3rd-level batchbuffers. When this new capability was first
	 * added in TGL, it remained off by default unless a context
	 * intentionally opted in to the new behavior. However Xe_HPG now
	 * flips this on by default and requires that we explicitly opt out if
	 * we don't want the new behavior.
	 *
	 * From a SW perspective, we want to maintain the backward-compatible
	 * behavior for userspace, so we'll apply a fake workaround to set it
	 * back to the legacy behavior on platforms where the hardware default
	 * is to break compatibility. At the moment there is no Linux userspace
	 * that utilizes third-level batchbuffers, so using the legacy meaning
	 * is the correct thing to do and avoids userspace needing to make any
	 * changes. If/when we have userspace consumers that want to utilize
	 * third-level batch nesting, we can provide a context parameter to
	 * allow them to opt-in.
	 */
	wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
}

static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
				   struct i915_wa_list *wal)
{
	u8 mocs;

	/*
	 * Some blitter commands do not have a field for MOCS; those
	 * commands will use the MOCS index pointed to by BLIT_CCTL.
	 * The BLIT_CCTL registers need to be programmed to un-cached.
	 */
	if (engine->class == COPY_ENGINE_CLASS) {
		mocs = engine->gt->mocs.uc_index;
		wa_write_clr_set(wal,
				 BLIT_CCTL(engine->mmio_base),
				 BLIT_CCTL_MASK,
				 BLIT_CCTL_MOCS(mocs, mocs));
	}
}

/*
 * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
 * defined by the hardware team; it programs general context registers.
 * Routing that context register programming through the context workaround
 * list allows us to use the wa framework for proper application and
 * validation.
 */
static void
gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
			  struct i915_wa_list *wal)
{
	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
		fakewa_disable_nestedbb_mode(engine, wal);

	gen12_ctx_gt_mocs_init(engine, wal);
}

static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
			   struct i915_wa_list *wal,
			   const char *name)
{
	struct drm_i915_private *i915 = engine->i915;

	wa_init_start(wal, engine->gt, name, engine->name);

	/* Applies to all engines */
	/*
	 * Fake workarounds are not actual workarounds, but the programming of
	 * context registers using the workaround framework.
	 */
	if (GRAPHICS_VER(i915) >= 12)
		gen12_ctx_gt_fake_wa_init(engine, wal);

	if (engine->class != RENDER_CLASS)
		goto done;

	if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
		xelpg_ctx_workarounds_init(engine, wal);
	else if (IS_PONTEVECCHIO(i915))
		; /* noop; none at this time */
	else if (IS_DG2(i915))
		dg2_ctx_workarounds_init(engine, wal);
	else if (IS_XEHPSDV(i915))
		; /* noop; none at this time */
	else if (IS_DG1(i915))
		dg1_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 12)
		gen12_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 11)
		icl_ctx_workarounds_init(engine, wal);
	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
		cfl_ctx_workarounds_init(engine, wal);
	else if (IS_GEMINILAKE(i915))
		glk_ctx_workarounds_init(engine, wal);
	else if (IS_KABYLAKE(i915))
		kbl_ctx_workarounds_init(engine, wal);
	else if (IS_BROXTON(i915))
		bxt_ctx_workarounds_init(engine, wal);
	else if (IS_SKYLAKE(i915))
		skl_ctx_workarounds_init(engine, wal);
	else if (IS_CHERRYVIEW(i915))
		chv_ctx_workarounds_init(engine, wal);
	else if (IS_BROADWELL(i915))
		bdw_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) == 6)
		gen6_ctx_workarounds_init(engine, wal);
	else if (GRAPHICS_VER(i915) < 8)
		;
	else
		MISSING_CASE(GRAPHICS_VER(i915));

done:
	wa_init_finish(wal);
}

void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
{
	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
}

int intel_engine_emit_ctx_wa(struct i915_request *rq)
{
	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
	struct intel_uncore *uncore = rq->engine->uncore;
	enum forcewake_domains fw;
	unsigned long flags;
	struct i915_wa *wa;
	unsigned int i;
	u32 *cs;
	int ret;

	if (wal->count == 0)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	fw = wal_get_fw_for_rmw(uncore, wal);

	intel_gt_mcr_lock(wal->gt, &flags);
	spin_lock(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw);

	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
		u32 val;

		/* Skip reading the register if it's not really needed */
		if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
			val = wa->set;
		} else {
			val = wa->is_mcr ?
				intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
				intel_uncore_read_fw(uncore, wa->reg);
			val &= ~wa->clr;
			val |= wa->set;
		}

		*cs++ = i915_mmio_reg_offset(wa->reg);
		*cs++ = val;
	}
	*cs++ = MI_NOOP;

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock(&uncore->lock);
	intel_gt_mcr_unlock(wal->gt, flags);

	intel_ring_advance(rq, cs);

	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static void
gen4_gt_workarounds_init(struct intel_gt *gt,
			 struct i915_wa_list *wal)
{
	/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}

static void
g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen4_gt_workarounds_init(gt, wal);

	/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
	wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}

static void
ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	g4x_gt_workarounds_init(gt, wal);

	wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}

static void
snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
}

static void
ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	wa_masked_dis(wal,
		      GEN7_COMMON_SLICE_CHICKEN1,
		      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
	wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);

	/* WaForceL3Serialization:ivb */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
}

static void
vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* WaForceL3Serialization:vlv */
	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
}

static void
hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* L3 caching of data atomics doesn't work -- disable it. */
	wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);

	wa_add(wal,
	       HSW_ROW_CHICKEN3, 0,
	       _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
	       0 /* XXX does this reg exist? */, true);

	/* WaVSRefCountFullforceMissDisable:hsw */
	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
}

static void
gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
	unsigned int slice, subslice;
	u32 mcr, mcr_mask;

	GEM_BUG_ON(GRAPHICS_VER(i915) != 9);

	/*
	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
	 * Before any MMIO read into slice/subslice specific registers, MCR
	 * packet control register needs to be programmed to point to any
	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
	 * This means each subsequent MMIO read will be forwarded to a
	 * specific s/ss combination, but this is OK since these registers
	 * are consistent across s/ss in almost all cases. In the rare
	 * occasions, such as INSTDONE, where this value is dependent
	 * on s/ss combo, the read should be done with read_subslice_reg.
	 */
	slice = ffs(sseu->slice_mask) - 1;
	GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
	subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
	GEM_BUG_ON(!subslice);
	subslice--;

	/*
	 * We use the GEN8_MCR_*() macros to calculate the |mcr| value for
	 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
	 */
	mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;

	drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);

	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
}

static void
gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	/* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
	gen9_wa_init_mcr(i915, wal);

	/* WaDisableKillLogic:bxt,skl,kbl */
	if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
		wa_write_or(wal,
			    GAM_ECOCHK,
			    ECOCHK_DIS_TLB);

	if (HAS_LLC(i915)) {
		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
		 *
		 * Must match Display Engine. See
		 * WaCompressedResourceDisplayNewHashMode.
		 */
		wa_write_or(wal,
			    MMCD_MISC_CTRL,
			    MMCD_PCLA | MMCD_HOTSPOT_EN);
	}

	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
	wa_write_or(wal,
		    GAM_ECOCHK,
		    BDW_DISABLE_HDC_INVALIDATION);
}

static void
skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:skl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:skl */
	if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
		wa_write_or(wal,
			    GEN9_GAMT_ECO_REG_RW_IA,
			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableDynamicCreditSharing:kbl */
	if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
		wa_write_or(wal,
			    GAMT_CHKN_BIT_REG,
			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);

	/* WaDisableGafsUnitClkGating:kbl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:kbl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void
glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);
}

static void
cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen9_gt_workarounds_init(gt, wal);

	/* WaDisableGafsUnitClkGating:cfl */
	wa_write_or(wal,
		    GEN7_UCGCTL4,
		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);

	/* WaInPlaceDecompressionHang:cfl */
	wa_write_or(wal,
		    GEN9_GAMT_ECO_REG_RW_IA,
		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}

static void __set_mcr_steering(struct i915_wa_list *wal,
			       i915_reg_t steering_reg,
			       unsigned int slice, unsigned int subslice)
{
	u32 mcr, mcr_mask;

	mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;

	wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
}

static void debug_dump_steering(struct intel_gt *gt)
{
	struct drm_printer p = drm_dbg_printer(&gt->i915->drm, DRM_UT_DRIVER,
					       "MCR Steering:");

	if (drm_debug_enabled(DRM_UT_DRIVER))
		intel_gt_mcr_report_steering(&p, gt, false);
}

static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
			 unsigned int slice, unsigned int subslice)
{
	__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);

	gt->default_steering.groupid = slice;
	gt->default_steering.instanceid = subslice;

	debug_dump_steering(gt);
}

static void
icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	unsigned int subslice;

	GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
	GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);

	/*
	 * Although a platform may have subslices, we need to always steer
	 * reads to the lowest instance that isn't fused off. When Render
	 * Power Gating is enabled, grabbing forcewake will only power up a
	 * single subslice (the "minconfig") if there isn't a real workload
	 * that needs to be run; this means that if we steer register reads to
	 * one of the higher subslices, we run the risk of reading back 0's or
	 * random garbage.
	 */
	subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));

	/*
	 * If the subslice we picked above also steers us to a valid L3 bank,
	 * then we can just rely on the default steering and won't need to
	 * worry about explicitly re-steering L3BANK reads later.
	 */
	if (gt->info.l3bank_mask & BIT(subslice))
		gt->steering_table[L3BANK] = NULL;

	__add_mcr_wa(gt, wal, 0, subslice);
}

static void
xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	const struct sseu_dev_info *sseu = &gt->info.sseu;
	unsigned long slice, subslice = 0, slice_mask = 0;
	u32 lncf_mask = 0;
	int i;

	/*
	 * On Xe_HP the steering increases in complexity. There are now several
	 * more units that require steering and we're not guaranteed to be able
	 * to find a common setting for all of them. These are:
	 * - GSLICE (fusable)
	 * - DSS (sub-unit within gslice; fusable)
	 * - L3 Bank (fusable)
	 * - MSLICE (fusable)
	 * - LNCF (sub-unit within mslice; always present if mslice is present)
	 *
	 * We'll do our default/implicit steering based on GSLICE (in the
	 * sliceid field) and DSS (in the subsliceid field). If we can
	 * find overlap between the valid MSLICE and/or LNCF values with
	 * a suitable GSLICE, then we can just re-use the default value and
	 * skip any explicit steering at runtime.
	 *
	 * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
	 * a valid sliceid value. DSS steering is the only type of steering
	 * that utilizes the 'subsliceid' bits.
	 *
	 * Also note that, even though the steering domain is called "GSlice"
	 * and it is encoded in the register using the gslice format, the spec
	 * says that the combined (geometry | compute) fuse should be used to
	 * select the steering.
	 */

	/* Find the potential gslice candidates */
	slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
						       GEN_DSS_PER_GSLICE);

	/*
	 * Find the potential LNCF candidates. Either LNCF within a valid
	 * mslice is fine.
	 */
	for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
		lncf_mask |= (0x3 << (i * 2));

	/*
	 * Are there any sliceid values that work for both GSLICE and LNCF
	 * steering?
	 */
	if (slice_mask & lncf_mask) {
		slice_mask &= lncf_mask;
		gt->steering_table[LNCF] = NULL;
	}

	/* How about sliceid values that also work for MSLICE steering? */
	if (slice_mask & gt->info.mslice_mask) {
		slice_mask &= gt->info.mslice_mask;
		gt->steering_table[MSLICE] = NULL;
	}

	if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0))
		gt->steering_table[GAM] = NULL;

	slice = __ffs(slice_mask);
	subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
		GEN_DSS_PER_GSLICE;

	__add_mcr_wa(gt, wal, slice, subslice);

	/*
	 * SQIDI ranges are special because they use different steering
	 * registers than everything else we work with. On XeHP SDV and
	 * DG2-G10, any value in the steering registers will work fine since
	 * all instances are present, but DG2-G11 only has SQIDI instances at
	 * ID's 2 and 3, so we need to steer to one of those. For simplicity
	 * we'll just steer to a hardcoded "2" since that value will work
	 * everywhere.
	 */
	__set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
	__set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);

	/*
	 * On DG2, GAM registers have a dedicated steering control register
	 * and must always be programmed to a hardcoded groupid of "1."
	 */
	if (IS_DG2(gt->i915))
		__set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);
}
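
/*
 * A worked example of the LNCF candidate mask above (fuse values invented
 * for illustration): each mslice contributes two LNCF instances, so
 * mslice_mask == 0b01 yields lncf_mask == 0b0011, and mslice_mask == 0b10
 * yields lncf_mask == 0b1100. Any gslice candidate that also lands in
 * lncf_mask lets the LNCF steering table entry be dropped.
 */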

static void
pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
	unsigned int dss;

	/*
	 * Setup implicit steering for COMPUTE and DSS ranges to the first
	 * non-fused-off DSS. All other types of MCR registers will be
	 * explicitly steered.
	 */
	dss = intel_sseu_find_first_xehp_dss(&gt->info.sseu, 0, 0);
	__add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
}

static void
icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	icl_wa_init_mcr(gt, wal);

	/* WaModifyGamTlbPartitioning:icl */
	wa_write_clr_set(wal,
			 GEN11_GACB_PERF_CTRL,
			 GEN11_HASH_CTRL_MASK,
			 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);

	/* Wa_1405766107:icl
	 * Formerly known as WaCL2SFHalfMaxAlloc
	 */
	wa_write_or(wal,
		    GEN11_LSN_UNSLCVC,
		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);

	/* Wa_220166154:icl
	 * Formerly known as WaDisCtxReload
	 */
	wa_write_or(wal,
		    GEN8_GAMW_ECO_DEV_RW_IA,
		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);

	/* Wa_1406463099:icl
	 * Formerly known as WaGamTlbPendError
	 */
	wa_write_or(wal,
		    GAMT_CHKN_BIT_REG,
		    GAMT_CHKN_DISABLE_L3_COH_PIPE);

	/*
	 * Wa_1408615072:icl,ehl (vsunit)
	 * Wa_1407596294:icl,ehl (hsunit)
	 */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
		    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);

	/* Wa_1407352427:icl,ehl */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
		    PSDUNIT_CLKGATE_DIS);

	/* Wa_1406680159:icl,ehl */
	wa_mcr_write_or(wal,
			GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
			GWUNIT_CLKGATE_DIS);

	/* Wa_1607087056:icl,ehl,jsl */
	if (IS_ICELAKE(i915) ||
	    ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
	     IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
		wa_write_or(wal,
			    GEN11_SLICE_UNIT_LEVEL_CLKGATE,
			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);

	/*
	 * This is not a documented workaround, but rather an optimization
	 * to reduce sampler power.
	 */
	wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
}

/*
 * Though there are per-engine instances of these registers,
 * they retain their value through engine resets and should
 * only be provided on the GT workaround list rather than
 * the engine-specific workaround list.
 */
static void
wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct intel_engine_cs *engine;
	int id;

	for_each_engine(engine, gt, id) {
		if (engine->class != VIDEO_DECODE_CLASS ||
		    (engine->instance % 2))
			continue;

		wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
			    IECPUNIT_CLKGATE_DIS);
	}
}

static void
gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	icl_wa_init_mcr(gt, wal);

	/* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
	wa_14011060649(gt, wal);

	/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
	wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);

	/*
	 * Wa_14015795083
	 *
	 * Firmware on some gen12 platforms locks the MISCCPCTL register,
	 * preventing i915 from modifying it for this workaround. Skip the
	 * readback verification for this workaround on debug builds; if the
	 * workaround doesn't stick due to firmware behavior, it's not an error
	 * that we want CI to flag.
	 */
	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
	       0, 0, false);
}

static void
dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	gen12_gt_workarounds_init(gt, wal);

	/* Wa_1409420604:dg1 */
	wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
			CPSSUNIT_CLKGATE_DIS);

	/* Wa_1408615072:dg1 */
	/* Empirical testing shows this register is unaffected by engine reset. */
	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
}

static void
xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct drm_i915_private *i915 = gt->i915;

	xehp_init_mcr(gt, wal);

	/* Wa_1409757795:xehpsdv */
	wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);

	/* Wa_18011725039:xehpsdv */
	if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
		wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
		wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
	}

	/* Wa_16011155590:xehpsdv */
	if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
			    TSGUNIT_CLKGATE_DIS);

	/* Wa_14011780169:xehpsdv */
	if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
		wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
			    GAMTLBVDBOX7_CLKGATE_DIS |
			    GAMTLBVDBOX6_CLKGATE_DIS |
			    GAMTLBVDBOX5_CLKGATE_DIS |
			    GAMTLBVDBOX4_CLKGATE_DIS |
			    GAMTLBVDBOX3_CLKGATE_DIS |
			    GAMTLBVDBOX2_CLKGATE_DIS |
			    GAMTLBVDBOX1_CLKGATE_DIS |
			    GAMTLBVDBOX0_CLKGATE_DIS |
			    GAMTLBKCR_CLKGATE_DIS |
			    GAMTLBGUC_CLKGATE_DIS |
			    GAMTLBBLT_CLKGATE_DIS);
		wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
			    GAMTLBGFXA1_CLKGATE_DIS |
			    GAMTLBCOMPA0_CLKGATE_DIS |
			    GAMTLBCOMPA1_CLKGATE_DIS |
			    GAMTLBCOMPB0_CLKGATE_DIS |
			    GAMTLBCOMPB1_CLKGATE_DIS |
			    GAMTLBCOMPC0_CLKGATE_DIS |
			    GAMTLBCOMPC1_CLKGATE_DIS |
			    GAMTLBCOMPD0_CLKGATE_DIS |
			    GAMTLBCOMPD1_CLKGATE_DIS |
			    GAMTLBMERT_CLKGATE_DIS |
			    GAMTLBVEBOX3_CLKGATE_DIS |
			    GAMTLBVEBOX2_CLKGATE_DIS |
			    GAMTLBVEBOX1_CLKGATE_DIS |
			    GAMTLBVEBOX0_CLKGATE_DIS);
	}

	/* Wa_16012725990:xehpsdv */
	if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);

	/* Wa_14011060649:xehpsdv */
	wa_14011060649(gt, wal);

	/* Wa_14012362059:xehpsdv */
	wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_14014368820:xehpsdv */
	wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
			INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);

	/* Wa_14010670810:xehpsdv */
	wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}

static void
dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	xehp_init_mcr(gt, wal);

	/* Wa_14011060649:dg2 */
	wa_14011060649(gt, wal);

	if (IS_DG2_G10(gt->i915)) {
		/* Wa_22010523718:dg2 */
		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
			    CG3DDISCFEG_CLKGATE_DIS);

		/* Wa_14011006942:dg2 */
		wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
				DSS_ROUTER_CLKGATE_DIS);
	}

	/* Wa_14014830051:dg2 */
	wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);

	/*
	 * Wa_14015795083
	 * Skip verification for possibly locked register.
	 */
	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
	       0, 0, false);

	/* Wa_18018781329 */
	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_1509235366:dg2 */
	wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
			INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);

	/* Wa_14010648519:dg2 */
	wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
}

static void
pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	pvc_init_mcr(gt, wal);

	/* Wa_14015795083 */
	wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);

	/* Wa_18018781329 */
	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
	wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_16016694945 */
	wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
}

static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	/* Wa_14018575942 / Wa_18018781329 */
	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_22016670082 */
	wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);

	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
		/* Wa_14014830051 */
		wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);

		/* Wa_14015795083 */
		wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
	}

	/*
	 * Unlike older platforms, we no longer setup implicit steering here;
	 * all MCR accesses are explicitly steered.
	 */
	debug_dump_steering(gt);
}

static void
wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
{
	struct intel_engine_cs *engine;
	int id;

	for_each_engine(engine, gt, id)
		if (engine->class == VIDEO_DECODE_CLASS)
			wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
				    MFXPIPE_CLKGATE_DIS);
}

static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
	wa_16021867713(gt, wal);

	/*
	 * Wa_14018778641
	 * Wa_18018781329
	 *
	 * Note that although these registers are MCR on the primary
	 * GT, the media GT's versions are regular singleton registers.
	 */
	wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);

	/* Wa_22016670082 */
	wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);

	debug_dump_steering(gt);
}
1719 */
1720 static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
1721 {
1722 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
1723 wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1724 wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1725 }
1726
1727 if (IS_PONTEVECCHIO(gt->i915)) {
1728 wa_mcr_write(wal, XEHPC_L3SCRUB,
1729 SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
1730 wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
1731 }
1732
1733 if (IS_DG2(gt->i915)) {
1734 wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1735 wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1736 }
1737 }
1738
1739 static void
1740 gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
1741 {
1742 struct drm_i915_private *i915 = gt->i915;
1743
1744 gt_tuning_settings(gt, wal);
1745
1746 if (gt->type == GT_MEDIA) {
1747 if (MEDIA_VER_FULL(i915) == IP_VER(13, 0))
1748 xelpmp_gt_workarounds_init(gt, wal);
1749 else
1750 MISSING_CASE(MEDIA_VER_FULL(i915));
1751
1752 return;
1753 }
1754
1755 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
1756 xelpg_gt_workarounds_init(gt, wal);
1757 else if (IS_PONTEVECCHIO(i915))
1758 pvc_gt_workarounds_init(gt, wal);
1759 else if (IS_DG2(i915))
1760 dg2_gt_workarounds_init(gt, wal);
1761 else if (IS_XEHPSDV(i915))
1762 xehpsdv_gt_workarounds_init(gt, wal);
1763 else if (IS_DG1(i915))
1764 dg1_gt_workarounds_init(gt, wal);
1765 else if (GRAPHICS_VER(i915) == 12)
1766 gen12_gt_workarounds_init(gt, wal);
1767 else if (GRAPHICS_VER(i915) == 11)
1768 icl_gt_workarounds_init(gt, wal);
1769 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
1770 cfl_gt_workarounds_init(gt, wal);
1771 else if (IS_GEMINILAKE(i915))
1772 glk_gt_workarounds_init(gt, wal);
1773 else if (IS_KABYLAKE(i915))
1774 kbl_gt_workarounds_init(gt, wal);
1775 else if (IS_BROXTON(i915))
1776 gen9_gt_workarounds_init(gt, wal);
1777 else if (IS_SKYLAKE(i915))
1778 skl_gt_workarounds_init(gt, wal);
1779 else if (IS_HASWELL(i915))
1780 hsw_gt_workarounds_init(gt, wal);
1781 else if (IS_VALLEYVIEW(i915))
1782 vlv_gt_workarounds_init(gt, wal);
1783 else if (IS_IVYBRIDGE(i915))
1784 ivb_gt_workarounds_init(gt, wal);
1785 else if (GRAPHICS_VER(i915) == 6)
1786 snb_gt_workarounds_init(gt, wal);
1787 else if (GRAPHICS_VER(i915) == 5)
1788 ilk_gt_workarounds_init(gt, wal);
1789 else if (IS_G4X(i915))
1790 g4x_gt_workarounds_init(gt, wal);
1791 else if (GRAPHICS_VER(i915) == 4)
1792 gen4_gt_workarounds_init(gt, wal);
1793 else if (GRAPHICS_VER(i915) <= 8)
1794 ;
1795 else
1796 MISSING_CASE(GRAPHICS_VER(i915));
1797 }
1798
1799 void intel_gt_init_workarounds(struct intel_gt *gt)
1800 {
1801 struct i915_wa_list *wal = &gt->wa_list;
1802
1803 wa_init_start(wal, gt, "GT", "global");
1804 gt_init_workarounds(gt, wal);
1805 wa_init_finish(wal);
1806 }
1807
1808 static bool
1809 wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
1810 const char *name, const char *from)
1811 {
1812 if ((cur ^ wa->set) & wa->read) {
1813 gt_err(gt,
1814 "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
1815 name, from, i915_mmio_reg_offset(wa->reg),
1816 cur, cur & wa->read, wa->set & wa->read);
1817
1818 return false;
1819 }
1820
1821 return true;
1822 }
1823
1824 static void wa_list_apply(const struct i915_wa_list *wal)
1825 {
1826 struct intel_gt *gt = wal->gt;
1827 struct intel_uncore *uncore = gt->uncore;
1828 enum forcewake_domains fw;
1829 unsigned long flags;
1830 struct i915_wa *wa;
1831 unsigned int i;
1832
1833 if (!wal->count)
1834 return;
1835
1836 fw = wal_get_fw_for_rmw(uncore, wal);
1837
1838 intel_gt_mcr_lock(gt, &flags);
1839 spin_lock(&uncore->lock);
1840 intel_uncore_forcewake_get__locked(uncore, fw);
1841
1842 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
1843 u32 val, old = 0;
1844
1845 /* open-coded rmw due to steering */
1846 if (wa->clr)
1847 old = wa->is_mcr ?
1848 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1849 intel_uncore_read_fw(uncore, wa->reg);
1850 val = (old & ~wa->clr) | wa->set;
1851 if (val != old || !wa->clr) {
1852 if (wa->is_mcr)
1853 intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
1854 else
1855 intel_uncore_write_fw(uncore, wa->reg, val);
1856 }
1857
1858 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
1859 u32 val = wa->is_mcr ?
1860 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1861 intel_uncore_read_fw(uncore, wa->reg);
1862
1863 wa_verify(gt, wa, val, wal->name, "application");
1864 }
1865 }
1866
1867 intel_uncore_forcewake_put__locked(uncore, fw);
1868 spin_unlock(&uncore->lock);
1869 intel_gt_mcr_unlock(gt, flags);
1870 }
1871
1872 void intel_gt_apply_workarounds(struct intel_gt *gt)
1873 {
1874 wa_list_apply(&gt->wa_list);
1875 }
1876
1877 static bool wa_list_verify(struct intel_gt *gt,
1878 const struct i915_wa_list *wal,
1879 const char *from)
1880 {
1881 struct intel_uncore *uncore = gt->uncore;
1882 struct i915_wa *wa;
1883 enum forcewake_domains fw;
1884 unsigned long flags;
1885 unsigned int i;
1886 bool ok = true;
1887
1888 fw = wal_get_fw_for_rmw(uncore, wal);
1889
1890 intel_gt_mcr_lock(gt, &flags);
1891 spin_lock(&uncore->lock);
1892 intel_uncore_forcewake_get__locked(uncore, fw);
1893
1894 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1895 ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
1896 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1897 intel_uncore_read_fw(uncore, wa->reg),
1898 wal->name, from);
1899
1900 intel_uncore_forcewake_put__locked(uncore, fw);
1901 spin_unlock(&uncore->lock);
1902 intel_gt_mcr_unlock(gt, flags);
1903
1904 return ok;
1905 }
1906
1907 bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
1908 {
1909 return wa_list_verify(gt, &gt->wa_list, from);
1910 }
1911
1912 __maybe_unused
1913 static bool is_nonpriv_flags_valid(u32 flags)
1914 {
1915 /* Check only valid flag bits are set */
1916 if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
1917 return false;
1918
1919 /* NB: Only 3 out of 4 enum values are valid for access field */
1920 if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
1921 RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
1922 return false;
1923
1924 return true;
1925 }
1926
1927 static void
1928 whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
1929 {
1930 struct i915_wa wa = {
1931 .reg = reg
1932 };
1933
1934 if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1935 return;
1936
1937 if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
1938 return;
1939
1940 wa.reg.reg |= flags;
1941 _wa_add(wal, &wa);
1942 }
1943
1944 static void
1945 whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
1946 {
1947 struct i915_wa wa = {
1948 .mcr_reg = reg,
1949 .is_mcr = 1,
1950 };
1951
1952 if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1953 return;
1954
1955 if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
1956 return;
1957
1958 wa.mcr_reg.reg |= flags;
1959 _wa_add(wal, &wa);
1960 }
1961
1962 static void
1963 whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
1964 {
1965 whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
1966 }
1967
1968 static void
1969 whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
1970 {
1971 whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
1972 }
1973
1974 static void gen9_whitelist_build(struct i915_wa_list *w)
1975 {
1976 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
1977 whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
1978
1979 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
1980 whitelist_reg(w, GEN8_CS_CHICKEN1);
1981
1982 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
1983 whitelist_reg(w, GEN8_HDC_CHICKEN1);
1984
1985 /* WaSendPushConstantsFromMMIO:skl,bxt */
1986 whitelist_reg(w, COMMON_SLICE_CHICKEN2);
1987 }
1988
1989 static void skl_whitelist_build(struct intel_engine_cs *engine)
1990 {
1991 struct i915_wa_list *w = &engine->whitelist;
1992
1993 if (engine->class != RENDER_CLASS)
1994 return;
1995
1996 gen9_whitelist_build(w);
1997
1998 /* WaDisableLSQCROPERFforOCL:skl */
1999 whitelist_mcr_reg(w, GEN8_L3SQCREG4);
2000 }
2001
2002 static void bxt_whitelist_build(struct intel_engine_cs *engine)
2003 {
2004 if (engine->class != RENDER_CLASS)
2005 return;
2006
2007 gen9_whitelist_build(&engine->whitelist);
2008 }
2009
2010 static void kbl_whitelist_build(struct intel_engine_cs *engine)
2011 {
2012 struct i915_wa_list *w = &engine->whitelist;
2013
2014 if (engine->class != RENDER_CLASS)
2015 return;
2016
2017 gen9_whitelist_build(w);
2018
2019 /* WaDisableLSQCROPERFforOCL:kbl */
2020 whitelist_mcr_reg(w, GEN8_L3SQCREG4);
2021 }
2022
2023 static void glk_whitelist_build(struct intel_engine_cs *engine)
2024 {
2025 struct i915_wa_list *w = &engine->whitelist;
2026
2027 if (engine->class != RENDER_CLASS)
2028 return;
2029
2030 gen9_whitelist_build(w);
2031
2032 /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
2033 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
2034 }
2035
2036 static void cfl_whitelist_build(struct intel_engine_cs *engine)
2037 {
2038 struct i915_wa_list *w = &engine->whitelist;
2039
2040 if (engine->class != RENDER_CLASS)
2041 return;
2042
2043 gen9_whitelist_build(w);
2044
2045 /*
2046 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
2047 *
2048 * This covers 4 registers which are next to one another:
2049 * - PS_INVOCATION_COUNT
2050 * - PS_INVOCATION_COUNT_UDW
2051 * - PS_DEPTH_COUNT
2052 * - PS_DEPTH_COUNT_UDW
2053 */
2054 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2055 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2056 RING_FORCE_TO_NONPRIV_RANGE_4);
2057 }
2058
2059 static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
2060 {
2061 struct i915_wa_list *w = &engine->whitelist;
2062
2063 if (engine->class != RENDER_CLASS)
2064 whitelist_reg_ext(w,
2065 RING_CTX_TIMESTAMP(engine->mmio_base),
2066 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2067 }
2068
2069 static void cml_whitelist_build(struct intel_engine_cs *engine)
2070 {
2071 allow_read_ctx_timestamp(engine);
2072
2073 cfl_whitelist_build(engine);
2074 }
2075
2076 static void icl_whitelist_build(struct intel_engine_cs *engine)
2077 {
2078 struct i915_wa_list *w = &engine->whitelist;
2079
2080 allow_read_ctx_timestamp(engine);
2081
2082 switch (engine->class) {
2083 case RENDER_CLASS:
2084 /* WaAllowUMDToModifyHalfSliceChicken7:icl */
2085 whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);
2086
2087 /* WaAllowUMDToModifySamplerMode:icl */
2088 whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);
2089
2090 /* WaEnableStateCacheRedirectToCS:icl */
2091 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
2092
2093 /*
2094 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
2095 *
2096 * This covers 4 registers which are next to one another:
2097 * - PS_INVOCATION_COUNT
2098 * - PS_INVOCATION_COUNT_UDW
2099 * - PS_DEPTH_COUNT
2100 * - PS_DEPTH_COUNT_UDW
2101 */
2102 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2103 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2104 RING_FORCE_TO_NONPRIV_RANGE_4);
2105 break;
2106
2107 case VIDEO_DECODE_CLASS:
2108 /* hucStatusRegOffset */
2109 whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
2110 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2111 /* hucUKernelHdrInfoRegOffset */
2112 whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
2113 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2114 /* hucStatus2RegOffset */
2115 whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
2116 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2117 break;
2118
2119 default:
2120 break;
2121 }
2122 }
2123
2124 static void tgl_whitelist_build(struct intel_engine_cs *engine)
2125 {
2126 struct i915_wa_list *w = &engine->whitelist;
2127
2128 allow_read_ctx_timestamp(engine);
2129
2130 switch (engine->class) {
2131 case RENDER_CLASS:
2132 /*
2133 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
2134 * Wa_1408556865:tgl
2135 *
2136 * This covers 4 registers which are next to one another:
2137 * - PS_INVOCATION_COUNT
2138 * - PS_INVOCATION_COUNT_UDW
2139 * - PS_DEPTH_COUNT
2140 * - PS_DEPTH_COUNT_UDW
2141 */
2142 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2143 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2144 RING_FORCE_TO_NONPRIV_RANGE_4);
2145
2146 /*
2147 * Wa_1808121037:tgl
2148 * Wa_14012131227:dg1
2149 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
2150 */
2151 whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
2152
2153 /* Wa_1806527549:tgl */
2154 whitelist_reg(w,
HIZ_CHICKEN); 2155 2156 /* Required by recommended tuning setting (not a workaround) */ 2157 whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3); 2158 2159 break; 2160 default: 2161 break; 2162 } 2163 } 2164 2165 static void dg2_whitelist_build(struct intel_engine_cs *engine) 2166 { 2167 struct i915_wa_list *w = &engine->whitelist; 2168 2169 switch (engine->class) { 2170 case RENDER_CLASS: 2171 /* Required by recommended tuning setting (not a workaround) */ 2172 whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3); 2173 2174 break; 2175 default: 2176 break; 2177 } 2178 } 2179 2180 static void blacklist_trtt(struct intel_engine_cs *engine) 2181 { 2182 struct i915_wa_list *w = &engine->whitelist; 2183 2184 /* 2185 * Prevent read/write access to [0x4400, 0x4600) which covers 2186 * the TRTT range across all engines. Note that normally userspace 2187 * cannot access the other engines' trtt control, but for simplicity 2188 * we cover the entire range on each engine. 2189 */ 2190 whitelist_reg_ext(w, _MMIO(0x4400), 2191 RING_FORCE_TO_NONPRIV_DENY | 2192 RING_FORCE_TO_NONPRIV_RANGE_64); 2193 whitelist_reg_ext(w, _MMIO(0x4500), 2194 RING_FORCE_TO_NONPRIV_DENY | 2195 RING_FORCE_TO_NONPRIV_RANGE_64); 2196 } 2197 2198 static void pvc_whitelist_build(struct intel_engine_cs *engine) 2199 { 2200 /* Wa_16014440446:pvc */ 2201 blacklist_trtt(engine); 2202 } 2203 2204 static void xelpg_whitelist_build(struct intel_engine_cs *engine) 2205 { 2206 struct i915_wa_list *w = &engine->whitelist; 2207 2208 switch (engine->class) { 2209 case RENDER_CLASS: 2210 /* Required by recommended tuning setting (not a workaround) */ 2211 whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3); 2212 2213 break; 2214 default: 2215 break; 2216 } 2217 } 2218 2219 void intel_engine_init_whitelist(struct intel_engine_cs *engine) 2220 { 2221 struct drm_i915_private *i915 = engine->i915; 2222 struct i915_wa_list *w = &engine->whitelist; 2223 2224 wa_init_start(w, engine->gt, "whitelist", engine->name); 2225 2226 if (engine->gt->type == GT_MEDIA) 2227 ; /* none yet */ 2228 else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74))) 2229 xelpg_whitelist_build(engine); 2230 else if (IS_PONTEVECCHIO(i915)) 2231 pvc_whitelist_build(engine); 2232 else if (IS_DG2(i915)) 2233 dg2_whitelist_build(engine); 2234 else if (IS_XEHPSDV(i915)) 2235 ; /* none needed */ 2236 else if (GRAPHICS_VER(i915) == 12) 2237 tgl_whitelist_build(engine); 2238 else if (GRAPHICS_VER(i915) == 11) 2239 icl_whitelist_build(engine); 2240 else if (IS_COMETLAKE(i915)) 2241 cml_whitelist_build(engine); 2242 else if (IS_COFFEELAKE(i915)) 2243 cfl_whitelist_build(engine); 2244 else if (IS_GEMINILAKE(i915)) 2245 glk_whitelist_build(engine); 2246 else if (IS_KABYLAKE(i915)) 2247 kbl_whitelist_build(engine); 2248 else if (IS_BROXTON(i915)) 2249 bxt_whitelist_build(engine); 2250 else if (IS_SKYLAKE(i915)) 2251 skl_whitelist_build(engine); 2252 else if (GRAPHICS_VER(i915) <= 8) 2253 ; 2254 else 2255 MISSING_CASE(GRAPHICS_VER(i915)); 2256 2257 wa_init_finish(w); 2258 } 2259 2260 void intel_engine_apply_whitelist(struct intel_engine_cs *engine) 2261 { 2262 const struct i915_wa_list *wal = &engine->whitelist; 2263 struct intel_uncore *uncore = engine->uncore; 2264 const u32 base = engine->mmio_base; 2265 struct i915_wa *wa; 2266 unsigned int i; 2267 2268 if (!wal->count) 2269 return; 2270 2271 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) 2272 intel_uncore_write(uncore, 2273 RING_FORCE_TO_NONPRIV(base, i), 2274 i915_mmio_reg_offset(wa->reg)); 2275 2276 /* And clear the rest 
just in case of garbage */
2277 for (; i < RING_MAX_NONPRIV_SLOTS; i++)
2278 intel_uncore_write(uncore,
2279 RING_FORCE_TO_NONPRIV(base, i),
2280 i915_mmio_reg_offset(RING_NOPID(base)));
2281 }
2282
2283 /*
2284 * engine_fake_wa_init() is a placeholder for programming registers
2285 * that are not covered by an official workaround defined by the
2286 * hardware team.
2287 * Routing the programming of those registers through the workaround
2288 * framework allows proper application and verification.
2289 */
2290 static void
2291 engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2292 {
2293 u8 mocs_w, mocs_r;
2294
2295 /*
2296 * RING_CMD_CCTL specifies the default MOCS entry that will be used
2297 * by the command streamer when executing commands that don't have
2298 * a way to explicitly specify a MOCS setting. The default should
2299 * usually reference whichever MOCS entry corresponds to uncached
2300 * behavior, although use of a WB cached entry is recommended by the
2301 * spec in certain circumstances on specific platforms.
2302 */
2303 if (GRAPHICS_VER(engine->i915) >= 12) {
2304 mocs_r = engine->gt->mocs.uc_index;
2305 mocs_w = engine->gt->mocs.uc_index;
2306
2307 if (HAS_L3_CCS_READ(engine->i915) &&
2308 engine->class == COMPUTE_CLASS) {
2309 mocs_r = engine->gt->mocs.wb_index;
2310
2311 /*
2312 * Even on the few platforms where MOCS 0 is a
2313 * legitimate table entry, it's never the correct
2314 * setting to use here; we can assume the MOCS init
2315 * just forgot to initialize wb_index.
2316 */
2317 drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
2318 }
2319
2320 wa_masked_field_set(wal,
2321 RING_CMD_CCTL(engine->mmio_base),
2322 CMD_CCTL_MOCS_MASK,
2323 CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
2324 }
2325 }
2326
2327 static void
2328 rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2329 {
2330 struct drm_i915_private *i915 = engine->i915;
2331 struct intel_gt *gt = engine->gt;
2332
2333 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2334 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
2335 /* Wa_22014600077 */
2336 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2337 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
2338 }
2339
2340 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2341 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2342 IS_DG2(i915)) {
2343 /* Wa_1509727124 */
2344 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2345 SC_DISABLE_POWER_OPTIMIZATION_EBB);
2346 }
2347
2348 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2349 IS_DG2(i915)) {
2350 /* Wa_22012856258 */
2351 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2352 GEN12_DISABLE_READ_SUPPRESSION);
2353 }
2354
2355 if (IS_DG2(i915)) {
2356 /*
2357 * Wa_22010960976:dg2
2358 * Wa_14013347512:dg2
2359 */
2360 wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
2361 LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
2362 }
2363
2364 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) ||
2365 IS_DG2(i915)) {
2366 /* Wa_14015150844 */
2367 wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
2368 _MASKED_BIT_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
2369 0, true);
2370 }
2371
2372 if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2373 IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2374 /*
2375 * Wa_1606700617:tgl,dg1,adl-p
2376 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
2377 * Wa_14010826681:tgl,dg1,rkl,adl-p
2378 * Wa_18019627453:dg2
2379 */
2380 wa_masked_en(wal,
2381 GEN9_CS_DEBUG_MODE1,
2382 FF_DOP_CLOCK_GATE_DISABLE);
2383 }
2384
2385 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
2386 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2387 /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
2388 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
2389
2390 /*
2391 * Wa_1407928979:tgl A*
2392 * Wa_18011464164:tgl[B0+],dg1[B0+]
2393 * Wa_22010931296:tgl[B0+],dg1[B0+]
2394 * Wa_14010919138:rkl,dg1,adl-s,adl-p
2395 */
2396 wa_write_or(wal, GEN7_FF_THREAD_MODE,
2397 GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2398
2399 /* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
2400 wa_mcr_masked_en(wal,
2401 GEN10_SAMPLER_MODE,
2402 ENABLE_SMALLPL);
2403 }
2404
2405 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2406 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2407 /* Wa_1409804808 */
2408 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2409 GEN12_PUSH_CONST_DEREF_HOLD_DIS);
2410
2411 /* Wa_14010229206 */
2412 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
2413 }
2414
2415 if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
2416 /*
2417 * Wa_1607297627
2418 *
2419 * On TGL and RKL there are multiple entries for this WA in the
2420 * BSpec; some indicate this is an A0-only WA, others indicate
2421 * it applies to all steppings so we trust the "all steppings."
2422 */
2423 wa_masked_en(wal,
2424 RING_PSMI_CTL(RENDER_RING_BASE),
2425 GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
2426 GEN8_RC_SEMA_IDLE_MSG_DISABLE);
2427 }
2428
2429 if (GRAPHICS_VER(i915) == 11) {
2430 /* This is not a Wa. Enable for better image quality */
2431 wa_masked_en(wal,
2432 _3D_CHICKEN3,
2433 _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
2434
2435 /*
2436 * Wa_1405543622:icl
2437 * Formerly known as WaGAPZPriorityScheme
2438 */
2439 wa_write_or(wal,
2440 GEN8_GARBCNTL,
2441 GEN11_ARBITRATION_PRIO_ORDER_MASK);
2442
2443 /*
2444 * Wa_1604223664:icl
2445 * Formerly known as WaL3BankAddressHashing
2446 */
2447 wa_write_clr_set(wal,
2448 GEN8_GARBCNTL,
2449 GEN11_HASH_CTRL_EXCL_MASK,
2450 GEN11_HASH_CTRL_EXCL_BIT0);
2451 wa_write_clr_set(wal,
2452 GEN11_GLBLINVL,
2453 GEN11_BANK_HASH_ADDR_EXCL_MASK,
2454 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
2455
2456 /*
2457 * Wa_1405733216:icl
2458 * Formerly known as WaDisableCleanEvicts
2459 */
2460 wa_mcr_write_or(wal,
2461 GEN8_L3SQCREG4,
2462 GEN11_LQSC_CLEAN_EVICT_DISABLE);
2463
2464 /* Wa_1606682166:icl */
2465 wa_write_or(wal,
2466 GEN7_SARCHKMD,
2467 GEN7_DISABLE_SAMPLER_PREFETCH);
2468
2469 /* Wa_1409178092:icl */
2470 wa_mcr_write_clr_set(wal,
2471 GEN11_SCRATCH2,
2472 GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
2473 0);
2474
2475 /* WaEnable32PlaneMode:icl */
2476 wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
2477 GEN11_ENABLE_32_PLANE_MODE);
2478
2479 /*
2480 * Wa_1408767742:icl[a2..forever],ehl[all]
2481 * Wa_1605460711:icl[a0..c0]
2482 */
2483 wa_write_or(wal,
2484 GEN7_FF_THREAD_MODE,
2485 GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2486
2487 /* Wa_22010271021 */
2488 wa_masked_en(wal,
2489 GEN9_CS_DEBUG_MODE1,
2490 FF_DOP_CLOCK_GATE_DISABLE);
2491 }
2492
2493 /*
2494 * Intel platforms that support fine-grained preemption (i.e., gen9 and
2495 * beyond) allow the kernel-mode driver to choose between two different
2496 * options for controlling preemption granularity and behavior.
2497 *
2498 * Option 1 (hardware default):
2499 * Preemption settings are controlled in a global manner via
2500 * kernel-only register CS_DEBUG_MODE1 (0x20EC).
Any granularity 2501 * and settings chosen by the kernel-mode driver will apply to all 2502 * userspace clients. 2503 * 2504 * Option 2: 2505 * Preemption settings are controlled on a per-context basis via 2506 * register CS_CHICKEN1 (0x2580). CS_CHICKEN1 is saved/restored on 2507 * context switch and is writable by userspace (e.g., via 2508 * MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer) 2509 * which allows different userspace drivers/clients to select 2510 * different settings, or to change those settings on the fly in 2511 * response to runtime needs. This option was known by name 2512 * "FtrPerCtxtPreemptionGranularityControl" at one time, although 2513 * that name is somewhat misleading as other non-granularity 2514 * preemption settings are also impacted by this decision. 2515 * 2516 * On Linux, our policy has always been to let userspace drivers 2517 * control preemption granularity/settings (Option 2). This was 2518 * originally mandatory on gen9 to prevent ABI breakage (old gen9 2519 * userspace developed before object-level preemption was enabled would 2520 * not behave well if i915 were to go with Option 1 and enable that 2521 * preemption in a global manner). On gen9 each context would have 2522 * object-level preemption disabled by default (see 2523 * WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but 2524 * userspace drivers could opt-in to object-level preemption as they 2525 * saw fit. For post-gen9 platforms, we continue to utilize Option 2; 2526 * even though it is no longer necessary for ABI compatibility when 2527 * enabling a new platform, it does ensure that userspace will be able 2528 * to implement any workarounds that show up requiring temporary 2529 * adjustments to preemption behavior at runtime. 2530 * 2531 * Notes/Workarounds: 2532 * - Wa_14015141709: On DG2 and early steppings of MTL, 2533 * CS_CHICKEN1[0] does not disable object-level preemption as 2534 * it is supposed to (nor does CS_DEBUG_MODE1[0] if we had been 2535 * using Option 1). Effectively this means userspace is unable 2536 * to disable object-level preemption on these platforms/steppings 2537 * despite the setting here. 2538 * 2539 * - Wa_16013994831: May require that userspace program 2540 * CS_CHICKEN1[10] when certain runtime conditions are true. 2541 * Userspace requires Option 2 to be in effect for their update of 2542 * CS_CHICKEN1[10] to be effective. 2543 * 2544 * Other workarounds may appear in the future that will also require 2545 * Option 2 behavior to allow proper userspace implementation. 
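 *
 * As a purely illustrative sketch (not something the driver itself
 * emits), with Option 2 in effect a userspace driver could adjust one
 * of its own context's preemption bits from a batch buffer roughly as:
 *
 *	*cs++ = MI_LOAD_REGISTER_IMM(1);
 *	*cs++ = 0x2580;				<- CS_CHICKEN1
 *	*cs++ = (1 << (10 + 16)) | (1 << 10);	<- mask + value for bit 10
 *
 * assuming the usual masked-register convention where the upper 16
 * bits select which of the lower 16 bits the write updates; bit 10
 * here is the CS_CHICKEN1[10] bit that Wa_16013994831 may require
 * userspace to program, per the note above.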
2546 */ 2547 if (GRAPHICS_VER(i915) >= 9) 2548 wa_masked_en(wal, 2549 GEN7_FF_SLICE_CS_CHICKEN1, 2550 GEN9_FFSC_PERCTX_PREEMPT_CTRL); 2551 2552 if (IS_SKYLAKE(i915) || 2553 IS_KABYLAKE(i915) || 2554 IS_COFFEELAKE(i915) || 2555 IS_COMETLAKE(i915)) { 2556 /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */ 2557 wa_write_or(wal, 2558 GEN8_GARBCNTL, 2559 GEN9_GAPS_TSV_CREDIT_DISABLE); 2560 } 2561 2562 if (IS_BROXTON(i915)) { 2563 /* WaDisablePooledEuLoadBalancingFix:bxt */ 2564 wa_masked_en(wal, 2565 FF_SLICE_CS_CHICKEN2, 2566 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE); 2567 } 2568 2569 if (GRAPHICS_VER(i915) == 9) { 2570 /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */ 2571 wa_masked_en(wal, 2572 GEN9_CSFE_CHICKEN1_RCS, 2573 GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE); 2574 2575 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */ 2576 wa_mcr_write_or(wal, 2577 BDW_SCRATCH1, 2578 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); 2579 2580 /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ 2581 if (IS_GEN9_LP(i915)) 2582 wa_mcr_write_clr_set(wal, 2583 GEN8_L3SQCREG1, 2584 L3_PRIO_CREDITS_MASK, 2585 L3_GENERAL_PRIO_CREDITS(62) | 2586 L3_HIGH_PRIO_CREDITS(2)); 2587 2588 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ 2589 wa_mcr_write_or(wal, 2590 GEN8_L3SQCREG4, 2591 GEN8_LQSC_FLUSH_COHERENT_LINES); 2592 2593 /* Disable atomics in L3 to prevent unrecoverable hangs */ 2594 wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1, 2595 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0); 2596 wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4, 2597 GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0); 2598 wa_mcr_write_clr_set(wal, GEN9_SCRATCH1, 2599 EVICTION_PERF_FIX_ENABLE, 0); 2600 } 2601 2602 if (IS_HASWELL(i915)) { 2603 /* WaSampleCChickenBitEnable:hsw */ 2604 wa_masked_en(wal, 2605 HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE); 2606 2607 wa_masked_dis(wal, 2608 CACHE_MODE_0_GEN7, 2609 /* enable HiZ Raw Stall Optimization */ 2610 HIZ_RAW_STALL_OPT_DISABLE); 2611 } 2612 2613 if (IS_VALLEYVIEW(i915)) { 2614 /* WaDisableEarlyCull:vlv */ 2615 wa_masked_en(wal, 2616 _3D_CHICKEN3, 2617 _3D_CHICKEN_SF_DISABLE_OBJEND_CULL); 2618 2619 /* 2620 * WaVSThreadDispatchOverride:ivb,vlv 2621 * 2622 * This actually overrides the dispatch 2623 * mode for all thread types. 2624 */ 2625 wa_write_clr_set(wal, 2626 GEN7_FF_THREAD_MODE, 2627 GEN7_FF_SCHED_MASK, 2628 GEN7_FF_TS_SCHED_HW | 2629 GEN7_FF_VS_SCHED_HW | 2630 GEN7_FF_DS_SCHED_HW); 2631 2632 /* WaPsdDispatchEnable:vlv */ 2633 /* WaDisablePSDDualDispatchEnable:vlv */ 2634 wa_masked_en(wal, 2635 GEN7_HALF_SLICE_CHICKEN1, 2636 GEN7_MAX_PS_THREAD_DEP | 2637 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); 2638 } 2639 2640 if (IS_IVYBRIDGE(i915)) { 2641 /* WaDisableEarlyCull:ivb */ 2642 wa_masked_en(wal, 2643 _3D_CHICKEN3, 2644 _3D_CHICKEN_SF_DISABLE_OBJEND_CULL); 2645 2646 if (0) { /* causes HiZ corruption on ivb:gt1 */ 2647 /* enable HiZ Raw Stall Optimization */ 2648 wa_masked_dis(wal, 2649 CACHE_MODE_0_GEN7, 2650 HIZ_RAW_STALL_OPT_DISABLE); 2651 } 2652 2653 /* 2654 * WaVSThreadDispatchOverride:ivb,vlv 2655 * 2656 * This actually overrides the dispatch 2657 * mode for all thread types. 
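 * (The GEN7_FF_SCHED_MASK update below forces TS, VS and DS
 * dispatch to the HW-scheduled mode.)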
2658 */ 2659 wa_write_clr_set(wal, 2660 GEN7_FF_THREAD_MODE, 2661 GEN7_FF_SCHED_MASK, 2662 GEN7_FF_TS_SCHED_HW | 2663 GEN7_FF_VS_SCHED_HW | 2664 GEN7_FF_DS_SCHED_HW); 2665 2666 /* WaDisablePSDDualDispatchEnable:ivb */ 2667 if (IS_IVB_GT1(i915)) 2668 wa_masked_en(wal, 2669 GEN7_HALF_SLICE_CHICKEN1, 2670 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); 2671 } 2672 2673 if (GRAPHICS_VER(i915) == 7) { 2674 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ 2675 wa_masked_en(wal, 2676 RING_MODE_GEN7(RENDER_RING_BASE), 2677 GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE); 2678 2679 /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */ 2680 wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE); 2681 2682 /* 2683 * BSpec says this must be set, even though 2684 * WaDisable4x2SubspanOptimization:ivb,hsw 2685 * WaDisable4x2SubspanOptimization isn't listed for VLV. 2686 */ 2687 wa_masked_en(wal, 2688 CACHE_MODE_1, 2689 PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); 2690 2691 /* 2692 * BSpec recommends 8x4 when MSAA is used, 2693 * however in practice 16x4 seems fastest. 2694 * 2695 * Note that PS/WM thread counts depend on the WIZ hashing 2696 * disable bit, which we don't touch here, but it's good 2697 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 2698 */ 2699 wa_masked_field_set(wal, 2700 GEN7_GT_MODE, 2701 GEN6_WIZ_HASHING_MASK, 2702 GEN6_WIZ_HASHING_16x4); 2703 } 2704 2705 if (IS_GRAPHICS_VER(i915, 6, 7)) 2706 /* 2707 * We need to disable the AsyncFlip performance optimisations in 2708 * order to use MI_WAIT_FOR_EVENT within the CS. It should 2709 * already be programmed to '1' on all products. 2710 * 2711 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv 2712 */ 2713 wa_masked_en(wal, 2714 RING_MI_MODE(RENDER_RING_BASE), 2715 ASYNC_FLIP_PERF_DISABLE); 2716 2717 if (GRAPHICS_VER(i915) == 6) { 2718 /* 2719 * Required for the hardware to program scanline values for 2720 * waiting 2721 * WaEnableFlushTlbInvalidationMode:snb 2722 */ 2723 wa_masked_en(wal, 2724 GFX_MODE, 2725 GFX_TLB_INVALIDATE_EXPLICIT); 2726 2727 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ 2728 wa_masked_en(wal, 2729 _3D_CHICKEN, 2730 _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB); 2731 2732 wa_masked_en(wal, 2733 _3D_CHICKEN3, 2734 /* WaStripsFansDisableFastClipPerformanceFix:snb */ 2735 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL | 2736 /* 2737 * Bspec says: 2738 * "This bit must be set if 3DSTATE_CLIP clip mode is set 2739 * to normal and 3DSTATE_SF number of SF output attributes 2740 * is more than 16." 2741 */ 2742 _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH); 2743 2744 /* 2745 * BSpec recommends 8x4 when MSAA is used, 2746 * however in practice 16x4 seems fastest. 2747 * 2748 * Note that PS/WM thread counts depend on the WIZ hashing 2749 * disable bit, which we don't touch here, but it's good 2750 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 2751 */ 2752 wa_masked_field_set(wal, 2753 GEN6_GT_MODE, 2754 GEN6_WIZ_HASHING_MASK, 2755 GEN6_WIZ_HASHING_16x4); 2756 2757 /* WaDisable_RenderCache_OperationalFlush:snb */ 2758 wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE); 2759 2760 /* 2761 * From the Sandybridge PRM, volume 1 part 3, page 24: 2762 * "If this bit is set, STCunit will have LRA as replacement 2763 * policy. [...] This bit must be reset. LRA replacement 2764 * policy is not supported." 
2765 */
2766 wa_masked_dis(wal,
2767 CACHE_MODE_0,
2768 CM0_STC_EVICT_DISABLE_LRA_SNB);
2769 }
2770
2771 if (IS_GRAPHICS_VER(i915, 4, 6))
2772 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
2773 wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
2774 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
2775 /* XXX bit doesn't stick on Broadwater */
2776 IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
2777
2778 if (GRAPHICS_VER(i915) == 4)
2779 /*
2780 * Disable CONSTANT_BUFFER before it is loaded from the context
2781 * image. As soon as it is loaded, it is executed and the stored
2782 * address may no longer be valid, leading to a GPU hang.
2783 *
2784 * This imposes the requirement that userspace reload their
2785 * CONSTANT_BUFFER on every batch, fortunately a requirement
2786 * they are already accustomed to from before contexts were
2787 * enabled.
2788 */
2789 wa_add(wal, ECOSKPD(RENDER_RING_BASE),
2790 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
2791 0 /* XXX bit doesn't stick on Broadwater */,
2792 true);
2793 }
2794
2795 static void
2796 xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2797 {
2798 struct drm_i915_private *i915 = engine->i915;
2799
2800 /* WaKBLVECSSemaphoreWaitPoll:kbl */
2801 if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
2802 wa_write(wal,
2803 RING_SEMA_WAIT_POLL(engine->mmio_base),
2804 1);
2805 }
2806 /* Wa_16018031267, Wa_16018063123 */
2807 if (NEEDS_FASTCOLOR_BLT_WABB(engine))
2808 wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
2809 XEHP_BLITTER_SCHEDULING_MODE_MASK,
2810 XEHP_BLITTER_ROUND_ROBIN_MODE);
2811 }
2812
2813 static void
2814 ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2815 {
2816 if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
2817 /* Wa_14014999345:pvc */
2818 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
2819 }
2820 }
2821
2822 /*
2823 * The bspec performance guide has recommended MMIO tuning settings. These
2824 * aren't truly "workarounds" but we want to program them with the same
2825 * workaround infrastructure to ensure that they're automatically added to
2826 * the GuC save/restore lists, re-applied at the right times, and checked for
2827 * any conflicting programming requested by real workarounds.
2828 *
2829 * Programming settings should be added here only if their registers are not
2830 * part of an engine's register state context. If a register is part of a
2831 * context, then any tuning settings should be programmed in an appropriate
2832 * function invoked by __intel_engine_init_ctx_wa().
2833 */
2834 static void
2835 add_render_compute_tuning_settings(struct intel_gt *gt,
2836 struct i915_wa_list *wal)
2837 {
2838 struct drm_i915_private *i915 = gt->i915;
2839
2840 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
2841 wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
2842
2843 /*
2844 * This tuning setting proves beneficial only on ATS-M designs; the
2845 * default "age based" setting is optimal on regular DG2 and other
2846 * platforms.
2847 */
2848 if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
2849 wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
2850 THREAD_EX_ARB_MODE_RR_AFTER_DEP);
2851
2852 if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
2853 wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
2854 }
2855
2856 /*
2857 * The workarounds in this function apply to shared registers in
2858 * the general render reset domain that aren't tied to a
2859 * specific engine. Since all render+compute engines get reset
2860 * together, and the contents of these registers are lost during
2861 * the shared render domain reset, we'll define such workarounds
2862 * here and then add them to just a single RCS or CCS engine's
2863 * workaround list (whichever engine has the I915_ENGINE_FIRST_RENDER_COMPUTE flag).
2864 */
2865 static void
2866 general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2867 {
2868 struct drm_i915_private *i915 = engine->i915;
2869 struct intel_gt *gt = engine->gt;
2870
2871 add_render_compute_tuning_settings(gt, wal);
2872
2873 if (GRAPHICS_VER(i915) >= 11) {
2874 /* This is not a Wa (although referred to as
2875 * WaSetInidrectStateOverride in places); it allows
2876 * applications that reference sampler states through
2877 * the BindlessSamplerStateBaseAddress to have their
2878 * border color relative to DynamicStateBaseAddress
2879 * rather than BindlessSamplerStateBaseAddress.
2880 *
2881 * Otherwise SAMPLER_STATE border colors have to be
2882 * copied in multiple heaps (DynamicStateBaseAddress &
2883 * BindlessSamplerStateBaseAddress)
2884 *
2885 * BSpec: 46052
2886 */
2887 wa_mcr_masked_en(wal,
2888 GEN10_SAMPLER_MODE,
2889 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
2890 }
2891
2892 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
2893 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
2894 IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74)))
2895 /* Wa_14017856879 */
2896 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
2897
2898 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2899 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2900 /*
2901 * Wa_14017066071
2902 * Wa_14017654203
2903 */
2904 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2905 MTL_DISABLE_SAMPLER_SC_OOO);
2906
2907 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2908 /* Wa_22015279794 */
2909 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2910 DISABLE_PREFETCH_INTO_IC);
2911
2912 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2913 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2914 IS_DG2(i915)) {
2915 /* Wa_22013037850 */
2916 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
2917 DISABLE_128B_EVICTION_COMMAND_UDW);
2918
2919 /* Wa_18017747507 */
2920 wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
2921 }
2922
2923 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2924 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2925 IS_PONTEVECCHIO(i915) ||
2926 IS_DG2(i915)) {
2927 /* Wa_22014226127 */
2928 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
2929 }
2930
2931 if (IS_PONTEVECCHIO(i915) || IS_DG2(i915)) {
2932 /* Wa_14015227452:dg2,pvc */
2933 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
2934
2935 /* Wa_16015675438:dg2,pvc */
2936 wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
2937 }
2938
2939 if (IS_DG2(i915)) {
2940 /*
2941 * Wa_16011620976:dg2_g11
2942 * Wa_22015475538:dg2
2943 */
2944 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
2945
2946 /* Wa_18028616096 */
2947 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
2948 }
2949
2950 if (IS_DG2_G11(i915)) {
2951 /*
2952 * Wa_22012826095:dg2
2953 * Wa_22013059131:dg2
2954 */
2955 wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
2956 MAXREQS_PER_BANK,
2957 REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
2958
2959 /* Wa_22013059131:dg2 */
2960 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
2961 FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
2962
2963 /*
2964 * Wa_22012654132
2965 *
2966 * Note that register 0xE420 is write-only and cannot be read
2967 * back for verification on DG2 (due to Wa_14012342262), so
2968 * we need to explicitly skip the readback.
2969 */
2970 wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
2971 _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
2972 0 /* write-only, so skip validation */,
2973 true);
2974 }
2975
2976 if (IS_XEHPSDV(i915)) {
2977 /* Wa_1409954639 */
2978 wa_mcr_masked_en(wal,
2979 GEN8_ROW_CHICKEN,
2980 SYSTOLIC_DOP_CLOCK_GATING_DIS);
2981
2982 /* Wa_1607196519 */
2983 wa_mcr_masked_en(wal,
2984 GEN9_ROW_CHICKEN4,
2985 GEN12_DISABLE_GRF_CLEAR);
2986
2987 /* Wa_14010449647:xehpsdv */
2988 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
2989 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2990 }
2991 }
2992
2993 static void
2994 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2995 {
2996 if (GRAPHICS_VER(engine->i915) < 4)
2997 return;
2998
2999 engine_fake_wa_init(engine, wal);
3000
3001 /*
3002 * These are common workarounds that just need to be applied
3003 * to a single RCS/CCS engine's workaround list since
3004 * they're reset as part of the general render domain reset.
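 * That engine is the one flagged I915_ENGINE_FIRST_RENDER_COMPUTE,
 * as checked just below.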
3005 */ 3006 if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) 3007 general_render_compute_wa_init(engine, wal); 3008 3009 if (engine->class == COMPUTE_CLASS) 3010 ccs_engine_wa_init(engine, wal); 3011 else if (engine->class == RENDER_CLASS) 3012 rcs_engine_wa_init(engine, wal); 3013 else 3014 xcs_engine_wa_init(engine, wal); 3015 } 3016 3017 void intel_engine_init_workarounds(struct intel_engine_cs *engine) 3018 { 3019 struct i915_wa_list *wal = &engine->wa_list; 3020 3021 wa_init_start(wal, engine->gt, "engine", engine->name); 3022 engine_init_workarounds(engine, wal); 3023 wa_init_finish(wal); 3024 } 3025 3026 void intel_engine_apply_workarounds(struct intel_engine_cs *engine) 3027 { 3028 wa_list_apply(&engine->wa_list); 3029 } 3030 3031 static const struct i915_range mcr_ranges_gen8[] = { 3032 { .start = 0x5500, .end = 0x55ff }, 3033 { .start = 0x7000, .end = 0x7fff }, 3034 { .start = 0x9400, .end = 0x97ff }, 3035 { .start = 0xb000, .end = 0xb3ff }, 3036 { .start = 0xe000, .end = 0xe7ff }, 3037 {}, 3038 }; 3039 3040 static const struct i915_range mcr_ranges_gen12[] = { 3041 { .start = 0x8150, .end = 0x815f }, 3042 { .start = 0x9520, .end = 0x955f }, 3043 { .start = 0xb100, .end = 0xb3ff }, 3044 { .start = 0xde80, .end = 0xe8ff }, 3045 { .start = 0x24a00, .end = 0x24a7f }, 3046 {}, 3047 }; 3048 3049 static const struct i915_range mcr_ranges_xehp[] = { 3050 { .start = 0x4000, .end = 0x4aff }, 3051 { .start = 0x5200, .end = 0x52ff }, 3052 { .start = 0x5400, .end = 0x7fff }, 3053 { .start = 0x8140, .end = 0x815f }, 3054 { .start = 0x8c80, .end = 0x8dff }, 3055 { .start = 0x94d0, .end = 0x955f }, 3056 { .start = 0x9680, .end = 0x96ff }, 3057 { .start = 0xb000, .end = 0xb3ff }, 3058 { .start = 0xc800, .end = 0xcfff }, 3059 { .start = 0xd800, .end = 0xd8ff }, 3060 { .start = 0xdc00, .end = 0xffff }, 3061 { .start = 0x17000, .end = 0x17fff }, 3062 { .start = 0x24a00, .end = 0x24a7f }, 3063 {}, 3064 }; 3065 3066 static bool mcr_range(struct drm_i915_private *i915, u32 offset) 3067 { 3068 const struct i915_range *mcr_ranges; 3069 int i; 3070 3071 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) 3072 mcr_ranges = mcr_ranges_xehp; 3073 else if (GRAPHICS_VER(i915) >= 12) 3074 mcr_ranges = mcr_ranges_gen12; 3075 else if (GRAPHICS_VER(i915) >= 8) 3076 mcr_ranges = mcr_ranges_gen8; 3077 else 3078 return false; 3079 3080 /* 3081 * Registers in these ranges are affected by the MCR selector 3082 * which only controls CPU initiated MMIO. Routing does not 3083 * work for CS access so we cannot verify them on this path. 
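 * (wa_list_srm() and engine_wa_list_verify() below therefore skip
 * any offset for which mcr_range() returns true.)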
3084 */ 3085 for (i = 0; mcr_ranges[i].start; i++) 3086 if (offset >= mcr_ranges[i].start && 3087 offset <= mcr_ranges[i].end) 3088 return true; 3089 3090 return false; 3091 } 3092 3093 static int 3094 wa_list_srm(struct i915_request *rq, 3095 const struct i915_wa_list *wal, 3096 struct i915_vma *vma) 3097 { 3098 struct drm_i915_private *i915 = rq->i915; 3099 unsigned int i, count = 0; 3100 const struct i915_wa *wa; 3101 u32 srm, *cs; 3102 3103 srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; 3104 if (GRAPHICS_VER(i915) >= 8) 3105 srm++; 3106 3107 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { 3108 if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg))) 3109 count++; 3110 } 3111 3112 cs = intel_ring_begin(rq, 4 * count); 3113 if (IS_ERR(cs)) 3114 return PTR_ERR(cs); 3115 3116 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { 3117 u32 offset = i915_mmio_reg_offset(wa->reg); 3118 3119 if (mcr_range(i915, offset)) 3120 continue; 3121 3122 *cs++ = srm; 3123 *cs++ = offset; 3124 *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i; 3125 *cs++ = 0; 3126 } 3127 intel_ring_advance(rq, cs); 3128 3129 return 0; 3130 } 3131 3132 static int engine_wa_list_verify(struct intel_context *ce, 3133 const struct i915_wa_list * const wal, 3134 const char *from) 3135 { 3136 const struct i915_wa *wa; 3137 struct i915_request *rq; 3138 struct i915_vma *vma; 3139 struct i915_gem_ww_ctx ww; 3140 unsigned int i; 3141 u32 *results; 3142 int err; 3143 3144 if (!wal->count) 3145 return 0; 3146 3147 vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm, 3148 wal->count * sizeof(u32)); 3149 if (IS_ERR(vma)) 3150 return PTR_ERR(vma); 3151 3152 intel_engine_pm_get(ce->engine); 3153 i915_gem_ww_ctx_init(&ww, false); 3154 retry: 3155 err = i915_gem_object_lock(vma->obj, &ww); 3156 if (err == 0) 3157 err = intel_context_pin_ww(ce, &ww); 3158 if (err) 3159 goto err_pm; 3160 3161 err = i915_vma_pin_ww(vma, &ww, 0, 0, 3162 i915_vma_is_ggtt(vma) ? 
PIN_GLOBAL : PIN_USER); 3163 if (err) 3164 goto err_unpin; 3165 3166 rq = i915_request_create(ce); 3167 if (IS_ERR(rq)) { 3168 err = PTR_ERR(rq); 3169 goto err_vma; 3170 } 3171 3172 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE); 3173 if (err == 0) 3174 err = wa_list_srm(rq, wal, vma); 3175 3176 i915_request_get(rq); 3177 if (err) 3178 i915_request_set_error_once(rq, err); 3179 i915_request_add(rq); 3180 3181 if (err) 3182 goto err_rq; 3183 3184 if (i915_request_wait(rq, 0, HZ / 5) < 0) { 3185 err = -ETIME; 3186 goto err_rq; 3187 } 3188 3189 results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); 3190 if (IS_ERR(results)) { 3191 err = PTR_ERR(results); 3192 goto err_rq; 3193 } 3194 3195 err = 0; 3196 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) { 3197 if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg))) 3198 continue; 3199 3200 if (!wa_verify(wal->gt, wa, results[i], wal->name, from)) 3201 err = -ENXIO; 3202 } 3203 3204 i915_gem_object_unpin_map(vma->obj); 3205 3206 err_rq: 3207 i915_request_put(rq); 3208 err_vma: 3209 i915_vma_unpin(vma); 3210 err_unpin: 3211 intel_context_unpin(ce); 3212 err_pm: 3213 if (err == -EDEADLK) { 3214 err = i915_gem_ww_ctx_backoff(&ww); 3215 if (!err) 3216 goto retry; 3217 } 3218 i915_gem_ww_ctx_fini(&ww); 3219 intel_engine_pm_put(ce->engine); 3220 i915_vma_put(vma); 3221 return err; 3222 } 3223 3224 int intel_engine_verify_workarounds(struct intel_engine_cs *engine, 3225 const char *from) 3226 { 3227 return engine_wa_list_verify(engine->kernel_context, 3228 &engine->wa_list, 3229 from); 3230 } 3231 3232 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) 3233 #include "selftest_workarounds.c" 3234 #endif 3235